From b10acc1ee8de0b7a0048758585a4ce1d41383184 Mon Sep 17 00:00:00 2001 From: xiaosu3109 Date: Sun, 23 Oct 2022 14:36:23 +0800 Subject: [PATCH 1/2] new qla2xxx driver 10.02.08.00.a7-k --- drivers/scsi/qla2xxx/Kconfig | 6 +- drivers/scsi/qla2xxx/LICENSE.qla2xxx | 290 ++ drivers/scsi/qla2xxx/Makefile | 216 +- drivers/scsi/qla2xxx/Module.supported | 2 + drivers/scsi/qla2xxx/qla_attr.c | 749 ++++- drivers/scsi/qla2xxx/qla_bsg.c | 1133 +++++++- drivers/scsi/qla2xxx/qla_bsg.h | 365 ++- drivers/scsi/qla2xxx/qla_compat.h | 566 ++++ drivers/scsi/qla2xxx/qla_dbg.c | 1017 ++++--- drivers/scsi/qla2xxx/qla_dbg.h | 769 +++-- drivers/scsi/qla2xxx/qla_def.h | 1995 +++++++++---- drivers/scsi/qla2xxx/qla_dfs.c | 593 +++- drivers/scsi/qla2xxx/qla_edif.c | 3814 +++++++++++++++++++++++++ drivers/scsi/qla2xxx/qla_edif.h | 180 ++ drivers/scsi/qla2xxx/qla_edif_bsg.h | 258 ++ drivers/scsi/qla2xxx/qla_fw.h | 873 +++--- drivers/scsi/qla2xxx/qla_gbl.h | 237 +- drivers/scsi/qla2xxx/qla_gs.c | 806 ++---- drivers/scsi/qla2xxx/qla_init.c | 2176 ++++++++++---- drivers/scsi/qla2xxx/qla_inline.h | 294 +- drivers/scsi/qla2xxx/qla_iocb.c | 693 +++-- drivers/scsi/qla2xxx/qla_isr.c | 2218 ++++++++++---- drivers/scsi/qla2xxx/qla_mbx.c | 755 ++++- drivers/scsi/qla2xxx/qla_mid.c | 314 +- drivers/scsi/qla2xxx/qla_mr.c | 157 +- drivers/scsi/qla2xxx/qla_mr.h | 35 +- drivers/scsi/qla2xxx/qla_nvme.c | 364 ++- drivers/scsi/qla2xxx/qla_nvme.h | 73 +- drivers/scsi/qla2xxx/qla_nx.c | 250 +- drivers/scsi/qla2xxx/qla_nx.h | 39 +- drivers/scsi/qla2xxx/qla_nx2.c | 35 +- drivers/scsi/qla2xxx/qla_nx2.h | 3 +- drivers/scsi/qla2xxx/qla_os.c | 1857 ++++++------ drivers/scsi/qla2xxx/qla_scm.c | 2178 ++++++++++++++ drivers/scsi/qla2xxx/qla_settings.h | 3 +- drivers/scsi/qla2xxx/qla_sup.c | 446 +-- drivers/scsi/qla2xxx/qla_target.c | 697 +++-- drivers/scsi/qla2xxx/qla_target.h | 266 +- drivers/scsi/qla2xxx/qla_tmpl.c | 81 +- drivers/scsi/qla2xxx/qla_tmpl.h | 9 +- drivers/scsi/qla2xxx/qla_version.h | 10 +- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 1957 ------------- 42 files changed, 21034 insertions(+), 7745 deletions(-) create mode 100644 drivers/scsi/qla2xxx/LICENSE.qla2xxx create mode 100644 drivers/scsi/qla2xxx/Module.supported create mode 100644 drivers/scsi/qla2xxx/qla_compat.h create mode 100644 drivers/scsi/qla2xxx/qla_edif.c create mode 100644 drivers/scsi/qla2xxx/qla_edif.h create mode 100644 drivers/scsi/qla2xxx/qla_edif_bsg.h create mode 100644 drivers/scsi/qla2xxx/qla_scm.c diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 802c373fd6d92..764501838e214 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -6,7 +6,7 @@ config SCSI_QLA_FC depends on NVME_FC || !NVME_FC select FW_LOADER select BTREE - help + ---help--- This qla2xxx driver supports all QLogic Fibre Channel PCI and PCIe host adapters. 
@@ -37,14 +37,14 @@ config TCM_QLA2XXX depends on LIBFC select BTREE default n - help + ---help--- Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs if TCM_QLA2XXX config TCM_QLA2XXX_DEBUG bool "TCM_QLA2XXX fabric module DEBUG mode for QLogic 24xx+ series target mode HBAs" default n - help + ---help--- Say Y here to enable the TCM_QLA2XXX fabric module DEBUG for QLogic 24xx+ series target mode HBAs This will include code to enable the SCSI command jammer endif diff --git a/drivers/scsi/qla2xxx/LICENSE.qla2xxx b/drivers/scsi/qla2xxx/LICENSE.qla2xxx new file mode 100644 index 0000000000000..4da8638089e74 --- /dev/null +++ b/drivers/scsi/qla2xxx/LICENSE.qla2xxx @@ -0,0 +1,290 @@ +Copyright (c) 2003-2016 QLogic Corporation +QLogic Linux FC-FCoE Driver + +This program includes a device driver for Linux 3.x. +You may modify and redistribute the device driver code under the +GNU General Public License (a copy of which is attached hereto as +Exhibit A) published by the Free Software Foundation (version 2). + + + +EXHIBIT A + + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. 
+ + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. 
If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. 
+However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. 
Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. 
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 17d5bc1cc56bf..8421100329337 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,7 +1,218 @@
-# SPDX-License-Identifier: GPL-2.0
+OPTIONS := -mindirect-branch-register
+OBJECT_FILES_NON_STANDARD := y
+
+sles_distro := $(wildcard /etc/SuSE-release)
+rhel_distro := $(wildcard /etc/redhat-release)
+
+# Check to see if we should use thunk-extern for SLES
+ifneq ($(sles_distro),)
+SLES_VERSION = $(shell cat /etc/SuSE-release | grep VERSION | grep -o -P [0-9]+)
+SLES_PATCHLEVEL = $(shell cat /etc/SuSE-release | grep PATCHLEVEL | grep -o -P [0-9]+)
+PADDED_PATCHLEVEL = $(shell if [ 10 -gt $(SLES_PATCHLEVEL) ]; then echo 0$(SLES_PATCHLEVEL); else echo $(SLES_PATCHLEVEL); fi)
+SLES_DISTRO_VER = "0x$(SLES_VERSION)$(PADDED_PATCHLEVEL)"
+endif
+
+SUSE_BRAND = $(shell cat /etc/SUSE-brand 2>/dev/null | grep VERSION | sed 's/VERSION = //')
+SUSE_PATCHLEVEL = $(shell cat /etc/SuSE-release 2>/dev/null | grep PATCHLEVEL | sed 's/PATCHLEVEL = //')
+
+ifeq ($(SUSE_BRAND), 12)
+    ifneq ($(shell test $(SUSE_PATCHLEVEL) -gt 3 && echo thunk_extern),)
+        USE_THUNK_EXTERN = 1
+    endif
+endif
+
+ifeq ($(SUSE_BRAND), 15)
+    USE_THUNK_EXTERN = 1
+endif
+
+ifneq ($(rhel_distro),)
+RHEL_MAJVER := $(shell grep "RHEL_MAJOR" /usr/include/linux/version.h | sed -e 's/.*MAJOR \([0-9]\)/\1/')
+RHEL_MINVER := $(shell grep "RHEL_MINOR" /usr/include/linux/version.h | sed -e 's/.*MINOR \([0-9]\)/\1/')
+RHEL_DISTRO_VER = 0x$(RHEL_MAJVER)$(RHEL_MINVER)
+endif
+
+ifeq ($(RHEL_DISTRO_VER), 0x0610)
+    USE_THUNK_EXTERN = 1
+endif
+
+ifeq ($(RHEL_MAJVER), 7)
+    ifneq ($(shell test $(RHEL_MINVER) -gt 4 && echo thunk_extern),)
+        $(warning OS version is $(RHEL_MAJVER).$(RHEL_MINVER))
+        USE_THUNK_EXTERN = 1
+    endif
+endif
+
+ifneq ($(RHEL_MAJVER),)
+    RHEL_VER_VCHK = $(shell test $(RHEL_MAJVER) -ge 8 && echo 1)
+    ifeq ($(RHEL_VER_VCHK), 1)
+        USE_THUNK_EXTERN = 1
+    endif
+endif
+
+ifeq ($(USE_THUNK_EXTERN),1)
+    OPTIONS += -mindirect-branch=thunk-extern
+else
+    OPTIONS += -mindirect-branch=thunk-inline
+endif
+
+# ae
+ifeq ($(SLES),)
+    SLES := $(shell grep -so "SLES" /etc/os-release)
+endif
+
+CITRIX := $(shell grep -so "xenenterprise" /etc/redhat-release)
+UBUNTU := $(shell lsb_release -is 2> /dev/null | grep Ubuntu)
+
+KVER := $(shell uname -r)
+INC_DIR := /lib/modules/$(KVER)/build/include
+ifneq ($(SLES),)
+    _KVER=$(shell echo $(KVER) | cut -d "-" -f1,2)
+    INC_DIR := /usr/src/linux-$(_KVER)/include
+endif
+
+# Build the TCM_QLA2XXX target module as well (set to n to disable)
+CONFIG_TCM_QLA2XXX := m
+# Check if the base OS is SLES, and latest upstream kernel
+# is cloned and installed
+ifeq ($(wildcard $(INC_DIR)/scsi/scsi.h),)
+    ifeq ($(SLES),)
+        KVER := $(shell uname -r)
+        INC_DIR := /lib/modules/$(KVER)/build/include
+    endif
+    ifeq ($(UBUNTU),)
+        INC_DIR := /lib/modules/$(KVER)/source/include
+    endif
+endif
+
+ifneq ($(debug),)
+$(warning INC_DIR=$(INC_DIR))
+endif
+
+#
+# set-def:-
+#   $(call set-def,<def>,<include-file>,<pattern>)
+#   - returns <def> if <pattern> is found in <include-file>.
+#
+# pattern should have word boundaries (-w option) and should not have
+# embedded space (use \s instead).
+#
+define set-def
+$(shell grep -qsw "$(strip $3)" \
+        $(INC_DIR)/$(strip $2) && echo "$(strip $1)")
+endef
+
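Every name that a successful probe emits is collected in $(DEFINES) and, through the "override EXTRA_CFLAGS" lines further down, becomes a bare -D flag on each compile, so differences between kernel APIs can be handled with plain #ifdef in the driver source. A minimal sketch of the consuming side, assuming the SCSI_CHANGE_QDEPTH probe from the DEFINES list below; the wrapper name is hypothetical and not the literal contents of the new qla_compat.h:

/*
 * Sketch only: compile-time dispatch on a Makefile feature probe.
 * qla_compat_change_queue_depth() is a hypothetical wrapper name.
 */
#include <scsi/scsi_device.h>

static inline int qla_compat_change_queue_depth(struct scsi_device *sdev,
                                                int qdepth)
{
#ifdef SCSI_CHANGE_QDEPTH
	/* This kernel exports scsi_change_queue_depth(); call it directly. */
	return scsi_change_queue_depth(sdev, qdepth);
#else
	/* Probe missed: the helper is absent; report the current depth. */
	return sdev->queue_depth;
#endif
}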
+#
+# set-def-ext:-
+#   $(call set-def-ext,<def>,<include-file>,<pattern>,<cmd>)
+#   - returns <def> if <pattern> is found in the <cmd> output
+#
+# pattern should have word boundaries (-w option) and should not have
+# embedded space (use \s instead).
+#
+# Command invocation is something like:
+# $ <cmd> <include-file> | grep <pattern>
+#
+define set-def-ext
+$(shell $4 $(INC_DIR)/$(strip $2) | grep -qsw "$(strip $3)" \
+        && echo "$(strip $1)")
+endef
+
+DEFINES += $(call set-def,SCSI_CHANGE_QDEPTH,\
+        scsi/scsi_device.h,scsi_change_queue_depth)
+DEFINES += $(call set-def,SCSI_CHANGE_QTYPE,scsi/scsi_host.h,change_queue_type)
+DEFINES += $(call set-def,SCSI_USE_CLUSTERING,scsi/scsi_host.h,use_clustering)
+DEFINES += $(call set-def,SCSI_MAP_QUEUES,scsi/scsi_host.h,map_queues)
+DEFINES += $(call set-def,SCSI_MARGINAL_PATH_SUPPORT,scsi/scsi_host.h,\
+        eh_should_retry_cmd)
+DEFINES += $(call set-def,SCSI_CHANGE_QDEPTH_3ARGS,\
+        scsi/scsi_host.h,change_queue_depth.*int.\sint)
+DEFINES += $(call set-def,SCSI_HOST_WIDE_TAGS,\
+        scsi/scsi_host.h,use_host_wide_tags)
+DEFINES += $(call set-def,SCSI_USE_BLK_MQ,scsi/scsi_host.h,shost_use_blk_mq)
+DEFINES += $(call set-def,SCSI_HAS_TCQ,scsi/scsi_tcq.h,scsi_activate_tcq)
+DEFINES += $(call set-def,SCSI_CMD_TAG_ATTR,\
+        scsi/scsi_tcq.h,scsi_populate_tag_msg)
+DEFINES += $(call set-def,SCSI_FC_BSG_JOB,scsi/scsi_transport_fc.h,fc_bsg_job)
+DEFINES += $(call set-def,REFCOUNT_READ,linux/refcount.h,refcount_read)
+DEFINES += $(call set-def,TIMER_SETUP,linux/timer.h,define\stimer_setup)
+DEFINES += $(call set-def,DMA_ZALLOC_COHERENT,\
+        linux/dma-mapping.h,dma_zalloc_coherent)
+DEFINES += $(call set-def,KTIME_GET_REAL_SECONDS,\
+        linux/timekeeping.h,ktime_get_real_seconds)
+DEFINES += $(call set-def,NVME_POLL_QUEUE,linux/nvme-fc-driver.h,(.poll_queue))
+DEFINES += $(call set-def,DEFINED_FPIN_RCV,scsi/scsi_transport_fc.h,\
+        fc_host_fpin_rcv)
+DEFINES += $(call set-def,SCSI_CMD_PRIV,scsi/scsi_cmnd.h,scsi_cmd_priv)
+DEFINES += $(call set-def,T10_PI_APP_ESC,linux/t10-pi.h,T10_PI_APP_ESCAPE)
+DEFINES += $(call set-def,T10_PI_REF_ESC,linux/t10-pi.h,T10_PI_REF_ESCAPE)
+DEFINES += $(call set-def,T10_PI_TUPLE,linux/t10-pi.h,t10_pi_tuple)
+DEFINES += $(call set-def,FC_EH_TIMED_OUT,scsi/scsi_transport_fc.h,fc_eh_timed_out)
+DEFINES += $(call set-def,SCSI_TRACK_QUE_DEPTH,scsi/scsi_host.h,track_queue_depth)
+DEFINES += $(call set-def,PCI_ERR_RESET_PREPARE,linux/pci.h,reset_prepare)
+DEFINES += $(call set-def,PCI_ERR_RESET_DONE,linux/pci.h,reset_done)
+DEFINES += $(call set-def,TGT_FREE_TAG,target/target_core_base.h,target_free_tag)
+DEFINES += $(call set-def,TGT_SBITMAP_QUE,target/target_core_base.h,sbitmap_queue)
+DEFINES += $(call set-def,TGT_SET_RM_SESSION,target/target_core_fabric.h,\
+        target_remove_session)
+DEFINES += $(call set-def,TGT_FABRIC_OPS_FABRIC_NAME,target/target_core_fabric.h,\
+        fabric_name;)
+DEFINES += $(call set-def,BLK_MQ_HCTX_TYPE,linux/blk-mq.h,hctx_type)
+DEFINES += $(call set-def,SCSI_CHANGE_Q_DEPTH,scsi/scsi_device.h,scsi_change_queue_depth)
+DEFINES += $(call set-def,FPIN_EVENT_TYPES,uapi/scsi/fc/fc_els.h,fc_fpin_deli_event_types)
+DEFINES += $(call set-def,SET_DRIVER_BYTE,scsi/scsi_cmnd.h,set_driver_byte)
+DEFINES += $(call set-def,LIST_IS_FIRST,linux/list.h,list_is_first)
+
+DEFINES += $(call set-def-ext,TGT_MAKE_TPG_PARAM_CFG_GROUP,\
+        target/target_core_fabric.h,config_group,grep -A1 fabric_make_tpg)
+DEFINES += $(call set-def-ext,BLK_PCI_MAPQ_3_ARGS,\
+        linux/blk-mq-pci.h,offset,grep -A1 blk_mq_pci_map_queues)
+DEFINES += $(call set-def-ext,NVME_FC_PORT_TEMPLATE_HV_MODULE,\
+        linux/nvme-fc-driver.h,module,grep -A1 "nvme_fc_port_template {")
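The set-def-ext probes exist for patterns that need context, typically "does this struct have that member?", where the extra <cmd> (a grep -A1 on the struct name) narrows the search before the member pattern is matched. For instance, NVME_FC_PORT_TEMPLATE_HV_MODULE records whether struct nvme_fc_port_template carries a .module field. A sketch of how such a probe is typically consumed, using a trimmed-down template whose field values are placeholders rather than the driver's real qla_nvme.c settings:

/*
 * Sketch only: a struct-member probe gating a designated initializer.
 * The numeric values are placeholders, not the driver's tuning.
 */
#include <linux/module.h>
#include <linux/nvme-fc-driver.h>

static struct nvme_fc_port_template qla_nvme_fc_transport = {
#ifdef NVME_FC_PORT_TEMPLATE_HV_MODULE
	/* Only newer kernels have a module owner in the template. */
	.module           = THIS_MODULE,
#endif
	.max_hw_queues    = 8,		/* placeholder */
	.max_sgl_segments = 128,	/* placeholder */
};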
+#
+# set-def-ifndef:-
+#   $(call set-def-ifndef,<def>,<include-file>,<pattern>)
+#   - returns <def> if <pattern> is "NOT" found in <include-file>.
+#
+define set-def-ifndef
+$(shell grep -qsw "$(strip $3)" $(INC_DIR)/$(strip $2) && : || echo "$(strip $1)")
+endef
+
+DEFINES += $(call set-def-ifndef, BE_ARRAY, linux/byteorder/generic.h, be32_to_cpu_array)
+
+
+ifneq ($(RHEL_DISTRO_VER),)
+DEFINES += RHEL_DISTRO_VERSION=$(RHEL_DISTRO_VER)
+endif
+
+override EXTRA_CFLAGS += $(addprefix -D,$(DEFINES))
+
+# Additional defines via the command line, e.g.: make EXTRA_DEFINES=XYZ
+override EXTRA_CFLAGS += $(addprefix -D,$(EXTRA_DEFINES))
+
+# Driver performance analysis; comment out the flag below to disable it
+override EXTRA_CFLAGS += -DQLA2XXX_LATENCY_MEASURE
+
+ifneq ($(debug),)
+$(warning EXTRA_CFLAGS=($(EXTRA_CFLAGS)))
+endif
+
+ifneq ($(debug),)
+$(warning DEFINES=($(DEFINES)))
+endif
+
+# ae
+ifneq ($(shell echo 'int main(){}' | gcc -x c $(OPTIONS) - 2>/dev/null && echo thunk), )
+$(warning compiling with $(OPTIONS))
+ccflags-y += $(OPTIONS)
+else
+ccflags-y += -DRETPOLINE
+endif
+
 qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
 		qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
-		qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o
+		qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o \
+		qla_edif.o qla_scm.o
 
 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
 obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/Module.supported b/drivers/scsi/qla2xxx/Module.supported
new file mode 100644
index 0000000000000..20f282b99803a
--- /dev/null
+++ b/drivers/scsi/qla2xxx/Module.supported
@@ -0,0 +1,2 @@
+qla2xxx.ko external
+tcm_qla2xxx.ko external
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index d0407f44de78d..5f954e28085a1 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,7 +1,8 @@
-// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic Fibre Channel HBA Driver
  * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
*/ #include "qla_def.h" #include "qla_target.h" @@ -26,7 +27,7 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj, int rval = 0; if (!(ha->fw_dump_reading || ha->mctp_dump_reading || - ha->mpi_fw_dump_reading)) + ha->mpi_fw_dump_reading)) return 0; mutex_lock(&ha->optrom_mutex); @@ -44,8 +45,7 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj, MCTP_DUMP_SIZE); } else if (ha->mpi_fw_dumped && ha->mpi_fw_dump_reading) { rval = memory_read_from_buffer(buf, count, &off, - ha->mpi_fw_dump, - ha->mpi_fw_dump_len); + ha->mpi_fw_dump, ha->mpi_fw_dump_len); } else if (ha->fw_dump_reading) { rval = memory_read_from_buffer(buf, count, &off, ha->fw_dump, ha->fw_dump_len); @@ -83,7 +83,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, qla82xx_md_prep(vha); } ha->fw_dump_reading = 0; - ha->fw_dumped = false; + ha->fw_dumped = 0; break; case 1: if (ha->fw_dumped && !ha->fw_dump_reading) { @@ -144,7 +144,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, if (!ha->mpi_fw_dump_reading) break; ql_log(ql_log_info, vha, 0x70e7, - "MPI firmware dump cleared on (%ld).\n", vha->host_no); + "MPI firmware dump cleared on (%ld).\n", vha->host_no); ha->mpi_fw_dump_reading = 0; ha->mpi_fw_dumped = 0; break; @@ -152,18 +152,28 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, if (ha->mpi_fw_dumped && !ha->mpi_fw_dump_reading) { ha->mpi_fw_dump_reading = 1; ql_log(ql_log_info, vha, 0x70e8, - "Raw MPI firmware dump ready for read on (%ld).\n", - vha->host_no); + "Raw MPI firmware dump ready for read on (%ld).\n", + vha->host_no); } break; case 10: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { ql_log(ql_log_info, vha, 0x70e9, - "Issuing MPI firmware dump on host#%ld.\n", - vha->host_no); + "Issuing MPI firmware dump on host#%ld.\n", + vha->host_no); ha->isp_ops->mpi_fw_dump(vha, 0); } break; + case 11: + vha->hw->isp_ops->fw_dump(vha, 0); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + break; + + case 12: + ql_log(ql_log_info, vha, 0x70e8, + "Simulate fw being wedged by stopping FW\n"); + qla2x00_stop_firmware(vha); + break; } return count; } @@ -234,9 +244,10 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, /* Checksum NVRAM. 
*/ if (IS_FWI2_CAPABLE(ha)) { - __le32 *iter = (__force __le32 *)buf; + uint32_t *iter; uint32_t chksum; + iter = (uint32_t *)buf; chksum = 0; for (cnt = 0; cnt < ((count >> 2) - 1); cnt++, iter++) chksum += le32_to_cpu(*iter); @@ -710,6 +721,12 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x706e, "Issuing ISP reset.\n"); + if (vha->hw->flags.port_isolated) { + ql_log(ql_log_info, vha, 0x706e, + "Port is isolated, returning.\n"); + return -EINVAL; + } + scsi_block_requests(vha->host); if (IS_QLA82XX(ha)) { ha->flags.isp82xx_no_md_cap = 1; @@ -733,7 +750,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, break; case 0x2025d: if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && - !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return -EPERM; ql_log(ql_log_info, vha, 0x706f, @@ -1350,7 +1367,7 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr, static ssize_t qla2x00_beacon_config_show(struct device *dev, struct device_attribute *attr, - char *buf) + char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; @@ -1368,7 +1385,7 @@ qla2x00_beacon_config_show(struct device *dev, struct device_attribute *attr, static ssize_t qla2x00_beacon_config_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) + const char *buf, size_t count) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); struct qla_hw_data *ha = vha->hw; @@ -1639,7 +1656,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); int rval = QLA_FUNCTION_FAILED; - uint16_t state[6]; + uint16_t state[16]; uint32_t pstate; if (IS_QLAFX00(vha->hw)) { @@ -1647,6 +1664,8 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate); } + memset(state, -1, sizeof(state)); + mutex_lock(&vha->hw->optrom_mutex); if (qla2x00_chip_is_down(vha)) { mutex_unlock(&vha->hw->optrom_mutex); @@ -1661,15 +1680,67 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr, rval = qla2x00_get_firmware_state(vha, state); mutex_unlock(&vha->hw->optrom_mutex); out: - if (rval != QLA_SUCCESS) { - memset(state, -1, sizeof(state)); - rval = qla2x00_get_firmware_state(vha, state); - } return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0], state[1], state[2], state[3], state[4], state[5]); } +static ssize_t +qla2x00_mpi_fw_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int rval = QLA_FUNCTION_FAILED; + uint16_t state[16]; + uint16_t mpi_state; + struct qla_hw_data *ha = vha->hw; + + if (!(IS_QLA27XX(ha) || IS_QLA28XX(ha))) + return scnprintf(buf, PAGE_SIZE, + "MPI state reporting is not supported for this HBA.\n"); + + memset(state, 0, sizeof(state)); + + mutex_lock(&vha->hw->optrom_mutex); + if (qla2x00_chip_is_down(vha)) { + mutex_unlock(&vha->hw->optrom_mutex); + ql_dbg(ql_dbg_user, vha, 0x70df, + "ISP reset is in progress, failing mpi_fw_state.\n"); + return -EBUSY; + } else if (vha->hw->flags.eeh_busy) { + mutex_unlock(&vha->hw->optrom_mutex); + ql_dbg(ql_dbg_user, vha, 0x70ea, + "HBA in PCI error state, failing mpi_fw_state.\n"); + return -EBUSY; + } + + rval = qla2x00_get_firmware_state(vha, state); + mutex_unlock(&vha->hw->optrom_mutex); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_user, vha, 0x70eb, 
+ "MB Command to retrieve MPI state failed (%d), failing mpi_fw_state.\n", + rval); + return -EIO; + } + + mpi_state = state[11]; + + if (!(mpi_state & BIT_15)) + return scnprintf(buf, PAGE_SIZE, + "MPI firmware state reporting is not supported by this firmware. (0x%02x)\n", + mpi_state); + + if (!(mpi_state & BIT_8)) + return scnprintf(buf, PAGE_SIZE, + "MPI firmware is disabled. (0x%02x)\n", + mpi_state); + + return scnprintf(buf, PAGE_SIZE, + "MPI firmware is enabled, state is %s. (0x%02x)\n", + mpi_state & BIT_9 ? "active" : "inactive", + mpi_state); +} + static ssize_t qla2x00_diag_requests_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1862,7 +1933,7 @@ qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr, static const struct { u16 rate; char *str; -} port_speed_str[] = { +} port_speed_str [] = { { PORT_SPEED_4GB, "4" }, { PORT_SPEED_8GB, "8" }, { PORT_SPEED_16GB, "16" }, @@ -1898,6 +1969,40 @@ qla2x00_port_speed_show(struct device *dev, struct device_attribute *attr, return scnprintf(buf, PAGE_SIZE, "%s\n", speed); } +static ssize_t +qla2x00_mpi_pause_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + int rval = 0; + + if (sscanf(buf, "%d", &rval) != 1) + return -EINVAL; + + if (vha->hw->flags.port_isolated) { + ql_log(ql_log_info, vha, 0x70ff, + "Port is isolated, returning.\n"); + return -EINVAL; + } + + ql_log(ql_log_warn, vha, 0x7089, "Pausing MPI...\n"); + + if (IS_QLA83XX(vha->hw)) + /* pegtune halt */ + rval = qla83xx_wr_reg(vha, 0x1110003c, 1); + else + rval = qla83xx_wr_reg(vha, 0x002012d4, 0x30000001); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x708a, "Unable to pause MPI.\n"); + count = 0; + } + + return count; +} + +static DEVICE_ATTR(mpi_pause, S_IWUSR, NULL, qla2x00_mpi_pause_store); + /* ----- */ static ssize_t @@ -1943,8 +2048,9 @@ static char *mode_to_str[] = { }; #define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT) -static void qla_set_ini_mode(scsi_qla_host_t *vha, int op) +static int qla_set_ini_mode(scsi_qla_host_t *vha, int op) { + int rc = 0; enum { NO_ACTION, MODE_CHANGE_ACCEPT, @@ -2217,6 +2323,8 @@ static void qla_set_ini_mode(scsi_qla_host_t *vha, int op) vha->ql2xexchoffld, vha->u_ql2xexchoffld); break; } + + return rc; } static ssize_t @@ -2363,13 +2471,130 @@ qla2x00_port_no_show(struct device *dev, struct device_attribute *attr, } static ssize_t -qla2x00_dport_diagnostics_show(struct device *dev, - struct device_attribute *attr, char *buf) +qla27xx_nvme_connect_str_show(struct device *dev, struct device_attribute *attr, + char *buf) { scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct nvme_fc_remote_port *rport; + struct nvme_fc_local_port *lport; + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport; + char temp[150] = {0}; + char *rportstate = ""; - if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && - !IS_QLA28XX(vha->hw)) + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + if (!vha->flags.nvme_enabled) + return scnprintf(buf, PAGE_SIZE, "%s\n", + "FC-NVMe is not enabled"); + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (!fcport) { + scnprintf(buf, PAGE_SIZE, "No FC host\n"); + return strlen(buf); + } + + if (!vha->nvme_local_port) { + scnprintf(buf, PAGE_SIZE, + "FC-NVMe Initiator on 0x%16llx not registered.\n", + wwn_to_u64(fcport->port_name)); + return strlen(buf); + } + + if (!(fcport->nvme_flag & 
NVME_FLAG_REGISTERED)) + continue; + + rport = fcport->nvme_remote_port; + + lport = vha->nvme_local_port; + + scnprintf(temp, sizeof(temp), + "FC-NVMe LPORT: host%ld nn-0x%16llx:pn-0x%16llx port_id %06x %s\n", + vha->host_no, lport->node_name, + lport->port_name, lport->port_id, "ONLINE"); + + if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE) + goto done; + + scnprintf(temp, sizeof(temp), + "FC-NVMe RPORT: host%ld nn-0x%llx:pn-0x%llx port_id %06x ", + vha->host_no, rport->node_name, + rport->port_name, rport->port_id); + + /* Find out Rport State */ + if (rport->port_state & FC_OBJSTATE_ONLINE) + rportstate = "ONLINE"; + + if (rport->port_state & FC_OBJSTATE_UNKNOWN) + rportstate = "UNKNOWN"; + + if (rport->port_state & ~(FC_OBJSTATE_ONLINE | + FC_OBJSTATE_UNKNOWN)) + rportstate = "UNSUPPORTED"; + + if (strlcat(buf, temp, PAGE_SIZE) >= + PAGE_SIZE) + goto done; + + if (rport->port_role & + (FC_PORT_ROLE_NVME_INITIATOR | + FC_PORT_ROLE_NVME_TARGET | + FC_PORT_ROLE_NVME_DISCOVERY)) { + if (rport->port_role & + FC_PORT_ROLE_NVME_INITIATOR) + if (strlcat(buf, "INITIATOR ", + PAGE_SIZE) >= PAGE_SIZE) + goto done; + + if (rport->port_role & + FC_PORT_ROLE_NVME_TARGET) + if (strlcat(buf, "TARGET ", + PAGE_SIZE) >= PAGE_SIZE) + goto done; + + if (rport->port_role & + FC_PORT_ROLE_NVME_DISCOVERY) + if (strlcat(buf, "DISCOVERY ", + PAGE_SIZE) >= PAGE_SIZE) + goto done; + } else { + if (strlcat(buf, "UNKNOWN_ROLE ", + PAGE_SIZE) >= PAGE_SIZE) + goto done; + } + scnprintf(temp, sizeof(temp), "%s\n", rportstate); + + if (strlcat (buf, temp, PAGE_SIZE) >= PAGE_SIZE) + goto done; + + scnprintf(temp, sizeof(temp), + "NVMECLI: host-traddr=nn-0x%16llx:pn-0x%16llx traddr=nn-0x%16llx:pn-0x%16llx\n", + lport->node_name, lport->port_name, + rport->node_name, rport->port_name); + + if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE) + goto done; + } + + return strlen(buf); + +done: + ql_log(ql_log_warn, vha, 0xffff, + "NVME connect string buffer size 0x%lx exceeds 0x%lx\n", + sizeof(*buf), PAGE_SIZE); + return strlen(buf); +} +static DEVICE_ATTR(nvme_connect_str, S_IRUGO, + qla27xx_nvme_connect_str_show, NULL); + +static ssize_t +qla2x00_dport_diagnostics_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw) && !IS_QLA28XX(vha->hw)) return scnprintf(buf, PAGE_SIZE, "\n"); if (!*vha->dport_data) @@ -2379,9 +2604,344 @@ qla2x00_dport_diagnostics_show(struct device *dev, vha->dport_data[0], vha->dport_data[1], vha->dport_data[2], vha->dport_data[3]); } -static DEVICE_ATTR(dport_diagnostics, 0444, +static DEVICE_ATTR(dport_diagnostics, S_IRUGO, qla2x00_dport_diagnostics_show, NULL); +static ssize_t +qla2xxx_uscm_stat_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport; + char temp[250] = {0}; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return scnprintf(buf, PAGE_SIZE, "\n"); + + scnprintf(temp, sizeof(temp), + "Host: Congested:%d\n" + "Cleared Congn = %llu\n" + "Throttled Down = %llu\n" + "Throttled Up = %llu\n" + "Returned Busy = %llu\n" + "Throttle (RD) = %llu\n" + "Throttle (WR) = %llu\n" + "Bottomed out = %llu\n" + "I/O throttling = %d\n", + qla_scmr_is_congested(&ha->sfc), + (u64)ha->scm.rstats.throttle_cleared, + (u64)ha->scm.rstats.throttle_down_count, + (u64)ha->scm.rstats.throttle_up_count, + (u64)ha->scm.rstats.busy_status_count, + 
(u64)atomic_read(&ha->throttle_read), + (u64)atomic_read(&ha->throttle_write), + (u64)ha->scm.rstats.throttle_hit_low_wm, + qla_get_throttling_state(&ha->sfc) + ); + if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE) + goto done; + + scnprintf(temp, sizeof(temp), + "------------------------------\n" + "Link Integrity Notification = %llu\n", + ha->scm.stats.li_failure_unknown + + ha->scm.stats.li_link_failure_count + + ha->scm.stats.li_loss_of_sync_count + + ha->scm.stats.li_loss_of_signals_count + + ha->scm.stats.li_prim_seq_err_count + + ha->scm.stats.li_invalid_tx_word_count + + ha->scm.stats.li_invalid_crc_count + + ha->scm.stats.li_device_specific + ); + + if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE) + goto done; + + scnprintf(temp, sizeof(temp), + "Delivery Notification = %llu\n", + ha->scm.stats.dn_unknown + + ha->scm.stats.dn_timeout + + ha->scm.stats.dn_unable_to_route + + ha->scm.stats.dn_device_specific + ); + if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE) + goto done; + + scnprintf(temp, sizeof(temp), + "Congestion Notification= %llu\n", + ha->scm.stats.cn_clear + + ha->scm.stats.cn_lost_credit + + ha->scm.stats.cn_credit_stall + + ha->scm.stats.cn_oversubscription + + ha->scm.stats.cn_device_specific + ); + + if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE) + goto done; + + scnprintf(temp, sizeof(temp), + "------------------------------\n" + "Congestion Alarm (sig) = %llu\n" + "Congestion Warning (sig) = %llu\n" + "Congestion Alarm (fpin) = %llu\n" + "Congestion Warning (fpin) = %llu\n", + ha->sig_sev.cn_alarm_sig, + ha->sig_sev.cn_warning_sig, + (ha->scm.sev.cn_alarm - ha->sig_sev.cn_alarm_sig), + (ha->scm.sev.cn_warning - ha->sig_sev.cn_warning_sig) + ); + + if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE) + goto done; + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + + if (!(fcport->port_type & FCT_TARGET) && + !(fcport->port_type & FCT_NVME_TARGET)) + continue; + + + scnprintf(temp, sizeof(temp), + "=============================\n" + "TGT WWPN-0x%16llx: Congested:%d\n" + "Cleared Congn = %llu\n" + "Throttled Down = %llu\n" + "Throttled Up = %llu\n" + "Returned Busy = %llu\n" + "Bottomed out = %llu\n" + "I/O throttling = %d\n", + wwn_to_u64(fcport->port_name), + qla_scmr_is_congested(&fcport->sfc), + (u64)fcport->scm.rstats.throttle_cleared, + (u64)fcport->scm.rstats.throttle_down_count, + (u64)fcport->scm.rstats.throttle_up_count, + (u64)fcport->scm.rstats.busy_status_count, + (u64)fcport->scm.rstats.throttle_hit_low_wm, + qla_get_throttling_state(&fcport->sfc) + ); + + if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE) + goto done; + + if (ha->scm.display_mode == QLA_DISP_MODE_COMPACT) { + scnprintf(temp, sizeof(temp), + "------------------------------\n" + "Link Integrity = %llu\n", + fcport->scm.stats.li_failure_unknown + + fcport->scm.stats.li_link_failure_count + + fcport->scm.stats.li_loss_of_sync_count + + fcport->scm.stats.li_loss_of_signals_count + + fcport->scm.stats.li_prim_seq_err_count + + fcport->scm.stats.li_invalid_tx_word_count + + fcport->scm.stats.li_invalid_crc_count + + fcport->scm.stats.li_device_specific + ); + } else if (ha->scm.display_mode == QLA_DISP_MODE_DETAILED) { + scnprintf(temp, sizeof(temp), + "------------------------------\n" + "LI: unknown = %llu\n" + "LI: failure = %llu\n" + "LI: loss of sync = %llu\n" + "LI: loss of signal = %llu\n" + "LI: prim seq prot err = %llu\n" + "LI: invalid tx words = %llu\n" + "LI: invalid CRC = %llu\n" + "LI: dev specific = %llu\n", + fcport->scm.stats.li_failure_unknown, + 
fcport->scm.stats.li_link_failure_count, + fcport->scm.stats.li_loss_of_sync_count, + fcport->scm.stats.li_loss_of_signals_count, + fcport->scm.stats.li_prim_seq_err_count, + fcport->scm.stats.li_invalid_tx_word_count, + fcport->scm.stats.li_invalid_crc_count, + fcport->scm.stats.li_device_specific + ); + } + + if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE) + goto done; + + if (ha->scm.display_mode == QLA_DISP_MODE_COMPACT) { + scnprintf(temp, sizeof(temp), + "Delivery Notification = %llu\n", + fcport->scm.stats.dn_unknown + + fcport->scm.stats.dn_timeout + + fcport->scm.stats.dn_unable_to_route + + fcport->scm.stats.dn_device_specific + ); + } else if (ha->scm.display_mode == QLA_DISP_MODE_DETAILED) { + scnprintf(temp, sizeof(temp), + "D: unknown = %llu\n" + "D: tmo = %llu\n" + "D: unable to route = %llu\n" + "D: dev specific = %llu\n", + fcport->scm.stats.dn_unknown, + fcport->scm.stats.dn_timeout, + fcport->scm.stats.dn_unable_to_route, + fcport->scm.stats.dn_device_specific + ); + } + + if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE) + goto done; + + if (ha->scm.display_mode == QLA_DISP_MODE_COMPACT) { + scnprintf(temp, sizeof(temp), + "Peer Congestion = %llu\n", + fcport->scm.stats.cn_clear + + fcport->scm.stats.cn_lost_credit + + fcport->scm.stats.cn_credit_stall + + fcport->scm.stats.cn_oversubscription + + fcport->scm.stats.cn_device_specific + ); + } else if (ha->scm.display_mode == QLA_DISP_MODE_DETAILED) { + scnprintf(temp, sizeof(temp), + "PC: clear = %llu\n" + "PC: lost credit = %llu\n" + "PC: credit stall = %llu\n" + "PC: oversubscription = %llu\n" + "PC: dev specific = %llu\n" + "PC: PUN = %llu\n", + fcport->scm.stats.cn_clear, + fcport->scm.stats.cn_lost_credit, + fcport->scm.stats.cn_credit_stall, + fcport->scm.stats.cn_oversubscription, + fcport->scm.stats.cn_device_specific, + fcport->scm.stats.pun_count + ); + } + if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE) + goto done; + } + + return strlen(buf); + +done: + ql_log(ql_log_warn, vha, 0xffff, + "scmr string buffer size 0x%lx exceeds 0x%lx\n", + sizeof(*buf), PAGE_SIZE); + return strlen(buf); +} + +DECLARE_ENUM2STR_LOOKUP(qla_get_profile_type, ql_scm_profile_type, + QL_SCM_PROFILE_TYPES_INIT); +static ssize_t +qla2xxx_uscm_profile_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + const char *profile = qla_get_profile_type(vha->hw->sfc.profile.scmr_profile); + + return scnprintf(buf, PAGE_SIZE, " %s\n",profile); +} + +static ssize_t +qla2xxx_uscm_profile_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + fc_port_t *fcport; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + if (val < 0 || val > 3) { + ql_log(ql_log_warn, vha, 0x0302, + "Invalid profile input %d.\n", val); + return -EINVAL; + } + + ql_log(ql_log_info, vha, 0x0303, + "Setting profile to : %s\n", qla_get_profile_type(val)); + vha->hw->sfc.profile.scmr_profile = val; + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (!(fcport->port_type & FCT_TARGET) && + !(fcport->port_type & FCT_NVME_TARGET)) + continue; + fcport->sfc.profile.scmr_profile = val; + } + + if (vha->hw->sfc.profile.scmr_profile == 0) {/* Monitor profile */ + qla2xxx_scmr_clear_throttle(&vha->hw->sfc); + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (!(fcport->port_type & FCT_TARGET) && + !(fcport->port_type & FCT_NVME_TARGET)) + continue; + 
qla2xxx_scmr_clear_throttle(&fcport->sfc); + if (vha->hw->flags.conn_fabric_cisco_er_rdy) {// VL + if (!qla_scmr_is_congested(&fcport->sfc)) + qla2xxx_switch_vl(&fcport->sfc, VL_NORMAL); + } + } + } + + return strlen(buf); +} + +static ssize_t +qla2xxx_uscm_stat_set(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int val; + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + if (val == 0) { + vha->hw->scm.display_mode = QLA_DISP_MODE_COMPACT; + ql_log(ql_log_warn, vha, 0x0301, + "SCMR: Setting display mode to compact\n"); + } + + if (val == 1) { + vha->hw->scm.display_mode = QLA_DISP_MODE_DETAILED; + ql_log(ql_log_warn, vha, 0x0301, + "SCMR: Setting display mode to detailed\n"); + } + + return strlen(buf); +} + +static char *uscm_vl[3] = {"Fast", "Slow", "Normal"}; + +static ssize_t +qla2xxx_uscm_vl_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport; + char temp[200]; + + if (!ha->flags.conn_fabric_cisco_er_rdy) + return scnprintf(buf, PAGE_SIZE, "Not Supported \n"); + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + + if (!(fcport->port_type & FCT_TARGET) && + !(fcport->port_type & FCT_NVME_TARGET)) + continue; + + scnprintf(temp, sizeof(temp), + "TGT WWPN-0x%16llx: VL=%s \n", + wwn_to_u64(fcport->port_name), + uscm_vl[fcport->vl.v_lane]); + + if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE) + goto done; + } + return strlen(buf); +done: + ql_log(ql_log_warn, vha, 0xffff, + "uscm string buffer size 0x%lx exceeds 0x%lx\n", + sizeof(*buf), PAGE_SIZE); + + return strlen(buf); +} + static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_driver_version_show, NULL); static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL); static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL); @@ -2396,7 +2956,7 @@ static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show, qla2x00_zio_timer_store); static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show, qla2x00_beacon_store); -static DEVICE_ATTR(beacon_config, 0644, qla2x00_beacon_config_show, +static DEVICE_ATTR(beacon_config, S_IRUGO | S_IWUSR, qla2x00_beacon_config_show, qla2x00_beacon_config_store); static DEVICE_ATTR(optrom_bios_version, S_IRUGO, qla2x00_optrom_bios_version_show, NULL); @@ -2446,7 +3006,13 @@ static DEVICE_ATTR(port_speed, 0644, qla2x00_port_speed_show, qla2x00_port_speed_store); static DEVICE_ATTR(port_no, 0444, qla2x00_port_no_show, NULL); static DEVICE_ATTR(fw_attr, 0444, qla2x00_fw_attr_show, NULL); - +static DEVICE_ATTR(uscm_stat, 0600, qla2xxx_uscm_stat_show, + qla2xxx_uscm_stat_set); +static DEVICE_ATTR(uscm_profile, S_IWUSR | S_IRUGO, qla2xxx_uscm_profile_show, + qla2xxx_uscm_profile_set); +static DEVICE_ATTR(uscm_vl, S_IRUGO, qla2xxx_uscm_vl_show, + NULL); +static DEVICE_ATTR(mpi_fw_state, S_IRUGO, qla2x00_mpi_fw_state_show, NULL); struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_driver_version, @@ -2490,7 +3056,13 @@ struct device_attribute *qla2x00_host_attrs[] = { &dev_attr_port_speed, &dev_attr_port_no, &dev_attr_fw_attr, + &dev_attr_nvme_connect_str, &dev_attr_dport_diagnostics, + &dev_attr_mpi_pause, + &dev_attr_uscm_stat, + &dev_attr_uscm_profile, + &dev_attr_uscm_vl, + &dev_attr_mpi_fw_state, NULL, /* reserve for qlini_mode */ NULL, /* reserve for ql2xiniexchg */ NULL, /* reserve for ql2xexchoffld */ @@ -2659,7 
+3231,13 @@ qla2x00_get_starget_port_id(struct scsi_target *starget) static inline void qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) { + fc_port_t *fcport = *(fc_port_t **)rport->dd_data; + rport->dev_loss_tmo = timeout ? timeout : 1; + + if (IS_ENABLED(CONFIG_NVME_FC) && fcport && fcport->nvme_remote_port) + nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, + rport->dev_loss_tmo); } static void @@ -2672,17 +3250,27 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) if (!fcport) return; - /* Now that the rport has been deleted, set the fcport state to - FCS_DEVICE_DEAD */ - qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD); + ql_dbg(ql_dbg_async, fcport->vha, 0x5101, + DBG_FCPORT_PRFMT(fcport, "dev_loss_tmo expiry, rport_state=%d", + rport->port_state)); + + /* + * Now that the rport has been deleted, set the fcport state to + * FCS_DEVICE_DEAD, if the fcport is still lost. + */ + if (fcport->scan_state != QLA_FCPORT_FOUND) + qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD); /* * Transport has effectively 'deleted' the rport, clear * all local references. */ spin_lock_irqsave(host->host_lock, flags); - fcport->rport = fcport->drport = NULL; - *((fc_port_t **)rport->dd_data) = NULL; + /* Confirm port has not reappeared before clearing pointers. */ + if (rport->port_state != FC_PORTSTATE_ONLINE) { + fcport->rport = NULL; + *((fc_port_t **)rport->dd_data) = NULL; + } spin_unlock_irqrestore(host->host_lock, flags); if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) @@ -2715,13 +3303,22 @@ qla2x00_terminate_rport_io(struct fc_rport *rport) /* * At this point all fcport's software-states are cleared. Perform any * final cleanup of firmware resources (PCBs and XCBs). + * + * Attempt to cleanup only lost devices. */ if (fcport->loop_id != FC_NO_LOOP_ID) { - if (IS_FWI2_CAPABLE(fcport->vha->hw)) - fcport->vha->hw->isp_ops->fabric_logout(fcport->vha, - fcport->loop_id, fcport->d_id.b.domain, - fcport->d_id.b.area, fcport->d_id.b.al_pa); - else + if (IS_FWI2_CAPABLE(fcport->vha->hw) && + fcport->scan_state != QLA_FCPORT_FOUND) { + if (fcport->loop_id != FC_NO_LOOP_ID) + fcport->logout_on_delete = 1; + + if (!EDIF_NEGOTIATION_PENDING(fcport)) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x911e, + "%s %d sched delete\n", __func__, __LINE__); + + qlt_schedule_sess_for_deletion(fcport); + } + } else if (!IS_FWI2_CAPABLE(fcport->vha->hw)) qla2x00_port_logout(fcport->vha, fcport); } } @@ -2734,6 +3331,9 @@ qla2x00_issue_lip(struct Scsi_Host *shost) if (IS_QLAFX00(vha->hw)) return 0; + if (vha->hw->flags.port_isolated) + return 0; + qla2x00_loop_reset(vha); return 0; } @@ -2750,7 +3350,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) struct fc_host_statistics *p = &vha->fc_host_stat; struct qla_qpair *qpair; int i; - u64 ib = 0, ob = 0, ir = 0, or = 0; + u64 ib, ob, ir, or; memset(p, -1, sizeof(*p)); @@ -2788,6 +3388,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) goto done_free; /* --- */ + ib = ob = ir = or = 0; for (i = 0; i < vha->hw->max_qpairs; i++) { qpair = vha->hw->queue_pair_map[i]; if (!qpair) @@ -2831,7 +3432,6 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost) p->tx_words = ob >> 2; } } - p->fcp_control_requests = vha->qla_stats.control_requests; p->fcp_input_requests = ir; p->fcp_output_requests = or; @@ -2872,8 +3472,6 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost) vha->qla_stats.jiffies_at_last_reset = get_jiffies_64(); if (IS_FWI2_CAPABLE(ha)) { - int rval; - stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma, 
GFP_KERNEL); if (!stats) { @@ -2883,11 +3481,7 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost) } /* reset firmware statistics */ - rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0); - if (rval != QLA_SUCCESS) - ql_log(ql_log_warn, vha, 0x70de, - "Resetting ISP statistics failed: rval = %d\n", - rval); + qla24xx_get_isp_stats(base_vha, stats, stats_dma, BIT_0); dma_free_coherent(&ha->pdev->dev, sizeof(*stats), stats, stats_dma); @@ -2918,7 +3512,7 @@ qla2x00_get_host_fabric_name(struct Scsi_Host *shost) static const uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; - u64 fabric_name = wwn_to_u64(node_name); + u64 fabric_name = wwn_to_u64((u8 *)node_name); if (vha->device_flags & SWITCH_FOUND) fabric_name = wwn_to_u64(vha->fabric_node_name); @@ -3027,8 +3621,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable) guard = SHOST_DIX_GUARD_CRC; - if (IS_PI_IPGUARD_CAPABLE(ha) && - (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) + if (IS_PI_IPGUARD_CAPABLE(ha) && ql2xenabledif) guard |= SHOST_DIX_GUARD_IP; scsi_host_set_guard(vha->host, guard); @@ -3106,15 +3699,16 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) set_bit(VPORT_DELETE, &vha->dpc_flags); - while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) || - test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) + while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags)) msleep(1000); - qla24xx_disable_vp(vha); - qla2x00_wait_for_sess_deletion(vha); qla_nvme_delete(vha); + + qla_enode_stop(vha); + qla_edb_stop(vha); + vha->flags.delete_progress = 1; qlt_remove_target(ha, vha); @@ -3276,7 +3870,52 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha) fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports; fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; - speeds = qla25xx_fdmi_port_speed_capability(ha); + if (IS_CNA_CAPABLE(ha)) + speeds = FC_PORTSPEED_10GBIT; + else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { + if (ha->max_supported_speed == 2) { + if (ha->min_supported_speed <= 6) + speeds |= FC_PORTSPEED_64GBIT; + } + if (ha->max_supported_speed == 2 || + ha->max_supported_speed == 1) { + if (ha->min_supported_speed <= 5) + speeds |= FC_PORTSPEED_32GBIT; + } + if (ha->max_supported_speed == 2 || + ha->max_supported_speed == 1 || + ha->max_supported_speed == 0) { + if (ha->min_supported_speed <= 4) + speeds |= FC_PORTSPEED_16GBIT; + } + if (ha->max_supported_speed == 1 || + ha->max_supported_speed == 0) { + if (ha->min_supported_speed <= 3) + speeds |= FC_PORTSPEED_8GBIT; + } + if (ha->max_supported_speed == 0) { + if (ha->min_supported_speed <= 2) + speeds |= FC_PORTSPEED_4GBIT; + } + } else if (IS_QLA2031(ha)) + if ((ha->pdev->subsystem_vendor == 0x103C) && + ((ha->pdev->subsystem_device == 0x8002) || + (ha->pdev->subsystem_device == 0x8086))) { + speeds = FDMI_PORT_SPEED_16GB; + } else { + speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB| + FDMI_PORT_SPEED_4GB; + } + else if (IS_QLA25XX(ha) || IS_QLAFX00(ha)) + speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT| + FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT; + else if (IS_QLA24XX_TYPE(ha)) + speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT| + FC_PORTSPEED_1GBIT; + else if (IS_QLA23XX(ha)) + speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT; + else + speeds = FC_PORTSPEED_1GBIT; fc_host_supported_speeds(vha->host) = speeds; } diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 1fd292a6ac881..486c46872606b 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -1,9 +1,11 @@ -// 
SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" +#include "qla_gbl.h" #include <linux/kthread.h> #include <linux/vmalloc.h> @@ -21,10 +23,15 @@ static void qla2xxx_free_fcport_work(struct work_struct *work) /* BSG support for ELS/CT pass through */ void qla2x00_bsg_job_done(srb_t *sp, int res) { - struct bsg_job *bsg_job = sp->u.bsg_job; + bsg_job_t *bsg_job = sp->u.bsg_job; struct fc_bsg_reply *bsg_reply = bsg_job->reply; - sp->free(sp); + ql_dbg(ql_dbg_user, sp->vha, 0x7009, + "%s: sp hdl %x, result=%x bsg ptr %px\n", + __func__, sp->handle, res, bsg_job); + + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); bsg_reply->result = res; bsg_job_done(bsg_job, bsg_reply->result, @@ -34,7 +41,7 @@ void qla2x00_bsg_job_done(srb_t *sp, int res) void qla2x00_bsg_sp_free(srb_t *sp) { struct qla_hw_data *ha = sp->vha->hw; - struct bsg_job *bsg_job = sp->u.bsg_job; + bsg_job_t *bsg_job = sp->u.bsg_job; struct fc_bsg_request *bsg_request = bsg_job->request; struct qla_mt_iocb_rqst_fx00 *piocb_rqst; @@ -52,11 +59,19 @@ void qla2x00_bsg_sp_free(srb_t *sp) bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); } else { + + if (sp->remap.remapped) { + dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf, + sp->remap.rsp.dma); + dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf, + sp->remap.req.dma); + } else { dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); + } } if (sp->type == SRB_CT_CMD || @@ -124,7 +139,7 @@ qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha, } static int -qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job) +qla24xx_proc_fcp_prio_cfg_cmd(bsg_job_t *bsg_job) { struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); struct fc_bsg_request *bsg_request = bsg_job->request; @@ -223,7 +238,8 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job) /* validate fcp priority data */ - if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) { + if (!qla24xx_fcp_prio_cfg_valid(vha, + (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) { bsg_reply->result = (DID_ERROR << 16); ret = -EINVAL; /* If buffer was invalidated, the @@ -252,7 +268,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job) } static int -qla2x00_process_els(struct bsg_job *bsg_job) +qla2x00_process_els(bsg_job_t *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_rport *rport; fc_port_t *fcport = NULL; struct Scsi_Host *host; scsi_qla_host_t *vha; struct qla_hw_data *ha; srb_t *sp; const char *type; int req_sg_cnt, rsp_sg_cnt; int rval = (DID_ERROR << 16); - uint16_t nextlid = 0; + uint32_t els_cmd = 0; + int qla_port_allocated = 0; if (bsg_request->msgcode == FC_BSG_RPT_ELS) { rport = fc_bsg_to_rport(bsg_job); + if (!rport) { + rval = -ENOMEM; + goto done; + } fcport = *(fc_port_t **) rport->dd_data; host = rport_to_shost(rport); vha = shost_priv(host); @@ -278,6 +299,9 @@ qla2x00_process_els(struct bsg_job *bsg_job) vha = shost_priv(host); ha = vha->hw; type = "FC_BSG_HST_ELS_NOLOGIN"; + els_cmd = bsg_request->rqst_data.h_els.command_code; + if (els_cmd == ELS_AUTH_ELS) + return qla_edif_process_els(vha, bsg_job); } if (!vha->flags.online) { @@ -296,7 +320,7 @@ qla2x00_process_els(struct bsg_job *bsg_job) /* Multiple SG's are not supported for ELS requests */ if (bsg_job->request_payload.sg_cnt > 1 || - 
bsg_job->reply_payload.sg_cnt > 1) { + bsg_job->reply_payload.sg_cnt > 1) { ql_dbg(ql_dbg_user, vha, 0x7002, "Multiple SG's are not supported for ELS requests, " "request_sg_cnt=%x reply_sg_cnt=%x.\n", @@ -311,9 +335,9 @@ qla2x00_process_els(struct bsg_job *bsg_job) /* make sure the rport is logged in, * if not perform fabric login */ - if (qla2x00_fabric_login(vha, fcport, &nextlid)) { + if (atomic_read(&fcport->state) != FCS_ONLINE) { ql_dbg(ql_dbg_user, vha, 0x7003, - "Failed to login port %06X for ELS passthru.\n", + "Port %06X is not online for ELS passthru.\n", fcport->d_id.b24); rval = -EIO; goto done; @@ -330,6 +354,7 @@ qla2x00_process_els(struct bsg_job *bsg_job) goto done; } + qla_port_allocated = 1; /* Initialize all required fields of fcport */ fcport->vha = vha; fcport->d_id.b.al_pa = @@ -355,7 +380,7 @@ qla2x00_process_els(struct bsg_job *bsg_job) rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); - if (!rsp_sg_cnt) { + if (!rsp_sg_cnt) { dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); rval = -ENOMEM; @@ -372,7 +397,6 @@ qla2x00_process_els(struct bsg_job *bsg_job) rval = -EAGAIN; goto done_unmap_sg; } - /* Alloc SRB structure */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) { @@ -414,7 +438,7 @@ qla2x00_process_els(struct bsg_job *bsg_job) goto done_free_fcport; done_free_fcport: - if (bsg_request->msgcode != FC_BSG_RPT_ELS) + if (qla_port_allocated) qla2x00_free_fcport(fcport); done: return rval; @@ -435,7 +459,7 @@ qla24xx_calc_ct_iocbs(uint16_t dsds) } static int -qla2x00_process_ct(struct bsg_job *bsg_job) +qla2x00_process_ct(bsg_job_t *bsg_job) { srb_t *sp; struct fc_bsg_request *bsg_request = bsg_job->request; @@ -489,7 +513,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job) >> 24; switch (loop_id) { case 0xFC: - loop_id = NPH_SNS; + loop_id = cpu_to_le16(NPH_SNS); break; case 0xFA: loop_id = vha->mgmt_svr_loop_id; @@ -690,7 +714,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, * dump and reset the chip. */ if (ret) { - qla2xxx_dump_fw(vha); + ha->isp_ops->fw_dump(vha, 0); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } rval = -EINVAL; @@ -713,7 +737,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config, } static int -qla2x00_process_loopback(struct bsg_job *bsg_job) +qla2x00_process_loopback(bsg_job_t *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_bsg_reply *bsg_reply = bsg_job->reply; @@ -807,7 +831,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job) if (atomic_read(&vha->loop_state) == LOOP_READY && ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) || ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) && - get_unaligned_le32(req_data) == ELS_OPCODE_BYTE && + le32_to_cpup(req_data) == ELS_OPCODE_BYTE && req_data_len == MAX_ELS_FRAME_PAYLOAD && elreq.options == EXTERNAL_LOOPBACK))) { type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; @@ -895,7 +919,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job) * doesn't work take FCoE dump and then * reset the chip. 
*/ - qla2xxx_dump_fw(vha); + ha->isp_ops->fw_dump(vha, 0); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); } @@ -929,9 +953,8 @@ qla2x00_process_loopback(struct bsg_job *bsg_job) bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t); - fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply); - memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response, - sizeof(response)); + fw_sts_ptr = qla_fwsts_ptr(bsg_job); + memcpy(fw_sts_ptr, response, sizeof(response)); fw_sts_ptr += sizeof(response); *fw_sts_ptr = command_sent; @@ -956,7 +979,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job) } static int -qla84xx_reset(struct bsg_job *bsg_job) +qla84xx_reset(bsg_job_t *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); @@ -992,7 +1015,7 @@ qla84xx_reset(struct bsg_job *bsg_job) } static int -qla84xx_updatefw(struct bsg_job *bsg_job) +qla84xx_updatefw(bsg_job_t *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_bsg_reply *bsg_reply = bsg_job->reply; @@ -1102,7 +1125,7 @@ qla84xx_updatefw(struct bsg_job *bsg_job) } static int -qla84xx_mgmt_cmd(struct bsg_job *bsg_job) +qla84xx_mgmt_cmd(bsg_job_t *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_bsg_reply *bsg_reply = bsg_job->reply; @@ -1298,7 +1321,7 @@ qla84xx_mgmt_cmd(struct bsg_job *bsg_job) } static int -qla24xx_iidma(struct bsg_job *bsg_job) +qla24xx_iidma(bsg_job_t *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_bsg_reply *bsg_reply = bsg_job->reply; @@ -1387,7 +1410,7 @@ qla24xx_iidma(struct bsg_job *bsg_job) } static int -qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha, +qla2x00_optrom_setup(bsg_job_t *bsg_job, scsi_qla_host_t *vha, uint8_t is_update) { struct fc_bsg_request *bsg_request = bsg_job->request; @@ -1457,7 +1480,7 @@ qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha, } static int -qla2x00_read_optrom(struct bsg_job *bsg_job) +qla2x00_read_optrom(bsg_job_t *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); @@ -1494,7 +1517,7 @@ qla2x00_read_optrom(struct bsg_job *bsg_job) } static int -qla2x00_update_optrom(struct bsg_job *bsg_job) +qla2x00_update_optrom(bsg_job_t *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); @@ -1520,22 +1543,32 @@ qla2x00_update_optrom(struct bsg_job *bsg_job) ha->optrom_region_start, ha->optrom_region_size); if (rval) { - bsg_reply->result = -EINVAL; - rval = -EINVAL; + if (rval == QLA_FLASH_LOCKDOWN) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_ADAPTER_IN_LOCKDOWN_MODE; + bsg_reply->result = DID_OK; + rval = QLA_SUCCESS; + } else { + bsg_reply->result = -EINVAL; + rval = -EINVAL; + } } else { bsg_reply->result = DID_OK; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_OK; } vfree(ha->optrom_buffer); ha->optrom_buffer = NULL; ha->optrom_state = QLA_SWAITING; mutex_unlock(&ha->optrom_mutex); + bsg_job->reply_len = sizeof(struct fc_bsg_reply); bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); return rval; } static int -qla2x00_update_fru_versions(struct bsg_job *bsg_job) +qla2x00_update_fru_versions(bsg_job_t *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); @@ -1588,7 +1621,7 @@ qla2x00_update_fru_versions(struct bsg_job *bsg_job) } static int 
-qla2x00_read_fru_status(struct bsg_job *bsg_job) +qla2x00_read_fru_status(bsg_job_t *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); @@ -1639,7 +1672,7 @@ qla2x00_read_fru_status(struct bsg_job *bsg_job) } static int -qla2x00_write_fru_status(struct bsg_job *bsg_job) +qla2x00_write_fru_status(bsg_job_t *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); @@ -1686,7 +1719,7 @@ qla2x00_write_fru_status(struct bsg_job *bsg_job) } static int -qla2x00_write_i2c(struct bsg_job *bsg_job) +qla2x00_write_i2c(bsg_job_t *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); @@ -1732,7 +1765,7 @@ qla2x00_write_i2c(struct bsg_job *bsg_job) } static int -qla2x00_read_i2c(struct bsg_job *bsg_job) +qla2x00_read_i2c(bsg_job_t *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); @@ -1782,7 +1815,7 @@ qla2x00_read_i2c(struct bsg_job *bsg_job) } static int -qla24xx_process_bidir_cmd(struct bsg_job *bsg_job) +qla24xx_process_bidir_cmd(bsg_job_t *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); @@ -1960,7 +1993,7 @@ qla24xx_process_bidir_cmd(struct bsg_job *bsg_job) } static int -qlafx00_mgmt_cmd(struct bsg_job *bsg_job) +qlafx00_mgmt_cmd(bsg_job_t *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); @@ -2041,7 +2074,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job) /* Initialize all required fields of fcport */ fcport->vha = vha; - fcport->loop_id = le32_to_cpu(piocb_rqst->dataword); + fcport->loop_id = piocb_rqst->dataword; sp->type = SRB_FXIOCB_BCMD; sp->name = "bsg_fx_mgmt"; @@ -2083,7 +2116,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job) } static int -qla26xx_serdes_op(struct bsg_job *bsg_job) +qla26xx_serdes_op(bsg_job_t *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); @@ -2125,7 +2158,7 @@ qla26xx_serdes_op(struct bsg_job *bsg_job) } static int -qla8044_serdes_op(struct bsg_job *bsg_job) +qla8044_serdes_op(bsg_job_t *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); @@ -2167,7 +2200,7 @@ qla8044_serdes_op(struct bsg_job *bsg_job) } static int -qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job) +qla27xx_get_flash_upd_cap(bsg_job_t *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); @@ -2199,7 +2232,7 @@ qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job) } static int -qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job) +qla27xx_set_flash_upd_cap(bsg_job_t *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); @@ -2245,7 +2278,7 @@ qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job) } static int -qla27xx_get_bbcr_data(struct bsg_job *bsg_job) +qla27xx_get_bbcr_data(bsg_job_t *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); @@ -2304,7 +2337,7 @@ qla27xx_get_bbcr_data(struct bsg_job *bsg_job) } static int -qla2x00_get_priv_stats(struct bsg_job *bsg_job) +qla2x00_get_priv_stats(bsg_job_t *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_bsg_reply *bsg_reply = bsg_job->reply; @@ -2363,7 +2396,7 @@ qla2x00_get_priv_stats(struct 
bsg_job *bsg_job) } static int -qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job) +qla2x00_do_dport_diagnostics(bsg_job_t *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); @@ -2407,7 +2440,91 @@ qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job) } static int -qla2x00_get_flash_image_status(struct bsg_job *bsg_job) +qla2x00_do_dport_diagnostics_v2(bsg_job_t *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + int rval; + struct qla_dport_diag_v2 *dd; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + uint16_t options; + + if (!IS_DPORT_CAPABLE(vha->hw)) + return -EPERM; + + dd = kzalloc(sizeof(*dd), GFP_KERNEL); + if (!dd) + return -ENOMEM; + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, dd, sizeof(*dd)); + + options = dd->options; + + /* Check dport Test in progress */ + if ((QLA_GET_DPORT_RESULT_V2 == options) && + (DPORT_DIAG_IN_PROGRESS & vha->dport_status)) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_DPORT_DIAG_IN_PROCESS; + goto dportcomplete; + } + + /* Check chip reset in progress and start/restart requests arrive */ + if ((DPORT_DIAG_CHIP_RESET_IN_PROGRESS & vha->dport_status) && + ((QLA_START_DPORT_TEST_V2 == options) || + (QLA_RESTART_DPORT_TEST_V2 == options))) { + vha->dport_status &= ~DPORT_DIAG_CHIP_RESET_IN_PROGRESS; + } + + /* Check chip reset in progress and get result request arrive */ + if ((DPORT_DIAG_CHIP_RESET_IN_PROGRESS & vha->dport_status) && + (QLA_GET_DPORT_RESULT_V2 == options)) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_DPORT_DIAG_NOT_RUNNING; + goto dportcomplete; + } + + rval = qla26xx_dport_diagnostics_v2(vha, dd, mcp); + + if (QLA_SUCCESS == rval) { + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_OK; + if (QLA_START_DPORT_TEST_V2 == options || + QLA_RESTART_DPORT_TEST_V2 == options) { + dd->mbx1 = mcp->mb[0]; + dd->mbx2 = mcp->mb[1]; + vha->dport_status |= DPORT_DIAG_IN_PROGRESS; + } else if (QLA_GET_DPORT_RESULT_V2 == options) { + dd->mbx1 = vha->dport_data[1]; + dd->mbx2 = vha->dport_data[2]; + } + } else { + dd->mbx1 = mcp->mb[0]; + dd->mbx2 = mcp->mb[1]; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_DPORT_DIAG_ERR; + } + + +dportcomplete: + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd)); + + bsg_reply->reply_payload_rcv_len = sizeof(*dd); + bsg_job->reply_len = sizeof(*bsg_reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + kfree(dd); + + return 0; +} + +static int +qla2x00_get_flash_image_status(bsg_job_t *bsg_job) { scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); struct fc_bsg_reply *bsg_reply = bsg_job->reply; @@ -2418,19 +2535,23 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job) qla27xx_get_active_image(vha, &active_regions); regions.global_image = active_regions.global; + if (IS_QLA27XX(ha)) + regions.nvme_params = QLA27XX_PRIMARY_IMAGE; + if (IS_QLA28XX(ha)) { qla28xx_get_aux_images(vha, &active_regions); regions.board_config = active_regions.aux.board_config; regions.vpd_nvram = active_regions.aux.vpd_nvram; regions.npiv_config_0_1 = active_regions.aux.npiv_config_0_1; regions.npiv_config_2_3 = active_regions.aux.npiv_config_2_3; + regions.nvme_params = active_regions.aux.nvme_params; } 
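+ /* nvme_params is reported only for ISP27xx/ISP28xx: ISP27xx exposes a single (primary) NVMe-parameters region, while ISP28xx reads the active copy from the aux image above. */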
ql_dbg(ql_dbg_user, vha, 0x70e1, - "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u\n", + "%s(%lu): FW=%u BCFG=%u VPDNVR=%u NPIV01=%u NPIV02=%u NVME_PARAMS=%u\n", __func__, vha->host_no, regions.global_image, regions.board_config, regions.vpd_nvram, - regions.npiv_config_0_1, regions.npiv_config_2_3); + regions.npiv_config_0_1, regions.npiv_config_2_3, regions.nvme_params); sg_copy_from_buffer(bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, ®ions, sizeof(regions)); @@ -2446,7 +2567,744 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job) } static int -qla2x00_process_vendor_specific(struct bsg_job *bsg_job) +qla2x00_get_drv_attr(bsg_job_t *bsg_job) +{ + struct qla_drv_attr drv_attr; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + + memset(&drv_attr, 0, sizeof(struct qla_drv_attr)); + /* Additional check should be added if SCM is not enabled + * by default for a given driver version. + */ + drv_attr.attributes |= QLA_DRV_ATTR_SCM_SUPPORTED; + drv_attr.attributes |= QLA_DRV_ATTR_SCM_2_SUPPORTED; + drv_attr.attributes |= QLA_DRV_ATTR_LOCKDOWN_SUPPORT; + drv_attr.attributes |= QLA_DRV_ATTR_SCMR_PROFILE_SUPPORT; + drv_attr.attributes |= QLA_DRV_ATTR_DPORT_V2_SUPPORT; + drv_attr.attributes |= QLA_DRV_ATTR_VIRTUAL_LANE_SUPPORT; + drv_attr.attributes |= QLA_DRV_ATTR_IO_THROTTLING_SUPPORT; + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &drv_attr, + sizeof(struct qla_drv_attr)); + + bsg_reply->reply_payload_rcv_len = sizeof(struct qla_drv_attr); + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + + bsg_job->reply_len = sizeof(*bsg_job->reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); + + return 0; +} + +static int +qla2x00_get_port_scm(bsg_job_t *bsg_job) +{ + struct Scsi_Host *shost = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(shost); + struct qla_hw_data *ha = vha->hw; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct qla_scm_port *scm_stats; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return -EPERM; + + scm_stats = kzalloc(sizeof(struct qla_scm_port), GFP_KERNEL); + if (scm_stats == NULL) { + ql_log(ql_log_warn, vha, 0x7024, + "Failed to allocate memory for host scm stats.\n"); + return -ENOMEM; + } + + scm_stats->current_events = ha->scm.current_events; + memcpy(&scm_stats->link_integrity, &ha->scm.link_integrity, + (sizeof(struct qla_scm_link_event))); + memcpy(&scm_stats->delivery, &ha->scm.delivery, + (sizeof(struct qla_scm_delivery_event))); + memcpy(&scm_stats->congestion, &ha->scm.congestion, + (sizeof(struct qla_scm_congestion_event))); + scm_stats->scm_congestion_alarm = ha->scm.sev.cn_alarm; + scm_stats->scm_congestion_warning = ha->scm.sev.cn_warning; + scm_stats->scm_fabric_connection_flags = + ha->scm.scm_fabric_connection_flags; + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, scm_stats, sizeof(struct qla_scm_port)); + + bsg_reply->reply_payload_rcv_len = sizeof(struct qla_scm_port); + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + + bsg_job->reply_len = sizeof(*bsg_job->reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); + + kfree(scm_stats); + return 0; +} + +static int +qla2x00_get_port_scm_v2(bsg_job_t *bsg_job) +{ + struct Scsi_Host *shost = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(shost); + struct qla_hw_data *ha = vha->hw; + struct 
fc_bsg_reply *bsg_reply = bsg_job->reply; + struct qla_scm_port_v2 *scm_stats; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return -EPERM; + + scm_stats = kzalloc(sizeof(struct qla_scm_port_v2), GFP_KERNEL); + if (scm_stats == NULL) { + ql_log(ql_log_warn, vha, 0x7024, + "Failed to allocate memory for host scm stats.\n"); + return -ENOMEM; + } + + memcpy(&scm_stats->stats, &ha->scm.stats, + (sizeof(struct qla_scm_stats))); + memcpy(&scm_stats->rstats, &ha->scm.rstats, + (sizeof(struct qla_scmr_stats))); + + scm_stats->scm_fabric_connection_flags = + ha->scm.scm_fabric_connection_flags; + + /* For V2 */ + if (qla_scmr_is_congested(&ha->sfc)) { + scm_stats->current_state = SCM_STATE_CONGESTED; + } else { + scm_stats->current_state = SCM_STATE_HEALTHY; + } + memcpy(&scm_stats->sev, &ha->scm.sev, + (sizeof(struct qla_fpin_severity))); + scm_stats->scm_events = ha->scm.current_events; + + scm_stats->secs_since_last_event = qla_get_real_seconds() - + ha->scm.last_event_timestamp; + if (ha->flags.scm_supported_vl == 0) + scm_stats->vl_mode = QLA_VL_MODE_DISABLED; + else { /* VL is enabled, check if it is negotiated */ + if (ha->flags.conn_fabric_cisco_er_rdy) + scm_stats->vl_mode = QLA_VL_MODE_OPERATIONAL; + else + scm_stats->vl_mode = QLA_VL_MODE_NON_OPERATIONAL; + } + + scm_stats->io_throttling = qla_get_throttling_state(&ha->sfc); + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, scm_stats, + sizeof(struct qla_scm_port_v2)); + + bsg_reply->reply_payload_rcv_len = sizeof(struct qla_scm_port_v2); + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + + bsg_job->reply_len = sizeof(*bsg_job->reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); + + kfree(scm_stats); + return 0; +} + +static int +qla2x00_get_target_scm(bsg_job_t *bsg_job) +{ + struct Scsi_Host *shost = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(shost); + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport = NULL; + int rval; + struct qla_scm_target *scm_stats = NULL; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return -EPERM; + + scm_stats = kzalloc(sizeof(struct qla_scm_target), GFP_KERNEL); + if (scm_stats == NULL) { + ql_log(ql_log_warn, vha, 0x7024, + "Failed to allocate memory for target scm stats.\n"); + return -ENOMEM; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, scm_stats, + sizeof(struct qla_scm_target)); + + + fcport = qla2x00_find_fcport_by_wwpn(vha, scm_stats->wwpn, 0); + if (!fcport) { + rval = EXT_STATUS_ERR; + goto complete; + } + + /* Copy SCM Target data to local struct, keep WWPN from user */ + scm_stats->current_events = fcport->scm.current_events; + memcpy(&scm_stats->link_integrity, &fcport->scm.link_integrity, + (sizeof(struct qla_scm_link_event))); + memcpy(&scm_stats->delivery, &fcport->scm.delivery, + (sizeof(struct qla_scm_delivery_event))); + memcpy(&scm_stats->peer_congestion, &fcport->scm.peer_congestion, + (sizeof(struct qla_scm_peer_congestion_event))); + + /* TODO: Destination stats are u32 */ + scm_stats->link_unknown_event = fcport->scm.stats.li_failure_unknown; + scm_stats->link_failure_count = fcport->scm.stats.li_link_failure_count; + scm_stats->loss_of_sync_count = + fcport->scm.stats.li_loss_of_sync_count; + scm_stats->loss_of_signals_count = + fcport->scm.stats.li_loss_of_signals_count; + scm_stats->primitive_seq_protocol_err_count = + 
fcport->scm.stats.li_prim_seq_err_count; + scm_stats->invalid_transmission_word_count = + fcport->scm.stats.li_invalid_tx_word_count; + scm_stats->invalid_crc_count = fcport->scm.stats.li_invalid_crc_count; + scm_stats->link_device_specific_event = + fcport->scm.stats.li_device_specific; + scm_stats->delivery_failure_unknown = fcport->scm.stats.dn_unknown; + scm_stats->delivery_timeout = fcport->scm.stats.dn_timeout; + scm_stats->delivery_unable_to_route = + fcport->scm.stats.dn_unable_to_route; + scm_stats->delivery_failure_device_specific = + fcport->scm.stats.dn_device_specific; + scm_stats->peer_congestion_clear = fcport->scm.stats.cn_clear; + scm_stats->peer_congestion_lost_credit = + fcport->scm.stats.cn_lost_credit; + scm_stats->peer_congestion_credit_stall = + fcport->scm.stats.cn_credit_stall; + scm_stats->peer_congestion_oversubscription = + fcport->scm.stats.cn_oversubscription; + scm_stats->peer_congestion_device_specific = + fcport->scm.stats.cn_device_specific; + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, scm_stats, + sizeof(struct qla_scm_target)); + rval = EXT_STATUS_OK; + +complete: + bsg_reply->reply_payload_rcv_len = sizeof(struct qla_scm_target); + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; + + bsg_job->reply_len = sizeof(*bsg_job->reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + kfree(scm_stats); + return 0; +} + +static int +qla2x00_get_target_scm_v2(bsg_job_t *bsg_job) +{ + struct Scsi_Host *shost = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(shost); + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport = NULL; + int rval; + struct qla_scm_target_v2 *scm_stats = NULL; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return -EPERM; + + scm_stats = kzalloc(sizeof(struct qla_scm_target_v2), GFP_KERNEL); + if (scm_stats == NULL) { + ql_log(ql_log_warn, vha, 0x7024, + "Failed to allocate memory for target scm stats.\n"); + return -ENOMEM; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, scm_stats, + sizeof(struct qla_scm_target_v2)); + + + fcport = qla2x00_find_fcport_by_wwpn(vha, scm_stats->wwpn, 0); + if (!fcport) { + rval = EXT_STATUS_ERR; + goto complete; + } + + /* Copy SCM Target data to local struct, keep WWPN from user */ + memcpy(&scm_stats->stats, &fcport->scm.stats, + (sizeof(struct qla_scm_stats))); + memcpy(&scm_stats->rstats, &fcport->scm.rstats, + (sizeof(struct qla_scmr_stats))); + + if (qla_scmr_is_congested(&fcport->sfc)) + scm_stats->current_state = SCM_STATE_CONGESTED; + else + scm_stats->current_state = SCM_STATE_HEALTHY; + + scm_stats->scm_events = fcport->scm.current_events; + scm_stats->secs_since_last_event = qla_get_real_seconds() - + fcport->scm.last_event_timestamp; + + if (ha->flags.conn_fabric_cisco_er_rdy) { + switch (fcport->vl.v_lane) { + case VL_SLOW: + scm_stats->vl_state = QLA_VL_STATE_SLOW; + break; + case VL_NORMAL: + scm_stats->vl_state = QLA_VL_STATE_NORMAL; + break; + case VL_FAST: + scm_stats->vl_state = QLA_VL_STATE_FAST; + break; + } + } else + scm_stats->vl_state = QLA_VL_STATE_DISABLED; + + scm_stats->io_throttling = qla_get_throttling_state(&fcport->sfc); + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, scm_stats, + sizeof(struct qla_scm_target_v2)); + rval = EXT_STATUS_OK; + +complete: + bsg_reply->reply_payload_rcv_len = sizeof(struct 
qla_scm_target_v2); + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval; + + bsg_job->reply_len = sizeof(*bsg_job->reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + kfree(scm_stats); + return 0; +} + +DECLARE_ENUM2STR_LOOKUP(qla_get_profile_type, ql_scm_profile_type, + QL_SCM_PROFILE_TYPES_INIT); +static int +qla2x00_bidi_scm_mgmt(bsg_job_t *bsg_job) +{ + struct Scsi_Host *shost = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(shost); + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct qla_hw_data *ha = vha->hw; + struct qla_scm_host_config *scm_config = NULL; + fc_port_t *fcport; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return -EPERM; + + scm_config = kzalloc(sizeof(struct qla_scm_host_config), GFP_KERNEL); + if (scm_config == NULL) { + ql_log(ql_log_warn, vha, 0x7024, + "Failed to allocate memory for host scm config.\n"); + return -ENOMEM; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, scm_config, + sizeof(struct qla_scm_host_config)); + + if (scm_config->controls & QLA_RESET_SCM_STATS) { + ql_log(ql_log_info, vha, 0x709b, "Clearing USCM stats\n"); + qla2xxx_clear_scm_stats(vha); + } + + if (scm_config->controls & QLA_RESET_SCMR_STATS) { + ql_log(ql_log_info, vha, 0x709c, "Clearing USCM throttling stats\n"); + qla2xxx_clear_scmr_stats(vha); + } + + /* Update the SCMR profile */ + if (scm_config->controls & QLA_APPLY_SCMR_PROFILE) { + ql_log(ql_log_info, vha, 0x709d, "Changing SCMR profile from %s to %s\n", + qla_get_profile_type(ha->sfc.profile.scmr_profile), + qla_get_profile_type(scm_config->profile.scmr_profile)); + memcpy(&ha->sfc.profile, &scm_config->profile, + (sizeof(struct qla_scmr_port_profile))); + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (!(fcport->port_type & FCT_TARGET) && + !(fcport->port_type & FCT_NVME_TARGET)) + continue; + memcpy(&fcport->sfc.profile, &scm_config->profile, + (sizeof(struct qla_scmr_port_profile))); + } + if (scm_config->profile.scmr_control_flags == 0) { + /* Restore driver defaults */ + ha->sfc.profile.scmr_profile = ql2x_scmr_profile; + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (!(fcport->port_type & FCT_TARGET) && + !(fcport->port_type & FCT_NVME_TARGET)) + continue; + fcport->sfc.profile.scmr_profile = ql2x_scmr_profile; + } + ql_dbg(ql_dbg_user, vha, 0x709e, + "SCM profile restored to driver defaults, New Profile: %s\n", + qla_get_profile_type(ha->sfc.profile.scmr_profile)); + } + if (ha->sfc.profile.scmr_profile == 0) {/* Monitor profile */ + qla2xxx_scmr_clear_throttle(&ha->sfc); + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (!(fcport->port_type & FCT_TARGET) && + !(fcport->port_type & FCT_NVME_TARGET)) + continue; + qla2xxx_scmr_clear_throttle(&fcport->sfc); + if (vha->hw->flags.conn_fabric_cisco_er_rdy) {// VL + if (!qla_scmr_is_congested(&fcport->sfc)) + qla2xxx_switch_vl(&fcport->sfc, VL_NORMAL); + } + } + } + } + + /* Provide the in-use SCMR profile */ + if (scm_config->controls & QLA_GET_SCMR_PROFILE) { + memcpy(&scm_config->profile, &ha->sfc.profile, + (sizeof(struct qla_scmr_port_profile))); + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, scm_config, + sizeof(struct qla_scm_host_config)); + + bsg_reply->reply_payload_rcv_len = + sizeof(struct qla_scm_host_config); + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_OK; + bsg_job->reply_len = sizeof(*bsg_job->reply); + 
bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return 0; + } + + bsg_reply->reply_payload_rcv_len = 0; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + + bsg_job->reply_len = sizeof(*bsg_job->reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return 0; +} + +static int +qla2x00_manage_host_stats(bsg_job_t *bsg_job) +{ + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct ql_vnd_mng_host_stats_param *req_data; + struct ql_vnd_mng_host_stats_resp rsp_data; + uint32_t req_data_len; + int ret = 0; + + if (!vha->flags.online) { + ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n"); + return -EIO; + } + + req_data_len = bsg_job->request_payload.payload_len; + + if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) { + ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n"); + return -EIO; + } + + req_data = kzalloc(sizeof(struct ql_vnd_mng_host_stats_param), GFP_KERNEL); + if (!req_data) { + ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n"); + return -ENOMEM; + } + + /* Copy the request buffer in req_data */ + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, req_data, req_data_len); + + switch (req_data->action) { + case stop: + ret = qla2xxx_stop_stats(vha->host, req_data->stat_type); + break; + case start: + ret = qla2xxx_start_stats(vha->host, req_data->stat_type); + break; + case clear: + ret = qla2xxx_reset_stats(vha->host, req_data->stat_type); + break; + default: + ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n"); + ret = -EIO; + break; + } + + kfree(req_data); + + /* Prepare response */ + rsp_data.status = ret; + bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp); + + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &rsp_data, + sizeof(struct ql_vnd_mng_host_stats_resp)); + + bsg_reply->result = DID_OK; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return ret; +} + +static int +qla2x00_get_host_stats(bsg_job_t *bsg_job) +{ + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct ql_vnd_stats_param *req_data; + struct ql_vnd_host_stats_resp rsp_data; + uint32_t req_data_len; + int ret = 0; + uint64_t ini_entry_count = 0; + uint64_t entry_count = 0; + uint64_t tgt_num = 0; + uint64_t tmp_stat_type = 0; + uint64_t response_len = 0; + void *data; + + req_data_len = bsg_job->request_payload.payload_len; + + if (req_data_len != sizeof(struct ql_vnd_stats_param)) { + ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n"); + return -EIO; + } + + req_data = kzalloc(sizeof(struct ql_vnd_stats_param), GFP_KERNEL); + if (!req_data) { + ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n"); + return -ENOMEM; + } + + /* Copy the request buffer in req_data */ + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, req_data, req_data_len); + + /* Copy stat type to work on it */ + tmp_stat_type = req_data->stat_type; + + if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) + { + /* Num of tgts connected to this host */ + tgt_num = qla2x00_get_num_tgts(vha); + /* unset 
BIT_17 */ + tmp_stat_type &= ~(1 << 17); + } + + /* Total ini stats */ + ini_entry_count = qla2x00_count_set_bits(tmp_stat_type); + + /* Total number of entries */ + entry_count = ini_entry_count + tgt_num; + + response_len = sizeof(struct ql_vnd_host_stats_resp) + + (sizeof(struct ql_vnd_stat_entry) * entry_count); + + if (response_len > bsg_job->reply_payload.payload_len) { + rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL; + bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp); + + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &rsp_data, + sizeof(struct ql_vnd_mng_host_stats_resp)); + + bsg_reply->result = DID_OK; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + goto host_stat_out; + } + + data = kzalloc(response_len, GFP_KERNEL); + if (!data) { + kfree(req_data); + return -ENOMEM; + } + + ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type, + data, response_len); + + rsp_data.status = EXT_STATUS_OK; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, data, + response_len); + bsg_reply->result = DID_OK; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + kfree(data); +host_stat_out: + kfree(req_data); + return 0; +} +static struct fc_rport * +qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num) +{ + fc_port_t *fcport = NULL; + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->rport->number == tgt_num) + return fcport->rport; + } + return NULL; +} + +static int +qla2x00_get_tgt_stats(bsg_job_t *bsg_job) +{ + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct ql_vnd_tgt_stats_param *req_data; + uint32_t req_data_len; + int ret = 0; + uint64_t response_len = 0; + struct ql_vnd_tgt_stats_resp *data = NULL; + struct fc_rport *rport = NULL; + + if (!vha->flags.online) { + ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n"); + return -EIO; + } + + req_data_len = bsg_job->request_payload.payload_len; + + if (req_data_len != sizeof(struct ql_vnd_stat_entry)) { + ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n"); + return -EIO; + } + + req_data = kzalloc(sizeof(struct ql_vnd_tgt_stats_param), GFP_KERNEL); + if (!req_data) { + ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n"); + return -ENOMEM; + } + + /* Copy the request buffer in req_data */ + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, req_data, req_data_len); + + response_len = sizeof(struct ql_vnd_tgt_stats_resp) + + sizeof(struct ql_vnd_stat_entry); + + /* structure + size for one entry */ + data = kzalloc(response_len, GFP_KERNEL); + if (!data) { + kfree(req_data); + return -ENOMEM; + } + + if (response_len > bsg_job->reply_payload.payload_len) { + data->status = EXT_STATUS_BUFFER_TOO_SMALL; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL; + bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp); + + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, data, + sizeof(struct ql_vnd_tgt_stats_resp)); + + bsg_reply->result = DID_OK; + bsg_job_done(bsg_job, bsg_reply->result, + 
bsg_reply->reply_payload_rcv_len); + goto tgt_stat_out; + } + + rport = qla2xxx_find_rport(vha, req_data->tgt_id); + if (rport == NULL) { + ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id); + ret = EXT_STATUS_INVALID_PARAM; + data->status = EXT_STATUS_INVALID_PARAM; + goto reply; + } + + ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type, + rport, (void *)data, response_len); + + + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; +reply: + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, data, + response_len); + bsg_reply->result = DID_OK; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + +tgt_stat_out: + kfree(data); + kfree(req_data); + + return 0; + +} + +static int +qla2x00_manage_host_port(bsg_job_t *bsg_job) +{ + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct ql_vnd_mng_host_port_param *req_data; + struct ql_vnd_mng_host_port_resp rsp_data; + uint32_t req_data_len; + int ret = 0; + + + req_data_len = bsg_job->request_payload.payload_len; + + if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) { + ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n"); + return -EIO; + } + + req_data = kzalloc(sizeof(struct ql_vnd_mng_host_port_param), GFP_KERNEL); + if (!req_data) { + ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n"); + return -ENOMEM; + } + + /* Copy the request buffer in req_data */ + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, req_data, req_data_len); + + switch (req_data->action) { + case enable: + ret = qla2xxx_enable_port(vha->host); + break; + case disable: + ret = qla2xxx_disable_port(vha->host); + break; + default: + ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n"); + ret = -EIO; + break; + } + + kfree(req_data); + + /* Prepare response */ + rsp_data.status = ret; + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp); + + bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, &rsp_data, + sizeof(struct ql_vnd_mng_host_port_resp)); + bsg_reply->result = DID_OK; + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + + return ret; +} + +static int +qla2x00_process_vendor_specific(struct scsi_qla_host *vha, bsg_job_t *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; @@ -2518,16 +3376,56 @@ qla2x00_process_vendor_specific(struct bsg_job *bsg_job) case QL_VND_DPORT_DIAGNOSTICS: return qla2x00_do_dport_diagnostics(bsg_job); + case QL_VND_DPORT_DIAGNOSTICS_V2: + return qla2x00_do_dport_diagnostics_v2(bsg_job); + + case QL_VND_EDIF_MGMT: + return qla_edif_app_mgmt(bsg_job); + case QL_VND_SS_GET_FLASH_IMAGE_STATUS: return qla2x00_get_flash_image_status(bsg_job); + case QL_VND_GET_PORT_SCM: + return qla2x00_get_port_scm(bsg_job); + + case QL_VND_BIDI_SCM_MGMT: + return qla2x00_bidi_scm_mgmt(bsg_job); + + case QL_VND_GET_TARGET_SCM: + return qla2x00_get_target_scm(bsg_job); + + case QL_VND_GET_PORT_SCM_V2: + return qla2x00_get_port_scm_v2(bsg_job); + + case QL_VND_GET_TARGET_SCM_V2: + return qla2x00_get_target_scm_v2(bsg_job); + + case QL_VND_GET_DRV_ATTR: + return qla2x00_get_drv_attr(bsg_job); + + case QL_VND_MANAGE_HOST_STATS: + return 
qla2x00_manage_host_stats(bsg_job); + + case QL_VND_GET_HOST_STATS: + return qla2x00_get_host_stats(bsg_job); + + case QL_VND_GET_TGT_STATS: + return qla2x00_get_tgt_stats(bsg_job); + + case QL_VND_MANAGE_HOST_PORT: + return qla2x00_manage_host_port(bsg_job); + + case QL_VND_SYSTEM_LOCKDOWN_INFO: + return qla2x00_sys_ld_info(bsg_job); + + case QL_VND_MBX_PASSTHRU: + return qla2x00_mailbox_passthru(bsg_job); default: return -ENOSYS; } } int -qla24xx_bsg_request(struct bsg_job *bsg_job) +qla24xx_bsg_request(bsg_job_t *bsg_job) { struct fc_bsg_request *bsg_request = bsg_job->request; struct fc_bsg_reply *bsg_reply = bsg_job->reply; @@ -2541,6 +3439,8 @@ qla24xx_bsg_request(struct bsg_job *bsg_job) if (bsg_request->msgcode == FC_BSG_RPT_ELS) { rport = fc_bsg_to_rport(bsg_job); + if (!rport) + return ret; host = rport_to_shost(rport); vha = shost_priv(host); } else { @@ -2548,15 +3448,34 @@ qla24xx_bsg_request(struct bsg_job *bsg_job) vha = shost_priv(host); } + /* Disable port will bring down the chip, allow enable command */ + if ((bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT) || + (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS)) + goto skip_chip_chk; + + if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) { + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + return -EIO; + } + + if (vha->hw->flags.port_isolated) { + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + /* operation not permitted */ + return -EPERM; + } + if (qla2x00_chip_is_down(vha)) { ql_dbg(ql_dbg_user, vha, 0x709f, "BSG: ISP abort active/needed -- cmd=%d.\n", bsg_request->msgcode); + SET_DID_STATUS(bsg_reply->result, DID_ERROR); return -EBUSY; } - ql_dbg(ql_dbg_user, vha, 0x7000, - "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode); +skip_chip_chk: + ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000, + "Entered %s msgcode=0x%x. bsg ptr %px\n", + __func__, bsg_request->msgcode, bsg_job); switch (bsg_request->msgcode) { case FC_BSG_RPT_ELS: @@ -2567,7 +3486,7 @@ qla24xx_bsg_request(struct bsg_job *bsg_job) ret = qla2x00_process_ct(bsg_job); break; case FC_BSG_HST_VENDOR: - ret = qla2x00_process_vendor_specific(bsg_job); + ret = qla2x00_process_vendor_specific(vha, bsg_job); break; case FC_BSG_HST_ADD_RPORT: case FC_BSG_HST_DEL_RPORT: @@ -2576,11 +3495,15 @@ qla24xx_bsg_request(struct bsg_job *bsg_job) ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n"); break; } + + ql_dbg(ql_dbg_user + ql_dbg_verbose, vha, 0x7000, + "%s done with return %x\n", __func__, ret); + return ret; } int -qla24xx_bsg_timeout(struct bsg_job *bsg_job) +qla24xx_bsg_timeout(bsg_job_t *bsg_job) { struct fc_bsg_reply *bsg_reply = bsg_job->reply; scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); @@ -2590,6 +3513,15 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job) unsigned long flags; struct req_que *req; + ql_log(ql_log_info, vha, 0x708b, "%s CMD timeout. 
bsg ptr %px.\n", + __func__, bsg_job); + + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x9007, + "PCI/Register disconnect.\n"); + qla_pci_set_eeh_busy(vha); + } + /* find the bsg job from the active list of commands */ spin_lock_irqsave(&ha->hardware_lock, flags); for (que = 0; que < ha->max_req_queues; que++) { @@ -2599,27 +3531,28 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; - if (sp) { - if (((sp->type == SRB_CT_CMD) || - (sp->type == SRB_ELS_CMD_HST) || - (sp->type == SRB_FXIOCB_BCMD)) - && (sp->u.bsg_job == bsg_job)) { - req->outstanding_cmds[cnt] = NULL; - spin_unlock_irqrestore(&ha->hardware_lock, flags); - if (ha->isp_ops->abort_command(sp)) { - ql_log(ql_log_warn, vha, 0x7089, - "mbx abort_command " - "failed.\n"); - bsg_reply->result = -EIO; - } else { - ql_dbg(ql_dbg_user, vha, 0x708a, - "mbx abort_command " - "success.\n"); - bsg_reply->result = 0; - } - spin_lock_irqsave(&ha->hardware_lock, flags); - goto done; + if (sp && ((sp->type == SRB_CT_CMD) || + (sp->type == SRB_ELS_CMD_HST) || + (sp->type == SRB_ELS_CMD_HST_NOLOGIN) || + (sp->type == SRB_FXIOCB_BCMD)) + && (sp->u.bsg_job == bsg_job)) { + req->outstanding_cmds[cnt] = NULL; + spin_unlock_irqrestore(&ha->hardware_lock, flags); + + if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) { + ql_log(ql_log_warn, vha, 0x7089, + "mbx abort_command " + "failed.\n"); + bsg_reply->result = -EIO; + } else { + ql_dbg(ql_dbg_user, vha, 0x708a, + "mbx abort_command " + "success.\n"); + bsg_reply->result = 0; } + spin_lock_irqsave(&ha->hardware_lock, flags); + goto done; + } } } @@ -2630,6 +3563,52 @@ qla24xx_bsg_timeout(struct bsg_job *bsg_job) done: spin_unlock_irqrestore(&ha->hardware_lock, flags); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return 0; } + +int qla2x00_mailbox_passthru(bsg_job_t *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job)); + int ret = -EINVAL; + int ptsize = sizeof(struct qla_mbx_passthru); + struct qla_mbx_passthru *req_data = NULL; + uint32_t req_data_len; + + req_data_len = bsg_job->request_payload.payload_len; + if (req_data_len != ptsize) { + ql_log(ql_log_warn, vha, 0xf0a3, "req_data_len invalid.\n"); + return -EIO; + } + req_data = kzalloc(ptsize, GFP_KERNEL); + if (!req_data) { + ql_log(ql_log_warn, vha, 0xf0a4, + "req_data memory allocation failure.\n"); + return -ENOMEM; + } + + /* Copy the request buffer into req_data */ + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, req_data, ptsize); + ret = qla_mailbox_passthru(vha, req_data->mbx_in, req_data->mbx_out); + + /* Copy req_data into the reply buffer */ + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, req_data, ptsize); + + bsg_reply->reply_payload_rcv_len = ptsize; + if (ret == QLA_SUCCESS) + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK; + else + bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_ERR; + + bsg_job->reply_len = sizeof(*bsg_job->reply); + bsg_reply->result = DID_OK << 16; + bsg_job_done(bsg_job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); + + kfree(req_data); + + return ret; +} diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h index 1a09b5512267a..3c20016e42c85 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.h +++ b/drivers/scsi/qla2xxx/qla_bsg.h @@ -1,7 +1,8 @@ -/* 
SPDX-License-Identifier: GPL-2.0-only */ /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. */ #ifndef __QLA_BSG_H #define __QLA_BSG_H @@ -31,6 +32,20 @@ #define QL_VND_DPORT_DIAGNOSTICS 0x19 #define QL_VND_GET_PRIV_STATS_EX 0x1A #define QL_VND_SS_GET_FLASH_IMAGE_STATUS 0x1E +#define QL_VND_EDIF_MGMT 0x1F +#define QL_VND_GET_PORT_SCM 0x20 +#define QL_VND_GET_TARGET_SCM 0x21 +#define QL_VND_GET_DRV_ATTR 0x22 +#define QL_VND_MANAGE_HOST_STATS 0x23 +#define QL_VND_GET_HOST_STATS 0x24 +#define QL_VND_GET_TGT_STATS 0x25 +#define QL_VND_MANAGE_HOST_PORT 0x26 +#define QL_VND_SYSTEM_LOCKDOWN_INFO 0x27 +#define QL_VND_BIDI_SCM_MGMT 0x28 +#define QL_VND_GET_PORT_SCM_V2 0x29 +#define QL_VND_GET_TARGET_SCM_V2 0x2A +#define QL_VND_MBX_PASSTHRU 0x2B +#define QL_VND_DPORT_DIAGNOSTICS_V2 0x2C /* BSG Vendor specific subcode returns */ #define EXT_STATUS_OK 0 @@ -40,6 +55,7 @@ #define EXT_STATUS_DATA_OVERRUN 7 #define EXT_STATUS_DATA_UNDERRUN 8 #define EXT_STATUS_MAILBOX 11 +#define EXT_STATUS_BUFFER_TOO_SMALL 16 #define EXT_STATUS_NO_MEMORY 17 #define EXT_STATUS_DEVICE_OFFLINE 22 @@ -53,6 +69,13 @@ #define EXT_STATUS_TIMEOUT 30 #define EXT_STATUS_THREAD_FAILED 31 #define EXT_STATUS_DATA_CMP_FAILED 32 +#define EXT_STATUS_ADAPTER_IN_LOCKDOWN_MODE 39 + +#define EXT_STATUS_DPORT_DIAG_ERR 40 +#define EXT_STATUS_DPORT_DIAG_IN_PROCESS 41 +#define EXT_STATUS_DPORT_DIAG_NOT_RUNNING 42 + +#define EXT_STATUS_UNSUPPORTED_FW 43 /* BSG definitions for interpreting CommandSent field */ #define INT_DEF_LB_LOOPBACK_CMD 0 @@ -275,6 +298,17 @@ struct qla_dport_diag { uint8_t unused[62]; } __packed; +#define QLA_GET_DPORT_RESULT_V2 0 /* Get Result */ +#define QLA_RESTART_DPORT_TEST_V2 1 /* Restart test */ +#define QLA_START_DPORT_TEST_V2 2 /* Start test */ +struct qla_dport_diag_v2 { + uint16_t options; + uint16_t mbx1; + uint16_t mbx2; + uint8_t unused[58]; + uint8_t buf[1024]; /* Test Result */ +} __packed; + /* D_Port options */ #define QLA_DPORT_RESULT 0x0 #define QLA_DPORT_START 0x2 @@ -286,7 +320,334 @@ struct qla_active_regions { uint8_t vpd_nvram; uint8_t npiv_config_0_1; uint8_t npiv_config_2_3; - uint8_t reserved[32]; + uint8_t nvme_params; + uint8_t reserved[31]; +} __packed; + +enum ql_fpin_li_event_types { + QL_FPIN_LI_UNKNOWN = 0x0, + QL_FPIN_LI_LINK_FAILURE = 0x1, + QL_FPIN_LI_LOSS_OF_SYNC = 0x2, + QL_FPIN_LI_LOSS_OF_SIG = 0x3, + QL_FPIN_LI_PRIM_SEQ_ERR = 0x4, + QL_FPIN_LI_INVALID_TX_WD = 0x5, + QL_FPIN_LI_INVALID_CRC = 0x6, + QL_FPIN_LI_DEVICE_SPEC = 0xF, +}; + +/* + * Initializer for the decoding table. + * Please keep this in sync with the above definitions. 
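 * A new QL_FPIN_LI_* value added above needs a matching entry in this
 * table for the FPIN link-integrity decode to report its name.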
+ */ +#define QL_FPIN_LI_EVT_TYPES_INIT { \ + { QL_FPIN_LI_UNKNOWN, "Unknown" }, \ + { QL_FPIN_LI_LINK_FAILURE, "Link Failure" }, \ + { QL_FPIN_LI_LOSS_OF_SYNC, "Loss of Synchronization" }, \ + { QL_FPIN_LI_LOSS_OF_SIG, "Loss of Signal" }, \ + { QL_FPIN_LI_PRIM_SEQ_ERR, "Primitive Sequence Protocol Error" }, \ + { QL_FPIN_LI_INVALID_TX_WD, "Invalid Transmission Word" }, \ + { QL_FPIN_LI_INVALID_CRC, "Invalid CRC" }, \ + { QL_FPIN_LI_DEVICE_SPEC, "Device Specific" }, \ +} + +#define SCM_LINK_EVENT_V1_SIZE 20 +struct qla_scm_link_event { + uint64_t timestamp; + uint16_t event_type; + uint16_t event_modifier; + uint32_t event_threshold; + uint32_t event_count; + uint8_t reserved[12]; +} __packed; + +#define QL_FPIN_DELI_EVT_TYPES_INIT { \ + { FPIN_DELI_UNKNOWN, "Unknown" }, \ + { FPIN_DELI_TIMEOUT, "Timeout" }, \ + { FPIN_DELI_UNABLE_TO_ROUTE, "Unable to Route" }, \ + { FPIN_DELI_DEVICE_SPEC, "Device Specific" }, \ +} + +struct qla_scm_delivery_event { + uint64_t timestamp; + uint32_t delivery_reason; + uint8_t deliver_frame_hdr[24]; + uint8_t reserved[28]; + +} __packed; + +struct qla_scm_peer_congestion_event { + uint64_t timestamp; + uint16_t event_type; + uint16_t event_modifier; + uint32_t event_period; + uint8_t reserved[16]; +} __packed; + +#define SCM_CONGESTION_SEVERITY_WARNING 0xF1 +#define SCM_CONGESTION_SEVERITY_ERROR 0xF7 +struct qla_scm_congestion_event { + uint64_t timestamp; + uint16_t event_type; + uint16_t event_modifier; + uint32_t event_period; + uint8_t severity; + uint8_t reserved[15]; +} __packed; + +#define SCM_FLAG_RDF_REJECT 0x00 +#define SCM_FLAG_RDF_COMPLETED 0x01 +#define SCM_FLAG_BROCADE_CONNECTED 0x02 +#define SCM_FLAG_CISCO_CONNECTED 0x04 + +enum ql_fpin_event_types { + SCM_EVENT_NONE = 0x0, + SCM_EVENT_CONGESTION = 0x1, + SCM_EVENT_DELIVERY = 0x2, + SCM_EVENT_LINK_INTEGRITY = 0x4, + SCM_EVENT_PEER_CONGESTION = 0x8, +}; + +#define QL_FPIN_EVENT_TYPES_INIT { \ + { SCM_EVENT_NONE, "None" }, \ + { SCM_EVENT_CONGESTION, "Congestion" }, \ + { SCM_EVENT_DELIVERY, "Delivery" }, \ + { SCM_EVENT_LINK_INTEGRITY, "Link Integrity" }, \ + { SCM_EVENT_PEER_CONGESTION, "Peer Congestion" }, \ +} + +#define SCM_STATE_HEALTHY 0x0 +#define SCM_STATE_CONGESTED 0x1 + +#define QLA_CON_PRIMITIVE_RECEIVED 0x1 +#define QLA_CONGESTION_ARB_WARNING 0x1 +#define QLA_CONGESTION_ARB_ALARM 0x2 + +/* Virtual Lane Support */ +#define QLA_VL_MODE_DISABLED 0x0 /* Administratively disabled */ +#define QLA_VL_MODE_OPERATIONAL 0x1 /* Negotiated with switch and operational */ +#define QLA_VL_MODE_NON_OPERATIONAL 0x2 /* Administratively enabled, switch negotiation failed */ + +/* Virtual Lane States */ +#define QLA_VL_STATE_DISABLED 0x0 +#define QLA_VL_STATE_SLOW 0x1 +#define QLA_VL_STATE_NORMAL 0x2 +#define QLA_VL_STATE_FAST 0x3 +/* + * Fabric Performance Impact Notification Statistics + */ +struct qla_scm_stats { + /* Delivery */ + u64 dn_unknown; + u64 dn_timeout; + u64 dn_unable_to_route; + u64 dn_device_specific; + + /* Link Integrity */ + u64 li_failure_unknown; + u64 li_link_failure_count; + u64 li_loss_of_sync_count; + u64 li_loss_of_signals_count; + u64 li_prim_seq_err_count; + u64 li_invalid_tx_word_count; + u64 li_invalid_crc_count; + u64 li_device_specific; + + /* Congestion/Peer Congestion */ + u64 cn_clear; + u64 cn_lost_credit; + u64 cn_credit_stall; + u64 cn_oversubscription; + u64 cn_device_specific; + + /* PUN Stats */ + u64 pun_count; + u64 pun_clear_count; +} __packed; + +struct qla_scmr_stats { + uint64_t throttle_cleared; + uint64_t throttle_down_count; + uint64_t 
throttle_up_count; + uint64_t busy_status_count; + uint64_t throttle_hit_low_wm; +} __packed; + +struct qla_fpin_severity { + uint64_t cn_alarm; + uint64_t cn_warning; +} __packed; + +enum ql_scm_profile_type { + QL_SCM_MONITOR = 0, + QL_SCM_CONSERVATIVE = 1, + QL_SCM_MODERATE = 2, + QL_SCM_AGGRESSIVE = 3 +}; + +#define MAX_SCM_PROFILE 4 + +#define QL_SCM_PROFILE_TYPES_INIT { \ + { QL_SCM_MONITOR, "Monitor" }, \ + { QL_SCM_CONSERVATIVE, "Conservative" }, \ + { QL_SCM_MODERATE, "Moderate" }, \ + { QL_SCM_AGGRESSIVE, "Aggressive" }, \ +} + +struct qla_scmr_port_profile { +#define QLA_USE_NVRAM_CONFIG BIT(0) +#define QLA_USE_FW_SLOW_QUEUE BIT(1) +#define QLA_APPLY_SCMR_THROTTLING BIT(2) + uint8_t scmr_control_flags; + uint8_t scmr_profile; + uint8_t rsvd[6]; +} __packed; + +struct qla_scm_host_config { +#define QLA_RESET_SCM_STATS BIT(0) +#define QLA_RESET_SCMR_STATS BIT(1) +#define QLA_APPLY_SCMR_PROFILE BIT(2) +#define QLA_GET_SCMR_PROFILE BIT(3) + uint8_t controls; + struct qla_scmr_port_profile profile; + uint8_t reserved[15]; +} __packed; + +/* Driver's internal data structure */ +struct qla_scm_port_combined { + struct qla_scm_link_event link_integrity; + struct qla_scm_delivery_event delivery; + struct qla_scm_congestion_event congestion; + struct qla_scm_stats stats; + struct qla_fpin_severity sev; + struct qla_scmr_stats rstats; + + uint32_t last_event_timestamp; + uint8_t current_events; +#define QLA_DISP_MODE_COMPACT 0x0 +#define QLA_DISP_MODE_DETAILED 0x1 + uint8_t display_mode; + uint8_t scm_fabric_connection_flags; + uint8_t current_state; +} __packed; + +struct qla_scm_port_v2 { + struct qla_scm_stats stats; + struct qla_fpin_severity sev; + struct qla_scmr_stats rstats; + uint8_t scm_fabric_connection_flags; + uint8_t current_state; + uint32_t secs_since_last_event; + uint8_t scm_events; + uint8_t vl_mode; + uint8_t io_throttling; + uint8_t reserved[63]; +} __packed; + +struct qla_scm_port { + uint32_t current_events; + + struct qla_scm_link_event link_integrity; + struct qla_scm_delivery_event delivery; + struct qla_scm_congestion_event congestion; + uint64_t scm_congestion_alarm; + uint64_t scm_congestion_warning; + uint8_t scm_fabric_connection_flags; + uint8_t reserved[43]; +} __packed; + +/* Driver's internal data structure */ +struct qla_scm_target_combined { + uint8_t wwpn[8]; + + struct qla_scm_link_event link_integrity; + struct qla_scm_delivery_event delivery; + struct qla_scm_peer_congestion_event peer_congestion; + + struct qla_scm_stats stats; + struct qla_scmr_stats rstats; + uint32_t last_event_timestamp; + uint8_t current_events; + uint8_t current_state; +}; + +struct qla_scm_target_v2 { + uint8_t wwpn[8]; + struct qla_scm_stats stats; + struct qla_scmr_stats rstats; + uint8_t current_state; + uint32_t secs_since_last_event; + uint8_t scm_events; + uint8_t vl_state; + uint8_t io_throttling; + uint8_t reserved[64]; +} __packed; + +struct qla_scm_target { + uint8_t wwpn[8]; + uint32_t current_events; + + struct qla_scm_link_event link_integrity; + struct qla_scm_delivery_event delivery; + struct qla_scm_peer_congestion_event peer_congestion; + + uint32_t link_failure_count; + uint32_t loss_of_sync_count; + uint32_t loss_of_signals_count; + uint32_t primitive_seq_protocol_err_count; + uint32_t invalid_transmission_word_count; + uint32_t invalid_crc_count; + + uint32_t delivery_failure_unknown; + uint32_t delivery_timeout; + uint32_t delivery_unable_to_route; + uint32_t delivery_failure_device_specific; + + uint32_t peer_congestion_clear; + uint32_t 
peer_congestion_lost_credit;
+	uint32_t peer_congestion_credit_stall;
+	uint32_t peer_congestion_oversubscription;
+	uint32_t peer_congestion_device_specific;
+	uint32_t link_unknown_event;
+	uint32_t link_device_specific_event;
+	uint8_t reserved[48];
+} __packed;
+
+#define QLA_DRV_ATTR_SCM_SUPPORTED		0x00800000
+#define QLA_DRV_ATTR_LOCKDOWN_SUPPORT		0x02000000
+#define QLA_DRV_ATTR_SCM_2_SUPPORTED		0x04000000 /* Bit 26 */
+#define QLA_DRV_ATTR_SCM_UPSTREAM_SUPPORT	0x08000000 /* Bit 27 */
+#define QLA_DRV_ATTR_SCMR_PROFILE_SUPPORT	0x10000000 /* Bit 28 */
+#define QLA_DRV_ATTR_DPORT_V2_SUPPORT		0x20000000 /* Bit 29 */
+#define QLA_DRV_ATTR_VIRTUAL_LANE_SUPPORT	0x40000000 /* Bit 30 */
+#define QLA_DRV_ATTR_IO_THROTTLING_SUPPORT	0x80000000 /* Bit 31 */
+
+struct qla_drv_attr {
+	uint32_t attributes;
+	uint8_t reserved[28];
+} __packed;
+
+struct qla_mpi_lockdown_info {
+	uint32_t config_disable_flags;		/* mbx3 */
+	uint32_t fw_update_disable_flags;	/* mbx4 */
+	uint32_t mpi_disable_flags;		/* mbx5 */
+	uint32_t lockdown_support;		/* mbx2 */
+} __attribute__ ((packed));
+
+struct qla_lockdown_info {
+	uint8_t signature[4];
+	struct qla_mpi_lockdown_info mpi_fw_lockdown;
+	uint32_t isp_fw_lockdown;
+	uint8_t reserved[40];
+} __attribute__ ((packed));
+
+
+struct qla_mbx_passthru {
+	uint16_t reserved1[2];
+	uint16_t mbx_in[32];
+	uint16_t mbx_out[32];
+	uint32_t reserved2[16];
+} __attribute__ ((packed));
+
+#include "qla_edif_bsg.h"
+
 #endif
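/*
 * Editor's note, not part of the patch: the QLA_DRV_ATTR_* values above are
 * single-bit feature flags in qla_drv_attr.attributes, so capability checks
 * reduce to simple bit tests. A minimal sketch; the helper name below is
 * hypothetical.
 */
#if 0
static inline bool example_scm2_supported(const struct qla_drv_attr *attr)
{
	/* Bit 26 advertises SCM 2.0 support to management tools. */
	return (attr->attributes & QLA_DRV_ATTR_SCM_2_SUPPORTED) != 0;
}
#endif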
diff --git a/drivers/scsi/qla2xxx/qla_compat.h b/drivers/scsi/qla2xxx/qla_compat.h
new file mode 100644
index 0000000000000..66de699731635
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_compat.h
@@ -0,0 +1,566 @@
+/*
+ * Cavium Fibre Channel HBA Driver
+ * Copyright (c) 2003-2016 QLogic Corporation
+ * Copyright (C) 2016-2017 Cavium Inc
+ * Copyright (C) 2020- Marvell Technology Group Ltd.
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_COMPAT_H
+#define __QLA_COMPAT_H
+
+#ifndef DEFINED_FPIN_RCV
+#define fc_host_fpin_rcv(_a, _b, _c)
+#endif /* DEFINED_FPIN_RCV */
+
+#ifdef SCSI_CHANGE_QDEPTH
+#define QLA_SCSI_QUEUE_DEPTH \
+	.change_queue_depth = scsi_change_queue_depth,
+#else /* SCSI_CHANGE_QDEPTH */
+#include <scsi/scsi_tcq.h>
+#define QLA_SCSI_QUEUE_DEPTH \
+	.change_queue_depth = qla2x00_change_queue_depth,
+static inline
+void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
+{
+	fc_port_t *fcport = sdev->hostdata;
+	struct scsi_qla_host *vha = fcport->vha;
+	struct req_que *req = NULL;
+
+	req = vha->req;
+	if (!req)
+		return;
+
+	if (req->max_q_depth <= sdev->queue_depth || req->max_q_depth < qdepth)
+		return;
+
+	if (sdev->ordered_tags)
+		scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, qdepth);
+	else
+		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
+
+	ql_dbg(ql_dbg_io, vha, 0x302a,
+	    "Queue depth adjusted-up to %d for nexus=%ld:%d:%d.\n",
+	    sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
+}
+
+static inline
+void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
+{
+	fc_port_t *fcport = (struct fc_port *) sdev->hostdata;
+
+	if (!scsi_track_queue_full(sdev, qdepth))
+		return;
+
+	ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
+	    "Queue depth adjusted-down to %d for nexus=%ld:%d:%d.\n",
+	    sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
+}
+
+static inline
+int qla2x00_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
+{
+	switch (reason) {
+	case SCSI_QDEPTH_DEFAULT:
+		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+		break;
+	case SCSI_QDEPTH_QFULL:
+		qla2x00_handle_queue_full(sdev, qdepth);
+		break;
+	case SCSI_QDEPTH_RAMP_UP:
+		qla2x00_adjust_sdev_qdepth_up(sdev, qdepth);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return sdev->queue_depth;
+}
+#endif /* SCSI_CHANGE_QDEPTH */
+
+#ifdef SCSI_MARGINAL_PATH_SUPPORT
+#define QLA_SCSI_MARGINAL_PATH \
+	.eh_should_retry_cmd = fc_eh_should_retry_cmd,
+#else
+#define QLA_SCSI_MARGINAL_PATH
+#endif /* SCSI_MARGINAL_PATH_SUPPORT */
+
+#ifdef SCSI_CHANGE_QTYPE
+#define QLA_SCSI_QUEUE_TYPE \
+	.change_queue_type = qla2x00_change_queue_type,
+static inline int
+qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+	if (sdev->tagged_supported) {
+		scsi_set_tag_type(sdev, tag_type);
+		if (tag_type)
+			scsi_activate_tcq(sdev, sdev->queue_depth);
+		else
+			scsi_deactivate_tcq(sdev, sdev->queue_depth);
+	} else
+		tag_type = 0;
+
+	return tag_type;
+}
+
+#else /* SCSI_CHANGE_QTYPE */
+#define QLA_SCSI_QUEUE_TYPE
+#endif /* SCSI_CHANGE_QTYPE */
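/*
 * Editor's note, not part of the patch: QLA_SCSI_QUEUE_DEPTH and
 * QLA_SCSI_QUEUE_TYPE expand to scsi_host_template initializers on kernels
 * that still have the corresponding callbacks, and to nothing otherwise, so
 * a single template definition builds everywhere. A minimal sketch; the
 * template and its non-macro fields are hypothetical placeholders.
 */
#if 0
static struct scsi_host_template qla2xxx_sht_example = {
	.module	= THIS_MODULE,
	.name	= "qla2xxx",
	QLA_SCSI_QUEUE_DEPTH	/* .change_queue_depth = ..., or empty */
	QLA_SCSI_QUEUE_TYPE	/* .change_queue_type = ..., or empty */
};
#endif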
+
+#ifdef SCSI_MAP_QUEUES
+#define QLA_SCSI_MAP_QUEUES \
+	.map_queues = qla2xxx_map_queues,
+#include <linux/blk-mq-pci.h>
+static inline int qla2xxx_map_queues(struct Scsi_Host *shost)
+{
+	int rc = -EINVAL;
+	scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata;
+#ifdef BLK_MQ_HCTX_TYPE
+	struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+
+	if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
+		rc = blk_mq_map_queues(qmap);
+	else
+		rc = blk_mq_pci_map_queues(qmap,
+		    vha->hw->pdev, vha->irq_offset);
+#else
+
+	if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase)
+		rc = blk_mq_map_queues(&shost->tag_set);
+	else
+#ifdef BLK_PCI_MAPQ_3_ARGS
+		rc = blk_mq_pci_map_queues(
+		    (struct blk_mq_tag_set *)&shost->tag_set,
+		    vha->hw->pdev, vha->irq_offset);
+#else /* BLK_PCI_MAPQ_3_ARGS */
+		rc = blk_mq_pci_map_queues(
+		    (struct blk_mq_tag_set *)&shost->tag_set,
+		    vha->hw->pdev);
+#endif /* BLK_PCI_MAPQ_3_ARGS */
+#endif
+	return rc;
+}
+#else /* SCSI_MAP_QUEUES */
+#define QLA_SCSI_MAP_QUEUES
+#endif /* SCSI_MAP_QUEUES */
+
+
+#ifdef SCSI_HOST_WIDE_TAGS
+#define QLA_SCSI_HOST_WIDE_TAGS \
+	.use_host_wide_tags = 1,
+#else /* SCSI_HOST_WIDE_TAGS */
+#define QLA_SCSI_HOST_WIDE_TAGS
+#endif /* SCSI_HOST_WIDE_TAGS */
+
+#define lun_cast(_a) (long long)(_a)
+
+#ifdef SCSI_HAS_TCQ
+static inline
+void qla_scsi_tcq_handler(struct scsi_device *sdev)
+{
+	scsi_qla_host_t *vha = shost_priv(sdev->host);
+	struct req_que *req = vha->req;
+
+	if (sdev->tagged_supported)
+		scsi_activate_tcq(sdev, req->max_q_depth);
+	else
+		scsi_deactivate_tcq(sdev, req->max_q_depth);
+}
+#else /* SCSI_HAS_TCQ */
+#define qla_scsi_tcq_handler(_sdev)
+#endif /* SCSI_HAS_TCQ */
+
+#ifdef SCSI_CMD_TAG_ATTR
+#include <scsi/scsi_tcq.h>
+static inline
+int qla_scsi_get_task_attr(struct scsi_cmnd *cmd)
+{
+	char tag[2];
+	if (scsi_populate_tag_msg(cmd, tag)) {
+		switch (tag[0]) {
+		case HEAD_OF_QUEUE_TAG:
+			return TSK_HEAD_OF_QUEUE;
+		case ORDERED_QUEUE_TAG:
+			return TSK_ORDERED;
+		default:
+			return TSK_SIMPLE;
+		}
+	}
+	return TSK_SIMPLE;
+}
+#else /* SCSI_CMD_TAG_ATTR */
+#define qla_scsi_get_task_attr(_cmd) (TSK_SIMPLE)
+#endif /* SCSI_CMD_TAG_ATTR */
+
+#ifdef SCSI_FC_BSG_JOB
+#define fc_bsg_to_shost(_job) (_job)->shost
+#define fc_bsg_to_rport(_job) (_job)->rport
+#define bsg_job_done(_job, _res, _len) (_job)->job_done(_job)
+#define qla_fwsts_ptr(_job) (((uint8_t *)(_job)->req->sense) + \
+	sizeof(struct fc_bsg_reply))
+#else /* SCSI_FC_BSG_JOB */
+#define qla_fwsts_ptr(_job) ((_job)->reply + sizeof(struct fc_bsg_reply))
+#endif /* SCSI_FC_BSG_JOB */
+
+#ifdef TIMER_SETUP
+#define qla_timer_setup(_tmr, _func, _flags, _cb) \
+	timer_setup(_tmr, _func, _flags)
+#define qla_from_timer(_var, _timer_arg, _field) \
+	(typeof(*_var) *)from_timer(_var, _timer_arg, _field)
+#else /* TIMER_SETUP */
+#define qla_timer_setup(_tmr, _func, _flags, _cb) \
+	init_timer(_tmr); \
+	(_tmr)->data = (qla_timer_arg_t) (_cb); \
+	(_tmr)->function = (void (*)(unsigned long))_func;
+#define qla_from_timer(_var, _timer_arg, _field) \
+	(typeof(*_var) *)(_timer_arg)
+#endif /* TIMER_SETUP */
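/*
 * Editor's note, not part of the patch: a sketch of how the
 * qla_timer_setup()/qla_from_timer() pair above is meant to be used. With
 * TIMER_SETUP the callback receives a struct timer_list * and
 * qla_from_timer() recovers the enclosing object; on older kernels the same
 * source compiles against the unsigned-long-cookie timer API. The function
 * names and the vha->timer field used here are illustrative assumptions.
 */
#if 0
static void example_timer_fn(qla_timer_arg_t t)
{
	scsi_qla_host_t *vha = qla_from_timer(vha, t, timer);

	/* ... service the expired per-host timer ... */
}

static void example_start_timer(scsi_qla_host_t *vha, unsigned long interval)
{
	qla_timer_setup(&vha->timer, example_timer_fn, 0, vha);
	mod_timer(&vha->timer, jiffies + interval);
}
#endif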
+
+#ifdef DMA_ZALLOC_COHERENT
+#else /* DMA_ZALLOC_COHERENT */
+/*
+ * On kernels without dma_zalloc_coherent(), dma_alloc_coherent() already
+ * zeroes the returned memory, so it can be used directly.
+ */
+#define dma_zalloc_coherent(_dev, _sz, _hdl, _flag) \
+	dma_alloc_coherent(_dev, _sz, _hdl, _flag)
+#endif /* DMA_ZALLOC_COHERENT */
+
+#ifdef SCSI_USE_CLUSTERING
+#define QLA_SCSI_USER_CLUSETERING \
+	.use_clustering = ENABLE_CLUSTERING,
+#else /* SCSI_USE_CLUSTERING */
+#define QLA_SCSI_USER_CLUSETERING
+#endif /* SCSI_USE_CLUSTERING */
+
+#ifdef KTIME_GET_REAL_SECONDS
+#define qla_get_real_seconds() ktime_get_real_seconds()
+#else /* KTIME_GET_REAL_SECONDS */
+static inline
+u64 qla_get_real_seconds(void)
+{
+	struct timeval tv;
+	do_gettimeofday(&tv);
+	return tv.tv_sec;
+}
+#endif /* KTIME_GET_REAL_SECONDS */
+
+#ifndef FC_PORTSPEED_64GBIT
+#define FC_PORTSPEED_64GBIT 0x1000
+#endif
+#ifndef FC_PORTSPEED_128GBIT
+#define FC_PORTSPEED_128GBIT 0x2000
+#endif
+
+#ifdef BE_ARRAY
+static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		dst[i] = cpu_to_be32(src[i]);
+}
+
+static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		dst[i] = be32_to_cpu(src[i]);
+}
+#endif
+
+#ifdef SCSI_USE_BLK_MQ
+# ifdef RHEL_DISTRO_VERSION
+# define rhel_set_blk_mq(_host) \
+	(_host)->use_blk_mq = ql2xmqsupport ? true : false;
+# else /* RHEL_DISTRO_VERSION */
+# define rhel_set_blk_mq(_host)
+# endif /* RHEL_DISTRO_VERSION */
+#else /* SCSI_USE_BLK_MQ */
+/* The legacy (non-blk-mq) path was removed, so this is always true. */
+#define shost_use_blk_mq(_host) 1
+#define rhel_set_blk_mq(_host)
+#endif /* SCSI_USE_BLK_MQ */
+
+#ifdef NVME_POLL_QUEUE
+static inline
+void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
+{
+	struct qla_qpair *qpair = hw_queue_handle;
+	unsigned long flags;
+	struct scsi_qla_host *vha = lport->private;
+
+	spin_lock_irqsave(&qpair->qp_lock, flags);
+	queue_work(vha->hw->wq, &qpair->q_work);
+	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+}
+#define QLA_NVME_POLL_QUEUE \
+	.poll_queue = qla_nvme_poll,
+#else /* NVME_POLL_QUEUE */
+#define QLA_NVME_POLL_QUEUE
+#endif /* NVME_POLL_QUEUE */
+
+#define qla_scsi_templ_compat_entries \
+	QLA_SCSI_QUEUE_DEPTH \
+	QLA_SCSI_QUEUE_TYPE \
+	QLA_SCSI_HOST_WIDE_TAGS \
+	QLA_SCSI_USER_CLUSETERING \
+	QLA_SCSI_MAP_QUEUES \
+	QLA_SCSI_FC_EH_TIMED_OUT \
+	QLA_SCSI_TRACK_QUE_DEPTH \
+	QLA_SCSI_MARGINAL_PATH
+
+#define qla_nvme_templ_compat_entries \
+	QLA_NVME_POLL_QUEUE
+
+
+#define qla_pci_err_handler_compat_entries \
+	QLA_PCI_ERR_RESET_PREPARE \
+	QLA_PCI_ERR_RESET_DONE
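/*
 * Editor's note, not part of the patch: the *_compat_entries aggregates
 * above let the real template and handler definitions stay free of #ifdef
 * clutter; each macro expands to the supported initializers or to nothing.
 * A minimal sketch; the handler names are hypothetical placeholders.
 */
#if 0
static const struct pci_error_handlers qla2xxx_err_handler_example = {
	.error_detected	= qla2xxx_pci_error_detected,
	.slot_reset	= qla2xxx_pci_slot_reset,
	.resume		= qla2xxx_pci_resume,
	qla_pci_err_handler_compat_entries
};
#endif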
+
+#ifdef SCSI_CMD_PRIV
+#define ql_scsi_cmd_priv scsi_cmd_priv
+#else /* SCSI_CMD_PRIV */
+static inline void *ql_scsi_cmd_priv(struct scsi_cmnd *cmd)
+{
+	return cmd + 1;
+}
+#endif /* SCSI_CMD_PRIV */
+
+#ifdef FC_EH_TIMED_OUT
+#define QLA_SCSI_FC_EH_TIMED_OUT \
+	.eh_timed_out = fc_eh_timed_out,
+#else /* FC_EH_TIMED_OUT */
+#define QLA_SCSI_FC_EH_TIMED_OUT
+#endif /* FC_EH_TIMED_OUT */
+
+#ifdef SCSI_TRACK_QUE_DEPTH
+#define QLA_SCSI_TRACK_QUE_DEPTH \
+	.track_queue_depth = 1,
+#else /* SCSI_TRACK_QUE_DEPTH */
+#define QLA_SCSI_TRACK_QUE_DEPTH
+#endif /* SCSI_TRACK_QUE_DEPTH */
+
+#ifdef PCI_ERR_RESET_PREPARE
+#define QLA_PCI_ERR_RESET_PREPARE \
+	.reset_prepare = qla_pci_reset_prepare,
+
+static inline void
+qla_pci_reset_prepare(struct pci_dev *pdev)
+{
+	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+	struct qla_hw_data *ha = base_vha->hw;
+	struct qla_qpair *qpair;
+
+	ql_log(ql_log_warn, base_vha, 0xffff,
+	    "%s.\n", __func__);
+
+	/*
+	 * PCI FLR/function reset is about to reset the
+	 * slot. Stop the chip to stop all DMA access.
+	 * It is assumed that reset_done will be called
+	 * after FLR to resume chip operation.
+	 */
+	ha->flags.eeh_busy = 1;
+	mutex_lock(&ha->mq_lock);
+	list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
+		qpair->online = 0;
+	mutex_unlock(&ha->mq_lock);
+
+	set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+	qla2x00_abort_isp_cleanup(base_vha);
+	qla2x00_abort_all_cmds(base_vha, DID_RESET << 16);
+}
+#else /* PCI_ERR_RESET_PREPARE */
+#define QLA_PCI_ERR_RESET_PREPARE
+#endif /* PCI_ERR_RESET_PREPARE */
+
+#ifdef PCI_ERR_RESET_DONE
+#define QLA_PCI_ERR_RESET_DONE \
+	.reset_done = qla_pci_reset_done,
+static inline void
+qla_pci_reset_done(struct pci_dev *pdev)
+{
+	scsi_qla_host_t *base_vha = pci_get_drvdata(pdev);
+	struct qla_hw_data *ha = base_vha->hw;
+	struct qla_qpair *qpair;
+
+	ql_log(ql_log_warn, base_vha, 0xffff,
+	    "%s.\n", __func__);
+
+	/*
+	 * FLR just completed by the PCI layer. Resume adapter operation.
+	 */
+	ha->flags.eeh_busy = 0;
+	mutex_lock(&ha->mq_lock);
+	list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem)
+		qpair->online = 1;
+	mutex_unlock(&ha->mq_lock);
+
+	base_vha->flags.online = 1;
+	ha->isp_ops->abort_isp(base_vha);
+	clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
+}
+#else /* PCI_ERR_RESET_DONE */
+#define QLA_PCI_ERR_RESET_DONE
+#endif /* PCI_ERR_RESET_DONE */
+
+#ifndef MIN_NICE
+#define MIN_NICE -20
+#endif
+
+#ifdef T10_PI_APP_ESC
+#include <linux/t10-pi.h>
+#define QL_T10_PI_APP_ESCAPE T10_PI_APP_ESCAPE
+#else /* T10_PI_APP_ESC */
+#define QL_T10_PI_APP_ESCAPE 0xffff
+#endif /* T10_PI_APP_ESC */
+
+#ifdef T10_PI_REF_ESC
+#include <linux/t10-pi.h>
+#define QL_T10_PI_REF_ESCAPE T10_PI_REF_ESCAPE
+#else /* T10_PI_REF_ESC */
+#define QL_T10_PI_REF_ESCAPE 0xffffffff
+#endif /* T10_PI_REF_ESC */
+
+#ifdef T10_PI_TUPLE
+#include <linux/t10-pi.h>
+typedef struct t10_pi_tuple QL_T10_PI_TUPLE;
+#else /* T10_PI_TUPLE */
+/*
+ * (sd.h is not exported, hence the local definition)
+ * Data Integrity Field tuple.
+ */
+struct sd_dif_tuple {
+	__be16 guard_tag;	/* Checksum */
+	__be16 app_tag;		/* Opaque storage */
+	__be32 ref_tag;		/* Target LBA or indirect LBA */
+};
+
+typedef struct sd_dif_tuple QL_T10_PI_TUPLE;
+#endif /* T10_PI_TUPLE */
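/*
 * Editor's note, not part of the patch: whichever branch is taken above,
 * QL_T10_PI_TUPLE has the standard 8-byte DIF layout, so protection
 * information code can test the escape values portably. A minimal sketch;
 * the helper name is hypothetical.
 */
#if 0
static inline bool example_ref_tag_escaped(const QL_T10_PI_TUPLE *pi)
{
	/* An all-ones reference tag marks the block as exempt from checking. */
	return be32_to_cpu(pi->ref_tag) == QL_T10_PI_REF_ESCAPE;
}
#endif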
+
+#ifdef TGT_FREE_TAG
+#define QL_TGT_FREE_TAG(cmd) (target_free_tag(cmd->sess->se_sess, &cmd->se_cmd))
+#else /* TGT_FREE_TAG */
+#define QL_TGT_FREE_TAG(cmd) (tcm_qla2xxx_rel_cmd(cmd))
+#endif /* TGT_FREE_TAG */
+
+#ifdef TGT_MAKE_TPG_PARAM_CFG_GROUP
+#define TCM_MAKE_TPG_ARGS(_a1, _a2, _a3) _a1, _a2, _a3
+#else /* TGT_MAKE_TPG_PARAM_CFG_GROUP */
+#define TCM_MAKE_TPG_ARGS(_a1, _a2, _a3) _a1, _a3
+#endif /* TGT_MAKE_TPG_PARAM_CFG_GROUP */
+
+#ifdef TGT_SET_RM_SESSION
+#define TARGET_REMOVE_SESSION target_remove_session(se_sess)
+#else /* TGT_SET_RM_SESSION */
+#define TARGET_REMOVE_SESSION
+#endif /* TGT_SET_RM_SESSION */
+
+#ifdef TGT_FABRIC_OPS_FABRIC_NAME
+#define TCM_FABRIC_NAME .fabric_name = "qla2xxx",
+#define TCM_FABRIC_NAME_NPIV .fabric_name = "qla2xxx_npiv",
+#else /* TGT_FABRIC_OPS_FABRIC_NAME */
+#define TCM_FABRIC_NAME .name = "qla2xxx",
+#define TCM_FABRIC_NAME_NPIV .name = "qla2xxx_npiv",
+#endif /* TGT_FABRIC_OPS_FABRIC_NAME */
+
+#ifdef FPIN_EVENT_TYPES
+#define DECLARE_ENUM2STR_LOOKUP_DELI_EVENT DECLARE_ENUM2STR_LOOKUP( \
+	qla_get_dn_event_type, fc_fpin_deli_event_types, \
+	QL_FPIN_DELI_EVT_TYPES_INIT);
+#define DECLARE_ENUM2STR_LOOKUP_CONGN_EVENT DECLARE_ENUM2STR_LOOKUP( \
+	qla_get_congn_event_type, fc_fpin_congn_event_types, \
+	FC_FPIN_CONGN_EVT_TYPES_INIT);
+#else
+#define DECLARE_ENUM2STR_LOOKUP_DELI_EVENT DECLARE_ENUM2STR_LOOKUP( \
+	qla_get_dn_event_type, ql_fpin_deli_event_types, \
+	QL_FPIN_DELI_EVT_TYPES_INIT);
+#define DECLARE_ENUM2STR_LOOKUP_CONGN_EVENT DECLARE_ENUM2STR_LOOKUP( \
+	qla_get_congn_event_type, ql_fpin_congn_event_types, \
+	QL_FPIN_CONGN_EVT_TYPES_INIT);
+/*
+ * Delivery event types
+ */
+enum ql_fpin_deli_event_types {
+	FPIN_DELI_UNKNOWN = 0x0,
+	FPIN_DELI_TIMEOUT = 0x1,
+	FPIN_DELI_UNABLE_TO_ROUTE = 0x2,
+	FPIN_DELI_DEVICE_SPEC = 0xF,
+};
+
+/*
+ * Congestion event types
+ */
+enum ql_fpin_congn_event_types {
+	FPIN_CONGN_CLEAR = 0x0,
+	FPIN_CONGN_LOST_CREDIT = 0x1,
+	FPIN_CONGN_CREDIT_STALL = 0x2,
+	FPIN_CONGN_OVERSUBSCRIPTION = 0x3,
+	FPIN_CONGN_DEVICE_SPEC = 0xF,
+};
+
+/*
+ * Initializer useful for decoding tables.
+ * Please keep this in sync with the definitions above.
+ */
+#define QL_FPIN_CONGN_EVT_TYPES_INIT { \
+	{ FPIN_CONGN_CLEAR,		"Clear" }, \
+	{ FPIN_CONGN_LOST_CREDIT,	"Lost Credit" }, \
+	{ FPIN_CONGN_CREDIT_STALL,	"Credit Stall" }, \
+	{ FPIN_CONGN_OVERSUBSCRIPTION,	"Oversubscription" }, \
+	{ FPIN_CONGN_DEVICE_SPEC,	"Device Specific" }, \
+}
+
+#endif
+
+#define tcm_qla2xxx_ops_compat_entries \
+	TCM_FABRIC_NAME
+
+#define tcm_qla2xxx_npiv_ops_compat_entries \
+	TCM_FABRIC_NAME_NPIV
+
+#ifdef NVME_FC_PORT_TEMPLATE_HV_MODULE
+#define NVME_FC_PORT_TEMPLATE_MODULE .module = THIS_MODULE,
+#else
+#define NVME_FC_PORT_TEMPLATE_MODULE
+#endif
+
+#ifndef fallthrough
+# if defined(__GNUC__) && __GNUC__ >= 7
+# define fallthrough __attribute__((__fallthrough__))
+# else
+# define fallthrough do {} while (0) /* fallthrough */
+# endif
+#endif
+
+
+
+/* RHEL 9.0 support */
+
+#ifndef ioremap_nocache
+#define ioremap_nocache ioremap
+#endif
+
+
+#ifndef SET_DRIVER_BYTE
+#define DRIVER_SENSE 0x08
+static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
+{
+	cmd->result = (cmd->result & 0x00ffffff) | (status << 24);
+}
+#endif
+
+#ifndef LIST_IS_FIRST
+/**
+ * list_is_first - tests whether @list is the first entry in list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_first(const struct list_head *list,
+				const struct list_head *head)
+{
+	return list->prev == head;
+}
+#endif
+
+#endif /* __QLA_COMPAT_H */
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 00b4d033b07a9..49b48b1ea688a 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1,7 +1,8 @@
-// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic Fibre Channel HBA Driver
  * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
*/ /* @@ -22,8 +23,8 @@ * | | | 0x3036,0x3038 | * | | | 0x303a | * | DPC Thread | 0x4023 | 0x4002,0x4013 | - * | Async Events | 0x509c | | * | Timer Routines | 0x6012 | | + * | Async Events | 0x509c | | * | User Space Interactions | 0x70e3 | 0x7018,0x702e | * | | | 0x7020,0x7024 | * | | | 0x7039,0x7045 | @@ -65,10 +66,8 @@ #include "qla_def.h" #include -#define CREATE_TRACE_POINTS -#include -static uint32_t ql_dbg_offset = 0x800; +uint32_t ql_dbg_offset = 0x800; static inline void qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump) @@ -107,37 +106,45 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, { struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; dma_addr_t dump_dma = ha->gid_list_dma; - uint32_t *chunk = (uint32_t *)ha->gid_list; + uint32_t *chunk = (void *)ha->gid_list; uint32_t dwords = qla2x00_gid_list_size(ha) / 4; uint32_t stat; ulong i, j, timer = 6000000; int rval = QLA_FUNCTION_FAILED; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + + if (qla_pci_disconnected(vha, reg)) + return rval; + for (i = 0; i < ram_dwords; i += dwords, addr += dwords) { if (i + dwords > ram_dwords) dwords = ram_dwords - i; - wrt_reg_word(®->mailbox0, MBC_LOAD_DUMP_MPI_RAM); - wrt_reg_word(®->mailbox1, LSW(addr)); - wrt_reg_word(®->mailbox8, MSW(addr)); + WRT_REG_WORD(®->mailbox0, MBC_LOAD_DUMP_MPI_RAM); + WRT_REG_WORD(®->mailbox1, LSW(addr)); + WRT_REG_WORD(®->mailbox8, MSW(addr)); - wrt_reg_word(®->mailbox2, MSW(LSD(dump_dma))); - wrt_reg_word(®->mailbox3, LSW(LSD(dump_dma))); - wrt_reg_word(®->mailbox6, MSW(MSD(dump_dma))); - wrt_reg_word(®->mailbox7, LSW(MSD(dump_dma))); + WRT_REG_WORD(®->mailbox2, MSW(LSD(dump_dma))); + WRT_REG_WORD(®->mailbox3, LSW(LSD(dump_dma))); + WRT_REG_WORD(®->mailbox6, MSW(MSD(dump_dma))); + WRT_REG_WORD(®->mailbox7, LSW(MSD(dump_dma))); - wrt_reg_word(®->mailbox4, MSW(dwords)); - wrt_reg_word(®->mailbox5, LSW(dwords)); + WRT_REG_WORD(®->mailbox4, MSW(dwords)); + WRT_REG_WORD(®->mailbox5, LSW(dwords)); - wrt_reg_word(®->mailbox9, 0); - wrt_reg_dword(®->hccr, HCCRX_SET_HOST_INT); + WRT_REG_WORD(®->mailbox9, 0); + WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); ha->flags.mbox_int = 0; while (timer--) { udelay(5); - stat = rd_reg_dword(®->host_status); + if (qla_pci_disconnected(vha, reg)) + return rval; + + stat = RD_REG_DWORD(®->host_status); /* Check for pending interrupts. 
*/ if (!(stat & HSRX_RISC_INT)) continue; @@ -147,15 +154,15 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, stat != 0x10 && stat != 0x11) { /* Clear this intr; it wasn't a mailbox intr */ - wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); - rd_reg_dword(®->hccr); + WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); + RD_REG_DWORD(®->hccr); continue; } set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - rval = rd_reg_word(®->mailbox0) & MBS_MASK; - wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); - rd_reg_dword(®->hccr); + rval = RD_REG_WORD(®->mailbox0) & MBS_MASK; + WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); + RD_REG_DWORD(®->hccr); break; } ha->flags.mbox_int = 1; @@ -181,42 +188,48 @@ qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, } int -qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram, - uint32_t ram_dwords, void **nxt) +qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram, + uint32_t ram_dwords, void **nxt) { int rval = QLA_FUNCTION_FAILED; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; dma_addr_t dump_dma = ha->gid_list_dma; - uint32_t *chunk = (uint32_t *)ha->gid_list; + uint32_t *chunk = (void *)ha->gid_list; uint32_t dwords = qla2x00_gid_list_size(ha) / 4; uint32_t stat; ulong i, j, timer = 6000000; + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); + if (qla_pci_disconnected(vha, reg)) + return rval; + for (i = 0; i < ram_dwords; i += dwords, addr += dwords) { if (i + dwords > ram_dwords) dwords = ram_dwords - i; - wrt_reg_word(®->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED); - wrt_reg_word(®->mailbox1, LSW(addr)); - wrt_reg_word(®->mailbox8, MSW(addr)); - wrt_reg_word(®->mailbox10, 0); + WRT_REG_WORD(®->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED); + WRT_REG_WORD(®->mailbox1, LSW(addr)); + WRT_REG_WORD(®->mailbox8, MSW(addr)); + WRT_REG_WORD(®->mailbox10, 0); - wrt_reg_word(®->mailbox2, MSW(LSD(dump_dma))); - wrt_reg_word(®->mailbox3, LSW(LSD(dump_dma))); - wrt_reg_word(®->mailbox6, MSW(MSD(dump_dma))); - wrt_reg_word(®->mailbox7, LSW(MSD(dump_dma))); + WRT_REG_WORD(®->mailbox2, MSW(LSD(dump_dma))); + WRT_REG_WORD(®->mailbox3, LSW(LSD(dump_dma))); + WRT_REG_WORD(®->mailbox6, MSW(MSD(dump_dma))); + WRT_REG_WORD(®->mailbox7, LSW(MSD(dump_dma))); - wrt_reg_word(®->mailbox4, MSW(dwords)); - wrt_reg_word(®->mailbox5, LSW(dwords)); - wrt_reg_dword(®->hccr, HCCRX_SET_HOST_INT); + WRT_REG_WORD(®->mailbox4, MSW(dwords)); + WRT_REG_WORD(®->mailbox5, LSW(dwords)); + WRT_REG_DWORD(®->hccr, HCCRX_SET_HOST_INT); ha->flags.mbox_int = 0; while (timer--) { udelay(5); - stat = rd_reg_dword(®->host_status); + if (qla_pci_disconnected(vha, reg)) + return rval; + stat = RD_REG_DWORD(®->host_status); /* Check for pending interrupts. 
*/ if (!(stat & HSRX_RISC_INT)) continue; @@ -224,15 +237,15 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram, stat &= 0xff; if (stat != 0x1 && stat != 0x2 && stat != 0x10 && stat != 0x11) { - wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); - rd_reg_dword(®->hccr); + WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); + RD_REG_DWORD(®->hccr); continue; } set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); - rval = rd_reg_word(®->mailbox0) & MBS_MASK; - wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); - rd_reg_dword(®->hccr); + rval = RD_REG_WORD(®->mailbox0) & MBS_MASK; + WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); + RD_REG_DWORD(®->hccr); break; } ha->flags.mbox_int = 1; @@ -247,9 +260,9 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram, return rval; } for (j = 0; j < dwords; j++) { - ram[i + j] = (__force __be32) - ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) ? - chunk[j] : swab32(chunk[j])); + ram[i + j] = + (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ? + chunk[j] : swab32(chunk[j]); } } @@ -258,8 +271,8 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram, } static int -qla24xx_dump_memory(struct qla_hw_data *ha, __be32 *code_ram, - uint32_t cram_size, void **nxt) +qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram, + uint32_t cram_size, void **nxt) { int rval; @@ -279,16 +292,16 @@ qla24xx_dump_memory(struct qla_hw_data *ha, __be32 *code_ram, return rval; } -static __be32 * +static uint32_t * qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase, - uint32_t count, __be32 *buf) + uint32_t count, uint32_t *buf) { - __le32 __iomem *dmp_reg; + uint32_t __iomem *dmp_reg; - wrt_reg_dword(®->iobase_addr, iobase); + WRT_REG_DWORD(®->iobase_addr, iobase); dmp_reg = ®->iobase_window; for ( ; count--; dmp_reg++) - *buf++ = htonl(rd_reg_dword(dmp_reg)); + *buf++ = htonl(RD_REG_DWORD(dmp_reg)); return buf; } @@ -296,11 +309,11 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase, void qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha) { - wrt_reg_dword(®->hccr, HCCRX_SET_RISC_PAUSE); + WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_PAUSE); /* 100 usec delay is sufficient enough for hardware to pause RISC */ udelay(100); - if (rd_reg_dword(®->host_status) & HSRX_RISC_PAUSED) + if (RD_REG_DWORD(®->host_status) & HSRX_RISC_PAUSED) set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags); } @@ -317,17 +330,17 @@ qla24xx_soft_reset(struct qla_hw_data *ha) * Driver can proceed with the reset sequence after waiting * for a timeout period. */ - wrt_reg_dword(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); + WRT_REG_DWORD(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); for (cnt = 0; cnt < 30000; cnt++) { - if ((rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) + if ((RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) break; udelay(10); } - if (!(rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE)) + if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE)) set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); - wrt_reg_dword(®->ctrl_status, + WRT_REG_DWORD(®->ctrl_status, CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); @@ -335,19 +348,19 @@ qla24xx_soft_reset(struct qla_hw_data *ha) /* Wait for soft-reset to complete. 
*/ for (cnt = 0; cnt < 30000; cnt++) { - if ((rd_reg_dword(®->ctrl_status) & + if ((RD_REG_DWORD(®->ctrl_status) & CSRX_ISP_SOFT_RESET) == 0) break; udelay(10); } - if (!(rd_reg_dword(®->ctrl_status) & CSRX_ISP_SOFT_RESET)) + if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_ISP_SOFT_RESET)) set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags); - wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_RESET); - rd_reg_dword(®->hccr); /* PCI Posting. */ + WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET); + RD_REG_DWORD(®->hccr); /* PCI Posting. */ - for (cnt = 10000; rd_reg_word(®->mailbox0) != 0 && + for (cnt = 10000; RD_REG_WORD(®->mailbox0) != 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(10); @@ -361,7 +374,7 @@ qla24xx_soft_reset(struct qla_hw_data *ha) } static int -qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram, +qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram, uint32_t ram_words, void **nxt) { int rval; @@ -369,7 +382,7 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram, uint16_t mb0; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; dma_addr_t dump_dma = ha->gid_list_dma; - __le16 *dump = (__force __le16 *)ha->gid_list; + uint16_t *dump = (uint16_t *)ha->gid_list; rval = QLA_SUCCESS; mb0 = 0; @@ -392,11 +405,11 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram, WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma))); WRT_MAILBOX_REG(ha, reg, 4, words); - wrt_reg_word(®->hccr, HCCR_SET_HOST_INT); + WRT_REG_WORD(®->hccr, HCCR_SET_HOST_INT); for (timer = 6000000; timer; timer--) { /* Check for pending interrupts. */ - stat = rd_reg_dword(®->u.isp2300.host_status); + stat = RD_REG_DWORD(®->u.isp2300.host_status); if (stat & HSR_RISC_INT) { stat &= 0xff; @@ -407,10 +420,10 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram, mb0 = RD_MAILBOX_REG(ha, reg, 0); /* Release mailbox registers. 
*/ - wrt_reg_word(®->semaphore, 0); - wrt_reg_word(®->hccr, + WRT_REG_WORD(®->semaphore, 0); + WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); - rd_reg_word(®->hccr); + RD_REG_WORD(®->hccr); break; } else if (stat == 0x10 || stat == 0x11) { set_bit(MBX_INTERRUPT, @@ -418,15 +431,15 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram, mb0 = RD_MAILBOX_REG(ha, reg, 0); - wrt_reg_word(®->hccr, + WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); - rd_reg_word(®->hccr); + RD_REG_WORD(®->hccr); break; } /* clear this intr; it wasn't a mailbox intr */ - wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); - rd_reg_word(®->hccr); + WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); + RD_REG_WORD(®->hccr); } udelay(5); } @@ -434,8 +447,7 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram, if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) { rval = mb0 & MBS_MASK; for (idx = 0; idx < words; idx++) - ram[cnt + idx] = - cpu_to_be16(le16_to_cpu(dump[idx])); + ram[cnt + idx] = swab16(dump[idx]); } else { rval = QLA_FUNCTION_FAILED; } @@ -447,12 +459,12 @@ qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram, static inline void qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count, - __be16 *buf) + uint16_t *buf) { - __le16 __iomem *dmp_reg = ®->u.isp2300.fb_cmd; + uint16_t __iomem *dmp_reg = ®->u.isp2300.fb_cmd; for ( ; count--; dmp_reg++) - *buf++ = htons(rd_reg_word(dmp_reg)); + *buf++ = htons(RD_REG_WORD(dmp_reg)); } static inline void * @@ -466,10 +478,10 @@ qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr) } static inline void * -qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) +qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) { uint32_t cnt; - __be32 *iter_reg; + uint32_t *iter_reg; struct qla2xxx_fce_chain *fcec = ptr; if (!ha->fce) @@ -493,7 +505,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) } static inline void * -qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) +qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) { struct qla2xxx_offld_chain *c = ptr; @@ -511,11 +523,11 @@ qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) ptr += sizeof(struct qla2xxx_offld_chain); memcpy(ptr, ha->exlogin_buf, ha->exlogin_size); - return (char *)ptr + be32_to_cpu(c->size); + return (char *)ptr + cpu_to_be32(c->size); } static inline void * -qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) +qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) { struct qla2xxx_offld_chain *c = ptr; @@ -533,12 +545,12 @@ qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) ptr += sizeof(struct qla2xxx_offld_chain); memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size); - return (char *)ptr + be32_to_cpu(c->size); + return (char *)ptr + cpu_to_be32(c->size); } static inline void * qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, - __be32 **last_chain) + uint32_t **last_chain) { struct qla2xxx_mqueue_chain *q; struct qla2xxx_mqueue_header *qh; @@ -585,7 +597,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr, } static inline void * -qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) +qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) { struct qla2xxx_mqueue_chain *q; struct qla2xxx_mqueue_header *qh; @@ -656,7 +668,7 @@ qla25xx_copy_mqueues(struct qla_hw_data *ha, void 
*ptr, __be32 **last_chain) } static inline void * -qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) +qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) { uint32_t cnt, que_idx; uint8_t que_cnt; @@ -679,13 +691,13 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, __be32 **last_chain) reg = ISP_QUE_REG(ha, cnt); que_idx = cnt * 4; mq->qregs[que_idx] = - htonl(rd_reg_dword(®->isp25mq.req_q_in)); + htonl(RD_REG_DWORD(®->isp25mq.req_q_in)); mq->qregs[que_idx+1] = - htonl(rd_reg_dword(®->isp25mq.req_q_out)); + htonl(RD_REG_DWORD(®->isp25mq.req_q_out)); mq->qregs[que_idx+2] = - htonl(rd_reg_dword(®->isp25mq.rsp_q_in)); + htonl(RD_REG_DWORD(®->isp25mq.rsp_q_in)); mq->qregs[que_idx+3] = - htonl(rd_reg_dword(®->isp25mq.rsp_q_out)); + htonl(RD_REG_DWORD(®->isp25mq.rsp_q_out)); } return ptr + sizeof(struct qla2xxx_mq_chain); @@ -700,47 +712,45 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) ql_log(ql_log_warn, vha, 0xd000, "Failed to dump firmware (%x), dump status flags (0x%lx).\n", rval, ha->fw_dump_cap_flags); - ha->fw_dumped = false; + ha->fw_dumped = 0; } else { ql_log(ql_log_info, vha, 0xd001, "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n", vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags); - ha->fw_dumped = true; + ha->fw_dumped = 1; qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); } } -void qla2xxx_dump_fw(scsi_qla_host_t *vha) -{ - unsigned long flags; - - spin_lock_irqsave(&vha->hw->hardware_lock, flags); - vha->hw->isp_ops->fw_dump(vha); - spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); -} - /** * qla2300_fw_dump() - Dumps binary data from the 2300 firmware. * @vha: HA context + * @hardware_locked: Called with the hardware_lock */ void -qla2300_fw_dump(scsi_qla_host_t *vha) +qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked) { int rval; uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; - __le16 __iomem *dmp_reg; + uint16_t __iomem *dmp_reg; + unsigned long flags; struct qla2300_fw_dump *fw; void *nxt; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - lockdep_assert_held(&ha->hardware_lock); + flags = 0; + +#ifndef __CHECKER__ + if (!hardware_locked) + spin_lock_irqsave(&ha->hardware_lock, flags); +#endif if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd002, "No buffer available for dump.\n"); - return; + goto qla2300_fw_dump_failed; } if (ha->fw_dumped) { @@ -748,19 +758,19 @@ qla2300_fw_dump(scsi_qla_host_t *vha) "Firmware has been previously dumped (%p) " "-- ignoring request.\n", ha->fw_dump); - return; + goto qla2300_fw_dump_failed; } fw = &ha->fw_dump->isp.isp23; qla2xxx_prep_dump(ha, ha->fw_dump); rval = QLA_SUCCESS; - fw->hccr = htons(rd_reg_word(®->hccr)); + fw->hccr = htons(RD_REG_WORD(®->hccr)); /* Pause RISC. */ - wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); + WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); if (IS_QLA2300(ha)) { for (cnt = 30000; - (rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0 && + (RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); @@ -768,74 +778,74 @@ qla2300_fw_dump(scsi_qla_host_t *vha) rval = QLA_FUNCTION_TIMEOUT; } } else { - rd_reg_word(®->hccr); /* PCI Posting. */ + RD_REG_WORD(®->hccr); /* PCI Posting. 
*/ udelay(10); } if (rval == QLA_SUCCESS) { dmp_reg = ®->flash_address; - for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++) - fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg)); + for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++) + fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); dmp_reg = ®->u.isp2300.req_q_in; - for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_host_reg); + for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++, dmp_reg++) - fw->risc_host_reg[cnt] = htons(rd_reg_word(dmp_reg)); + fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); dmp_reg = ®->u.isp2300.mailbox0; - for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); + for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, dmp_reg++) - fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg)); + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); - wrt_reg_word(®->ctrl_status, 0x40); + WRT_REG_WORD(®->ctrl_status, 0x40); qla2xxx_read_window(reg, 32, fw->resp_dma_reg); - wrt_reg_word(®->ctrl_status, 0x50); + WRT_REG_WORD(®->ctrl_status, 0x50); qla2xxx_read_window(reg, 48, fw->dma_reg); - wrt_reg_word(®->ctrl_status, 0x00); + WRT_REG_WORD(®->ctrl_status, 0x00); dmp_reg = ®->risc_hw; - for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); + for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++) - fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg)); + fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); - wrt_reg_word(®->pcr, 0x2000); + WRT_REG_WORD(®->pcr, 0x2000); qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); - wrt_reg_word(®->pcr, 0x2200); + WRT_REG_WORD(®->pcr, 0x2200); qla2xxx_read_window(reg, 16, fw->risc_gp1_reg); - wrt_reg_word(®->pcr, 0x2400); + WRT_REG_WORD(®->pcr, 0x2400); qla2xxx_read_window(reg, 16, fw->risc_gp2_reg); - wrt_reg_word(®->pcr, 0x2600); + WRT_REG_WORD(®->pcr, 0x2600); qla2xxx_read_window(reg, 16, fw->risc_gp3_reg); - wrt_reg_word(®->pcr, 0x2800); + WRT_REG_WORD(®->pcr, 0x2800); qla2xxx_read_window(reg, 16, fw->risc_gp4_reg); - wrt_reg_word(®->pcr, 0x2A00); + WRT_REG_WORD(®->pcr, 0x2A00); qla2xxx_read_window(reg, 16, fw->risc_gp5_reg); - wrt_reg_word(®->pcr, 0x2C00); + WRT_REG_WORD(®->pcr, 0x2C00); qla2xxx_read_window(reg, 16, fw->risc_gp6_reg); - wrt_reg_word(®->pcr, 0x2E00); + WRT_REG_WORD(®->pcr, 0x2E00); qla2xxx_read_window(reg, 16, fw->risc_gp7_reg); - wrt_reg_word(®->ctrl_status, 0x10); + WRT_REG_WORD(®->ctrl_status, 0x10); qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg); - wrt_reg_word(®->ctrl_status, 0x20); + WRT_REG_WORD(®->ctrl_status, 0x20); qla2xxx_read_window(reg, 64, fw->fpm_b0_reg); - wrt_reg_word(®->ctrl_status, 0x30); + WRT_REG_WORD(®->ctrl_status, 0x30); qla2xxx_read_window(reg, 64, fw->fpm_b1_reg); /* Reset RISC. */ - wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); + WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); for (cnt = 0; cnt < 30000; cnt++) { - if ((rd_reg_word(®->ctrl_status) & + if ((RD_REG_WORD(®->ctrl_status) & CSR_ISP_SOFT_RESET) == 0) break; @@ -856,12 +866,12 @@ qla2300_fw_dump(scsi_qla_host_t *vha) /* Get RISC SRAM. */ if (rval == QLA_SUCCESS) rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram, - ARRAY_SIZE(fw->risc_ram), &nxt); + sizeof(fw->risc_ram) / 2, &nxt); /* Get stack SRAM. */ if (rval == QLA_SUCCESS) rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram, - ARRAY_SIZE(fw->stack_ram), &nxt); + sizeof(fw->stack_ram) / 2, &nxt); /* Get data SRAM. 
*/ if (rval == QLA_SUCCESS) @@ -872,31 +882,48 @@ qla2300_fw_dump(scsi_qla_host_t *vha) qla2xxx_copy_queues(ha, nxt); qla2xxx_dump_post_process(base_vha, rval); + +qla2300_fw_dump_failed: +#ifndef __CHECKER__ + if (!hardware_locked) + spin_unlock_irqrestore(&ha->hardware_lock, flags); +#else + ; +#endif } /** * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware. * @vha: HA context + * @hardware_locked: Called with the hardware_lock */ void -qla2100_fw_dump(scsi_qla_host_t *vha) +qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked) { int rval; uint32_t cnt, timer; - uint16_t risc_address = 0; - uint16_t mb0 = 0, mb2 = 0; + uint16_t risc_address; + uint16_t mb0, mb2; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; - __le16 __iomem *dmp_reg; + uint16_t __iomem *dmp_reg; + unsigned long flags; struct qla2100_fw_dump *fw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - lockdep_assert_held(&ha->hardware_lock); + risc_address = 0; + mb0 = mb2 = 0; + flags = 0; + +#ifndef __CHECKER__ + if (!hardware_locked) + spin_lock_irqsave(&ha->hardware_lock, flags); +#endif if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd004, "No buffer available for dump.\n"); - return; + goto qla2100_fw_dump_failed; } if (ha->fw_dumped) { @@ -904,17 +931,17 @@ qla2100_fw_dump(scsi_qla_host_t *vha) "Firmware has been previously dumped (%p) " "-- ignoring request.\n", ha->fw_dump); - return; + goto qla2100_fw_dump_failed; } fw = &ha->fw_dump->isp.isp21; qla2xxx_prep_dump(ha, ha->fw_dump); rval = QLA_SUCCESS; - fw->hccr = htons(rd_reg_word(®->hccr)); + fw->hccr = htons(RD_REG_WORD(®->hccr)); /* Pause RISC. */ - wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); - for (cnt = 30000; (rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0 && + WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); + for (cnt = 30000; (RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); @@ -923,61 +950,61 @@ qla2100_fw_dump(scsi_qla_host_t *vha) } if (rval == QLA_SUCCESS) { dmp_reg = ®->flash_address; - for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++) - fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg)); + for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++) + fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); dmp_reg = ®->u.isp2100.mailbox0; for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) { if (cnt == 8) dmp_reg = ®->u_end.isp2200.mailbox8; - fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg)); + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); } dmp_reg = ®->u.isp2100.unused_2[0]; - for (cnt = 0; cnt < ARRAY_SIZE(fw->dma_reg); cnt++, dmp_reg++) - fw->dma_reg[cnt] = htons(rd_reg_word(dmp_reg)); + for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++) + fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); - wrt_reg_word(®->ctrl_status, 0x00); + WRT_REG_WORD(®->ctrl_status, 0x00); dmp_reg = ®->risc_hw; - for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); cnt++, dmp_reg++) - fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg)); + for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++) + fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg)); - wrt_reg_word(®->pcr, 0x2000); + WRT_REG_WORD(®->pcr, 0x2000); qla2xxx_read_window(reg, 16, fw->risc_gp0_reg); - wrt_reg_word(®->pcr, 0x2100); + WRT_REG_WORD(®->pcr, 0x2100); qla2xxx_read_window(reg, 16, fw->risc_gp1_reg); - wrt_reg_word(®->pcr, 0x2200); + WRT_REG_WORD(®->pcr, 0x2200); qla2xxx_read_window(reg, 16, fw->risc_gp2_reg); - wrt_reg_word(®->pcr, 0x2300); + WRT_REG_WORD(®->pcr, 
0x2300); qla2xxx_read_window(reg, 16, fw->risc_gp3_reg); - wrt_reg_word(®->pcr, 0x2400); + WRT_REG_WORD(®->pcr, 0x2400); qla2xxx_read_window(reg, 16, fw->risc_gp4_reg); - wrt_reg_word(®->pcr, 0x2500); + WRT_REG_WORD(®->pcr, 0x2500); qla2xxx_read_window(reg, 16, fw->risc_gp5_reg); - wrt_reg_word(®->pcr, 0x2600); + WRT_REG_WORD(®->pcr, 0x2600); qla2xxx_read_window(reg, 16, fw->risc_gp6_reg); - wrt_reg_word(®->pcr, 0x2700); + WRT_REG_WORD(®->pcr, 0x2700); qla2xxx_read_window(reg, 16, fw->risc_gp7_reg); - wrt_reg_word(®->ctrl_status, 0x10); + WRT_REG_WORD(®->ctrl_status, 0x10); qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg); - wrt_reg_word(®->ctrl_status, 0x20); + WRT_REG_WORD(®->ctrl_status, 0x20); qla2xxx_read_window(reg, 64, fw->fpm_b0_reg); - wrt_reg_word(®->ctrl_status, 0x30); + WRT_REG_WORD(®->ctrl_status, 0x30); qla2xxx_read_window(reg, 64, fw->fpm_b1_reg); /* Reset the ISP. */ - wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); + WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); } for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 && @@ -990,11 +1017,11 @@ qla2100_fw_dump(scsi_qla_host_t *vha) /* Pause RISC. */ if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) && - (rd_reg_word(®->mctr) & (BIT_1 | BIT_0)) != 0))) { + (RD_REG_WORD(®->mctr) & (BIT_1 | BIT_0)) != 0))) { - wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); + WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); for (cnt = 30000; - (rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0 && + (RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0 && rval == QLA_SUCCESS; cnt--) { if (cnt) udelay(100); @@ -1004,13 +1031,13 @@ qla2100_fw_dump(scsi_qla_host_t *vha) if (rval == QLA_SUCCESS) { /* Set memory configuration and timing. */ if (IS_QLA2100(ha)) - wrt_reg_word(®->mctr, 0xf1); + WRT_REG_WORD(®->mctr, 0xf1); else - wrt_reg_word(®->mctr, 0xf2); - rd_reg_word(®->mctr); /* PCI Posting. */ + WRT_REG_WORD(®->mctr, 0xf2); + RD_REG_WORD(®->mctr); /* PCI Posting. */ /* Release RISC. */ - wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); + WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); } } @@ -1020,29 +1047,29 @@ qla2100_fw_dump(scsi_qla_host_t *vha) WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD); clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); } - for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_ram) && rval == QLA_SUCCESS; + for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS; cnt++, risc_address++) { WRT_MAILBOX_REG(ha, reg, 1, risc_address); - wrt_reg_word(®->hccr, HCCR_SET_HOST_INT); + WRT_REG_WORD(®->hccr, HCCR_SET_HOST_INT); for (timer = 6000000; timer != 0; timer--) { /* Check for pending interrupts. 
*/ - if (rd_reg_word(®->istatus) & ISR_RISC_INT) { - if (rd_reg_word(®->semaphore) & BIT_0) { + if (RD_REG_WORD(®->istatus) & ISR_RISC_INT) { + if (RD_REG_WORD(®->semaphore) & BIT_0) { set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); mb0 = RD_MAILBOX_REG(ha, reg, 0); mb2 = RD_MAILBOX_REG(ha, reg, 2); - wrt_reg_word(®->semaphore, 0); - wrt_reg_word(®->hccr, + WRT_REG_WORD(®->semaphore, 0); + WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); - rd_reg_word(®->hccr); + RD_REG_WORD(®->hccr); break; } - wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); - rd_reg_word(®->hccr); + WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); + RD_REG_WORD(®->hccr); } udelay(5); } @@ -1056,38 +1083,51 @@ qla2100_fw_dump(scsi_qla_host_t *vha) } if (rval == QLA_SUCCESS) - qla2xxx_copy_queues(ha, &fw->queue_dump[0]); + qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]); qla2xxx_dump_post_process(base_vha, rval); + +qla2100_fw_dump_failed: +#ifndef __CHECKER__ + if (!hardware_locked) + spin_unlock_irqrestore(&ha->hardware_lock, flags); +#else + ; +#endif } void -qla24xx_fw_dump(scsi_qla_host_t *vha) +qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) { int rval; uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - __le32 __iomem *dmp_reg; - __be32 *iter_reg; - __le16 __iomem *mbx_reg; + uint32_t __iomem *dmp_reg; + uint32_t *iter_reg; + uint16_t __iomem *mbx_reg; + unsigned long flags; struct qla24xx_fw_dump *fw; void *nxt; void *nxt_chain; - __be32 *last_chain = NULL; + uint32_t *last_chain = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - lockdep_assert_held(&ha->hardware_lock); - if (IS_P3P_TYPE(ha)) return; + flags = 0; ha->fw_dump_cap_flags = 0; +#ifndef __CHECKER__ + if (!hardware_locked) + spin_lock_irqsave(&ha->hardware_lock, flags); +#endif + if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd006, "No buffer available for dump.\n"); - return; + goto qla24xx_fw_dump_failed; } if (ha->fw_dumped) { @@ -1095,13 +1135,13 @@ qla24xx_fw_dump(scsi_qla_host_t *vha) "Firmware has been previously dumped (%p) " "-- ignoring request.\n", ha->fw_dump); - return; + goto qla24xx_fw_dump_failed; } QLA_FW_STOPPED(ha); fw = &ha->fw_dump->isp.isp24; qla2xxx_prep_dump(ha, ha->fw_dump); - fw->host_status = htonl(rd_reg_dword(®->host_status)); + fw->host_status = htonl(RD_REG_DWORD(®->host_status)); /* * Pause RISC. No need to track timeout, as resetting the chip @@ -1111,41 +1151,41 @@ qla24xx_fw_dump(scsi_qla_host_t *vha) /* Host interface registers. */ dmp_reg = ®->flash_addr; - for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) - fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg)); + for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++) + fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); /* Disable interrupts. */ - wrt_reg_dword(®->ictrl, 0); - rd_reg_dword(®->ictrl); + WRT_REG_DWORD(®->ictrl, 0); + RD_REG_DWORD(®->ictrl); /* Shadow registers. 
*/ - wrt_reg_dword(®->iobase_addr, 0x0F70); - rd_reg_dword(®->iobase_addr); - wrt_reg_dword(®->iobase_select, 0xB0000000); - fw->shadow_reg[0] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_addr, 0x0F70); + RD_REG_DWORD(®->iobase_addr); + WRT_REG_DWORD(®->iobase_select, 0xB0000000); + fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); - wrt_reg_dword(®->iobase_select, 0xB0100000); - fw->shadow_reg[1] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_select, 0xB0100000); + fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); - wrt_reg_dword(®->iobase_select, 0xB0200000); - fw->shadow_reg[2] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_select, 0xB0200000); + fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); - wrt_reg_dword(®->iobase_select, 0xB0300000); - fw->shadow_reg[3] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_select, 0xB0300000); + fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); - wrt_reg_dword(®->iobase_select, 0xB0400000); - fw->shadow_reg[4] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_select, 0xB0400000); + fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); - wrt_reg_dword(®->iobase_select, 0xB0500000); - fw->shadow_reg[5] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_select, 0xB0500000); + fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); - wrt_reg_dword(®->iobase_select, 0xB0600000); - fw->shadow_reg[6] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_select, 0xB0600000); + fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); /* Mailbox registers. */ mbx_reg = ®->mailbox0; - for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) - fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg)); + for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); /* Transfer sequence registers. */ iter_reg = fw->xseq_gp_reg; @@ -1184,19 +1224,19 @@ qla24xx_fw_dump(scsi_qla_host_t *vha) iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); dmp_reg = ®->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) - *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); iter_reg = fw->resp0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); dmp_reg = ®->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) - *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); iter_reg = fw->req1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); dmp_reg = ®->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) - *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); /* Transmit DMA registers. 
*/ iter_reg = fw->xmt0_dma_reg; @@ -1305,31 +1345,44 @@ qla24xx_fw_dump(scsi_qla_host_t *vha) qla24xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); + +qla24xx_fw_dump_failed: +#ifndef __CHECKER__ + if (!hardware_locked) + spin_unlock_irqrestore(&ha->hardware_lock, flags); +#else + ; +#endif } void -qla25xx_fw_dump(scsi_qla_host_t *vha) +qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) { int rval; uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - __le32 __iomem *dmp_reg; - __be32 *iter_reg; - __le16 __iomem *mbx_reg; + uint32_t __iomem *dmp_reg; + uint32_t *iter_reg; + uint16_t __iomem *mbx_reg; + unsigned long flags; struct qla25xx_fw_dump *fw; void *nxt, *nxt_chain; - __be32 *last_chain = NULL; + uint32_t *last_chain = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - lockdep_assert_held(&ha->hardware_lock); - + flags = 0; ha->fw_dump_cap_flags = 0; +#ifndef __CHECKER__ + if (!hardware_locked) + spin_lock_irqsave(&ha->hardware_lock, flags); +#endif + if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd008, "No buffer available for dump.\n"); - return; + goto qla25xx_fw_dump_failed; } if (ha->fw_dumped) { @@ -1337,14 +1390,14 @@ qla25xx_fw_dump(scsi_qla_host_t *vha) "Firmware has been previously dumped (%p) " "-- ignoring request.\n", ha->fw_dump); - return; + goto qla25xx_fw_dump_failed; } QLA_FW_STOPPED(ha); fw = &ha->fw_dump->isp.isp25; qla2xxx_prep_dump(ha, ha->fw_dump); ha->fw_dump->version = htonl(2); - fw->host_status = htonl(rd_reg_dword(®->host_status)); + fw->host_status = htonl(RD_REG_DWORD(®->host_status)); /* * Pause RISC. No need to track timeout, as resetting the chip @@ -1358,73 +1411,73 @@ qla25xx_fw_dump(scsi_qla_host_t *vha) qla24xx_read_window(reg, 0x7010, 16, iter_reg); /* PCIe registers. */ - wrt_reg_dword(®->iobase_addr, 0x7C00); - rd_reg_dword(®->iobase_addr); - wrt_reg_dword(®->iobase_window, 0x01); + WRT_REG_DWORD(®->iobase_addr, 0x7C00); + RD_REG_DWORD(®->iobase_addr); + WRT_REG_DWORD(®->iobase_window, 0x01); dmp_reg = ®->iobase_c4; - fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg)); + fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg)); dmp_reg++; - fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg)); + fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg)); dmp_reg++; - fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg)); - fw->pcie_regs[3] = htonl(rd_reg_dword(®->iobase_window)); + fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); + fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window)); - wrt_reg_dword(®->iobase_window, 0x00); - rd_reg_dword(®->iobase_window); + WRT_REG_DWORD(®->iobase_window, 0x00); + RD_REG_DWORD(®->iobase_window); /* Host interface registers. */ dmp_reg = ®->flash_addr; - for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) - fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg)); + for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++) + fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); /* Disable interrupts. */ - wrt_reg_dword(®->ictrl, 0); - rd_reg_dword(®->ictrl); + WRT_REG_DWORD(®->ictrl, 0); + RD_REG_DWORD(®->ictrl); /* Shadow registers. 
*/ - wrt_reg_dword(®->iobase_addr, 0x0F70); - rd_reg_dword(®->iobase_addr); - wrt_reg_dword(®->iobase_select, 0xB0000000); - fw->shadow_reg[0] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_addr, 0x0F70); + RD_REG_DWORD(®->iobase_addr); + WRT_REG_DWORD(®->iobase_select, 0xB0000000); + fw->shadow_reg[0] = htonl(RD_REG_DWORD(®->iobase_sdata)); - wrt_reg_dword(®->iobase_select, 0xB0100000); - fw->shadow_reg[1] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_select, 0xB0100000); + fw->shadow_reg[1] = htonl(RD_REG_DWORD(®->iobase_sdata)); - wrt_reg_dword(®->iobase_select, 0xB0200000); - fw->shadow_reg[2] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_select, 0xB0200000); + fw->shadow_reg[2] = htonl(RD_REG_DWORD(®->iobase_sdata)); - wrt_reg_dword(®->iobase_select, 0xB0300000); - fw->shadow_reg[3] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_select, 0xB0300000); + fw->shadow_reg[3] = htonl(RD_REG_DWORD(®->iobase_sdata)); - wrt_reg_dword(®->iobase_select, 0xB0400000); - fw->shadow_reg[4] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_select, 0xB0400000); + fw->shadow_reg[4] = htonl(RD_REG_DWORD(®->iobase_sdata)); - wrt_reg_dword(®->iobase_select, 0xB0500000); - fw->shadow_reg[5] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_select, 0xB0500000); + fw->shadow_reg[5] = htonl(RD_REG_DWORD(®->iobase_sdata)); - wrt_reg_dword(®->iobase_select, 0xB0600000); - fw->shadow_reg[6] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_select, 0xB0600000); + fw->shadow_reg[6] = htonl(RD_REG_DWORD(®->iobase_sdata)); - wrt_reg_dword(®->iobase_select, 0xB0700000); - fw->shadow_reg[7] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_select, 0xB0700000); + fw->shadow_reg[7] = htonl(RD_REG_DWORD(®->iobase_sdata)); - wrt_reg_dword(®->iobase_select, 0xB0800000); - fw->shadow_reg[8] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_select, 0xB0800000); + fw->shadow_reg[8] = htonl(RD_REG_DWORD(®->iobase_sdata)); - wrt_reg_dword(®->iobase_select, 0xB0900000); - fw->shadow_reg[9] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_select, 0xB0900000); + fw->shadow_reg[9] = htonl(RD_REG_DWORD(®->iobase_sdata)); - wrt_reg_dword(®->iobase_select, 0xB0A00000); - fw->shadow_reg[10] = htonl(rd_reg_dword(®->iobase_sdata)); + WRT_REG_DWORD(®->iobase_select, 0xB0A00000); + fw->shadow_reg[10] = htonl(RD_REG_DWORD(®->iobase_sdata)); /* RISC I/O register. */ - wrt_reg_dword(®->iobase_addr, 0x0010); - fw->risc_io_reg = htonl(rd_reg_dword(®->iobase_window)); + WRT_REG_DWORD(®->iobase_addr, 0x0010); + fw->risc_io_reg = htonl(RD_REG_DWORD(®->iobase_window)); /* Mailbox registers. */ mbx_reg = ®->mailbox0; - for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) - fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg)); + for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); /* Transfer sequence registers. 
*/ iter_reg = fw->xseq_gp_reg; @@ -1488,19 +1541,19 @@ qla25xx_fw_dump(scsi_qla_host_t *vha) iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); dmp_reg = ®->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) - *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); iter_reg = fw->resp0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); dmp_reg = ®->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) - *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); iter_reg = fw->req1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); dmp_reg = ®->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) - *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); /* Transmit DMA registers. */ iter_reg = fw->xmt0_dma_reg; @@ -1618,31 +1671,44 @@ qla25xx_fw_dump(scsi_qla_host_t *vha) qla25xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); + +qla25xx_fw_dump_failed: +#ifndef __CHECKER__ + if (!hardware_locked) + spin_unlock_irqrestore(&ha->hardware_lock, flags); +#else + ; +#endif } void -qla81xx_fw_dump(scsi_qla_host_t *vha) +qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) { int rval; uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - __le32 __iomem *dmp_reg; - __be32 *iter_reg; - __le16 __iomem *mbx_reg; + uint32_t __iomem *dmp_reg; + uint32_t *iter_reg; + uint16_t __iomem *mbx_reg; + unsigned long flags; struct qla81xx_fw_dump *fw; void *nxt, *nxt_chain; - __be32 *last_chain = NULL; + uint32_t *last_chain = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - lockdep_assert_held(&ha->hardware_lock); - + flags = 0; ha->fw_dump_cap_flags = 0; +#ifndef __CHECKER__ + if (!hardware_locked) + spin_lock_irqsave(&ha->hardware_lock, flags); +#endif + if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd00a, "No buffer available for dump.\n"); - return; + goto qla81xx_fw_dump_failed; } if (ha->fw_dumped) { @@ -1650,12 +1716,12 @@ qla81xx_fw_dump(scsi_qla_host_t *vha) "Firmware has been previously dumped (%p) " "-- ignoring request.\n", ha->fw_dump); - return; + goto qla81xx_fw_dump_failed; } fw = &ha->fw_dump->isp.isp81; qla2xxx_prep_dump(ha, ha->fw_dump); - fw->host_status = htonl(rd_reg_dword(®->host_status)); + fw->host_status = htonl(RD_REG_DWORD(®->host_status)); /* * Pause RISC. No need to track timeout, as resetting the chip @@ -1669,73 +1735,73 @@ qla81xx_fw_dump(scsi_qla_host_t *vha) qla24xx_read_window(reg, 0x7010, 16, iter_reg); /* PCIe registers. */ - wrt_reg_dword(®->iobase_addr, 0x7C00); - rd_reg_dword(®->iobase_addr); - wrt_reg_dword(®->iobase_window, 0x01); + WRT_REG_DWORD(®->iobase_addr, 0x7C00); + RD_REG_DWORD(®->iobase_addr); + WRT_REG_DWORD(®->iobase_window, 0x01); dmp_reg = ®->iobase_c4; - fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg)); + fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg)); dmp_reg++; - fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg)); + fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg)); dmp_reg++; - fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg)); - fw->pcie_regs[3] = htonl(rd_reg_dword(®->iobase_window)); + fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); + fw->pcie_regs[3] = htonl(RD_REG_DWORD(®->iobase_window)); - wrt_reg_dword(®->iobase_window, 0x00); - rd_reg_dword(®->iobase_window); + WRT_REG_DWORD(®->iobase_window, 0x00); + RD_REG_DWORD(®->iobase_window); /* Host interface registers. 
*/ dmp_reg = &reg->flash_addr; - for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) - fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg)); + for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++) + fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); /* Disable interrupts. */ - wrt_reg_dword(&reg->ictrl, 0); - rd_reg_dword(&reg->ictrl); + WRT_REG_DWORD(&reg->ictrl, 0); + RD_REG_DWORD(&reg->ictrl); /* Shadow registers. */ - wrt_reg_dword(&reg->iobase_addr, 0x0F70); - rd_reg_dword(&reg->iobase_addr); - wrt_reg_dword(&reg->iobase_select, 0xB0000000); - fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_addr, 0x0F70); + RD_REG_DWORD(&reg->iobase_addr); + WRT_REG_DWORD(&reg->iobase_select, 0xB0000000); + fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0100000); - fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0100000); + fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0200000); - fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0200000); + fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0300000); - fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0300000); + fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0400000); - fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0400000); + fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0500000); - fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0500000); + fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0600000); - fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0600000); + fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0700000); - fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0700000); + fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0800000); - fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0800000); + fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0900000); - fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0900000); + fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0A00000); - fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000); + fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); /* RISC I/O register. */ - wrt_reg_dword(&reg->iobase_addr, 0x0010); - fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window)); + WRT_REG_DWORD(&reg->iobase_addr, 0x0010); + fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window)); /* Mailbox registers. */ mbx_reg = &reg->mailbox0; - for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) - fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg)); + for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); /* Transfer sequence registers.
*/ iter_reg = fw->xseq_gp_reg; @@ -1799,19 +1865,19 @@ qla81xx_fw_dump(scsi_qla_host_t *vha) iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) - *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); iter_reg = fw->resp0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) - *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); iter_reg = fw->req1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) - *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); /* Transmit DMA registers. */ iter_reg = fw->xmt0_dma_reg; @@ -1933,44 +1999,57 @@ qla81xx_fw_dump(scsi_qla_host_t *vha) qla81xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); + +qla81xx_fw_dump_failed: +#ifndef __CHECKER__ + if (!hardware_locked) + spin_unlock_irqrestore(&ha->hardware_lock, flags); +#else + ; +#endif } void -qla83xx_fw_dump(scsi_qla_host_t *vha) +qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) { int rval; uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - __le32 __iomem *dmp_reg; - __be32 *iter_reg; - __le16 __iomem *mbx_reg; + uint32_t __iomem *dmp_reg; + uint32_t *iter_reg; + uint16_t __iomem *mbx_reg; + unsigned long flags; struct qla83xx_fw_dump *fw; void *nxt, *nxt_chain; - __be32 *last_chain = NULL; + uint32_t *last_chain = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - lockdep_assert_held(&ha->hardware_lock); - + flags = 0; ha->fw_dump_cap_flags = 0; +#ifndef __CHECKER__ + if (!hardware_locked) + spin_lock_irqsave(&ha->hardware_lock, flags); +#endif + if (!ha->fw_dump) { ql_log(ql_log_warn, vha, 0xd00c, "No buffer available for dump!!!\n"); - return; + goto qla83xx_fw_dump_failed; } if (ha->fw_dumped) { ql_log(ql_log_warn, vha, 0xd00d, "Firmware has been previously dumped (%p) -- ignoring " "request...\n", ha->fw_dump); - return; + goto qla83xx_fw_dump_failed; } QLA_FW_STOPPED(ha); fw = &ha->fw_dump->isp.isp83; qla2xxx_prep_dump(ha, ha->fw_dump); - fw->host_status = htonl(rd_reg_dword(&reg->host_status)); + fw->host_status = htonl(RD_REG_DWORD(&reg->host_status)); /* * Pause RISC. No need to track timeout, as resetting the chip @@ -1978,24 +2057,24 @@ qla83xx_fw_dump(scsi_qla_host_t *vha) */ qla24xx_pause_risc(reg, ha); - wrt_reg_dword(&reg->iobase_addr, 0x6000); + WRT_REG_DWORD(&reg->iobase_addr, 0x6000); dmp_reg = &reg->iobase_window; - rd_reg_dword(dmp_reg); - wrt_reg_dword(dmp_reg, 0); + RD_REG_DWORD(dmp_reg); + WRT_REG_DWORD(dmp_reg, 0); dmp_reg = &reg->unused_4_1[0]; - rd_reg_dword(dmp_reg); - wrt_reg_dword(dmp_reg, 0); + RD_REG_DWORD(dmp_reg); + WRT_REG_DWORD(dmp_reg, 0); - wrt_reg_dword(&reg->iobase_addr, 0x6010); + WRT_REG_DWORD(&reg->iobase_addr, 0x6010); dmp_reg = &reg->unused_4_1[2]; - rd_reg_dword(dmp_reg); - wrt_reg_dword(dmp_reg, 0); + RD_REG_DWORD(dmp_reg); + WRT_REG_DWORD(dmp_reg, 0); /* select PCR and disable ecc checking and correction */ - wrt_reg_dword(&reg->iobase_addr, 0x0F70); - rd_reg_dword(&reg->iobase_addr); - wrt_reg_dword(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */ + WRT_REG_DWORD(&reg->iobase_addr, 0x0F70); + RD_REG_DWORD(&reg->iobase_addr); + WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */ /* Host/Risc registers.
*/ iter_reg = fw->host_risc_reg; @@ -2004,73 +2083,73 @@ qla83xx_fw_dump(scsi_qla_host_t *vha) qla24xx_read_window(reg, 0x7040, 16, iter_reg); /* PCIe registers. */ - wrt_reg_dword(&reg->iobase_addr, 0x7C00); - rd_reg_dword(&reg->iobase_addr); - wrt_reg_dword(&reg->iobase_window, 0x01); + WRT_REG_DWORD(&reg->iobase_addr, 0x7C00); + RD_REG_DWORD(&reg->iobase_addr); + WRT_REG_DWORD(&reg->iobase_window, 0x01); dmp_reg = &reg->iobase_c4; - fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg)); + fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg)); dmp_reg++; - fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg)); + fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg)); dmp_reg++; - fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg)); - fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window)); + fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg)); + fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window)); - wrt_reg_dword(&reg->iobase_window, 0x00); - rd_reg_dword(&reg->iobase_window); + WRT_REG_DWORD(&reg->iobase_window, 0x00); + RD_REG_DWORD(&reg->iobase_window); /* Host interface registers. */ dmp_reg = &reg->flash_addr; - for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++) - fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg)); + for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++) + fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg)); /* Disable interrupts. */ - wrt_reg_dword(&reg->ictrl, 0); - rd_reg_dword(&reg->ictrl); + WRT_REG_DWORD(&reg->ictrl, 0); + RD_REG_DWORD(&reg->ictrl); /* Shadow registers. */ - wrt_reg_dword(&reg->iobase_addr, 0x0F70); - rd_reg_dword(&reg->iobase_addr); - wrt_reg_dword(&reg->iobase_select, 0xB0000000); - fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_addr, 0x0F70); + RD_REG_DWORD(&reg->iobase_addr); + WRT_REG_DWORD(&reg->iobase_select, 0xB0000000); + fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0100000); - fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0100000); + fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0200000); - fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0200000); + fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0300000); - fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0300000); + fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0400000); - fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0400000); + fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0500000); - fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0500000); + fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0600000); - fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0600000); + fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0700000); - fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0700000); + fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0800000); - fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0800000); + fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); -
wrt_reg_dword(&reg->iobase_select, 0xB0900000); - fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0900000); + fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); - wrt_reg_dword(&reg->iobase_select, 0xB0A00000); - fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata)); + WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000); + fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata)); /* RISC I/O register. */ - wrt_reg_dword(&reg->iobase_addr, 0x0010); - fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window)); + WRT_REG_DWORD(&reg->iobase_addr, 0x0010); + fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window)); /* Mailbox registers. */ mbx_reg = &reg->mailbox0; - for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++) - fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg)); + for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++) + fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg)); /* Transfer sequence registers. */ iter_reg = fw->xseq_gp_reg; @@ -2166,19 +2245,19 @@ qla83xx_fw_dump(scsi_qla_host_t *vha) iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) - *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); iter_reg = fw->resp0_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) - *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); iter_reg = fw->req1_dma_reg; iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg); dmp_reg = &reg->iobase_q; for (cnt = 0; cnt < 7; cnt++, dmp_reg++) - *iter_reg++ = htonl(rd_reg_dword(dmp_reg)); + *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg)); /* Transmit DMA registers. */ iter_reg = fw->xmt0_dma_reg; @@ -2384,16 +2463,16 @@ qla83xx_fw_dump(scsi_qla_host_t *vha) ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n"); - wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET); - rd_reg_dword(&reg->hccr); + WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET); + RD_REG_DWORD(&reg->hccr); - wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE); - rd_reg_dword(&reg->hccr); + WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE); + RD_REG_DWORD(&reg->hccr); - wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET); - rd_reg_dword(&reg->hccr); + WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET); + RD_REG_DWORD(&reg->hccr); - for (cnt = 30000; cnt && (rd_reg_word(&reg->mailbox0)); cnt--) + for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--) udelay(5); if (!cnt) { @@ -2434,29 +2513,20 @@ qla83xx_fw_dump(scsi_qla_host_t *vha) qla83xx_fw_dump_failed_0: qla2xxx_dump_post_process(base_vha, rval); + +qla83xx_fw_dump_failed: +#ifndef __CHECKER__ + if (!hardware_locked) + spin_unlock_irqrestore(&ha->hardware_lock, flags); +#else + ; +#endif } /****************************************************************************/ /* Driver Debug Functions. */ /****************************************************************************/ -/* Write the debug message prefix into @pbuf. */ -static void ql_dbg_prefix(char *pbuf, int pbuf_size, - const scsi_qla_host_t *vha, uint msg_id) -{ - if (vha) { - const struct pci_dev *pdev = vha->hw->pdev; - - /* <module-name> [<dev-name>]-<msg-id>:<host>: */ - snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%lu: ", QL_MSGHDR, - dev_name(&(pdev->dev)), msg_id, vha->host_no); - } else { - /* <module-name> [<dev-name>]-<msg-id>: : */ - snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR, - "0000:00:00.0", msg_id); - } -} - /* * This function is for formatting and logging debug information.
* It is to be used when vha is available. It formats the message @@ -2475,9 +2545,10 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...) { va_list va; struct va_format vaf; - char pbuf[64]; - if (!ql_mask_match(level) && !trace_ql_dbg_log_enabled()) + ql_msg_trace(1, level, vha, NULL, id, fmt); + + if (!ql_mask_match(level)) return; va_start(va, fmt); @@ -2485,12 +2556,16 @@ ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...) vaf.fmt = fmt; vaf.va = &va; - ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id); - - if (!ql_mask_match(level)) - trace_ql_dbg_log(pbuf, &vaf); - else - pr_warn("%s%pV", pbuf, &vaf); + if (vha != NULL) { + const struct pci_dev *pdev = vha->hw->pdev; + /* <module-name> [<dev-name>]-<msg-id>:<host> Message */ + pr_warn("%s [%s]-%04x:%ld: %pV", + QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, + vha->host_no, &vaf); + } else { + pr_warn("%s [%s]-%04x: : %pV", + QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf); + } va_end(va); @@ -2515,10 +2590,12 @@ ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...) { va_list va; struct va_format vaf; - char pbuf[128]; if (pdev == NULL) return; + + ql_msg_trace(1, level, NULL, pdev, id, fmt); + if (!ql_mask_match(level)) return; @@ -2527,8 +2604,9 @@ ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...) vaf.fmt = fmt; vaf.va = &va; - ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id + ql_dbg_offset); - pr_warn("%s%pV", pbuf, &vaf); + /* <module-name> [<dev-name>]-<msg-id>: : Message */ + pr_warn("%s [%s]-%04x: : %pV", + QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf); va_end(va); } @@ -2556,7 +2634,18 @@ ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...) if (level > ql_errlev) return; - ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), vha, id); + ql_msg_trace(0, level, vha, NULL, id, fmt); + + if (vha != NULL) { + const struct pci_dev *pdev = vha->hw->pdev; + /* <module-name> [<dev-name>]-<msg-id>:<host> Message */ + snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ", + QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no); + } else { + snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ", + QL_MSGHDR, "0000:00:00.0", id); + } + pbuf[sizeof(pbuf) - 1] = 0; va_start(va, fmt); @@ -2607,7 +2696,12 @@ ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
if (level > ql_errlev) return; - ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, id); + ql_msg_trace(0, level, NULL, pdev, id, fmt); + + /* <module-name> [<dev-name>]-<msg-id>: : Message */ + snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ", + QL_MSGHDR, dev_name(&(pdev->dev)), id); + pbuf[sizeof(pbuf) - 1] = 0; va_start(va, fmt); @@ -2640,7 +2734,7 @@ ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id) struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; - __le16 __iomem *mbx_reg; + uint16_t __iomem *mbx_reg; if (!ql_mask_match(level)) return; @@ -2655,9 +2749,10 @@ ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id) ql_dbg(level, vha, id, "Mailbox registers:\n"); for (i = 0; i < 6; i++, mbx_reg++) ql_dbg(level, vha, id, - "mbox[%d] %#04x\n", i, rd_reg_word(mbx_reg)); + "mbox[%d] %#04x\n", i, RD_REG_WORD(mbx_reg)); } + void ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf, uint size) @@ -2678,6 +2773,23 @@ ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf, } } +void +ql_scm_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, void *buf, + uint size) +{ + uint cnt; + + ql_dbg(level, vha, id, + "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size); + ql_dbg(level, vha, id, + "----- -----------------------------------------------\n"); + for (cnt = 0; cnt < size; cnt += 16) { + ql_dbg(level, vha, id, "%04x: ", cnt); + print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, + buf + cnt, min(16U, size - cnt), false); + } +} + /* * This function is for formatting and logging log messages. * It is to be used when vha is available. It formats the message @@ -2702,7 +2814,16 @@ ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id, if (level > ql_errlev) return; - ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ? qpair->vha : NULL, id); + if (qpair != NULL) { + const struct pci_dev *pdev = qpair->pdev; + /* <module-name> [<dev-name>]-<msg-id>: Message */ + snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: ", + QL_MSGHDR, dev_name(&(pdev->dev)), id); + } else { + snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ", + QL_MSGHDR, "0000:00:00.0", id); + } + pbuf[sizeof(pbuf) - 1] = 0; va_start(va, fmt); @@ -2746,7 +2867,6 @@ ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id, { va_list va; struct va_format vaf; - char pbuf[128]; if (!ql_mask_match(level)) return; @@ -2756,10 +2876,31 @@ ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id, vaf.fmt = fmt; vaf.va = &va; - ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), qpair ?
qpair->vha : NULL, - id + ql_dbg_offset); - pr_warn("%s%pV", pbuf, &vaf); + if (qpair != NULL) { + const struct pci_dev *pdev = qpair->pdev; + /* <module-name> [<dev-name>]-<msg-id>: Message */ + pr_warn("%s [%s]-%04x: %pV", + QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, + &vaf); + } else { + pr_warn("%s [%s]-%04x: : %pV", + QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf); + } va_end(va); } + +#ifdef QLA_TRACING +void qla_tracing_init(void) +{ + if (is_kdump_kernel()) + return; + + qla_trace_init(&qla_message_trace, "message_trace", ql2xnum_msg_trace); +} +void qla_tracing_exit(void) +{ + qla_trace_uninit(&qla_message_trace); +} +#endif /* QLA_TRACING */ diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index 2e59e75c62b59..85c062f30882f 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -1,216 +1,217 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" +#include /* * Firmware Dump structure definition */ struct qla2300_fw_dump { - __be16 hccr; - __be16 pbiu_reg[8]; - __be16 risc_host_reg[8]; - __be16 mailbox_reg[32]; - __be16 resp_dma_reg[32]; - __be16 dma_reg[48]; - __be16 risc_hdw_reg[16]; - __be16 risc_gp0_reg[16]; - __be16 risc_gp1_reg[16]; - __be16 risc_gp2_reg[16]; - __be16 risc_gp3_reg[16]; - __be16 risc_gp4_reg[16]; - __be16 risc_gp5_reg[16]; - __be16 risc_gp6_reg[16]; - __be16 risc_gp7_reg[16]; - __be16 frame_buf_hdw_reg[64]; - __be16 fpm_b0_reg[64]; - __be16 fpm_b1_reg[64]; - __be16 risc_ram[0xf800]; - __be16 stack_ram[0x1000]; - __be16 data_ram[1]; + uint16_t hccr; + uint16_t pbiu_reg[8]; + uint16_t risc_host_reg[8]; + uint16_t mailbox_reg[32]; + uint16_t resp_dma_reg[32]; + uint16_t dma_reg[48]; + uint16_t risc_hdw_reg[16]; + uint16_t risc_gp0_reg[16]; + uint16_t risc_gp1_reg[16]; + uint16_t risc_gp2_reg[16]; + uint16_t risc_gp3_reg[16]; + uint16_t risc_gp4_reg[16]; + uint16_t risc_gp5_reg[16]; + uint16_t risc_gp6_reg[16]; + uint16_t risc_gp7_reg[16]; + uint16_t frame_buf_hdw_reg[64]; + uint16_t fpm_b0_reg[64]; + uint16_t fpm_b1_reg[64]; + uint16_t risc_ram[0xf800]; + uint16_t stack_ram[0x1000]; + uint16_t data_ram[1]; }; struct qla2100_fw_dump { - __be16 hccr; - __be16 pbiu_reg[8]; - __be16 mailbox_reg[32]; - __be16 dma_reg[48]; - __be16 risc_hdw_reg[16]; - __be16 risc_gp0_reg[16]; - __be16 risc_gp1_reg[16]; - __be16 risc_gp2_reg[16]; - __be16 risc_gp3_reg[16]; - __be16 risc_gp4_reg[16]; - __be16 risc_gp5_reg[16]; - __be16 risc_gp6_reg[16]; - __be16 risc_gp7_reg[16]; - __be16 frame_buf_hdw_reg[16]; - __be16 fpm_b0_reg[64]; - __be16 fpm_b1_reg[64]; - __be16 risc_ram[0xf000]; - u8 queue_dump[]; + uint16_t hccr; + uint16_t pbiu_reg[8]; + uint16_t mailbox_reg[32]; + uint16_t dma_reg[48]; + uint16_t risc_hdw_reg[16]; + uint16_t risc_gp0_reg[16]; + uint16_t risc_gp1_reg[16]; + uint16_t risc_gp2_reg[16]; + uint16_t risc_gp3_reg[16]; + uint16_t risc_gp4_reg[16]; + uint16_t risc_gp5_reg[16]; + uint16_t risc_gp6_reg[16]; + uint16_t risc_gp7_reg[16]; + uint16_t frame_buf_hdw_reg[16]; + uint16_t fpm_b0_reg[64]; + uint16_t fpm_b1_reg[64]; + uint16_t risc_ram[0xf000]; }; struct qla24xx_fw_dump { - __be32 host_status; - __be32 host_reg[32]; - __be32 shadow_reg[7]; - __be16 mailbox_reg[32]; - __be32 xseq_gp_reg[128]; - __be32 xseq_0_reg[16]; - __be32 xseq_1_reg[16]; - __be32 rseq_gp_reg[128]; - __be32 rseq_0_reg[16]; - __be32 rseq_1_reg[16]; - __be32 rseq_2_reg[16]; - __be32
cmd_dma_reg[16]; - __be32 req0_dma_reg[15]; - __be32 resp0_dma_reg[15]; - __be32 req1_dma_reg[15]; - __be32 xmt0_dma_reg[32]; - __be32 xmt1_dma_reg[32]; - __be32 xmt2_dma_reg[32]; - __be32 xmt3_dma_reg[32]; - __be32 xmt4_dma_reg[32]; - __be32 xmt_data_dma_reg[16]; - __be32 rcvt0_data_dma_reg[32]; - __be32 rcvt1_data_dma_reg[32]; - __be32 risc_gp_reg[128]; - __be32 lmc_reg[112]; - __be32 fpm_hdw_reg[192]; - __be32 fb_hdw_reg[176]; - __be32 code_ram[0x2000]; - __be32 ext_mem[1]; + uint32_t host_status; + uint32_t host_reg[32]; + uint32_t shadow_reg[7]; + uint16_t mailbox_reg[32]; + uint32_t xseq_gp_reg[128]; + uint32_t xseq_0_reg[16]; + uint32_t xseq_1_reg[16]; + uint32_t rseq_gp_reg[128]; + uint32_t rseq_0_reg[16]; + uint32_t rseq_1_reg[16]; + uint32_t rseq_2_reg[16]; + uint32_t cmd_dma_reg[16]; + uint32_t req0_dma_reg[15]; + uint32_t resp0_dma_reg[15]; + uint32_t req1_dma_reg[15]; + uint32_t xmt0_dma_reg[32]; + uint32_t xmt1_dma_reg[32]; + uint32_t xmt2_dma_reg[32]; + uint32_t xmt3_dma_reg[32]; + uint32_t xmt4_dma_reg[32]; + uint32_t xmt_data_dma_reg[16]; + uint32_t rcvt0_data_dma_reg[32]; + uint32_t rcvt1_data_dma_reg[32]; + uint32_t risc_gp_reg[128]; + uint32_t lmc_reg[112]; + uint32_t fpm_hdw_reg[192]; + uint32_t fb_hdw_reg[176]; + uint32_t code_ram[0x2000]; + uint32_t ext_mem[1]; }; struct qla25xx_fw_dump { - __be32 host_status; - __be32 host_risc_reg[32]; - __be32 pcie_regs[4]; - __be32 host_reg[32]; - __be32 shadow_reg[11]; - __be32 risc_io_reg; - __be16 mailbox_reg[32]; - __be32 xseq_gp_reg[128]; - __be32 xseq_0_reg[48]; - __be32 xseq_1_reg[16]; - __be32 rseq_gp_reg[128]; - __be32 rseq_0_reg[32]; - __be32 rseq_1_reg[16]; - __be32 rseq_2_reg[16]; - __be32 aseq_gp_reg[128]; - __be32 aseq_0_reg[32]; - __be32 aseq_1_reg[16]; - __be32 aseq_2_reg[16]; - __be32 cmd_dma_reg[16]; - __be32 req0_dma_reg[15]; - __be32 resp0_dma_reg[15]; - __be32 req1_dma_reg[15]; - __be32 xmt0_dma_reg[32]; - __be32 xmt1_dma_reg[32]; - __be32 xmt2_dma_reg[32]; - __be32 xmt3_dma_reg[32]; - __be32 xmt4_dma_reg[32]; - __be32 xmt_data_dma_reg[16]; - __be32 rcvt0_data_dma_reg[32]; - __be32 rcvt1_data_dma_reg[32]; - __be32 risc_gp_reg[128]; - __be32 lmc_reg[128]; - __be32 fpm_hdw_reg[192]; - __be32 fb_hdw_reg[192]; - __be32 code_ram[0x2000]; - __be32 ext_mem[1]; + uint32_t host_status; + uint32_t host_risc_reg[32]; + uint32_t pcie_regs[4]; + uint32_t host_reg[32]; + uint32_t shadow_reg[11]; + uint32_t risc_io_reg; + uint16_t mailbox_reg[32]; + uint32_t xseq_gp_reg[128]; + uint32_t xseq_0_reg[48]; + uint32_t xseq_1_reg[16]; + uint32_t rseq_gp_reg[128]; + uint32_t rseq_0_reg[32]; + uint32_t rseq_1_reg[16]; + uint32_t rseq_2_reg[16]; + uint32_t aseq_gp_reg[128]; + uint32_t aseq_0_reg[32]; + uint32_t aseq_1_reg[16]; + uint32_t aseq_2_reg[16]; + uint32_t cmd_dma_reg[16]; + uint32_t req0_dma_reg[15]; + uint32_t resp0_dma_reg[15]; + uint32_t req1_dma_reg[15]; + uint32_t xmt0_dma_reg[32]; + uint32_t xmt1_dma_reg[32]; + uint32_t xmt2_dma_reg[32]; + uint32_t xmt3_dma_reg[32]; + uint32_t xmt4_dma_reg[32]; + uint32_t xmt_data_dma_reg[16]; + uint32_t rcvt0_data_dma_reg[32]; + uint32_t rcvt1_data_dma_reg[32]; + uint32_t risc_gp_reg[128]; + uint32_t lmc_reg[128]; + uint32_t fpm_hdw_reg[192]; + uint32_t fb_hdw_reg[192]; + uint32_t code_ram[0x2000]; + uint32_t ext_mem[1]; }; struct qla81xx_fw_dump { - __be32 host_status; - __be32 host_risc_reg[32]; - __be32 pcie_regs[4]; - __be32 host_reg[32]; - __be32 shadow_reg[11]; - __be32 risc_io_reg; - __be16 mailbox_reg[32]; - __be32 xseq_gp_reg[128]; - __be32 xseq_0_reg[48]; - __be32 
xseq_1_reg[16]; - __be32 rseq_gp_reg[128]; - __be32 rseq_0_reg[32]; - __be32 rseq_1_reg[16]; - __be32 rseq_2_reg[16]; - __be32 aseq_gp_reg[128]; - __be32 aseq_0_reg[32]; - __be32 aseq_1_reg[16]; - __be32 aseq_2_reg[16]; - __be32 cmd_dma_reg[16]; - __be32 req0_dma_reg[15]; - __be32 resp0_dma_reg[15]; - __be32 req1_dma_reg[15]; - __be32 xmt0_dma_reg[32]; - __be32 xmt1_dma_reg[32]; - __be32 xmt2_dma_reg[32]; - __be32 xmt3_dma_reg[32]; - __be32 xmt4_dma_reg[32]; - __be32 xmt_data_dma_reg[16]; - __be32 rcvt0_data_dma_reg[32]; - __be32 rcvt1_data_dma_reg[32]; - __be32 risc_gp_reg[128]; - __be32 lmc_reg[128]; - __be32 fpm_hdw_reg[224]; - __be32 fb_hdw_reg[208]; - __be32 code_ram[0x2000]; - __be32 ext_mem[1]; + uint32_t host_status; + uint32_t host_risc_reg[32]; + uint32_t pcie_regs[4]; + uint32_t host_reg[32]; + uint32_t shadow_reg[11]; + uint32_t risc_io_reg; + uint16_t mailbox_reg[32]; + uint32_t xseq_gp_reg[128]; + uint32_t xseq_0_reg[48]; + uint32_t xseq_1_reg[16]; + uint32_t rseq_gp_reg[128]; + uint32_t rseq_0_reg[32]; + uint32_t rseq_1_reg[16]; + uint32_t rseq_2_reg[16]; + uint32_t aseq_gp_reg[128]; + uint32_t aseq_0_reg[32]; + uint32_t aseq_1_reg[16]; + uint32_t aseq_2_reg[16]; + uint32_t cmd_dma_reg[16]; + uint32_t req0_dma_reg[15]; + uint32_t resp0_dma_reg[15]; + uint32_t req1_dma_reg[15]; + uint32_t xmt0_dma_reg[32]; + uint32_t xmt1_dma_reg[32]; + uint32_t xmt2_dma_reg[32]; + uint32_t xmt3_dma_reg[32]; + uint32_t xmt4_dma_reg[32]; + uint32_t xmt_data_dma_reg[16]; + uint32_t rcvt0_data_dma_reg[32]; + uint32_t rcvt1_data_dma_reg[32]; + uint32_t risc_gp_reg[128]; + uint32_t lmc_reg[128]; + uint32_t fpm_hdw_reg[224]; + uint32_t fb_hdw_reg[208]; + uint32_t code_ram[0x2000]; + uint32_t ext_mem[1]; }; struct qla83xx_fw_dump { - __be32 host_status; - __be32 host_risc_reg[48]; - __be32 pcie_regs[4]; - __be32 host_reg[32]; - __be32 shadow_reg[11]; - __be32 risc_io_reg; - __be16 mailbox_reg[32]; - __be32 xseq_gp_reg[256]; - __be32 xseq_0_reg[48]; - __be32 xseq_1_reg[16]; - __be32 xseq_2_reg[16]; - __be32 rseq_gp_reg[256]; - __be32 rseq_0_reg[32]; - __be32 rseq_1_reg[16]; - __be32 rseq_2_reg[16]; - __be32 rseq_3_reg[16]; - __be32 aseq_gp_reg[256]; - __be32 aseq_0_reg[32]; - __be32 aseq_1_reg[16]; - __be32 aseq_2_reg[16]; - __be32 aseq_3_reg[16]; - __be32 cmd_dma_reg[64]; - __be32 req0_dma_reg[15]; - __be32 resp0_dma_reg[15]; - __be32 req1_dma_reg[15]; - __be32 xmt0_dma_reg[32]; - __be32 xmt1_dma_reg[32]; - __be32 xmt2_dma_reg[32]; - __be32 xmt3_dma_reg[32]; - __be32 xmt4_dma_reg[32]; - __be32 xmt_data_dma_reg[16]; - __be32 rcvt0_data_dma_reg[32]; - __be32 rcvt1_data_dma_reg[32]; - __be32 risc_gp_reg[128]; - __be32 lmc_reg[128]; - __be32 fpm_hdw_reg[256]; - __be32 rq0_array_reg[256]; - __be32 rq1_array_reg[256]; - __be32 rp0_array_reg[256]; - __be32 rp1_array_reg[256]; - __be32 queue_control_reg[16]; - __be32 fb_hdw_reg[432]; - __be32 at0_array_reg[128]; - __be32 code_ram[0x2400]; - __be32 ext_mem[1]; + uint32_t host_status; + uint32_t host_risc_reg[48]; + uint32_t pcie_regs[4]; + uint32_t host_reg[32]; + uint32_t shadow_reg[11]; + uint32_t risc_io_reg; + uint16_t mailbox_reg[32]; + uint32_t xseq_gp_reg[256]; + uint32_t xseq_0_reg[48]; + uint32_t xseq_1_reg[16]; + uint32_t xseq_2_reg[16]; + uint32_t rseq_gp_reg[256]; + uint32_t rseq_0_reg[32]; + uint32_t rseq_1_reg[16]; + uint32_t rseq_2_reg[16]; + uint32_t rseq_3_reg[16]; + uint32_t aseq_gp_reg[256]; + uint32_t aseq_0_reg[32]; + uint32_t aseq_1_reg[16]; + uint32_t aseq_2_reg[16]; + uint32_t aseq_3_reg[16]; + uint32_t cmd_dma_reg[64]; + uint32_t 
req0_dma_reg[15]; + uint32_t resp0_dma_reg[15]; + uint32_t req1_dma_reg[15]; + uint32_t xmt0_dma_reg[32]; + uint32_t xmt1_dma_reg[32]; + uint32_t xmt2_dma_reg[32]; + uint32_t xmt3_dma_reg[32]; + uint32_t xmt4_dma_reg[32]; + uint32_t xmt_data_dma_reg[16]; + uint32_t rcvt0_data_dma_reg[32]; + uint32_t rcvt1_data_dma_reg[32]; + uint32_t risc_gp_reg[128]; + uint32_t lmc_reg[128]; + uint32_t fpm_hdw_reg[256]; + uint32_t rq0_array_reg[256]; + uint32_t rq1_array_reg[256]; + uint32_t rp0_array_reg[256]; + uint32_t rp1_array_reg[256]; + uint32_t queue_control_reg[16]; + uint32_t fb_hdw_reg[432]; + uint32_t at0_array_reg[128]; + uint32_t code_ram[0x2400]; + uint32_t ext_mem[1]; }; #define EFT_NUM_BUFFERS 4 @@ -223,45 +224,44 @@ struct qla83xx_fw_dump { #define fce_calc_size(b) ((FCE_BYTES_PER_BUFFER) * (b)) struct qla2xxx_fce_chain { - __be32 type; - __be32 chain_size; + uint32_t type; + uint32_t chain_size; - __be32 size; - __be32 addr_l; - __be32 addr_h; - __be32 eregs[8]; + uint32_t size; + uint32_t addr_l; + uint32_t addr_h; + uint32_t eregs[8]; }; /* used by exchange off load and extended login offload */ struct qla2xxx_offld_chain { - __be32 type; - __be32 chain_size; + uint32_t type; + uint32_t chain_size; - __be32 size; - __be32 reserved; - __be64 addr; + uint32_t size; + u64 addr; }; struct qla2xxx_mq_chain { - __be32 type; - __be32 chain_size; + uint32_t type; + uint32_t chain_size; - __be32 count; - __be32 qregs[4 * QLA_MQ_SIZE]; + uint32_t count; + uint32_t qregs[4 * QLA_MQ_SIZE]; }; struct qla2xxx_mqueue_header { - __be32 queue; + uint32_t queue; #define TYPE_REQUEST_QUEUE 0x1 #define TYPE_RESPONSE_QUEUE 0x2 #define TYPE_ATIO_QUEUE 0x3 - __be32 number; - __be32 size; + uint32_t number; + uint32_t size; }; struct qla2xxx_mqueue_chain { - __be32 type; - __be32 chain_size; + uint32_t type; + uint32_t chain_size; }; #define DUMP_CHAIN_VARIANT 0x80000000 @@ -274,28 +274,28 @@ struct qla2xxx_mqueue_chain { struct qla2xxx_fw_dump { uint8_t signature[4]; - __be32 version; + uint32_t version; - __be32 fw_major_version; - __be32 fw_minor_version; - __be32 fw_subminor_version; - __be32 fw_attributes; + uint32_t fw_major_version; + uint32_t fw_minor_version; + uint32_t fw_subminor_version; + uint32_t fw_attributes; - __be32 vendor; - __be32 device; - __be32 subsystem_vendor; - __be32 subsystem_device; + uint32_t vendor; + uint32_t device; + uint32_t subsystem_vendor; + uint32_t subsystem_device; - __be32 fixed_size; - __be32 mem_size; - __be32 req_q_size; - __be32 rsp_q_size; + uint32_t fixed_size; + uint32_t mem_size; + uint32_t req_q_size; + uint32_t rsp_q_size; - __be32 eft_size; - __be32 eft_addr_l; - __be32 eft_addr_h; + uint32_t eft_size; + uint32_t eft_addr_l; + uint32_t eft_addr_h; - __be32 header_size; + uint32_t header_size; union { struct qla2100_fw_dump isp21; @@ -308,7 +308,7 @@ struct qla2xxx_fw_dump { }; #define QL_MSGHDR "qla2xxx" -#define QL_DBG_DEFAULT1_MASK 0x1e400000 +#define QL_DBG_DEFAULT1_MASK 0x1e600000 #define ql_log_fatal 0 /* display fatal errors */ #define ql_log_warn 1 /* display critical errors */ @@ -321,6 +321,293 @@ struct qla2xxx_fw_dump { extern uint ql_errlev; +#ifdef QLA_TRACING +#include + +#define QLA_MTRC_DEF_NUM_REC (4*1024) /* Has to be power of 2 */ +#define QLA_MESSAGE_TRACE_DEFINES \ + struct qla_trace qla_message_trace; \ + int ql2xextended_error_logging_msg_trace = 1; \ + module_param(ql2xextended_error_logging_msg_trace, int, 0600); \ + MODULE_PARM_DESC(ql2xextended_error_logging_msg_trace, \ + "Option to log console messages to buffer; uses same " \ 
+ "ql2xextended_error_logging masks."); \ + \ + int ql2xnum_msg_trace = QLA_MTRC_DEF_NUM_REC; \ + module_param(ql2xnum_msg_trace, int, 0600); \ + MODULE_PARM_DESC(ql2xnum_msg_trace, \ + "Number of trace entries in power of 2. (default 4k)"); + +extern int ql2xnum_msg_trace; +extern int ql2xextended_error_logging_msg_trace; + +#define QLA_SRB_TRACE_DEFINES \ + struct qla_trace qla_srb_trace; \ + int ql2xextended_error_logging_srb_trace = 1; \ + module_param(ql2xextended_error_logging_srb_trace, int, \ + S_IRUGO|S_IWUSR); \ + MODULE_PARM_DESC(ql2xextended_error_logging_srb_trace, \ + "Option to log srb messages to buffer; uses same " \ + "ql2xextended_error_logging masks."); \ + \ + int ql2xnum_srb_trace = 0; \ + module_param(ql2xnum_srb_trace, int, S_IRUGO); \ + MODULE_PARM_DESC(ql2xnum_srb_trace, \ + "Number of srb trace entries in power of 2. (default 0)"); + +extern int ql2xnum_srb_trace; +extern int ql2xextended_error_logging_srb_trace; + +extern struct qla_trace qla_message_trace; +extern void qla_tracing_init(void); +extern void qla_tracing_exit(void); + +static inline int +ql_mask_match_ext(uint level, int *log_tunable) +{ + if (*log_tunable == 1) + *log_tunable = QL_DBG_DEFAULT1_MASK; + + return (level & *log_tunable) == level; +} + +static inline int +__qla_trace_get(struct qla_trace *trc) +{ + if (test_bit(QLA_TRACE_QUIESCE, &trc->flags)) + return -EIO; + atomic_inc(&trc->ref_count); + return 0; +} + +static inline int +qla_trace_get(struct qla_trace *trc) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&trc->trc_lock, flags); + ret = __qla_trace_get(trc); + spin_unlock_irqrestore(&trc->trc_lock, flags); + + return ret; +} + +static inline void +qla_trace_put(struct qla_trace *trc) +{ + wmb(); + atomic_dec(&trc->ref_count); +} + +static inline char * +qla_get_trace_next(struct qla_trace *trc) +{ + uint32_t t_ind; + char *buf; + unsigned long flags; + + spin_lock_irqsave(&trc->trc_lock, flags); + if (!test_bit(QLA_TRACE_ENABLED, &trc->flags) || + __qla_trace_get(trc)) { + spin_unlock_irqrestore(&trc->trc_lock, flags); + return NULL; + } + t_ind = trc->trace_ind = qla_trace_ind_norm(trc, trc->trace_ind + 1); + spin_unlock_irqrestore(&trc->trc_lock, flags); + + if (!t_ind) + set_bit(QLA_TRACE_WRAPPED, &trc->flags); + + buf = qla_trace_record(trc, t_ind); + /* Put an end marker '>' for the next record. */ + qla_trace_record(trc, qla_trace_ind_norm(trc, t_ind + 1))[0] = '>'; + + return buf; +} + +static inline int +qla_trace_quiesce(struct qla_trace *trc) +{ + unsigned long flags; + u32 cnt = 0; + int ret = 0; + + set_bit(QLA_TRACE_QUIESCE, &trc->flags); + + spin_lock_irqsave(&trc->trc_lock, flags); + while (atomic_read(&trc->ref_count)) { + spin_unlock_irqrestore(&trc->trc_lock, flags); + + msleep(1); + + spin_lock_irqsave(&trc->trc_lock, flags); + cnt++; + if (cnt > 10 * 1000) { + pr_info("qla2xxx: Trace could not be quiesced now (count=%d).", + atomic_read(&trc->ref_count)); + /* Leave trace enabled */ + clear_bit(QLA_TRACE_QUIESCE, &trc->flags); + ret = -EIO; + break; + } + } + spin_unlock_irqrestore(&trc->trc_lock, flags); + return ret; +} + +#define ql_msg_trace(dbg_msg, level, vha, pdev, id, fmt) do { \ + struct va_format _vaf; \ + va_list _va; \ + u32 dbg_off = dbg_msg ? 
ql_dbg_offset : 0; \ + \ + if (!test_bit(QLA_TRACE_ENABLED, &qla_message_trace.flags)) \ + break; \ + \ + if (dbg_msg && !ql_mask_match_ext(level, \ + &ql2xextended_error_logging_msg_trace)) \ + break; \ + \ + va_start(_va, fmt); \ + \ + _vaf.fmt = fmt; \ + _vaf.va = &_va; \ + __ql_msg_trace(&qla_message_trace, vha, pdev, \ + id + dbg_off, &_vaf); \ + \ + va_end(_va); \ +} while(0) + +/* Messages beyond QLA_TRACE_LINE_SIZE characters are not printed */ +static inline void +__ql_msg_trace(struct qla_trace *trc, scsi_qla_host_t *vha, + struct pci_dev *pdev, uint id, struct va_format *vaf) +{ + int tl; + char *buf; + u64 t_us = ktime_to_us(ktime_get()); + uint cpu = raw_smp_processor_id(); + + buf = qla_get_trace_next(trc); + if (!buf) + return; + + if (vha) { + const struct pci_dev *_pdev = vha->hw->pdev; + tl = snprintf(buf, QLA_TRACE_LINE_SIZE, + "%12llu %03u %s [%s]-%04x:%ld: %pV", t_us, cpu, + QL_MSGHDR, dev_name(&(_pdev->dev)), id, + vha->host_no, vaf); + } else { + tl = snprintf(buf, QLA_TRACE_LINE_SIZE, + "%12llu %03u %s [%s]-%04x: : %pV", t_us, cpu, QL_MSGHDR, + pdev ? dev_name(&(pdev->dev)) : "0000:00:00.0", + id, vaf); + } + + tl = min(tl, QLA_TRACE_LINE_SIZE - 1); + buf[tl] = '\0'; + + qla_trace_put(trc); +} + +#define ql_srb_trace_ext(_level, _vha, _fp, _fmt, _args...) do { \ + struct fc_port *_fcport = _fp; \ + if (_fcport) { \ + __ql_srb_trace(_level, _vha, \ + DBG_FCPORT_PRFMT(_fcport, _fmt, ##_args)); \ + } else { \ + __ql_srb_trace(_level, _vha, \ + "%s: " _fmt "\n", __func__, ##_args); \ + } \ +} while(0) + +#define ql_srb_trace(_level, _vha, _fmt, _args...) \ + __ql_srb_trace(_level, _vha, _fmt, ##_args) + +static void +__ql_srb_trace(int level, scsi_qla_host_t *vha, const char *fmt, ...) +{ + int tl; + char *buf; + u64 t_us; + uint cpu; + struct va_format vaf; + va_list va; + + if (!test_bit(QLA_TRACE_ENABLED, &vha->hw->srb_trace.flags)) + return; + + if (!ql_mask_match_ext(level, &ql2xextended_error_logging_srb_trace)) + return; + + t_us = ktime_to_us(ktime_get()); + cpu = raw_smp_processor_id(); + buf = qla_get_trace_next(&vha->hw->srb_trace); + if (!buf) + return; + + va_start(va, fmt); + + vaf.fmt = fmt; + vaf.va = &va; + + tl = snprintf(buf, QLA_TRACE_LINE_SIZE, "%12llu %03u %pV", + t_us, cpu, &vaf); + + tl = min(tl, QLA_TRACE_LINE_SIZE - 1); + buf[tl] = '\0'; + + qla_trace_put(&vha->hw->srb_trace); + + va_end(va); +} + +static inline void +qla_trace_init(struct qla_trace *trc, char *name, u32 num_entries) +{ + if (trc->recs) + return; + + memset(trc, 0, sizeof(*trc)); + + trc->name = name; + spin_lock_init(&trc->trc_lock); + if (!num_entries) + return; + trc->num_entries = num_entries; + trc->recs = vzalloc(trc->num_entries * + sizeof(struct qla_trace_rec)); + if (!trc->recs) + return; + + set_bit(QLA_TRACE_ENABLED, &trc->flags); +} + +static inline void +qla_trace_uninit(struct qla_trace *trc) +{ + if (!trc->recs) + return; + + vfree(trc->recs); + trc->recs = NULL; + clear_bit(QLA_TRACE_ENABLED, &trc->flags); +} + +#else /* QLA_TRACING */ +#define ql_msg_trace(dbg_msg, level, vha, pdev, id, fmt) do { } while(0) +#define qla_trace_init(trc, name, num) +#define qla_trace_uninit(trc) +#define qla_tracing_init() +#define qla_tracing_exit() +#define QLA_MESSAGE_TRACE_DEFINES + +#define ql_srb_trace_ext(_level, _vha, _fcport, _fmt, _args...) do { } while(0) +#define ql_srb_trace(_level, _vha, _fmt, _args...) 
do { } while(0) +#define QLA_SRB_TRACE_DEFINES +#endif /* QLA_TRACING */ + void __attribute__((format (printf, 4, 5))) ql_dbg(uint, scsi_qla_host_t *vha, uint, const char *fmt, ...); void __attribute__((format (printf, 4, 5))) @@ -337,6 +624,34 @@ ql_log_pci(uint, struct pci_dev *pdev, uint, const char *fmt, ...); void __attribute__((format (printf, 4, 5))) ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...); +/* + * Macro that declares tables and a routine to perform enum type to + * ascii string lookup. + * + * Defines a table for an enum. Uses xxx_INIT defines for + * the enum to populate the table. Macro defines a routine (named + * by caller) that will search all elements of the table for the key + * and return the name string if found or "Unrecognized" if not found. + */ +#define DECLARE_ENUM2STR_LOOKUP(routine, enum_name, enum_init) \ +static struct { \ + enum enum_name value; \ + char *name; \ +} ql_##enum_name##_e2str_names[] = enum_init; \ +static const char *routine(enum enum_name table_key) \ +{ \ + int i; \ + char *name = "Unrecognized"; \ + \ + for (i = 0; i < ARRAY_SIZE(ql_##enum_name##_e2str_names); i++) {\ + if (ql_##enum_name##_e2str_names[i].value == table_key) {\ + name = ql_##enum_name##_e2str_names[i].name; \ + break; \ + } \ + } \ + return name; \ +} + /* Debug Levels */ /* The 0x40000000 is the max value any debug level can have * as ql2xextended_error_logging is of type signed int @@ -367,10 +682,12 @@ ql_log_qp(uint32_t, struct qla_qpair *, int32_t, const char *fmt, ...); #define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */ #define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */ #define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */ +#define ql_dbg_edif 0x00000400 /* edif and purex debug */ +#define ql_dbg_scm 0x00000200 /* SCM related debug */ extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *, uint32_t, void **); -extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, __be32 *, +extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *, uint32_t, void **); extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *, struct qla_hw_data *); @@ -382,5 +699,5 @@ ql_mask_match(uint level) if (ql2xextended_error_logging == 1) ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK; - return (level & ql2xextended_error_logging) == level; + return level && ((level & ql2xextended_error_logging) == level); } diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 8a8e0920d2b41..9a9d1de9f15c5 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. */ #ifndef __QLA_DEF_H #define __QLA_DEF_H @@ -33,8 +34,45 @@ #include #include +#include #include +#define QLA_TRACING /* Captures driver messages to buffer */ + +#ifdef QLA_TRACING +#define QLA_TRACE_LINE_SIZE 256 /* Biggest so far is ~215 */ +#define qla_trace_ind_norm(_trc, _ind) ((_ind) >= (_trc)->num_entries ? 
\ + 0 : (_ind)) +#define qla_trace_record(_trc, __ind) ((_trc)->recs[__ind].buf) +#define qla_trace_record_len (sizeof(struct qla_trace_rec)) +#define qla_trace_start(_trc) qla_trace_record(_trc, 0) +#define qla_trace_len(_trc) (_trc)->num_entries +#define qla_trace_size(_trc) (qla_trace_record_len * \ + (_trc)->num_entries) +#define qla_trace_cur_ind(_trc) ((_trc)->trace_ind) +struct qla_trace_rec { + char buf[QLA_TRACE_LINE_SIZE]; +}; + +struct qla_trace { +#define QLA_TRACE_ENABLED 0 /* allow trace writes or not */ +#define QLA_TRACE_WRAPPED 1 +#define QLA_TRACE_QUIESCE 2 + unsigned long flags; + atomic_t ref_count; + u32 num_entries; + u32 trace_ind; + spinlock_t trc_lock; + char *name; + struct qla_trace_rec *recs; +}; +#endif /* QLA_TRACING */ + +#define QLA_DFS_DEFINE_DENTRY(_debugfs_file_name) \ + struct dentry *dfs_##_debugfs_file_name +#define QLA_DFS_ROOT_DEFINE_DENTRY(_debugfs_file_name) \ + struct dentry *qla_dfs_##_debugfs_file_name + /* Big endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */ typedef struct { uint8_t domain; @@ -49,14 +87,50 @@ typedef struct { uint8_t domain; } le_id_t; +/* + * 24 bit port ID type definition. + */ +typedef union { + uint32_t b24 : 24; + struct { +#ifdef __BIG_ENDIAN + uint8_t domain; + uint8_t area; + uint8_t al_pa; +#elif defined(__LITTLE_ENDIAN) + uint8_t al_pa; + uint8_t area; + uint8_t domain; +#else +#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!" +#endif + uint8_t rsvd_1; + } b; +} port_id_t; +#define INVALID_PORT_ID 0xFFFFFF +#define ISP_REG16_DISCONNECT 0xFFFF + #include "qla_bsg.h" #include "qla_dsd.h" #include "qla_nx.h" #include "qla_nx2.h" #include "qla_nvme.h" + +struct auth_els_header_t { + uint8_t els_code; + uint8_t els_flags; + uint8_t message_code; + uint8_t protocol_version; +#define AUTH_ELS_PROTOCOL_VERSION 2 + uint32_t message_length; + uint32_t transaction_identifier; + uint8_t payload[0]; /* payload for cmd */ +} __attribute__ ((packed)); +typedef struct auth_els_header_t auth_els_header_t; + #define QLA2XXX_DRIVER_NAME "qla2xxx" #define QLA2XXX_APIDEV "ql2xapidev" -#define QLA2XXX_MANUFACTURER "QLogic Corporation" +#define QLA2XXX_MANUFACTURER "Marvell Semiconductor, Inc." 
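As an aside on the port_id_t union introduced above: it overlays the packed 24-bit FC address (b24) with its individual domain/area/al_pa bytes, with the byte order chosen per host endianness. The following minimal userspace sketch (illustration only, not part of this patch; it mirrors just the little-endian branch and uses made-up test values) shows the overlay in action and compiles with any C99 compiler:

	#include <stdint.h>
	#include <stdio.h>

	/* Same shape as the driver's port_id_t, little-endian branch only. */
	typedef union {
		uint32_t b24 : 24;	/* packed 24-bit FC address */
		struct {
			uint8_t al_pa;	/* low byte on little-endian hosts */
			uint8_t area;
			uint8_t domain;	/* most significant address byte */
			uint8_t rsvd_1;
		} b;
	} port_id_t;

	int main(void)
	{
		/* hypothetical address: domain 01, area 02, al_pa 03 */
		port_id_t id = { .b24 = 0x010203 };

		printf("domain=%02x area=%02x al_pa=%02x\n",
		       id.b.domain, id.b.area, id.b.al_pa);
		return 0;
	}

On a little-endian host this prints "domain=01 area=02 al_pa=03"; the #ifdef in the driver's definition reorders the three byte fields so the same member names read correctly on big-endian machines as well.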
/* * We have MAILBOX_REGISTER_COUNT sized arrays in a few places, @@ -120,59 +194,21 @@ typedef struct { #define LSD(x) ((uint32_t)((uint64_t)(x))) #define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16)) -static inline uint32_t make_handle(uint16_t x, uint16_t y) -{ - return ((uint32_t)x << 16) | y; -} +#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y))) /* * I/O register */ -static inline u8 rd_reg_byte(const volatile u8 __iomem *addr) -{ - return readb(addr); -} - -static inline u16 rd_reg_word(const volatile __le16 __iomem *addr) -{ - return readw(addr); -} - -static inline u32 rd_reg_dword(const volatile __le32 __iomem *addr) -{ - return readl(addr); -} - -static inline u8 rd_reg_byte_relaxed(const volatile u8 __iomem *addr) -{ - return readb_relaxed(addr); -} - -static inline u16 rd_reg_word_relaxed(const volatile __le16 __iomem *addr) -{ - return readw_relaxed(addr); -} - -static inline u32 rd_reg_dword_relaxed(const volatile __le32 __iomem *addr) -{ - return readl_relaxed(addr); -} - -static inline void wrt_reg_byte(volatile u8 __iomem *addr, u8 data) -{ - return writeb(data, addr); -} - -static inline void wrt_reg_word(volatile __le16 __iomem *addr, u16 data) -{ - return writew(data, addr); -} - -static inline void wrt_reg_dword(volatile __le32 __iomem *addr, u32 data) -{ - return writel(data, addr); -} +#define RD_REG_BYTE(addr) readb(addr) +#define RD_REG_WORD(addr) readw(addr) +#define RD_REG_DWORD(addr) readl(addr) +#define RD_REG_BYTE_RELAXED(addr) readb_relaxed(addr) +#define RD_REG_WORD_RELAXED(addr) readw_relaxed(addr) +#define RD_REG_DWORD_RELAXED(addr) readl_relaxed(addr) +#define WRT_REG_BYTE(addr, data) writeb(data, addr) +#define WRT_REG_WORD(addr, data) writew(data, addr) +#define WRT_REG_DWORD(addr, data) writel(data, addr) /* * ISP83XX specific remote register addresses @@ -319,6 +355,14 @@ struct name_list_extended { u32 size; u8 sent; }; + + +struct els_reject { + struct fc_els_ls_rjt *c; + dma_addr_t cdma; + u16 size; +}; + /* * Timeout timer counts in seconds */ @@ -345,9 +389,18 @@ struct name_list_extended { #define FW_MAX_EXCHANGES_CNT (32 * 1024) #define REDUCE_EXCHANGES_CNT (8 * 1024) +#define SET_DID_STATUS(stat_var, status) (stat_var = status << 16) + struct req_que; struct qla_tgt_sess; +struct qla_buf_dsc { + u16 tag; +#define TAG_FREED 0xffff + void *buf; + dma_addr_t buf_dma; +}; + /* * SCSI Request Block */ @@ -356,46 +409,29 @@ struct srb_cmd { uint32_t request_sense_length; uint32_t fw_sense_length; uint8_t *request_sense_ptr; - struct ct6_dsd *ct6_ctx; struct crc_context *crc_ctx; + struct ct6_dsd ct6_ctx; + struct qla_buf_dsc buf_dsc; }; /* * SRB flag definitions */ #define SRB_DMA_VALID BIT_0 /* Command sent to ISP */ +#define SRB_GOT_BUF BIT_1 #define SRB_FCP_CMND_DMA_VALID BIT_12 /* DIF: DSD List valid */ #define SRB_CRC_CTX_DMA_VALID BIT_2 /* DIF: context DMA valid */ #define SRB_CRC_PROT_DMA_VALID BIT_4 /* DIF: prot DMA valid */ #define SRB_CRC_CTX_DSD_VALID BIT_5 /* DIF: dsd_list valid */ #define SRB_WAKEUP_ON_COMP BIT_6 #define SRB_DIF_BUNDL_DMA_VALID BIT_7 /* DIF: DMA list valid */ +#define SRB_EDIF_CLEANUP_DELETE BIT_9 + /* To identify if a srb is of T10-CRC type. @sp => srb_t pointer */ #define IS_PROT_IO(sp) (sp->flags & SRB_CRC_CTX_DSD_VALID) -/* - * 24 bit port ID type definition. 
- */ -typedef union { - uint32_t b24 : 24; - - struct { -#ifdef __BIG_ENDIAN - uint8_t domain; - uint8_t area; - uint8_t al_pa; -#elif defined(__LITTLE_ENDIAN) - uint8_t al_pa; - uint8_t area; - uint8_t domain; -#else -#error "__BIG_ENDIAN or __LITTLE_ENDIAN must be defined!" -#endif - uint8_t rsvd_1; - } b; -} port_id_t; -#define INVALID_PORT_ID 0xFFFFFF +#define IS_EDIF_CLEANUP_DELETE(sp) (sp->flags & SRB_EDIF_CLEANUP_DELETE) static inline le_id_t be_id_to_le(be_id_t id) { @@ -442,6 +478,15 @@ static inline be_id_t port_id_to_be_id(port_id_t port_id) return res; } +struct tmf_arg { + struct qla_qpair *qpair; + struct fc_port *fcport; + struct scsi_qla_host *vha; + u64 lun; + u32 flags; + uint8_t modifier; +}; + struct els_logo_payload { uint8_t opcode; uint8_t rsvd[3]; @@ -453,7 +498,7 @@ struct els_logo_payload { struct els_plogi_payload { uint8_t opcode; uint8_t rsvd[3]; - __be32 data[112 / 4]; + uint8_t data[112]; }; struct ct_arg { @@ -470,6 +515,121 @@ struct ct_arg { port_id_t id; }; +/* TODO - the definitions below can + * also be obtained from + * uapi/scsi/fc/fc_els.h. Defining them here + * so that it works for all distros + * While upstreaming, these need to be separated + */ + +#define RDF_OPCODE 0x19 +#define EDC_OPCODE 0x17 + +enum scm_fc_ls_edc_dtag { + ELS_EDC_LFC_INFO = 0x0001000D, + /* Link Fault Cap Descriptor Tag */ + ELS_EDC_CONG_SIG_INFO = 0x0001000F, + /* Congestion Signal Descriptor Tag */ +}; + +/* Refer to FC-LS 5.01 for a detailed description */ +struct edc_els_descriptor { + __be32 link_fault_cap_descriptor_tag; /* 0001 000D */ + __be32 lfc_descriptor_length; /* 12 */ + __be32 degrade_activate_threshold; + __be32 degrade_deactivate_threshold; + __be32 fec_degrade_interval; + + __be32 cong_sig_cap_descriptor_tag; /* 0001 000F */ + __be32 csc_descriptor_length; /* 16 */ + __be32 tx_signal_cap; + __be32 tx_signal_freq; + __be32 rx_signal_cap; + __be32 rx_signal_freq; +}; + +struct edc_els_payload { + __be32 els_code; /* 0x17 */ + __be32 desc_len; + struct edc_els_descriptor edc_desc; +}; + +struct edc_els_link_srv_descriptor { + __be32 link_srv_info_descriptor_tag; /* 0000 0001 */ + __be32 ls_info_descriptor_length; /* 4 */ + __be32 els_code; /* 0x17 */ +}; + +struct edc_els_resp_payload { + __be32 resp_code; /* ACC or REJ */ + __be32 desc_len; + struct edc_els_link_srv_descriptor edc_ls_desc; + struct edc_els_descriptor edc_desc; +}; + +/* + * LS TLV Descriptor Tag Values + */ +enum scm_fc_ls_tlv_dtag { + QLA_ELS_DTAG_LS_REQ_INFO = 0x00000001, + /* Link Service Request Information Descriptor */ + QLA_ELS_DTAG_LNK_INTEGRITY = 0x00020001, + /* Link Integrity Notification Descriptor */ + QLA_ELS_DTAG_DELIVERY = 0x00020002, + /* Delivery Notification Descriptor */ + QLA_ELS_DTAG_PEER_CONGEST = 0x00020003, + /* Peer Congestion Notification Descriptor */ + QLA_ELS_DTAG_CONGESTION = 0x00020004, + /* Congestion Notification Descriptor */ + QLA_ELS_DTAG_PUN = 0x00020005, + /* Priority Update Notification Descriptor */ + QLA_ELS_DTAG_FPIN_REGISTER = 0x00030001, + /* FPIN Registration Descriptor */ +}; +#define ELS_RDF_REG_TAG_CNT 5 +#define ELS_RDF_RSP_DESC_LEN 12 +#define ER_RDY_DESC_TAG 0x10 + +struct rdf_els_descriptor { + __be32 desc_tag; /* 0003 0001 */ + __be32 desc_len; + __be32 desc_cnt; + __be32 desc_tags[ELS_RDF_REG_TAG_CNT]; +}; + +struct rdf_els_payload { + __be32 els_code; /* 0x19000000 */ + __be32 desc_len; + struct rdf_els_descriptor rdf_desc; +}; + +struct els_resp { + __be32 resp_code; /* 02 - LS_ACC */ + __be32 desc_list_len; + __be32 desc_tag; /* 0000 
0001 */ + __be32 desc_len; /* 4 */ + __be32 els_code; + union { + struct rdf_els_descriptor rdf_desc; + struct edc_els_descriptor edc_desc; + } u; +}; +#define MAX_USCM_ELS_RETRIES 4 +#define QLA_DRV_SEND_ELS(ha) \ + ((ql2xcontrol_edc_rdf == 1) && \ + (ha->flags.scm_supported_f == 1) && (ha->flags.scm_supported_a == 1)) + +struct qla_els_pt_arg { + u8 els_opcode; + u8 vp_idx; + __le16 nport_handle; + u16 control_flags, ox_id; + __le32 rx_xchg_address; + port_id_t did, sid; + u32 tx_len, tx_byte_count, rx_len, rx_byte_count; + dma_addr_t tx_addr, rx_addr; +}; + /* * SRB extensions. */ @@ -482,6 +642,7 @@ struct srb_iocb { #define SRB_LOGIN_SKIP_PRLI BIT_2 #define SRB_LOGIN_NVME_PRLI BIT_3 #define SRB_LOGIN_PRLI_ONLY BIT_4 +#define SRB_LOGIN_FCSP BIT_5 uint16_t data[2]; u32 iop[2]; } logio; @@ -505,7 +666,7 @@ struct srb_iocb { u32 rx_size; dma_addr_t els_plogi_pyld_dma; dma_addr_t els_resp_pyld_dma; - __le32 fw_status[3]; + uint32_t fw_status[3]; __le16 comp_status; __le16 len; } els_plogi; @@ -520,6 +681,10 @@ struct srb_iocb { uint32_t data; struct completion comp; __le16 comp_status; + + uint8_t modifier; + uint8_t vp_index; + uint16_t loop_id; } tmf; struct { #define SRB_FXDISC_REQ_DMA_VALID BIT_0 @@ -556,8 +721,8 @@ struct srb_iocb { #define MAX_IOCB_MB_REG 28 #define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t)) struct { - u16 in_mb[MAX_IOCB_MB_REG]; /* from FW */ - u16 out_mb[MAX_IOCB_MB_REG]; /* to FW */ + __le16 in_mb[MAX_IOCB_MB_REG]; /* from FW */ + __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */ void *out, *in; dma_addr_t out_dma, in_dma; struct completion comp; @@ -568,7 +733,7 @@ struct srb_iocb { } nack; struct { __le16 comp_status; - __le16 rsp_pyld_len; + uint16_t rsp_pyld_len; uint8_t aen_op; void *desc; @@ -586,6 +751,27 @@ struct srb_iocb { u16 cmd; u16 vp_index; } ctrlvp; + struct { + struct edif_sa_ctl *sa_ctl; + struct qla_sa_update_frame sa_frame; + } sa_update; + struct { + struct { + struct edc_els_payload edc_cmd; + struct rdf_els_payload rdf_cmd; + } els_req; + struct els_resp els_rsp; + struct qla_els_pt_arg els_pt_arg; + uint32_t timeout; /* Timeout for ELS */ + struct { + int cmd_len; + dma_addr_t cmd_dma; + void *cmd_buf; + int rsp_len; + dma_addr_t rsp_dma; + void *rsp_buf; + } dma_addr; + } drv_els; } u; struct timer_list timer; @@ -616,6 +802,12 @@ struct srb_iocb { #define SRB_PRLI_CMD 21 #define SRB_CTRL_VP 22 #define SRB_PRLO_CMD 23 +#define SRB_SA_UPDATE 25 +#define SRB_ELS_CMD_HST_NOLOGIN 26 +#define SRB_SA_REPLACE 27 +#define SRB_ELS_RDF 28 +#define SRB_ELS_EDC 29 +#define SRB_MARKER 30 enum { TYPE_SRB, @@ -625,17 +817,48 @@ enum { struct iocb_resource { u8 res_type; - u8 pad; + u8 exch_cnt; u16 iocb_cnt; }; +/******************** qla_compat entries ****************************/ +/* + * Entries that are needed much earlier, like system structures + * that are used by qla2xxx structures used in this file go here, + * rest goes to qla_compat.h. + */ +#ifdef SCSI_FC_BSG_JOB +/* check for bad side effects. */ +typedef struct fc_bsg_job bsg_job_t; +#else /* SCSI_FC_BSG_JOB */ +/* Use the bsg_job listed in bsg-lib.h. 
*/ +#include +typedef struct bsg_job bsg_job_t; +#endif /* SCSI_FC_BSG_JOB */ + +#ifdef TIMER_SETUP +typedef struct timer_list * qla_timer_arg_t; +#else /* TIMER_SETUP */ +typedef unsigned long qla_timer_arg_t; +#endif /* TIMER_SETUP */ + +/******************** qla_compat entries ****************************/ + +struct bsg_cmd { + bsg_job_t *bsg_job; + union { + struct qla_els_pt_arg els_arg; + }u; +}; + typedef struct srb { /* * Do not move cmd_type field, it needs to * line up with qla_tgt_cmd->cmd_type */ uint8_t cmd_type; - uint8_t pad[3]; + enum dma_data_direction dir; + uint8_t pad[2]; struct iocb_resource iores; struct kref cmd_kref; /* need to migrate ref_count over to this */ void *priv; @@ -643,6 +866,9 @@ typedef struct srb { struct fc_port *fcport; struct scsi_qla_host *vha; unsigned int start_timer:1; + unsigned int abort:1; + unsigned int aborted:1; + unsigned int completed:1; uint32_t handle; uint16_t flags; @@ -657,11 +883,31 @@ typedef struct srb { int rc; int retry_count; struct completion *comp; +#ifdef QLA2XXX_LATENCY_MEASURE + struct timespec64 q_cmd; + struct timespec64 cmd_to_req_q; + struct timespec64 cmd_from_rsp_q; + struct timespec64 cmd_to_ml; +#endif union { struct srb_iocb iocb_cmd; - struct bsg_job *bsg_job; + bsg_job_t *bsg_job; struct srb_cmd scmd; + struct bsg_cmd bsg_cmd; } u; + struct { + bool remapped; + struct { + dma_addr_t dma; + void *buf; + uint len; + } req; + struct { + dma_addr_t dma; + void *buf; + uint len; + } rsp; + } remap; /* * Report completion status @res and call sp_put(@sp). @res is * an NVMe status code, a SCSI result (e.g. DID_OK << 16) or a @@ -675,6 +921,13 @@ typedef struct srb { * code. */ void (*put_fn)(struct kref *kref); + unsigned long start_jiffies; + unsigned long done_jiffies; + + /* + * Report completition for asynchronous commands. + */ + void (*async_done)(struct srb *sp, int res); } srb_t; #define GET_CMD_SP(sp) (sp->u.scmd.cmd) @@ -706,23 +959,23 @@ struct msg_echo_lb { * ISP I/O Register Set structure definitions. */ struct device_reg_2xxx { - __le16 flash_address; /* Flash BIOS address */ - __le16 flash_data; /* Flash BIOS data */ - __le16 unused_1[1]; /* Gap */ - __le16 ctrl_status; /* Control/Status */ + uint16_t flash_address; /* Flash BIOS address */ + uint16_t flash_data; /* Flash BIOS data */ + uint16_t unused_1[1]; /* Gap */ + uint16_t ctrl_status; /* Control/Status */ #define CSR_FLASH_64K_BANK BIT_3 /* Flash upper 64K bank select */ #define CSR_FLASH_ENABLE BIT_1 /* Flash BIOS Read/Write enable */ #define CSR_ISP_SOFT_RESET BIT_0 /* ISP soft reset */ - __le16 ictrl; /* Interrupt control */ + uint16_t ictrl; /* Interrupt control */ #define ICR_EN_INT BIT_15 /* ISP enable interrupts. */ #define ICR_EN_RISC BIT_3 /* ISP enable RISC interrupts. */ - __le16 istatus; /* Interrupt status */ + uint16_t istatus; /* Interrupt status */ #define ISR_RISC_INT BIT_3 /* RISC interrupt */ - __le16 semaphore; /* Semaphore */ - __le16 nvram; /* NVRAM register. */ + uint16_t semaphore; /* Semaphore */ + uint16_t nvram; /* NVRAM register. 
*/ #define NVR_DESELECT 0 #define NVR_BUSY BIT_15 #define NVR_WRT_ENABLE BIT_14 /* Write enable */ @@ -736,80 +989,80 @@ struct device_reg_2xxx { union { struct { - __le16 mailbox0; - __le16 mailbox1; - __le16 mailbox2; - __le16 mailbox3; - __le16 mailbox4; - __le16 mailbox5; - __le16 mailbox6; - __le16 mailbox7; - __le16 unused_2[59]; /* Gap */ + uint16_t mailbox0; + uint16_t mailbox1; + uint16_t mailbox2; + uint16_t mailbox3; + uint16_t mailbox4; + uint16_t mailbox5; + uint16_t mailbox6; + uint16_t mailbox7; + uint16_t unused_2[59]; /* Gap */ } __attribute__((packed)) isp2100; struct { /* Request Queue */ - __le16 req_q_in; /* In-Pointer */ - __le16 req_q_out; /* Out-Pointer */ + uint16_t req_q_in; /* In-Pointer */ + uint16_t req_q_out; /* Out-Pointer */ /* Response Queue */ - __le16 rsp_q_in; /* In-Pointer */ - __le16 rsp_q_out; /* Out-Pointer */ + uint16_t rsp_q_in; /* In-Pointer */ + uint16_t rsp_q_out; /* Out-Pointer */ /* RISC to Host Status */ - __le32 host_status; + uint32_t host_status; #define HSR_RISC_INT BIT_15 /* RISC interrupt */ #define HSR_RISC_PAUSED BIT_8 /* RISC Paused */ /* Host to Host Semaphore */ - __le16 host_semaphore; - __le16 unused_3[17]; /* Gap */ - __le16 mailbox0; - __le16 mailbox1; - __le16 mailbox2; - __le16 mailbox3; - __le16 mailbox4; - __le16 mailbox5; - __le16 mailbox6; - __le16 mailbox7; - __le16 mailbox8; - __le16 mailbox9; - __le16 mailbox10; - __le16 mailbox11; - __le16 mailbox12; - __le16 mailbox13; - __le16 mailbox14; - __le16 mailbox15; - __le16 mailbox16; - __le16 mailbox17; - __le16 mailbox18; - __le16 mailbox19; - __le16 mailbox20; - __le16 mailbox21; - __le16 mailbox22; - __le16 mailbox23; - __le16 mailbox24; - __le16 mailbox25; - __le16 mailbox26; - __le16 mailbox27; - __le16 mailbox28; - __le16 mailbox29; - __le16 mailbox30; - __le16 mailbox31; - __le16 fb_cmd; - __le16 unused_4[10]; /* Gap */ + uint16_t host_semaphore; + uint16_t unused_3[17]; /* Gap */ + uint16_t mailbox0; + uint16_t mailbox1; + uint16_t mailbox2; + uint16_t mailbox3; + uint16_t mailbox4; + uint16_t mailbox5; + uint16_t mailbox6; + uint16_t mailbox7; + uint16_t mailbox8; + uint16_t mailbox9; + uint16_t mailbox10; + uint16_t mailbox11; + uint16_t mailbox12; + uint16_t mailbox13; + uint16_t mailbox14; + uint16_t mailbox15; + uint16_t mailbox16; + uint16_t mailbox17; + uint16_t mailbox18; + uint16_t mailbox19; + uint16_t mailbox20; + uint16_t mailbox21; + uint16_t mailbox22; + uint16_t mailbox23; + uint16_t mailbox24; + uint16_t mailbox25; + uint16_t mailbox26; + uint16_t mailbox27; + uint16_t mailbox28; + uint16_t mailbox29; + uint16_t mailbox30; + uint16_t mailbox31; + uint16_t fb_cmd; + uint16_t unused_4[10]; /* Gap */ } __attribute__((packed)) isp2300; } u; - __le16 fpm_diag_config; - __le16 unused_5[0x4]; /* Gap */ - __le16 risc_hw; - __le16 unused_5_1; /* Gap */ - __le16 pcr; /* Processor Control Register. */ - __le16 unused_6[0x5]; /* Gap */ - __le16 mctr; /* Memory Configuration and Timing. */ - __le16 unused_7[0x3]; /* Gap */ - __le16 fb_cmd_2100; /* Unused on 23XX */ - __le16 unused_8[0x3]; /* Gap */ - __le16 hccr; /* Host command & control register. */ + uint16_t fpm_diag_config; + uint16_t unused_5[0x4]; /* Gap */ + uint16_t risc_hw; + uint16_t unused_5_1; /* Gap */ + uint16_t pcr; /* Processor Control Register. */ + uint16_t unused_6[0x5]; /* Gap */ + uint16_t mctr; /* Memory Configuration and Timing. 
*/ + uint16_t unused_7[0x3]; /* Gap */ + uint16_t fb_cmd_2100; /* Unused on 23XX */ + uint16_t unused_8[0x3]; /* Gap */ + uint16_t hccr; /* Host command & control register. */ #define HCCR_HOST_INT BIT_7 /* Host interrupt bit */ #define HCCR_RISC_PAUSE BIT_5 /* Pause mode bit */ /* HCCR commands */ @@ -822,9 +1075,9 @@ struct device_reg_2xxx { #define HCCR_DISABLE_PARITY_PAUSE 0x4001 /* Disable parity error RISC pause. */ #define HCCR_ENABLE_PARITY 0xA000 /* Enable PARITY interrupt */ - __le16 unused_9[5]; /* Gap */ - __le16 gpiod; /* GPIO Data register. */ - __le16 gpioe; /* GPIO Enable register. */ + uint16_t unused_9[5]; /* Gap */ + uint16_t gpiod; /* GPIO Data register. */ + uint16_t gpioe; /* GPIO Enable register. */ #define GPIO_LED_MASK 0x00C0 #define GPIO_LED_GREEN_OFF_AMBER_OFF 0x0000 #define GPIO_LED_GREEN_ON_AMBER_OFF 0x0040 @@ -836,95 +1089,95 @@ struct device_reg_2xxx { union { struct { - __le16 unused_10[8]; /* Gap */ - __le16 mailbox8; - __le16 mailbox9; - __le16 mailbox10; - __le16 mailbox11; - __le16 mailbox12; - __le16 mailbox13; - __le16 mailbox14; - __le16 mailbox15; - __le16 mailbox16; - __le16 mailbox17; - __le16 mailbox18; - __le16 mailbox19; - __le16 mailbox20; - __le16 mailbox21; - __le16 mailbox22; - __le16 mailbox23; /* Also probe reg. */ + uint16_t unused_10[8]; /* Gap */ + uint16_t mailbox8; + uint16_t mailbox9; + uint16_t mailbox10; + uint16_t mailbox11; + uint16_t mailbox12; + uint16_t mailbox13; + uint16_t mailbox14; + uint16_t mailbox15; + uint16_t mailbox16; + uint16_t mailbox17; + uint16_t mailbox18; + uint16_t mailbox19; + uint16_t mailbox20; + uint16_t mailbox21; + uint16_t mailbox22; + uint16_t mailbox23; /* Also probe reg. */ } __attribute__((packed)) isp2200; } u_end; }; struct device_reg_25xxmq { - __le32 req_q_in; - __le32 req_q_out; - __le32 rsp_q_in; - __le32 rsp_q_out; - __le32 atio_q_in; - __le32 atio_q_out; + uint32_t req_q_in; + uint32_t req_q_out; + uint32_t rsp_q_in; + uint32_t rsp_q_out; + uint32_t atio_q_in; + uint32_t atio_q_out; }; struct device_reg_fx00 { - __le32 mailbox0; /* 00 */ - __le32 mailbox1; /* 04 */ - __le32 mailbox2; /* 08 */ - __le32 mailbox3; /* 0C */ - __le32 mailbox4; /* 10 */ - __le32 mailbox5; /* 14 */ - __le32 mailbox6; /* 18 */ - __le32 mailbox7; /* 1C */ - __le32 mailbox8; /* 20 */ - __le32 mailbox9; /* 24 */ - __le32 mailbox10; /* 28 */ - __le32 mailbox11; - __le32 mailbox12; - __le32 mailbox13; - __le32 mailbox14; - __le32 mailbox15; - __le32 mailbox16; - __le32 mailbox17; - __le32 mailbox18; - __le32 mailbox19; - __le32 mailbox20; - __le32 mailbox21; - __le32 mailbox22; - __le32 mailbox23; - __le32 mailbox24; - __le32 mailbox25; - __le32 mailbox26; - __le32 mailbox27; - __le32 mailbox28; - __le32 mailbox29; - __le32 mailbox30; - __le32 mailbox31; - __le32 aenmailbox0; - __le32 aenmailbox1; - __le32 aenmailbox2; - __le32 aenmailbox3; - __le32 aenmailbox4; - __le32 aenmailbox5; - __le32 aenmailbox6; - __le32 aenmailbox7; + uint32_t mailbox0; /* 00 */ + uint32_t mailbox1; /* 04 */ + uint32_t mailbox2; /* 08 */ + uint32_t mailbox3; /* 0C */ + uint32_t mailbox4; /* 10 */ + uint32_t mailbox5; /* 14 */ + uint32_t mailbox6; /* 18 */ + uint32_t mailbox7; /* 1C */ + uint32_t mailbox8; /* 20 */ + uint32_t mailbox9; /* 24 */ + uint32_t mailbox10; /* 28 */ + uint32_t mailbox11; + uint32_t mailbox12; + uint32_t mailbox13; + uint32_t mailbox14; + uint32_t mailbox15; + uint32_t mailbox16; + uint32_t mailbox17; + uint32_t mailbox18; + uint32_t mailbox19; + uint32_t mailbox20; + uint32_t mailbox21; + uint32_t mailbox22; + 
uint32_t mailbox23; + uint32_t mailbox24; + uint32_t mailbox25; + uint32_t mailbox26; + uint32_t mailbox27; + uint32_t mailbox28; + uint32_t mailbox29; + uint32_t mailbox30; + uint32_t mailbox31; + uint32_t aenmailbox0; + uint32_t aenmailbox1; + uint32_t aenmailbox2; + uint32_t aenmailbox3; + uint32_t aenmailbox4; + uint32_t aenmailbox5; + uint32_t aenmailbox6; + uint32_t aenmailbox7; /* Request Queue. */ - __le32 req_q_in; /* A0 - Request Queue In-Pointer */ - __le32 req_q_out; /* A4 - Request Queue Out-Pointer */ + uint32_t req_q_in; /* A0 - Request Queue In-Pointer */ + uint32_t req_q_out; /* A4 - Request Queue Out-Pointer */ /* Response Queue. */ - __le32 rsp_q_in; /* A8 - Response Queue In-Pointer */ - __le32 rsp_q_out; /* AC - Response Queue Out-Pointer */ + uint32_t rsp_q_in; /* A8 - Response Queue In-Pointer */ + uint32_t rsp_q_out; /* AC - Response Queue Out-Pointer */ /* Init values shadowed on FW Up Event */ - __le32 initval0; /* B0 */ - __le32 initval1; /* B4 */ - __le32 initval2; /* B8 */ - __le32 initval3; /* BC */ - __le32 initval4; /* C0 */ - __le32 initval5; /* C4 */ - __le32 initval6; /* C8 */ - __le32 initval7; /* CC */ - __le32 fwheartbeat; /* D0 */ - __le32 pseudoaen; /* D4 */ + uint32_t initval0; /* B0 */ + uint32_t initval1; /* B4 */ + uint32_t initval2; /* B8 */ + uint32_t initval3; /* BC */ + uint32_t initval4; /* C0 */ + uint32_t initval5; /* C4 */ + uint32_t initval6; /* C8 */ + uint32_t initval7; /* CC */ + uint32_t fwheartbeat; /* D0 */ + uint32_t pseudoaen; /* D4 */ }; @@ -964,18 +1217,18 @@ typedef union { &(reg)->u_end.isp2200.mailbox8 + (num) - 8) : \ &(reg)->u.isp2300.mailbox0 + (num)) #define RD_MAILBOX_REG(ha, reg, num) \ - rd_reg_word(MAILBOX_REG(ha, reg, num)) + RD_REG_WORD(MAILBOX_REG(ha, reg, num)) #define WRT_MAILBOX_REG(ha, reg, num, data) \ - wrt_reg_word(MAILBOX_REG(ha, reg, num), data) + WRT_REG_WORD(MAILBOX_REG(ha, reg, num), data) #define FB_CMD_REG(ha, reg) \ (IS_QLA2100(ha) || IS_QLA2200(ha) ? \ &(reg)->fb_cmd_2100 : \ &(reg)->u.isp2300.fb_cmd) #define RD_FB_CMD_REG(ha, reg) \ - rd_reg_word(FB_CMD_REG(ha, reg)) + RD_REG_WORD(FB_CMD_REG(ha, reg)) #define WRT_FB_CMD_REG(ha, reg, data) \ - wrt_reg_word(FB_CMD_REG(ha, reg), data) + WRT_REG_WORD(FB_CMD_REG(ha, reg), data) typedef struct { uint32_t out_mb; /* outbound from driver */ @@ -1036,6 +1289,7 @@ struct mbx_cmd_32 { #define MBS_NOT_LOGGED_IN 0x400A #define MBS_LINK_DOWN_ERROR 0x400B #define MBS_DIAG_ECHO_TEST_ERROR 0x400C +#define MBS_FLASH_LOCKDOWN 0x44 static inline bool qla2xxx_is_valid_mbs(unsigned int mbs) { @@ -1061,7 +1315,8 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs) #define MBA_LIP_F8 0x8016 /* Received a LIP F8. */ #define MBA_LOOP_INIT_ERR 0x8017 /* Loop Initialization Error. */ #define MBA_FABRIC_AUTH_REQ 0x801b /* Fabric Authentication Required. */ -#define MBA_CONGN_NOTI_RECV 0x801e /* Congestion Notification Received */ +#define MBA_CONGESTION_NOTIFICATION_RECV 0x801e /* Congestion Notification Received */ +#define MBA_FAST_LOGIN 0x801d /* FAST_LOGIN_COMPLETE. */ #define MBA_SCSI_COMPLETION 0x8020 /* SCSI Command Complete. */ #define MBA_CTIO_COMPLETION 0x8021 /* CTIO Complete. */ #define MBA_IP_COMPLETION 0x8022 /* IP Transmit Command Complete. 
*/ @@ -1117,6 +1372,12 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs) /* ISP mailbox loopback echo diagnostic error code */ #define MBS_LB_RESET 0x17 + +/* AEN mailbox Port Diagnostics test */ +#define AEN_START_DIAG_TEST 0x0 /* start the diagnostics */ +#define AEN_DONE_DIAG_TEST_WITH_NOERR 0x1 /* Done with no errors */ +#define AEN_DONE_DIAG_TEST_WITH_ERR 0x2 /* Done with error. */ + /* * Firmware options 1, 2, 3. */ @@ -1147,6 +1408,7 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs) #define ADD_FO2_ENABLE_SEL_CLS2 BIT_5 +#define ADD_FO3_COPY_FLOGI_ACC_PL BIT_2 #define ADD_FO3_NO_ABT_ON_LINK_DOWN BIT_14 /* @@ -1179,7 +1441,7 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs) #define MBC_GET_FIRMWARE_OPTION 0x28 /* Get Firmware Options. */ #define MBC_GET_MEM_OFFLOAD_CNTRL_STAT 0x34 /* Memory Offload ctrl/Stat*/ #define MBC_SET_FIRMWARE_OPTION 0x38 /* Set Firmware Options. */ -#define MBC_SET_GET_FC_LED_CONFIG 0x3b /* Set/Get FC LED config */ +#define MBC_SET_GET_FC_LED_CONFIG 0x3b /* Set/Get FC LED configuration */ #define MBC_LOOP_PORT_BYPASS 0x40 /* Loop Port Bypass. */ #define MBC_LOOP_PORT_ENABLE 0x41 /* Loop Port Enable. */ #define MBC_GET_RESOURCE_COUNTS 0x42 /* Get Resource Counts. */ @@ -1269,6 +1531,30 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs) #define HCS_WRITE_SERDES 0x3 #define HCS_READ_SERDES 0x4 +/* + * ISP2[7|8]xx mailbox commands. + */ +#define MBC_MPI_PASSTHROUGH 0x200 + + + +/* MBC_MPI_PASSTHROUGH */ +#define MPIPT_REQ_V1 1 +enum { + MPIPT_SUBCMD_GET_SUP_CMD = 0x10, + MPIPT_SUBCMD_GET_SUP_FEATURE, + MPIPT_SUBCMD_GET_STATUS, +}; + +enum { + MPIPT_MPI_STATUS = 1, + MPIPT_FCORE_STATUS, + MPIPT_LOCKDOWN_STATUS, +}; + +#define MPI_LOCKDOWN_FUNCTIONAL 0x3 + + /* Firmware return data sizes */ #define FCAL_MAP_SIZE 128 @@ -1311,8 +1597,19 @@ static inline bool qla2xxx_is_valid_mbs(unsigned int mbs) #define RNID_BUFFER_CREDITS 0x8 #define RNID_TYPE_SET_VERSION 0x9 #define RNID_TYPE_ASIC_TEMP 0xC +#define RNID_TYPE_GET_FEATURES 0xE + +#define FW_FEATURES_SIZE 64 +#define FW_LOCKDOWN_SUPPORT BIT_0 +#define MPI_LOCKDOWN_SUPPORT BIT_1 #define ELS_CMD_MAP_SIZE 32 +/* Fabric Perf Impact Notification */ +#define ELS_COMMAND_FPIN 0x16 +#define ELS_COMMAND_RDP 0x18 +/* Register Diagnostic Functions */ +#define ELS_COMMAND_RDF 0x19 +#define ELS_COMMAND_EDC 0x17 /* * Firmware state codes from get firmware state mailbox command @@ -1359,7 +1656,7 @@ typedef struct { uint8_t port_id[4]; uint8_t node_name[WWN_SIZE]; uint8_t port_name[WWN_SIZE]; - __le16 execution_throttle; + uint16_t execution_throttle; uint16_t execution_count; uint8_t reset_count; uint8_t reserved_2; @@ -1445,9 +1742,9 @@ typedef struct { */ uint8_t firmware_options[2]; - __le16 frame_payload_size; - __le16 max_iocb_allocation; - __le16 execution_throttle; + uint16_t frame_payload_size; + uint16_t max_iocb_allocation; + uint16_t execution_throttle; uint8_t retry_count; uint8_t retry_delay; /* unused */ uint8_t port_name[WWN_SIZE]; /* Big endian. */ uint8_t hard_address[2]; uint8_t inquiry; uint8_t login_timeout; uint8_t node_name[WWN_SIZE]; /* Big endian.
*/ - __le16 request_q_outpointer; - __le16 response_q_inpointer; - __le16 request_q_length; - __le16 response_q_length; - __le64 request_q_address __packed; - __le64 response_q_address __packed; + uint16_t request_q_outpointer; + uint16_t response_q_inpointer; + uint16_t request_q_length; + uint16_t response_q_length; + __le64 request_q_address __packed; + __le64 response_q_address __packed; - __le16 lun_enables; + uint16_t lun_enables; uint8_t command_resource_count; uint8_t immediate_notify_resource_count; - __le16 timeout; + uint16_t timeout; uint8_t reserved_2[2]; /* @@ -1517,6 +1814,11 @@ typedef struct { uint8_t reserved_3[26]; } init_cb_t; +#define SCM_SUPPORT BIT_13 +#define SCM_VL_SUPPORT BIT_11 +#define SCM_PUN_SUPPORT BIT_10 +#define SCM_DRIVER_CTRL_ELS BIT_7 + /* Special Features Control Block */ struct init_sf_cb { uint8_t format; @@ -1525,7 +1827,10 @@ struct init_sf_cb { * BIT 15-14 = Reserved * BIT_13 = SAN Congestion Management (1 - Enabled, 0 - Disabled) * BIT_12 = Remote Write Optimization (1 - Enabled, 0 - Disabled) - * BIT 11-0 = Reserved + * BIT_11 = Virtual Lane (1 - Enabled, 0 - Disabled) (28xx Only) + * BIT_10 = PUN Descriptor Support (1 - Enabled, 0 - Disabled) (28xx Only) + * BIT_7 = Driver controls EDC and RDF ELS(1 - Driver controls) + * BIT 6-0 = Reserved */ uint16_t flags; uint8_t reserved1[32]; @@ -1632,9 +1937,9 @@ typedef struct { */ uint8_t firmware_options[2]; - __le16 frame_payload_size; - __le16 max_iocb_allocation; - __le16 execution_throttle; + uint16_t frame_payload_size; + uint16_t max_iocb_allocation; + uint16_t execution_throttle; uint8_t retry_count; uint8_t retry_delay; /* unused */ uint8_t port_name[WWN_SIZE]; /* Big endian. */ @@ -1758,7 +2063,7 @@ typedef struct { uint8_t reset_delay; uint8_t port_down_retry_count; uint8_t boot_id_number; - __le16 max_luns_per_target; + uint16_t max_luns_per_target; uint8_t fcode_boot_port_name[WWN_SIZE]; uint8_t alternate_port_name[WWN_SIZE]; uint8_t alternate_node_name[WWN_SIZE]; @@ -1864,7 +2169,7 @@ struct atio { }; typedef union { - __le16 extended; + uint16_t extended; struct { uint8_t reserved; uint8_t standard; @@ -1890,18 +2195,18 @@ typedef struct { uint8_t entry_status; /* Entry Status. */ uint32_t handle; /* System handle. */ target_id_t target; /* SCSI ID */ - __le16 lun; /* SCSI LUN */ - __le16 control_flags; /* Control flags. */ + uint16_t lun; /* SCSI LUN */ + uint16_t control_flags; /* Control flags. */ #define CF_WRITE BIT_6 #define CF_READ BIT_5 #define CF_SIMPLE_TAG BIT_3 #define CF_ORDERED_TAG BIT_2 #define CF_HEAD_TAG BIT_1 uint16_t reserved_1; - __le16 timeout; /* Command timeout. */ - __le16 dseg_count; /* Data segment count. */ + uint16_t timeout; /* Command timeout. */ + uint16_t dseg_count; /* Data segment count. */ uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. */ - __le32 byte_count; /* Total byte count. */ + uint32_t byte_count; /* Total byte count. */ union { struct dsd32 dsd32[3]; struct dsd64 dsd64[2]; @@ -1919,11 +2224,11 @@ typedef struct { uint8_t entry_status; /* Entry Status. */ uint32_t handle; /* System handle. */ target_id_t target; /* SCSI ID */ - __le16 lun; /* SCSI LUN */ - __le16 control_flags; /* Control flags. */ + uint16_t lun; /* SCSI LUN */ + uint16_t control_flags; /* Control flags. */ uint16_t reserved_1; - __le16 timeout; /* Command timeout. */ - __le16 dseg_count; /* Data segment count. */ + uint16_t timeout; /* Command timeout. */ + uint16_t dseg_count; /* Data segment count. */ uint8_t scsi_cdb[MAX_CMDSZ]; /* SCSI command words. 
*/ uint32_t byte_count; /* Total byte count. */ struct dsd64 dsd[2]; @@ -1985,7 +2290,7 @@ struct crc_context { __le16 guard_seed; /* Initial Guard Seed */ __le16 prot_opts; /* Requested Data Protection Mode */ __le16 blk_size; /* Data size in bytes */ - __le16 runt_blk_guard; /* Guard value for runt block (tape + uint16_t runt_blk_guard; /* Guard value for runt block (tape * only) */ __le32 byte_count; /* Total byte count/ total data * transfer count */ @@ -2038,13 +2343,13 @@ typedef struct { uint8_t sys_define; /* System defined. */ uint8_t entry_status; /* Entry Status. */ uint32_t handle; /* System handle. */ - __le16 scsi_status; /* SCSI status. */ - __le16 comp_status; /* Completion status. */ - __le16 state_flags; /* State flags. */ - __le16 status_flags; /* Status flags. */ - __le16 rsp_info_len; /* Response Info Length. */ - __le16 req_sense_length; /* Request sense data length. */ - __le32 residual_length; /* Residual transfer length. */ + uint16_t scsi_status; /* SCSI status. */ + uint16_t comp_status; /* Completion status. */ + uint16_t state_flags; /* State flags. */ + uint16_t status_flags; /* Status flags. */ + uint16_t rsp_info_len; /* Response Info Length. */ + uint16_t req_sense_length; /* Request sense data length. */ + uint32_t residual_length; /* Residual transfer length. */ uint8_t rsp_info[8]; /* FCP response information. */ uint8_t req_sense_data[32]; /* Request sense data. */ } sts_entry_t; @@ -2101,6 +2406,12 @@ typedef struct { #define CS_COMPLETE_CHKCOND 0x30 /* Error? */ #define CS_IOCB_ERROR 0x31 /* Generic error for IOCB request failure */ +#define CS_REJECT_RECEIVED 0x4E /* Reject received */ +#define CS_EDIF_AUTH_ERROR 0x63 /* decrypt error */ +#define CS_EDIF_PAD_LEN_ERROR 0x65 /* pad > frame size, not 4byte align */ +#define CS_EDIF_INV_REQ 0x66 /* invalid request */ +#define CS_EDIF_SPI_ERROR 0x67 /* rx frame unable to locate sa */ +#define CS_EDIF_HDR_ERROR 0x69 /* data frame != expected len */ #define CS_BAD_PAYLOAD 0x80 /* Driver defined */ #define CS_UNKNOWN 0x81 /* Driver defined */ #define CS_RETRY 0x82 /* Driver defined */ @@ -2131,6 +2442,7 @@ typedef struct { uint8_t data[60]; /* data */ } sts_cont_entry_t; +#define STATUS_CONT_TYPE_1 0x6f /* Status continuation entry. */ /* * ISP queue - RIO Type 1 status entry (32 bit I/O entry handles) * structure definition. @@ -2176,8 +2488,8 @@ typedef struct { /* clear port changed, */ /* use sequence number. */ uint8_t reserved_1; - __le16 sequence_number; /* Sequence number of event */ - __le16 lun; /* SCSI LUN */ + uint16_t sequence_number; /* Sequence number of event */ + uint16_t lun; /* SCSI LUN */ uint8_t reserved_2[48]; } mrk_entry_t; @@ -2192,25 +2504,108 @@ typedef struct { uint8_t entry_status; /* Entry Status. */ uint32_t handle1; /* System handle. */ target_id_t loop_id; - __le16 status; - __le16 control_flags; /* Control flags. */ + uint16_t status; + uint16_t control_flags; /* Control flags. 
*/ uint16_t reserved2; - __le16 timeout; - __le16 cmd_dsd_count; - __le16 total_dsd_count; + uint16_t timeout; + uint16_t cmd_dsd_count; + uint16_t total_dsd_count; uint8_t type; uint8_t r_ctl; - __le16 rx_id; + uint16_t rx_id; uint16_t reserved3; uint32_t handle2; - __le32 rsp_bytecount; - __le32 req_bytecount; + uint32_t rsp_bytecount; + uint32_t req_bytecount; struct dsd64 req_dsd; struct dsd64 rsp_dsd; } ms_iocb_entry_t; +/* Virtual Lane macros */ +#define VL_FAST 0 +#define VL_SLOW 1 +#define VL_NORMAL 2 + +/* The following are defined by the switch */ +#define MAX_RX_VLS 7 +#define MIN_RX_VLS 2 +#define MAX_TX_VLS 7 +#define MIN_TX_VLS 2 + +#define NUM_VLS_IN_RANGE(num_txvl, num_rxvl) \ + (((num_txvl >= MIN_TX_VLS) && (num_txvl <= MAX_TX_VLS)) && \ + ((num_rxvl >= MIN_RX_VLS) && (num_rxvl <= MAX_RX_VLS))) + +#define SCM_MARK_DEVICE_FAST (BIT_3|BIT_4) +#define SCM_MARK_DEVICE_SLOW BIT_3 +#define SCM_MARK_DEVICE_NORMAL BIT_4 +#define SCM_CLEAR_DEVICE_SLOW BIT_4 + #define SCM_EDC_ACC_RECEIVED BIT_6 #define SCM_RDF_ACC_RECEIVED BIT_7 +#define SCM_NOTIFICATION_TYPE_LINK_INTEGRITY 0x00020001 +#define SCM_NOTIFICATION_TYPE_DELIVERY 0x00020002 +#define SCM_NOTIFICATION_TYPE_PEER_CONGESTION 0x00020003 +#define SCM_NOTIFICATION_TYPE_CONGESTION 0x00020004 +#define SCM_NOTIFICATION_TYPE_PUN 0x00020005 +#define FPIN_DESCRIPTOR_HEADER_SIZE 4 +#define FPIN_ELS_DESCRIPTOR_LIST_OFFSET 8 + +struct pun_wwn_list { + uint8_t port_name[WWN_SIZE]; + uint32_t resvd; +}; + +struct priority_map_record { + uint8_t pr_high; + uint8_t pr_low; + __be16 num_devices; + struct pun_wwn_list port_list; +}; + +typedef struct fpin_descriptor { + __be32 descriptor_tag; + __be32 descriptor_length; + union { + uint8_t common_detecting_port_name[WWN_SIZE]; + struct { + uint8_t detecting_port_name[WWN_SIZE]; + uint8_t attached_port_name[WWN_SIZE]; + __be16 event_type; + __be16 event_modifier; + __be32 event_threshold; + __be32 event_count; + __be32 port_name_count; + uint8_t port_name_list[1][WWN_SIZE]; + } link_integrity; + struct { + uint8_t detecting_port_name[WWN_SIZE]; + uint8_t attached_port_name[WWN_SIZE]; + __be32 delivery_reason_code; + } delivery; + struct { + uint8_t detecting_port_name[WWN_SIZE]; + uint8_t attached_port_name[WWN_SIZE]; + __be16 event_type; + __be16 event_modifier; + __be32 event_period; + __be32 port_name_count; + uint8_t port_name_list[1][WWN_SIZE]; + } peer_congestion; + struct { + __be16 event_type; + __be16 event_modifier; + __be32 event_period; + uint8_t severity; + uint8_t reserved[3]; + } congestion; + struct { + __be32 event_period; + __be32 num_prio_map_records; + struct priority_map_record prio_map_record; + } pun; + }; +} fpin_descriptor_t; /* * ISP queue - Mailbox Command entry structure definition. @@ -2234,20 +2629,20 @@ struct mbx_entry { uint32_t handle; target_id_t loop_id; - __le16 status; - __le16 state_flags; - __le16 status_flags; + uint16_t status; + uint16_t state_flags; + uint16_t status_flags; uint32_t sys_define2[2]; - __le16 mb0; - __le16 mb1; - __le16 mb2; - __le16 mb3; - __le16 mb6; - __le16 mb7; - __le16 mb9; - __le16 mb10; + uint16_t mb0; + uint16_t mb1; + uint16_t mb2; + uint16_t mb3; + uint16_t mb6; + uint16_t mb7; + uint16_t mb9; + uint16_t mb10; uint32_t reserved_2[2]; uint8_t node_name[WWN_SIZE]; uint8_t port_name[WWN_SIZE]; @@ -2269,52 +2664,53 @@ struct imm_ntfy_from_isp { uint8_t entry_status; /* Entry Status. */ union { struct { - __le32 sys_define_2; /* System defined. */ + uint32_t sys_define_2; /* System defined.
*/ target_id_t target; - __le16 lun; + uint16_t lun; uint8_t target_id; uint8_t reserved_1; - __le16 status_modifier; - __le16 status; - __le16 task_flags; - __le16 seq_id; - __le16 srr_rx_id; - __le32 srr_rel_offs; - __le16 srr_ui; + uint16_t status_modifier; + uint16_t status; + uint16_t task_flags; + uint16_t seq_id; + uint16_t srr_rx_id; + uint32_t srr_rel_offs; + uint16_t srr_ui; #define SRR_IU_DATA_IN 0x1 #define SRR_IU_DATA_OUT 0x5 #define SRR_IU_STATUS 0x7 - __le16 srr_ox_id; + uint16_t srr_ox_id; uint8_t reserved_2[28]; } isp2x; struct { uint32_t reserved; - __le16 nport_handle; + uint16_t nport_handle; uint16_t reserved_2; - __le16 flags; + uint16_t flags; +#define NOTIFY24XX_FLAGS_FCSP BIT_5 #define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1 #define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0 - __le16 srr_rx_id; - __le16 status; + uint16_t srr_rx_id; + uint16_t status; uint8_t status_subcode; uint8_t fw_handle; - __le32 exchange_address; - __le32 srr_rel_offs; - __le16 srr_ui; - __le16 srr_ox_id; + uint32_t exchange_address; + uint32_t srr_rel_offs; + uint16_t srr_ui; + uint16_t srr_ox_id; union { struct { uint8_t node_name[8]; } plogi; /* PLOGI/ADISC/PDISC */ struct { /* PRLI word 3 bit 0-15 */ - __le16 wd3_lo; + uint16_t wd3_lo; uint8_t resv0[6]; } prli; struct { uint8_t port_id[3]; uint8_t resv1; - __le16 nport_handle; + uint16_t nport_handle; uint16_t resv2; } req_els; } u; @@ -2327,7 +2723,7 @@ struct imm_ntfy_from_isp { } isp24; } u; uint16_t reserved_7; - __le16 ox_id; + uint16_t ox_id; } __packed; #endif @@ -2375,11 +2771,9 @@ struct mbx_24xx_entry { */ typedef enum { FCT_UNKNOWN, - FCT_RSCN, - FCT_SWITCH, - FCT_BROADCAST, - FCT_INITIATOR, - FCT_TARGET, + FCT_BROADCAST = 0x01, + FCT_INITIATOR = 0x02, + FCT_TARGET = 0x04, FCT_NVME_INITIATOR = 0x10, FCT_NVME_TARGET = 0x20, FCT_NVME_DISCOVERY = 0x40, @@ -2413,7 +2807,6 @@ struct ct_sns_desc { enum discovery_state { DSC_DELETED, - DSC_GNN_ID, DSC_GNL, DSC_LOGIN_PEND, DSC_LOGIN_FAILED, @@ -2422,6 +2815,7 @@ enum discovery_state { DSC_LOGIN_COMPLETE, DSC_ADISC, DSC_DELETE_PEND, + DSC_LOGIN_AUTH_PEND, }; enum login_state { /* FW control Target side */ @@ -2442,12 +2836,129 @@ enum rscn_addr_format { RSCN_FAB_ADDR, }; +enum qla_congestion_level { + QLA_CONG_NONE = 0, + QLA_CONG_LOW = 1, + QLA_CONG_HIGH = 2, +}; + +enum qla_throttle_dir { + QLA_DIR_NONE = 0, + QLA_DIR_UP = 1, + QLA_DIR_DOWN = 2, +}; + +enum qla_throttle_mode { + QLA_MODE_NONE = 0, + QLA_MODE_FLOWS = 1, + QLA_MODE_Q_DEPTH = 2, +}; + +enum qla_congestion_signal { + QLA_SIG_NONE = 0, + QLA_SIG_CLEAR = 1, + QLA_SIG_CREDIT_STALL = 2, + QLA_SIG_OVERSUBSCRIPTION = 3, + QLA_SIG_THROTTLE_FAST_TGT = 4, +}; + +enum qla_throttle_state { + QLA_THROTTLE_DISABLED = 0, + QLA_THROTTLE_NONE = 1, + QLA_THROTTLE_ACTIVE = 2 +}; + +#define QLA_MIN_BASELINE_BPS 0x100000 +#define QLA_MIN_BASELINE_IOS 1000 +#define QLA_MIN_BASELINE_QDEPTH 16 +#define QLA_MIN_HBA_Q_DEPTH 4 +#define QLA_MIN_TGT_Q_DEPTH 2 +#define QLA_MIN_LUN_Q_DEPTH 2 +#define QLA_SCMR_PERIODS_PER_SEC 10 +#define QLA_SCMR_WARN_THRESHOLD 5 +#define QLA_SCMR_ALARM_THRESHOLD 5 +#define QLA_SCMR_EVENT_PERIOD 60 +#define QLA_SCMR_BUFFER 2 + +struct node_perf { + uint64_t index; + /* Parameters for IOPS throttling */ + atomic_t scmr_reqs_per_period; + atomic_t reqs_last_sec; + uint64_t reqs_arr[10]; + /* Parameters for BPS throttling */ + atomic64_t scmr_bytes_per_period; + atomic64_t bytes_last_sec; + uint64_t bytes_arr[10]; + + atomic_t max_q_depth; + atomic_t q_depth; + atomic_t dir_q_depth; /* Queue depth (directional) */ +}; + +struct 
qla_scmr_flow_control { + enum qla_throttle_mode mode; +#define QLA_SCMRF_IS_TGT 0 +#define QLA_SCMRF_CONGESTED 1 +#define QLA_SCMRF_NOTIFY_FW 2 +#define QLA_SCMRF_TARGETED_THROTTLE 3 +#define QLA_SCMRF_THROTTLING_BPS 4 +#define QLA_SCMRF_THROTTLING_IOS 5 +#define QLA_SCMRF_THROTTLING_QDEPTH 6 +#define QLA_SCMRF_FAST_TGT 7 + unsigned long flags; + enum qla_throttle_dir dir; + enum qla_congestion_level level; + + /* Common for IOPS/BPS throttling */ + atomic_t scmr_base; + atomic_t scmr_permitted; + + struct node_perf perf; + /* Throughput per sec, at start of throttling */ + atomic64_t base_bytes; + int lun_q_depth; + + atomic_t scmr_congn_signal; + atomic_t num_sig_warning; + atomic_t num_sig_alarm; + + unsigned long expiration_jiffies; + + uint32_t ticks; + uint32_t event_period; + uint32_t throttle_period; + /* The last throttle down diff */ + uint32_t down_delta; + + int event_period_buffer; + + /* Back-pointers for easy access */ + struct scsi_qla_host *vha; + struct fc_port *fcport; + + struct qla_scmr_stats *rstats; + struct qla_scmr_port_profile profile; + int scmr_down_delta[MAX_SCM_PROFILE]; + int scmr_up_delta[MAX_SCM_PROFILE]; +}; + +#define VL_NORMAL_DEF_PRIO 0 +#define VL_SLOW_DEF_PRIO 2 + +struct qla_virtual_lane { + uint8_t v_lane; + uint8_t prio_hi; + uint8_t prio_lo; +}; + /* * Fibre channel port structure. */ typedef struct fc_port { struct list_head list; struct scsi_qla_host *vha; + struct list_head tmf_pending; unsigned int conf_compl_supported:1; unsigned int deleted:2; @@ -2463,9 +2974,13 @@ typedef struct fc_port { unsigned int id_changed:1; unsigned int scan_needed:1; unsigned int n2n_flag:1; - unsigned int explicit_logout:1; unsigned int prli_pend_timer:1; + unsigned int explicit_logout:1; + unsigned int do_prli_nvme:1; + uint8_t nvme_flag; + uint8_t active_tmf; +#define MAX_ACTIVE_TMF 8 uint8_t node_name[WWN_SIZE]; uint8_t port_name[WWN_SIZE]; @@ -2473,6 +2988,8 @@ typedef struct fc_port { uint16_t loop_id; uint16_t old_loop_id; + + struct completion nvme_del_done; uint32_t nvme_prli_service_param; #define NVME_PRLI_SP_PI_CTRL BIT_9 @@ -2506,6 +3023,7 @@ typedef struct fc_port { uint16_t tgt_id; uint16_t old_tgt_id; uint16_t sec_since_registration; + uint16_t prlo_rc; uint8_t fcp_prio; @@ -2519,7 +3037,7 @@ typedef struct fc_port { int login_retry; - struct fc_rport *rport, *drport; + struct fc_rport *rport; u32 supported_classes; uint8_t fc4_type; @@ -2555,6 +3073,46 @@ typedef struct fc_port { u16 n2n_chip_reset; struct dentry *dfs_rport_dir; + u64 online_time; /* jiffies */ + u64 offline_time; /* jiffies */ + + struct qla_scm_target_combined scm; + struct qla_scmr_flow_control sfc; + struct qla_virtual_lane vl; + + uint64_t tgt_short_link_down_cnt; + uint64_t tgt_link_down_time; + uint64_t dev_loss_tmo; + + /* + * EDIF parameters for encryption. 
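+ * authok is set once application authentication of the session
+ * completes; tx_sa_set/rx_sa_set record that a security association
+ * is installed for that direction, while the *_sa_pending bits mark
+ * an SA that is still being negotiated (each rekey bumps the
+ * corresponding *_rekey_cnt).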
+ */ + struct { + uint32_t enable:1; /* device is edif enabled/req'd */ + uint32_t app_stop:2; +#define APP_STOPPED 0 +#define APP_STOPPING 1 + uint32_t authok:1; + uint32_t aes_gmac:1; + uint32_t app_sess_online:1; + uint32_t tx_sa_set:1; + uint32_t rx_sa_set:1; + uint32_t tx_sa_pending:1; + uint32_t rx_sa_pending:1; + uint32_t tx_rekey_cnt; + uint32_t rx_rekey_cnt; + uint64_t tx_bytes; + uint64_t rx_bytes; + u8 sess_down_acked; + uint8_t auth_state; + uint16_t rekey_cnt; + struct list_head edif_indx_list; + spinlock_t indx_list_lock; + + struct list_head tx_sa_list; + struct list_head rx_sa_list; + spinlock_t sa_list_lock; + } edif; } fc_port_t; enum { @@ -2588,7 +3146,6 @@ extern const char *const port_state_str[5]; static const char * const port_dstate_str[] = { "DELETED", - "GNN_ID", "GNL", "LOGIN_PEND", "LOGIN_FAILED", @@ -2596,7 +3153,8 @@ static const char * const port_dstate_str[] = { "UPD_FCPORT", "LOGIN_COMPLETE", "ADISC", - "DELETE_PEND" + "DELETE_PEND", + "LOGIN_AUTH_PEND", }; /* @@ -2608,6 +3166,8 @@ static const char * const port_dstate_str[] = { #define FCF_ASYNC_SENT BIT_3 #define FCF_CONF_COMP_SUPPORTED BIT_4 #define FCF_ASYNC_ACTIVE BIT_5 +#define FCF_FCSP_DEVICE BIT_6 +#define FCF_EDIF_DELETE BIT_7 /* No loop ID flag. */ #define FC_NO_LOOP_ID 0x1000 @@ -2699,7 +3259,7 @@ static const char * const port_dstate_str[] = { /* * FDMI HBA attribute types. */ -#define FDMI1_HBA_ATTR_COUNT 9 +#define FDMI1_HBA_ATTR_COUNT 10 #define FDMI2_HBA_ATTR_COUNT 17 #define FDMI_HBA_NODE_NAME 0x1 @@ -2722,8 +3282,8 @@ static const char * const port_dstate_str[] = { #define FDMI_HBA_VENDOR_IDENTIFIER 0xe0 struct ct_fdmi_hba_attr { - __be16 type; - __be16 len; + uint16_t type; + uint16_t len; union { uint8_t node_name[WWN_SIZE]; uint8_t manufacturer[64]; @@ -2735,11 +3295,11 @@ struct ct_fdmi_hba_attr { uint8_t orom_version[16]; uint8_t fw_version[32]; uint8_t os_version[128]; - __be32 max_ct_len; + uint32_t max_ct_len; uint8_t sym_name[256]; - __be32 vendor_specific_info; - __be32 num_ports; + uint32_t vendor_specific_info; + uint32_t num_ports; uint8_t fabric_name[WWN_SIZE]; uint8_t bios_name[32]; uint8_t vendor_identifier[8]; @@ -2747,12 +3307,12 @@ struct ct_fdmi_hba_attr { }; struct ct_fdmi1_hba_attributes { - __be32 count; + uint32_t count; struct ct_fdmi_hba_attr entry[FDMI1_HBA_ATTR_COUNT]; }; struct ct_fdmi2_hba_attributes { - __be32 count; + uint32_t count; struct ct_fdmi_hba_attr entry[FDMI2_HBA_ATTR_COUNT]; }; @@ -2808,52 +3368,52 @@ struct ct_fdmi2_hba_attributes { #define FC_CLASS_2_3 0x0C struct ct_fdmi_port_attr { - __be16 type; - __be16 len; - union { - uint8_t fc4_types[32]; - __be32 sup_speed; - __be32 cur_speed; - __be32 max_frame_size; - uint8_t os_dev_name[32]; - uint8_t host_name[256]; - - uint8_t node_name[WWN_SIZE]; - uint8_t port_name[WWN_SIZE]; - uint8_t port_sym_name[128]; - __be32 port_type; - __be32 port_supported_cos; - uint8_t fabric_name[WWN_SIZE]; - uint8_t port_fc4_type[32]; - __be32 port_state; - __be32 num_ports; - __be32 port_id; - - uint8_t smartsan_service[24]; - uint8_t smartsan_guid[16]; - uint8_t smartsan_version[24]; - uint8_t smartsan_prod_name[16]; - __be32 smartsan_port_info; - __be32 smartsan_qos_support; - __be32 smartsan_security_support; - } a; + uint16_t type; + uint16_t len; + union { + uint8_t fc4_types[32]; + uint32_t sup_speed; + uint32_t cur_speed; + uint32_t max_frame_size; + uint8_t os_dev_name[32]; + uint8_t host_name[256]; + + uint8_t node_name[WWN_SIZE]; + uint8_t port_name[WWN_SIZE]; + uint8_t port_sym_name[128]; + uint32_t 
port_type; + uint32_t port_supported_cos; + uint8_t fabric_name[WWN_SIZE]; + uint8_t port_fc4_type[32]; + uint32_t port_state; + uint32_t num_ports; + uint32_t port_id; + + uint8_t smartsan_service[24]; + uint8_t smartsan_guid[16]; + uint8_t smartsan_version[24]; + uint8_t smartsan_prod_name[16]; + uint32_t smartsan_port_info; + uint32_t smartsan_qos_support; + uint32_t smartsan_security_support; + } a; }; struct ct_fdmi1_port_attributes { - __be32 count; + uint32_t count; struct ct_fdmi_port_attr entry[FDMI1_PORT_ATTR_COUNT]; }; struct ct_fdmi2_port_attributes { - __be32 count; + uint32_t count; struct ct_fdmi_port_attr entry[FDMI2_PORT_ATTR_COUNT]; }; #define FDMI_ATTR_TYPELEN(obj) \ - (sizeof((obj)->type) + sizeof((obj)->len)) + (sizeof((obj)->type) + sizeof((obj)->len)) #define FDMI_ATTR_ALIGNMENT(len) \ - (4 - ((len) & 3)) + (4 - ((len) & 3)) /* FDMI register call options */ #define CALLOPT_FDMI1 0 @@ -2899,8 +3459,8 @@ struct ct_cmd_hdr { /* CT command request */ struct ct_sns_req { struct ct_cmd_hdr header; - __be16 command; - __be16 max_rsp_size; + uint16_t command; + uint16_t max_rsp_size; uint8_t fragment_id; uint8_t reserved[3]; @@ -2957,7 +3517,7 @@ struct ct_sns_req { struct { uint8_t hba_identifier[8]; - __be32 entry_count; + uint32_t entry_count; uint8_t port_name[8]; struct ct_fdmi2_hba_attributes attrs; } rhba; @@ -3012,7 +3572,7 @@ struct ct_sns_req { /* CT command response header */ struct ct_rsp_hdr { struct ct_cmd_hdr header; - __be16 response; + uint16_t response; uint16_t residual; uint8_t fragment_id; uint8_t reason_code; @@ -3098,14 +3658,16 @@ struct ct_sns_rsp { } gfpn_id; struct { - __be16 speeds; - __be16 speed; + uint16_t speeds; + uint16_t speed; } gpsc; #define GFF_FCP_SCSI_OFFSET 7 #define GFF_NVME_OFFSET 23 /* type = 28h */ struct { uint8_t fc4_features[128]; +#define FC4_FF_TARGET BIT_0 +#define FC4_FF_INITIATOR BIT_1 } gff_id; struct { uint8_t reserved; @@ -3189,13 +3751,13 @@ struct fab_scan { struct sns_cmd_pkt { union { struct { - __le16 buffer_length; - __le16 reserved_1; - __le64 buffer_address __packed; - __le16 subcommand_length; - __le16 reserved_2; - __le16 subcommand; - __le16 size; + uint16_t buffer_length; + uint16_t reserved_1; + __le64 buffer_address __packed; + uint16_t subcommand_length; + uint16_t reserved_2; + uint16_t subcommand; + uint16_t size; uint32_t reserved_3; uint8_t param[36]; } cmd; @@ -3221,7 +3783,7 @@ struct gid_list_info { uint8_t area; uint8_t domain; uint8_t loop_id_2100; /* ISP2100/ISP2200 -- 4 bytes. */ - __le16 loop_id; /* ISP23XX -- 6 bytes. */ + uint16_t loop_id; /* ISP23XX -- 6 bytes. */ uint16_t reserved_1; /* ISP24XX -- 8 bytes. 
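+ * The amount of valid data per entry therefore depends on the
+ * ISP generation; the driver records it in ha->gid_list_info_size
+ * and steps through the firmware-returned list in those units
+ * rather than by sizeof(struct gid_list_info).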
*/ }; @@ -3295,8 +3857,8 @@ struct isp_operations { int (*write_nvram)(struct scsi_qla_host *, void *, uint32_t, uint32_t); - void (*fw_dump)(struct scsi_qla_host *vha); - void (*mpi_fw_dump)(struct scsi_qla_host *, int); + void (*fw_dump) (struct scsi_qla_host *, int); + void (*mpi_fw_dump) (struct scsi_qla_host *, int); int (*beacon_on) (struct scsi_qla_host *); int (*beacon_off) (struct scsi_qla_host *); @@ -3341,6 +3903,7 @@ struct qla_msix_entry { int have_irq; int in_use; uint32_t vector; + uint32_t vector_base0; uint16_t entry; char name[30]; void *handle; @@ -3358,7 +3921,6 @@ enum qla_work_type { QLA_EVT_ASYNC_ADISC, QLA_EVT_UEVENT, QLA_EVT_AENFX, - QLA_EVT_GPNID, QLA_EVT_UNMAP, QLA_EVT_NEW_SESS, QLA_EVT_GPDB, @@ -3372,11 +3934,11 @@ enum qla_work_type { QLA_EVT_GPNFT, QLA_EVT_GPNFT_DONE, QLA_EVT_GNNFT_DONE, - QLA_EVT_GNNID, QLA_EVT_GFPNID, QLA_EVT_SP_RETRY, QLA_EVT_IIDMA, QLA_EVT_ELS_PLOGI, + QLA_EVT_SA_REPLACE, }; @@ -3412,9 +3974,6 @@ struct qla_work_evt { struct { srb_t *sp; } iosb; - struct { - port_id_t id; - } gpnid; struct { port_id_t id; u8 port_name[8]; @@ -3422,7 +3981,7 @@ struct qla_work_evt { void *pla; u8 fc4_type; } new_sess; - struct { /*Get PDB, Get Speed, update fcport, gnl, gidpn */ + struct { /*Get PDB, Get Speed, update fcport, gnl */ fc_port_t *fcport; u8 opt; } fcport; @@ -3435,6 +3994,11 @@ struct qla_work_evt { u8 fc4_type; srb_t *sp; } gpnft; + struct { + struct edif_sa_ctl *sa_ctl; + fc_port_t *fcport; + uint16_t nport_handle; + } sa_update; } u; }; @@ -3477,6 +4041,7 @@ struct qla_statistics { uint32_t stat_max_qfull_cmds_dropped; struct qla_dif_statistics qla_dif_stats; + uint32_t latency_outliers; }; struct bidi_statistics { @@ -3528,7 +4093,6 @@ struct qla_counters { uint64_t input_requests; uint64_t output_bytes; uint64_t output_requests; - }; struct qla_qpair; @@ -3538,8 +4102,8 @@ struct rsp_que { dma_addr_t dma; response_t *ring; response_t *ring_ptr; - __le32 __iomem *rsp_q_in; /* FWI2-capable only. */ - __le32 __iomem *rsp_q_out; + uint32_t __iomem *rsp_q_in; /* FWI2-capable only. */ + uint32_t __iomem *rsp_q_out; uint16_t ring_index; uint16_t out_ptr; uint16_t *in_ptr; /* queue shadow in index */ @@ -3554,6 +4118,8 @@ struct rsp_que { srb_t *status_srb; /* status continuation entry */ struct qla_qpair *qpair; + port_id_t pur_sid; + int pur_entcnt; dma_addr_t dma_fx00; response_t *ring_fx00; uint16_t length_fx00; @@ -3565,8 +4131,8 @@ struct req_que { dma_addr_t dma; request_t *ring; request_t *ring_ptr; - __le32 __iomem *req_q_in; /* FWI2-capable only. */ - __le32 __iomem *req_q_out; + uint32_t __iomem *req_q_in; /* FWI2-capable only. 
*/ + uint32_t __iomem *req_q_out; uint16_t ring_index; uint16_t in_ptr; uint16_t *out_ptr; /* queue shadow out index */ @@ -3589,15 +4155,150 @@ struct req_que { uint8_t req_pkt[REQUEST_ENTRY_SIZE]; }; +#ifdef QLA2XXX_LATENCY_MEASURE +enum latency_io_cmd_type { + read6, + read10, + read12, + read16, + write6, + write10, + write12, + write16, + cmd_type_count +}; +enum latency_nvme_io_cmd_type { + ql_nvme_read, + ql_nvme_write, + nvme_cmd_count +}; + +struct qla_latency_counter { + uint64_t qla_tot_cmds[cmd_type_count]; + uint64_t qla_time_qcmd_to_req_q[cmd_type_count]; + uint64_t qla_time_req_q_to_rsp_q[cmd_type_count]; + uint64_t qla_time_rsq_q_to_ml[cmd_type_count]; + uint64_t qla_time_qcmd_to_ml[cmd_type_count]; + uint64_t qla_nvme_tot_cmds[nvme_cmd_count]; + uint64_t qla_nvme_qcmd_to_req_q[nvme_cmd_count]; + uint64_t qla_nvme_req_q_to_rsp_q[nvme_cmd_count]; + uint64_t qla_nvme_rsp_q_to_ml[nvme_cmd_count]; + uint64_t qla_nvme_qcmd_to_ml[nvme_cmd_count]; +}; +#endif + struct qla_fw_resources { u16 iocbs_total; u16 iocbs_limit; u16 iocbs_qp_limit; u16 iocbs_used; + u16 exch_total; + u16 exch_limit; + u16 exch_used; + u16 pad; }; - #define QLA_IOCB_PCT_LIMIT 95 +/* + * SAN Congestion Rate limiting related. + * + * Module parameters: + * + * ql2x_scmr_drop_pct_warn + * ql2x_scmr_drop_pct_alarm + * ql2x_scmr_up_pct + * ql2x_scmr_rate_down_intv + * when congested, if a SIGNAL appears within this interval, bring + * down congested peak rate by ql2x_scmr_drop_pct_warn. + * ql2x_scmr_rate_up_intv + * when congested, if no SIGNALS for this long, bump congested peak + * rate by ql2x_scmr_drop_pct_warn. + * ql2x_scmr_drop_pct_low_wm + * when congested, I/Os will not be throttled further when the congested + * peak rate hits this percentage. + * + */ +#define qla_scmr_reduced_throttle(_sfc) \ + (test_bit(QLA_SCMRF_THROTTLING_BPS, &(_sfc)->flags) || \ + test_bit(QLA_SCMRF_THROTTLING_IOS, &(_sfc)->flags) || \ + test_bit(QLA_SCMRF_THROTTLING_QDEPTH, &(_sfc)->flags)) + +#define qla_scmr_throttle_ios(_sfc) \ + test_bit(QLA_SCMRF_THROTTLING_IOS, &(_sfc)->flags) + +#define qla_scmr_throttle_qdepth(_sfc) \ + test_bit(QLA_SCMRF_THROTTLING_QDEPTH, &(_sfc)->flags) + +#define qla_scmr_throttle_bps(_sfc) \ + test_bit(QLA_SCMRF_THROTTLING_BPS, &(_sfc)->flags) + +#define qla_scmr_set_reduce_throttle_bps(_sfc) \ + set_bit(QLA_SCMRF_THROTTLING_BPS, &(_sfc)->flags) + +#define qla_scmr_set_reduce_throttle_ios(_sfc) \ + set_bit(QLA_SCMRF_THROTTLING_IOS, &(_sfc)->flags) + +#define qla_scmr_set_throttle_qdepth(_sfc) \ + set_bit(QLA_SCMRF_THROTTLING_QDEPTH, &(_sfc)->flags) + +#define qla_scmr_clear_throttle_bps(_sfc) \ + clear_bit(QLA_SCMRF_THROTTLING_BPS, &(_sfc)->flags) + +#define qla_scmr_clear_throttle_ios(_sfc) \ + clear_bit(QLA_SCMRF_THROTTLING_IOS, &(_sfc)->flags) + +#define qla_scmr_clear_throttle_qdepth(_sfc) \ + clear_bit(QLA_SCMRF_THROTTLING_QDEPTH, &(_sfc)->flags) + +#define qla_scmr_has_fast_tgt(_sfc) \ + test_bit(QLA_SCMRF_FAST_TGT, &(_sfc)->flags) + +#define qla_scmr_set_fast_tgt(_sfc) \ + set_bit(QLA_SCMRF_FAST_TGT, &(_sfc)->flags) + +#define qla_scmr_clear_fast_tgt(_sfc) \ + clear_bit(QLA_SCMRF_FAST_TGT, &(_sfc)->flags) + +#define qla_scmr_test_notify_fw(_sfc) \ + test_bit(QLA_SCMRF_NOTIFY_FW, &(_sfc)->flags) + +#define qla_scmr_set_notify_fw(_sfc) \ + set_bit(QLA_SCMRF_NOTIFY_FW, &(_sfc)->flags) + +#define qla_scmr_clr_notify_fw(_sfc) \ + clear_bit(QLA_SCMRF_NOTIFY_FW, &(_sfc)->flags) + +#define qla_scmr_is_congested(_sfc) \ + test_bit(QLA_SCMRF_CONGESTED, &(_sfc)->flags) + +#define 
qla_scmr_set_congested(_sfc) \ + set_bit(QLA_SCMRF_CONGESTED, &(_sfc)->flags) + +#define qla_scmr_clear_congested(_sfc) \ + clear_bit(QLA_SCMRF_CONGESTED, &(_sfc)->flags) + +#define qla_scmr_set_tgt(_sfc) \ + set_bit(QLA_SCMRF_IS_TGT, &(_sfc)->flags) + +#define qla_scmr_is_tgt(_sfc) \ + test_bit(QLA_SCMRF_IS_TGT, &(_sfc)->flags) + +#define qla_scmr_get_sig(_sfc) \ + atomic_read(&(_sfc)->scmr_congn_signal) + +#define qla_scmr_clear_sig(_sfc, _sig) \ + atomic_set(&(_sfc)->_sig, QLA_SIG_NONE) + +struct qla_buf_pool { + u16 num_bufs; + u16 num_active; + u16 max_used; + u16 reserved; + unsigned long *buf_map; + void **buf_array; + dma_addr_t *dma_array; +}; + /*Queue pair data structure */ struct qla_qpair { spinlock_t qp_lock; @@ -3644,13 +4345,17 @@ struct qla_qpair { struct list_head qp_list_elem; /* vha->qp_list */ struct list_head hints_list; - + uint16_t retry_term_cnt; - __le32 retry_term_exchg_addr; + uint32_t retry_term_exchg_addr; uint64_t retry_term_jiff; struct qla_tgt_counters tgt_counters; uint16_t cpuid; struct qla_fw_resources fwres ____cacheline_aligned; + struct qla_buf_pool buf_pool; + u32 cmd_cnt; + u32 cmd_completion_cnt; + u32 prev_completion_cnt; }; /* Place holder for FW buffer parameters */ @@ -3675,98 +4380,98 @@ struct rdp_req_payload { struct rdp_rsp_payload { struct { - __be32 cmd; - __be32 len; + uint32_t cmd; + uint32_t len; } hdr; /* LS Request Info descriptor */ struct { - __be32 desc_tag; - __be32 desc_len; - __be32 req_payload_word_0; + uint32_t desc_tag; + uint32_t desc_len; + uint32_t req_payload_word_0; } ls_req_info_desc; /* LS Request Info descriptor */ struct { - __be32 desc_tag; - __be32 desc_len; - __be32 req_payload_word_0; + uint32_t desc_tag; + uint32_t desc_len; + uint32_t req_payload_word_0; } ls_req_info_desc2; /* SFP diagnostic param descriptor */ struct { - __be32 desc_tag; - __be32 desc_len; - __be16 temperature; - __be16 vcc; - __be16 tx_bias; - __be16 tx_power; - __be16 rx_power; - __be16 sfp_flags; + uint32_t desc_tag; + uint32_t desc_len; + uint16_t temperature; + uint16_t vcc; + uint16_t tx_bias; + uint16_t tx_power; + uint16_t rx_power; + uint16_t sfp_flags; } sfp_diag_desc; /* Port Speed Descriptor */ struct { - __be32 desc_tag; - __be32 desc_len; - __be16 speed_capab; - __be16 operating_speed; + uint32_t desc_tag; + uint32_t desc_len; + uint16_t speed_capab; + uint16_t operating_speed; } port_speed_desc; /* Link Error Status Descriptor */ struct { - __be32 desc_tag; - __be32 desc_len; - __be32 link_fail_cnt; - __be32 loss_sync_cnt; - __be32 loss_sig_cnt; - __be32 prim_seq_err_cnt; - __be32 inval_xmit_word_cnt; - __be32 inval_crc_cnt; + uint32_t desc_tag; + uint32_t desc_len; + uint32_t link_fail_cnt; + uint32_t loss_sync_cnt; + uint32_t loss_sig_cnt; + uint32_t prim_seq_err_cnt; + uint32_t inval_xmit_word_cnt; + uint32_t inval_crc_cnt; uint8_t pn_port_phy_type; uint8_t reserved[3]; } ls_err_desc; /* Port name description with diag param */ struct { - __be32 desc_tag; - __be32 desc_len; + uint32_t desc_tag; + uint32_t desc_len; uint8_t WWNN[WWN_SIZE]; uint8_t WWPN[WWN_SIZE]; } port_name_diag_desc; /* Port Name desc for Direct attached Fx_Port or Nx_Port */ struct { - __be32 desc_tag; - __be32 desc_len; + uint32_t desc_tag; + uint32_t desc_len; uint8_t WWNN[WWN_SIZE]; uint8_t WWPN[WWN_SIZE]; } port_name_direct_desc; /* Buffer Credit descriptor */ struct { - __be32 desc_tag; - __be32 desc_len; - __be32 fcport_b2b; - __be32 attached_fcport_b2b; - __be32 fcport_rtt; + uint32_t desc_tag; + uint32_t desc_len; + uint32_t fcport_b2b; + uint32_t 
attached_fcport_b2b; + uint32_t fcport_rtt; } buffer_credit_desc; /* Optical Element Data Descriptor */ struct { - __be32 desc_tag; - __be32 desc_len; - __be16 high_alarm; - __be16 low_alarm; - __be16 high_warn; - __be16 low_warn; - __be32 element_flags; + uint32_t desc_tag; + uint32_t desc_len; + uint16_t high_alarm; + uint16_t low_alarm; + uint16_t high_warn; + uint16_t low_warn; + uint32_t element_flags; } optical_elmt_desc[5]; /* Optical Product Data Descriptor */ struct { - __be32 desc_tag; - __be32 desc_len; + uint32_t desc_tag; + uint32_t desc_len; uint8_t vendor_name[16]; uint8_t part_number[16]; uint8_t serial_number[16]; @@ -3776,7 +4481,7 @@ struct rdp_rsp_payload { }; #define RDP_DESC_LEN(obj) \ - (sizeof(obj) - sizeof((obj).desc_tag) - sizeof((obj).desc_len)) + (sizeof(obj) - sizeof((obj).desc_tag) - sizeof((obj).desc_len)) #define RDP_PORT_SPEED_1GB BIT_15 #define RDP_PORT_SPEED_2GB BIT_14 @@ -3804,17 +4509,16 @@ struct qlt_hw_data { struct atio *atio_ring_ptr; /* Current address. */ uint16_t atio_ring_index; /* Current index. */ uint16_t atio_q_length; - __le32 __iomem *atio_q_in; - __le32 __iomem *atio_q_out; + uint32_t __iomem *atio_q_in; + uint32_t __iomem *atio_q_out; struct qla_tgt_func_tmpl *tgt_ops; - struct qla_tgt_vp_map *tgt_vp_map; int saved_set; - __le16 saved_exchange_count; - __le32 saved_firmware_options_1; - __le32 saved_firmware_options_2; - __le32 saved_firmware_options_3; + uint16_t saved_exchange_count; + uint32_t saved_firmware_options_1; + uint32_t saved_firmware_options_2; + uint32_t saved_firmware_options_3; uint8_t saved_firmware_options[2]; uint8_t saved_add_firmware_options[2]; @@ -3834,7 +4538,9 @@ struct qlt_hw_data { int num_act_qpairs; #define DEFAULT_NAQP 2 spinlock_t atio_lock ____cacheline_aligned; - struct btree_head32 host_map; + dma_addr_t fast_dig_dma; + char *fast_dig_ptr; + char *fast_sw_sha; }; #define MAX_QFULL_CMDS_ALLOC 8192 @@ -3849,6 +4555,60 @@ struct qla_hw_data_stat { u32 num_mpi_reset; }; +// refer to pcie_do_recovery reference +typedef enum { + QLA_PCI_RESUME, + QLA_PCI_ERR_DETECTED, + QLA_PCI_MMIO_ENABLED, + QLA_PCI_SLOT_RESET, +} pci_error_state_t; + +struct qla_sig_severity { + uint64_t cn_alarm_sig; + uint64_t cn_warning_sig; +} __packed; + +/* Cisco VL specific */ + +#define MAX_VL 7 +struct virt_lane { + uint8_t prio_hi; + uint8_t prio_lo; + __le16 num_credits; +}; + +/* Extended FLOGI_ACC payload structure */ +struct flogi_acc_payld { + uint8_t page_code; + uint8_t rsvd1; + __le16 page_len; + + uint8_t vendor_code[8]; + + __le16 er_rdy_desc_len; + __le16 er_rdy_desc_tag; + + __le16 rx_vl_desc_len; + __le16 rx_vl_desc_tag; + + __le16 rsvd2; + __le16 num_rx_vl; + + struct virt_lane rx_vl[MAX_VL]; + + __le16 tx_vl_desc_len; + __le16 tx_vl_desc_tag; + + uint16_t rsvd3; + __le16 num_tx_vl; + + struct virt_lane tx_vl[MAX_VL]; + + /* Prio desc tag etc. Not used currently */ + uint32_t rsvd4[10]; + +}; + /* * Qlogic host adapter specific data structure. */ @@ -3857,6 +4617,7 @@ struct qla_hw_data { /* SRB cache. 
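+ * Backed by the global srb_cachep slab: qla2x00_mem_alloc() creates
+ * the pool with mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep),
+ * keeping a guaranteed reserve of srbs available under memory
+ * pressure.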
*/ #define SRB_MIN_REQ 128 mempool_t *srb_mempool; + u8 port_name[WWN_SIZE]; volatile struct { uint32_t mbox_int :1; @@ -3911,13 +4672,22 @@ struct qla_hw_data { uint32_t n2n_bigger:1; uint32_t secure_adapter:1; uint32_t secure_fw:1; - /* Supported by Adapter */ - uint32_t scm_supported_a:1; - /* Supported by Firmware */ - uint32_t scm_supported_f:1; - /* Enabled in Driver */ - uint32_t scm_enabled:1; + uint32_t flogi_acc_enabled:1; + uint32_t flogi_acc_pl_in_cont_iocb:1; + uint32_t scm_supported_a:1; // Adapter support for SCM + uint32_t scm_supported_f:1; // Firmware support for SCM + uint32_t scm_enabled:1; // SCM enabled in Driver + uint32_t scm_supported_vl:1; // Adapter support for VL + uint32_t conn_fabric_cisco_er_rdy:1; // ER_RDY supported + uint32_t conn_fabric_brocade:1; + uint32_t edif_hw:1; + uint32_t edif_enabled:1; + uint32_t n2n_fw_acc_sec:1; uint32_t plogi_template_valid:1; + uint32_t port_isolated:1; + uint32_t eeh_flush:2; +#define EEH_FLUSH_RDY 1 +#define EEH_FLUSH_DONE 2 } flags; uint16_t max_exchg; @@ -3952,6 +4722,7 @@ struct qla_hw_data { uint32_t rsp_que_len; uint32_t req_que_off; uint32_t rsp_que_off; + unsigned long eeh_jif; /* Multi queue data structs */ device_reg_t *mqiobase; @@ -3961,6 +4732,7 @@ struct qla_hw_data { struct req_que **req_q_map; struct rsp_que **rsp_q_map; struct qla_qpair **queue_pair_map; + struct qla_qpair **qp_cpu_map; unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)]; unsigned long qpair_qid_map[(QLA_MAX_QUEUES / 8) @@ -3969,6 +4741,7 @@ struct qla_hw_data { uint8_t max_rsp_queues; uint8_t max_qpairs; uint8_t num_qpairs; + uint16_t slow_queue_id; struct qla_qpair *base_qpair; struct qla_npiv_entry *npiv_info; uint16_t nvram_npiv_size; @@ -4032,6 +4805,7 @@ struct qla_hw_data { #define PCI_DEVICE_ID_QLOGIC_ISP2089 0x2089 #define PCI_DEVICE_ID_QLOGIC_ISP2281 0x2281 #define PCI_DEVICE_ID_QLOGIC_ISP2289 0x2289 +#define PCI_DEVICE_ID_QLOGIC_ISP2971 0x2971 uint32_t isp_type; #define DT_ISP2100 BIT_0 @@ -4061,7 +4835,8 @@ struct qla_hw_data { #define DT_ISP2089 BIT_24 #define DT_ISP2281 BIT_25 #define DT_ISP2289 BIT_26 -#define DT_ISP_LAST (DT_ISP2289 << 1) +#define DT_ISP2971 BIT_27 +#define DT_ISP_LAST (DT_ISP2971 << 1) uint32_t device_type; #define DT_T10_PI BIT_25 @@ -4096,6 +4871,7 @@ struct qla_hw_data { #define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071) #define IS_QLA2271(ha) (DT_MASK(ha) & DT_ISP2271) #define IS_QLA2261(ha) (DT_MASK(ha) & DT_ISP2261) +#define IS_QLA2971(ha) (DT_MASK(ha) & DT_ISP2971) #define IS_QLA2081(ha) (DT_MASK(ha) & DT_ISP2081) #define IS_QLA2281(ha) (DT_MASK(ha) & DT_ISP2281) @@ -4106,7 +4882,8 @@ struct qla_hw_data { #define IS_QLA25XX(ha) (IS_QLA2532(ha)) #define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha)) #define IS_QLA84XX(ha) (IS_QLA8432(ha)) -#define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha) || IS_QLA2261(ha)) +#define IS_QLA27XX(ha) (IS_QLA2071(ha) || IS_QLA2271(ha) || IS_QLA2261(ha)|| \ + IS_QLA2971(ha)) #define IS_QLA28XX(ha) (IS_QLA2081(ha) || IS_QLA2281(ha)) #define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \ IS_QLA84XX(ha)) @@ -4134,16 +4911,32 @@ struct qla_hw_data { #define IS_OEM_001(ha) ((ha)->device_type & DT_OEM_001) #define HAS_EXTENDED_IDS(ha) ((ha)->device_type & DT_EXTENDED_IDS) #define IS_CT6_SUPPORTED(ha) ((ha)->device_type & DT_CT6_SUPPORTED) -#define IS_MQUE_CAPABLE(ha) ((ha)->mqenable || IS_QLA83XX(ha) || \ - IS_QLA27XX(ha) || IS_QLA28XX(ha)) +#define 
IS_MQUE_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) +#define IS_SCM_CAPABLE(ha) (IS_QLA27XX(ha) || IS_QLA28XX(ha)) +#define IS_NPVC_CAPABLE(ha) (IS_QLA28XX(ha)) +#define IS_ARB_CAPABLE(ha) (IS_QLA28XX(ha)) +#define IS_VL_CAPABLE(ha) (IS_QLA28XX(ha)) + #define IS_BIDI_CAPABLE(ha) \ (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) /* Bit 21 of fw_attributes decides the MCTP capabilities */ #define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \ ((ha)->fw_attributes_ext[0] & BIT_0)) -#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) -#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) -#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0) +#define QLA_ABTS_FW_ENABLED(_ha) ((_ha)->fw_attributes_ext[0] & BIT_14) +#define QLA_SRB_NVME_LS(_sp) ( _sp->type == SRB_NVME_LS) +#define QLA_SRB_NVME_CMD(_sp) (_sp->type == SRB_NVME_CMD) +#define QLA_NVME_IOS(_sp) (QLA_SRB_NVME_CMD(_sp) || QLA_SRB_NVME_LS(_sp)) +#define QLA_LS_ABTS_WAIT_ENABLED(_sp) \ + (QLA_SRB_NVME_LS(_sp) && QLA_ABTS_FW_ENABLED(_sp->fcport->vha->hw)) +#define QLA_CMD_ABTS_WAIT_ENABLED(_sp) \ + (QLA_SRB_NVME_CMD(_sp) && QLA_ABTS_FW_ENABLED(_sp->fcport->vha->hw)) +#define QLA_ABTS_WAIT_ENABLED(_sp) \ + (QLA_NVME_IOS(_sp) && QLA_ABTS_FW_ENABLED(_sp->fcport->vha->hw)) + +#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) +#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ + IS_QLA28XX(ha)) #define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha) || \ IS_QLA28XX(ha)) #define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \ @@ -4165,8 +4958,10 @@ struct qla_hw_data { IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) #define IS_ZIO_THRESHOLD_CAPABLE(ha) \ - ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&\ - (ha->zio_mode == QLA_ZIO_MODE_6)) + (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) + +#define IS_SYS_LOCKDOWN_CAPABLE(ha) \ + (IS_QLA27XX(ha) || IS_QLA28XX(ha)) /* HBA serial number */ uint8_t serial0; @@ -4212,7 +5007,7 @@ struct qla_hw_data { void *sfp_data; dma_addr_t sfp_data_dma; - struct qla_flt_header *flt; + void *flt; dma_addr_t flt_dma; #define XGMAC_DATA_SIZE 4096 @@ -4242,6 +5037,12 @@ struct qla_hw_data { dma_addr_t sf_init_cb_dma; struct init_sf_cb *sf_init_cb; + uint32_t flogi_acc_login_ex_length; + uint16_t attached_port_bb_credit; + uint16_t flogi_acc_common_features; + uint16_t flogi_acc_pld_remaining; + uint16_t flogi_acc_cl3_sp_options; + uint16_t flogi_acc_curr_offset; void *scm_fpin_els_buff; uint64_t scm_fpin_els_buff_size; bool scm_fpin_valid; @@ -4267,7 +5068,7 @@ struct qla_hw_data { /* n2n */ struct fc_els_flogi plogi_els_payld; -#define LOGIN_TEMPLATE_SIZE (sizeof(struct fc_els_flogi) - 4) +#define LOGIN_TEMPLATE_SIZE (sizeof(struct fc_els_flogi)-4) void *swl; @@ -4310,12 +5111,12 @@ struct qla_hw_data { #define FW_ATTR_H_NVME_UPDATED BIT_14 /* About firmware SCM support */ -#define FW_ATTR_EXT0_SCM_SUPPORTED BIT_12 - /* Brocade fabric attached */ -#define FW_ATTR_EXT0_SCM_BROCADE 0x00001000 - /* Cisco fabric attached */ -#define FW_ATTR_EXT0_SCM_CISCO 0x00002000 +#define FW_ATTR_EXT0_SCM_SUPPORTED BIT_12 +//#define FW_ATTR_EXT0_SCM_BROCADE_CONNECTED 0x00001000 // Brocade switch connected +//#define FW_ATTR_EXT0_SCM_CISCO_CONNECTED 0x00002000 // Cisco switch connected #define FW_ATTR_EXT0_NVME2 BIT_13 +#define FW_ATTR_EXT0_EDIF BIT_5 + uint16_t fw_attributes_ext[2]; uint32_t fw_memory_size; uint32_t fw_transfer_size; @@ -4339,7 +5140,7 @@ struct 
qla_hw_data { uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */ uint8_t fw_seriallink_options[4]; - __le16 fw_seriallink_options24[4]; + uint16_t fw_seriallink_options24[4]; uint8_t serdes_version[3]; uint8_t mpi_version[3]; @@ -4367,10 +5168,10 @@ struct qla_hw_data { #define ISP_MBX_RDY 6 #define ISP_SOFT_RESET_CMPL 7 int fw_dump_reading; - void *mpi_fw_dump; - u32 mpi_fw_dump_len; - unsigned int mpi_fw_dump_reading:1; - unsigned int mpi_fw_dumped:1; + void *mpi_fw_dump; + uint32_t mpi_fw_dump_len; + uint32_t mpi_fw_dump_reading:1; + uint32_t mpi_fw_dumped:1; int prev_minidump_failed; dma_addr_t eft_dma; void *eft; @@ -4486,7 +5287,9 @@ struct qla_hw_data { struct qla_chip_state_84xx *cs84xx; struct isp_operations *isp_ops; struct workqueue_struct *wq; + struct work_struct hb_work; struct qlfc_fw fw_buf; + unsigned long last_hb_run_jiffies; /* FCP_CMND priority support */ struct qla_fcp_prio_cfg *fcp_prio_cfg; @@ -4522,7 +5325,7 @@ struct qla_hw_data { #define NUM_DSD_CHAIN 4096 uint8_t fw_type; - uint32_t file_prd_off; /* File firmware product offset */ + __le32 file_prd_off; /* File firmware product offset */ uint32_t md_template_size; void *md_tmplt_hdr; @@ -4583,9 +5386,44 @@ struct qla_hw_data { atomic_t zio_threshold; uint16_t last_zio_threshold; -#define DEFAULT_ZIO_THRESHOLD 5 - +#define DEFAULT_ZIO_THRESHOLD 64 struct qla_hw_data_stat stat; + unsigned short plogi_commfeat; + struct dma_pool *purex_dma_pool; + struct btree_head32 host_map; + +#define EDIF_NUM_SA_INDEX 512 +#define EDIF_TX_SA_INDEX_BASE EDIF_NUM_SA_INDEX + void *edif_rx_sa_id_map; + void *edif_tx_sa_id_map; + spinlock_t sadb_fp_lock; + + struct list_head sadb_tx_index_list; + struct list_head sadb_rx_index_list; + spinlock_t sadb_lock; + struct els_reject elsrej; + u8 edif_post_stop_cnt_down; + + struct qla_scm_port_combined scm; + struct qla_scmr_flow_control sfc; + atomic_t throttle_read; + atomic_t throttle_write; + + pci_error_state_t pci_error_state; + + /* USCM ELS */ + uint8_t edc_retry_cnt; + struct edc_els_payload edc_els_payload; + struct qla_sig_severity sig_sev; + /* Response ELS */ + struct edc_els_resp_payload *edc_rsp_payload; + dma_addr_t edc_rsp_payload_dma; + struct flogi_acc_payld flogi_acc; +#ifdef QLA_TRACING + QLA_DFS_DEFINE_DENTRY(srb_trace); + struct qla_trace srb_trace; +#endif /* QLA_TRACING */ + struct qla_vp_map *vp_map; }; struct active_regions { @@ -4595,7 +5433,9 @@ struct active_regions { uint8_t vpd_nvram; uint8_t npiv_config_0_1; uint8_t npiv_config_2_3; + uint8_t nvme_params; } aux; + unsigned short plogi_commfeat; }; #define FW_ABILITY_MAX_SPEED_MASK 0xFUL @@ -4617,6 +5457,7 @@ struct active_regions { struct purex_item { struct list_head list; struct scsi_qla_host *vha; + struct qla_qpair *qpair; void (*process_item)(struct scsi_qla_host *vha, struct purex_item *pkt); atomic_t in_use; @@ -4625,13 +5466,7 @@ struct purex_item { uint8_t iocb[64]; } iocb; }; - -#define SCM_FLAG_RDF_REJECT 0x00 -#define SCM_FLAG_RDF_COMPLETED 0x01 - -#define QLA_CON_PRIMITIVE_RECEIVED 0x1 -#define QLA_CONGESTION_ARB_WARNING 0x1 -#define QLA_CONGESTION_ARB_ALARM 0X2 +#include "qla_edif.h" /* * Qlogic scsi host structure @@ -4691,7 +5526,6 @@ typedef struct scsi_qla_host { #define ISP_ABORT_RETRY 10 /* ISP aborted. 
*/ #define BEACON_BLINK_NEEDED 11 #define REGISTER_FDMI_NEEDED 12 -#define FCPORT_UPDATE_NEEDED 13 #define VP_DPC_NEEDED 14 /* wake up for VP dpc handling */ #define UNLOADING 15 #define NPIV_CONFIG_NEEDED 16 @@ -4706,13 +5540,16 @@ typedef struct scsi_qla_host { #define FX00_CRITEMP_RECOVERY 25 #define FX00_HOST_INFO_RESEND 26 #define QPAIR_ONLINE_CHECK_NEEDED 27 -#define SET_NVME_ZIO_THRESHOLD_NEEDED 28 +#define DO_EEH_RECOVERY 28 #define DETECT_SFP_CHANGE 29 #define N2N_LOGIN_NEEDED 30 #define IOCB_WORK_ACTIVE 31 #define SET_ZIO_THRESHOLD_NEEDED 32 #define ISP_ABORT_TO_ROM 33 #define VPORT_DELETE 34 +#define SCM_NOTIFY_FW 37 +#define SCM_SEND_RDF 39 +#define SCM_SEND_EDC 40 #define PROCESS_PUREX_IOCB 63 @@ -4749,6 +5586,8 @@ typedef struct scsi_qla_host { uint32_t timer_active; struct timer_list timer; + struct timer_list perf_timer; + uint32_t perf_timer_active; uint8_t node_name[WWN_SIZE]; uint8_t port_name[WWN_SIZE]; @@ -4842,22 +5681,46 @@ typedef struct scsi_qla_host { uint8_t n2n_port_name[WWN_SIZE]; uint16_t n2n_id; __le16 dport_data[4]; - struct list_head gpnid_list; struct fab_scan scan; - uint8_t scm_fabric_connection_flags; - unsigned int irq_offset; + + uint64_t hw_err_cnt; + uint64_t interface_err_cnt; + uint64_t cmd_timeout_cnt; + uint64_t reset_cmd_err_cnt; + + uint64_t link_down_time; + uint64_t short_link_down_cnt; + + struct edif_dbell e_dbell; + struct pur_core pur_cinfo; + +#ifdef QLA2XXX_LATENCY_MEASURE + struct dentry *dfs_latency_counters; + struct qla_latency_counter latency_counters; +#endif + /* USCM ELS */ + uint8_t rdf_retry_cnt; + struct rdf_els_payload rdf_els_payload; + /* USCM ELS RSP */ + struct rdf_els_payload *rdf_payload; + dma_addr_t rdf_payload_dma; + +#define DPORT_DIAG_IN_PROGRESS BIT_0 +#define DPORT_DIAG_CHIP_RESET_IN_PROGRESS BIT_1 + uint16_t dport_status; + } scsi_qla_host_t; struct qla27xx_image_status { uint8_t image_status_mask; - __le16 generation; + uint16_t generation; uint8_t ver_major; uint8_t ver_minor; uint8_t bitmap; /* 28xx only */ uint8_t reserved[2]; - __le32 checksum; - __le32 signature; + uint32_t checksum; + uint32_t signature; } __packed; /* 28xx aux image status bimap values */ @@ -4865,12 +5728,13 @@ struct qla27xx_image_status { #define QLA28XX_AUX_IMG_VPD_NVRAM BIT_1 #define QLA28XX_AUX_IMG_NPIV_CONFIG_0_1 BIT_2 #define QLA28XX_AUX_IMG_NPIV_CONFIG_2_3 BIT_3 +#define QLA28XX_AUX_IMG_NVME_PARAMS BIT_4 #define SET_VP_IDX 1 #define SET_AL_PA 2 #define RESET_VP_IDX 3 #define RESET_AL_PA 4 -struct qla_tgt_vp_map { +struct qla_vp_map { uint8_t idx; scsi_qla_host_t *vha; }; @@ -5007,6 +5871,7 @@ struct secure_flash_update_block_pk { #define QLA_LOOP_ID_USED (MBS_LOOP_ID_USED & MBS_MASK) #define QLA_ALL_IDS_IN_USE (MBS_ALL_IDS_IN_USE & MBS_MASK) #define QLA_NOT_LOGGED_IN (MBS_NOT_LOGGED_IN & MBS_MASK) +#define QLA_FLASH_LOCKDOWN (MBS_FLASH_LOCKDOWN & MBS_MASK) #define QLA_FUNCTION_TIMEOUT 0x100 #define QLA_FUNCTION_PARAMETER_ERROR 0x101 @@ -5018,6 +5883,9 @@ struct secure_flash_update_block_pk { #define QLA_BUSY 0x107 #define QLA_ALREADY_REGISTERED 0x109 #define QLA_OS_TIMER_EXPIRED 0x10a +#define QLA_ERR_NO_QPAIR 0x10b +#define QLA_ERR_NOT_FOUND 0x10c +#define QLA_ERR_FROM_FW 0x10d #define NVRAM_DELAY() udelay(10) @@ -5048,6 +5916,44 @@ enum nexus_wait_type { WAIT_LUN, }; + +#define INVALID_EDIF_SA_INDEX 0xffff +#define RX_DELETE_NO_EDIF_SA_INDEX 0xfffe + +#define QLA_SKIP_HANDLE QLA_TGT_SKIP_HANDLE + +/* edif hash element */ +struct edif_list_entry { + uint16_t handle; /* nport_handle */ + uint32_t update_sa_index; + uint32_t 
delete_sa_index; + uint32_t count; /* counter for filtering sa_index */ +#define EDIF_ENTRY_FLAGS_CLEANUP 0x01 /* this index is being cleaned up */ + uint32_t flags; /* used by sadb cleanup code */ + fc_port_t *fcport; /* needed by rx delay timer function */ + struct timer_list timer; /* rx delay timer */ + struct list_head next; +}; + +#define EDIF_TX_INDX_BASE 512 +#define EDIF_RX_INDX_BASE 0 +#define EDIF_RX_DELETE_FILTER_COUNT 3 /* delay queuing rx delete until this many */ + +/* entry in the sa_index free pool */ + +struct sa_index_pair { + uint16_t sa_index; + uint32_t spi; +}; + +/* edif sa_index data structure */ +struct edif_sa_index_entry { + struct sa_index_pair sa_pair[2]; + fc_port_t *fcport; + uint16_t handle; + struct list_head next; +}; + /* Refer to SNIA SFF 8247 */ struct sff_8247_a0 { u8 txid; /* transceiver id */ @@ -5126,16 +6032,20 @@ struct sff_8247_a0 { }; /* BPM -- Buffer Plus Management support. */ -#define IS_BPM_CAPABLE(ha) \ - (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ - IS_QLA27XX(ha) || IS_QLA28XX(ha)) -#define IS_BPM_RANGE_CAPABLE(ha) \ - (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) -#define IS_BPM_ENABLED(vha) \ - (ql2xautodetectsfp && !vha->vp_idx && IS_BPM_CAPABLE(vha->hw)) +#define IS_BPM_CAPABLE(ha)\ + (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || \ + IS_QLA27XX(ha) || IS_QLA28XX(ha)) +#define IS_BPM_RANGE_CAPABLE(ha)\ + (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) +#define IS_BPM_ENABLED(vha)\ + (ql2xautodetectsfp && !vha->vp_idx && IS_BPM_CAPABLE(vha->hw)) #define FLASH_SEMAPHORE_REGISTER_ADDR 0x00101016 +#define LOCKDOWN_ERROR(_ha, _status) \ + (IS_SYS_LOCKDOWN_CAPABLE(_ha) && \ + _status == MBS_FLASH_LOCKDOWN) + #define USER_CTRL_IRQ(_ha) (ql2xuctrlirq && QLA_TGT_MODE_ENABLED() && \ (IS_QLA27XX(_ha) || IS_QLA28XX(_ha) || IS_QLA83XX(_ha))) @@ -5163,20 +6073,93 @@ struct sff_8247_a0 { #define NVME_FCP_TARGET(fcport) \ (FCP_TYPE(fcport) && NVME_TYPE(fcport)) \ +#define NVME_PRIORITY(ha, fcport) \ + (NVME_FCP_TARGET(fcport) && \ + (ha->fc4_type_priority == FC4_PRIORITY_NVME)) + #define NVME_TARGET(ha, fcport) \ - ((NVME_FCP_TARGET(fcport) && \ - (ha->fc4_type_priority == FC4_PRIORITY_NVME)) || \ + (fcport->do_prli_nvme || \ NVME_ONLY_TARGET(fcport)) \ #define PRLI_PHASE(_cls) \ ((_cls == DSC_LS_PRLI_PEND) || (_cls == DSC_LS_PRLI_COMP)) +enum ql_vnd_host_stat_action { + stop = 0, + start, + clear, +}; + +struct ql_vnd_mng_host_stats_param { + uint32_t stat_type; + enum ql_vnd_host_stat_action action; +} __packed; + +struct ql_vnd_mng_host_stats_resp { + uint32_t status; +}__packed; + +struct ql_vnd_stats_param { + uint32_t stat_type; +}__packed; + +struct ql_vnd_tgt_stats_param { + int32_t tgt_id; + uint32_t stat_type; +}__packed; + +enum ql_vnd_host_port_action { + enable = 0, + disable, +}; + +struct ql_vnd_mng_host_port_param { + enum ql_vnd_host_port_action action; +}__packed; + +struct ql_vnd_mng_host_port_resp { + uint32_t status; +}__packed; + +struct ql_vnd_stat_entry { + uint32_t stat_type; /* Failure type */ + uint32_t tgt_num; /* Target Num */ + uint64_t cnt; /* Counter value */ +}__packed; + +struct ql_vnd_stats { + uint64_t entry_count; /* Num of entries */ + uint64_t rservd; + struct ql_vnd_stat_entry entry[0]; /* Place holder of entries */ +}__packed; + +struct ql_vnd_host_stats_resp { + uint32_t status; + struct ql_vnd_stats stats; +}__packed; + +struct ql_vnd_tgt_stats_resp { + uint32_t status; + struct ql_vnd_stats stats; +}__packed; + +#define QDBG_FW_DUMP BIT_0 +#define QDBG_CRASH_ON_ERR BIT_1 + 
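+/*
+ * Illustrative use of the QDBG_* bits above (an assumption, not part of the
+ * original change): ql2xdebug is treated as a bitmask module parameter and
+ * tested through the is_debug() helper defined below, e.g.
+ *
+ *	if (is_debug(QDBG_FW_DUMP))
+ *		ql_log(ql_log_info, vha, 0x0000, "collecting fw dump\n");
+ *
+ * (The ql_log() call and message code here are placeholders.)
+ */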
+#define is_debug(_bit) unlikely((ql2xdebug & (_bit))) + +#define DBG_FCPORT_PRFMT(_fp, _fmt, _args...) \ + "%s: %8phC: " _fmt " (state=%d disc_state=%d scan_state=%d loopid=0x%x deleted=%d flags=0x%x)\n", \ + __func__, _fp->port_name, ##_args, atomic_read(&_fp->state), \ + _fp->disc_state, _fp->scan_state, _fp->loop_id, _fp->deleted, \ + _fp->flags + #include "qla_target.h" #include "qla_gbl.h" #include "qla_dbg.h" #include "qla_inline.h" +#include "qla_compat.h" -#define IS_SESSION_DELETED(_fcport) (_fcport->disc_state == DSC_DELETE_PEND || \ - _fcport->disc_state == DSC_DELETED) +#define SESSION_DELETE(_fcport) (_fcport->disc_state == DSC_DELETE_PEND || _fcport->disc_state == DSC_DELETED) #endif diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index d5ebcf7d70ff0..94e0d6f654fef 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -1,7 +1,8 @@ -// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" @@ -11,6 +12,10 @@ static struct dentry *qla2x00_dfs_root; static atomic_t qla2x00_dfs_root_count; +#ifdef QLA_TRACING +static QLA_DFS_ROOT_DEFINE_DENTRY(message_trace); /* qla_dfs_message_trace */ +#endif /* QLA_TRACING */ + #define QLA_DFS_RPORT_DEVLOSS_TMO 1 static int @@ -37,18 +42,19 @@ qla_dfs_rport_set(struct fc_port *fp, int attr_id, u64 val) /* Only supported for FC-NVMe devices that are registered. */ if (!(fp->nvme_flag & NVME_FLAG_REGISTERED)) return -EIO; -#if (IS_ENABLED(CONFIG_NVME_FC)) - return nvme_fc_set_remoteport_devloss(fp->nvme_remote_port, - val); -#else /* CONFIG_NVME_FC */ - return -EINVAL; -#endif /* CONFIG_NVME_FC */ + return nvme_fc_set_remoteport_devloss( + fp->nvme_remote_port, val); default: return -EINVAL; } return 0; } +#ifndef DEFINE_DEBUGFS_ATTRIBUTE +#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \ + DEFINE_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt) +#endif /* DEFINE_DEBUGFS_ATTRIBUTE */ + #define DEFINE_QLA_DFS_RPORT_RW_ATTR(_attr_id, _attr) \ static int qla_dfs_rport_##_attr##_get(void *data, u64 *val) \ { \ @@ -62,7 +68,7 @@ static int qla_dfs_rport_##_attr##_set(void *data, u64 val) \ } \ DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_##_attr##_fops, \ qla_dfs_rport_##_attr##_get, \ - qla_dfs_rport_##_attr##_set, "%llu\n") + qla_dfs_rport_##_attr##_set, "%llu\n"); /* * Wrapper for getting fc_port fields. 
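For reference, a minimal sketch of what one generated accessor pair is assumed to expand to -- here DEFINE_QLA_DFS_RPORT_RW_ATTR(QLA_DFS_RPORT_DEVLOSS_TMO, dev_loss_tmo), with the get/set bodies delegating to qla_dfs_rport_get()/qla_dfs_rport_set() as the wrappers in the hunk above suggest (illustrative only, not part of the patch):

	static int qla_dfs_rport_dev_loss_tmo_get(void *data, u64 *val)
	{
		struct fc_port *fp = data;	/* debugfs passes the fc_port as i_private */

		return qla_dfs_rport_get(fp, QLA_DFS_RPORT_DEVLOSS_TMO, val);
	}

	static int qla_dfs_rport_dev_loss_tmo_set(void *data, u64 val)
	{
		struct fc_port *fp = data;

		return qla_dfs_rport_set(fp, QLA_DFS_RPORT_DEVLOSS_TMO, val);
	}

	DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_dev_loss_tmo_fops,
				 qla_dfs_rport_dev_loss_tmo_get,
				 qla_dfs_rport_dev_loss_tmo_set, "%llu\n");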
@@ -79,7 +85,7 @@ static int qla_dfs_rport_field_##_attr##_get(void *data, u64 *val) \ } \ DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_##_attr##_fops, \ qla_dfs_rport_field_##_attr##_get, \ - NULL, "%llu\n") + NULL, "%llu\n"); #define DEFINE_QLA_DFS_RPORT_ACCESS(_attr, _get_val) \ DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val) @@ -99,6 +105,8 @@ DEFINE_QLA_DFS_RPORT_FIELD(last_rscn_gen); DEFINE_QLA_DFS_RPORT_FIELD(rscn_gen); DEFINE_QLA_DFS_RPORT_FIELD(login_gen); DEFINE_QLA_DFS_RPORT_FIELD(loop_id); +DEFINE_QLA_DFS_RPORT_FIELD(online_time); +DEFINE_QLA_DFS_RPORT_FIELD(offline_time); DEFINE_QLA_DFS_RPORT_FIELD_GET(port_id, fp->d_id.b24); DEFINE_QLA_DFS_RPORT_FIELD_GET(sess_kref, kref_read(&fp->sess_kref)); @@ -109,7 +117,7 @@ qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp) #define QLA_CREATE_RPORT_FIELD_ATTR(_attr) \ debugfs_create_file(#_attr, 0400, fp->dfs_rport_dir, \ - fp, &qla_dfs_rport_field_##_attr##_fops) + fp, &qla_dfs_rport_field_##_attr##_fops); if (!vha->dfs_rport_root || fp->dfs_rport_dir) return; @@ -120,7 +128,7 @@ qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp) return; if (NVME_TARGET(vha->hw, fp)) debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir, - fp, &qla_dfs_rport_dev_loss_tmo_fops); + fp, &qla_dfs_rport_dev_loss_tmo_fops); QLA_CREATE_RPORT_FIELD_ATTR(disc_state); QLA_CREATE_RPORT_FIELD_ATTR(scan_state); @@ -134,6 +142,8 @@ qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp) QLA_CREATE_RPORT_FIELD_ATTR(loop_id); QLA_CREATE_RPORT_FIELD_ATTR(port_id); QLA_CREATE_RPORT_FIELD_ATTR(sess_kref); + QLA_CREATE_RPORT_FIELD_ATTR(online_time); + QLA_CREATE_RPORT_FIELD_ATTR(offline_time); } void @@ -170,63 +180,94 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused) return 0; } -DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_sess); +static int +qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file) +{ + scsi_qla_host_t *vha = inode->i_private; + + return single_open(file, qla2x00_dfs_tgt_sess_show, vha); +} + +static const struct file_operations dfs_tgt_sess_ops = { + .open = qla2x00_dfs_tgt_sess_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; static int qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused) { scsi_qla_host_t *vha = s->private; struct qla_hw_data *ha = vha->hw; - struct gid_list_info *gid_list; + struct gid_list_info *gid_list, *gid; dma_addr_t gid_list_dma; - fc_port_t fc_port; + fc_port_t *fc_port; char *id_iter; int rc, i; uint16_t entries, loop_id; + fc_port=kzalloc(sizeof(*fc_port), GFP_KERNEL); + if (!fc_port) + return -ENOMEM; + seq_printf(s, "%s\n", vha->host_str); gid_list = dma_alloc_coherent(&ha->pdev->dev, - qla2x00_gid_list_size(ha), - &gid_list_dma, GFP_KERNEL); + qla2x00_gid_list_size(ha), + &gid_list_dma, GFP_KERNEL); if (!gid_list) { ql_dbg(ql_dbg_user, vha, 0x7018, - "DMA allocation failed for %u\n", - qla2x00_gid_list_size(ha)); - return 0; + "DMA allocation failed for %u\n", + qla2x00_gid_list_size(ha)); + goto out_free_fc_port; } rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, - &entries); + &entries); if (rc != QLA_SUCCESS) goto out_free_id_list; id_iter = (char *)gid_list; - seq_puts(s, "Port Name Port ID Loop ID\n"); + seq_puts(s, "Port Name Port ID Loop ID\n"); for (i = 0; i < entries; i++) { - struct gid_list_info *gid = - (struct gid_list_info *)id_iter; + gid = (struct gid_list_info *)id_iter; loop_id = le16_to_cpu(gid->loop_id); - memset(&fc_port, 0, sizeof(fc_port_t)); + memset(fc_port, 0, 
sizeof(*fc_port));
 
- fc_port.loop_id = loop_id;
+ fc_port->loop_id = loop_id;
 
- rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
+ rc = qla24xx_gpdb_wait(vha, fc_port, 0);
 
 seq_printf(s, "%8phC %02x%02x%02x %d\n",
- fc_port.port_name, fc_port.d_id.b.domain,
- fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
- fc_port.loop_id);
+ fc_port->port_name, fc_port->d_id.b.domain,
+ fc_port->d_id.b.area, fc_port->d_id.b.al_pa,
+ fc_port->loop_id);
 
 id_iter += ha->gid_list_info_size;
 }
 
 out_free_id_list:
 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
- gid_list, gid_list_dma);
+ gid_list, gid_list_dma);
+out_free_fc_port:
+ kfree(fc_port);
 
 return 0;
 }
 
-DEFINE_SHOW_ATTRIBUTE(qla2x00_dfs_tgt_port_database);
+static int
+qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
+{
+ scsi_qla_host_t *vha = inode->i_private;
+
+ return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
+}
+
+static const struct file_operations dfs_tgt_port_database_ops = {
+ .open = qla2x00_dfs_tgt_port_database_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
 
 static int
 qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
@@ -235,7 +276,7 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
 uint16_t mb[MAX_IOCB_MB_REG];
 int rc;
 struct qla_hw_data *ha = vha->hw;
- u16 iocbs_used, i;
+ u16 iocbs_used, i, exch_used;
 
 rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
 if (rc != QLA_SUCCESS) {
@@ -263,19 +304,38 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
 if (ql2xenforce_iocb_limit) {
 /* lock is not required. It's an estimate. */
 iocbs_used = ha->base_qpair->fwres.iocbs_used;
- for (i = 0; i < ha->max_qpairs; i++) {
- if (ha->queue_pair_map[i])
+ exch_used = ha->base_qpair->fwres.exch_used;
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i]) {
 iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
+ exch_used += ha->queue_pair_map[i]->fwres.exch_used;
+ }
 }
 
- seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
- iocbs_used, ha->base_qpair->fwres.iocbs_limit);
+ seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
+ iocbs_used, ha->base_qpair->fwres.iocbs_limit);
+
+ seq_printf(s, " estimate exchange used [%d] high water limit [%d]\n",
+ exch_used, ha->base_qpair->fwres.exch_limit);
 }
 
 return 0;
 }
 
-DEFINE_SHOW_ATTRIBUTE(qla_dfs_fw_resource_cnt);
+static int
+qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file)
+{
+ struct scsi_qla_host *vha = inode->i_private;
+
+ return single_open(file, qla_dfs_fw_resource_cnt_show, vha);
+}
+
+static const struct file_operations dfs_fw_resource_cnt_ops = {
+ .open = qla_dfs_fw_resource_cnt_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
 
 static int
 qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
@@ -286,6 +346,11 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
 core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd,
 num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent;
 u16 i;
+ fc_port_t *fcport = NULL;
+
+ if (qla2x00_chip_is_down(vha))
+ return 0;
 
 qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd;
 core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf;
@@ -349,10 +414,426 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
 vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
 seq_printf(s, "DIF App tag err = %d\n",
 vha->qla_stats.qla_dif_stats.dif_app_tag_err);
+
+ seq_puts(s, "\n");
+ seq_puts(s, "Initiator Error 
Counters\n"); + seq_printf(s, "HW Error Count = %14lld\n", + vha->hw_err_cnt); + seq_printf(s, "Link Down Count = %14lld\n", + vha->short_link_down_cnt); + seq_printf(s, "Interface Err Count = %14lld\n", + vha->interface_err_cnt); + seq_printf(s, "Cmd Timeout Count = %14lld\n", + vha->cmd_timeout_cnt); + seq_printf(s, "Reset Count = %14lld\n", + vha->reset_cmd_err_cnt); + seq_puts(s,"\n"); + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (!fcport->rport) + continue; + + seq_printf(s, "Target Num = %7d Link Down Count = %14lld\n", + fcport->rport->number, fcport->tgt_short_link_down_cnt); + } + seq_puts(s,"\n"); + + return 0; +} + +static int +qla_dfs_tgt_counters_open(struct inode *inode, struct file *file) +{ + struct scsi_qla_host *vha = inode->i_private; + + return single_open(file, qla_dfs_tgt_counters_show, vha); +} + +static const struct file_operations dfs_tgt_counters_ops = { + .open = qla_dfs_tgt_counters_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +#ifdef QLA2XXX_LATENCY_MEASURE +static ssize_t +qla_dfs_latency_counters_write(struct file *file, const char __user *buffer, + size_t count, loff_t *pos) +{ + struct seq_file *s = file->private_data; + struct scsi_qla_host *vha = s->private; + char *buf; + int rc = 0; + int i = 0; + unsigned long action; + + buf = memdup_user_nul(buffer, count); + if (IS_ERR(buf)) { + pr_err("host%ld: fail to copy user buffer.", + vha->host_no); + return PTR_ERR(buf); + } + + action = simple_strtoul(buf, NULL, 0); + + if (action != 1) { + pr_err("User passed invalid value = %ld", action); + rc = -EINVAL; + goto out_free; + } + + for (i = 0; i < cmd_type_count; i++) + { + vha->latency_counters.qla_tot_cmds[i] = 0; + vha->latency_counters.qla_time_qcmd_to_req_q[i] = 0; + vha->latency_counters.qla_time_req_q_to_rsp_q[i] = 0; + vha->latency_counters.qla_time_rsq_q_to_ml[i] = 0; + vha->latency_counters.qla_time_qcmd_to_ml[i] = 0; + } + + for (i = 0; i < nvme_cmd_count; i++) + { + vha->latency_counters.qla_nvme_tot_cmds[i] = 0; + vha->latency_counters.qla_nvme_qcmd_to_req_q[i] = 0; + vha->latency_counters.qla_nvme_req_q_to_rsp_q[i] = 0; + vha->latency_counters.qla_nvme_rsp_q_to_ml[i] = 0; + vha->latency_counters.qla_nvme_qcmd_to_ml[i] = 0; + } + + rc = count; +out_free: + kfree(buf); + return rc; +} + +static int +qla_dfs_latency_counters_show(struct seq_file *s, void *unused) +{ + struct scsi_qla_host *vha = s->private; + int i =0; + + seq_puts(s, "\n"); + seq_puts(s, "\t\tSCSI Latency-Counters"); + seq_puts(s, "\n-------------------------------------------------\n"); + for (i = 0; i < cmd_type_count; i++) + { + if (vha->latency_counters.qla_tot_cmds[i] == 0) + continue; + + switch (i) { + case read6: + seq_puts(s, "READ6 :\n"); + break; + case read10: + seq_puts(s, "READ10 :\n"); + break; + case read12: + seq_puts(s, "READ12 :\n"); + break; + case read16: + seq_puts(s, "READ16 :\n"); + break; + case write6: + seq_puts(s, "WRITE6 :\n"); + break; + case write10: + seq_puts(s, "WRITE10 :\n"); + break; + case write12: + seq_puts(s, "WRITE12 :\n"); + break; + case write16: + seq_puts(s, "WRITE16 :\n"); + break; + } + seq_puts(s, "-----------\n"); + seq_printf(s, "Total commands : %14lld", vha->latency_counters.qla_tot_cmds[i]); + seq_puts(s,"\n"); + seq_printf(s, "Avg Time QCmd-->ReqQ(ns): %14lld", vha->latency_counters.qla_time_qcmd_to_req_q[i]/vha->latency_counters.qla_tot_cmds[i]); + seq_puts(s,"\n"); + seq_printf(s, "Avg Time ReqQ-->RspQ(ns): %14lld", 
vha->latency_counters.qla_time_req_q_to_rsp_q[i] / vha->latency_counters.qla_tot_cmds[i]);
+ seq_puts(s, "\n");
+ seq_printf(s, "Avg Time RspQ-->ML(ns) : %14lld", vha->latency_counters.qla_time_rsq_q_to_ml[i] / vha->latency_counters.qla_tot_cmds[i]);
+ seq_puts(s, "\n");
+ seq_printf(s, "Avg Time QCmd-->ML(ns) : %14lld", vha->latency_counters.qla_time_qcmd_to_ml[i] / vha->latency_counters.qla_tot_cmds[i]);
+ seq_puts(s, "\n");
+ seq_puts(s, "\n");
+ }
+
+ seq_puts(s, "\n-------------------------------------------------\n");
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\n");
+ seq_puts(s, "\t\tNVMe Latency-Counters");
+ seq_puts(s, "\n-------------------------------------------------\n");
+ for (i = 0; i < nvme_cmd_count; i++) {
+ if (vha->latency_counters.qla_nvme_tot_cmds[i] == 0)
+ continue;
+
+ switch (i) {
+ case ql_nvme_read:
+ seq_puts(s, "READ :\n");
+ break;
+ case ql_nvme_write:
+ seq_puts(s, "WRITE:\n");
+ break;
+ }
+
+ seq_printf(s, "Total commands : %14lld", vha->latency_counters.qla_nvme_tot_cmds[i]);
+ seq_puts(s, "\n");
+ seq_printf(s, "Avg Time QCmd-->ReqQ(ns): %14lld", vha->latency_counters.qla_nvme_qcmd_to_req_q[i] / vha->latency_counters.qla_nvme_tot_cmds[i]);
+ seq_puts(s, "\n");
+ seq_printf(s, "Avg Time ReqQ-->RspQ(ns): %14lld", vha->latency_counters.qla_nvme_req_q_to_rsp_q[i] / vha->latency_counters.qla_nvme_tot_cmds[i]);
+ seq_puts(s, "\n");
+ seq_printf(s, "Avg Time RspQ-->ML(ns) : %14lld", vha->latency_counters.qla_nvme_rsp_q_to_ml[i] / vha->latency_counters.qla_nvme_tot_cmds[i]);
+ seq_puts(s, "\n");
+ seq_printf(s, "Avg Time QCmd-->ML(ns) : %14lld", vha->latency_counters.qla_nvme_qcmd_to_ml[i] / vha->latency_counters.qla_nvme_tot_cmds[i]);
+ seq_puts(s, "\n");
+ }
+
+ seq_puts(s, "\n-------------------------------------------------\n");
+ seq_printf(s, "Latency outliers : %u\n", vha->qla_stats.latency_outliers);
+ seq_puts(s, "\n");
+
 return 0;
 }
 
-DEFINE_SHOW_ATTRIBUTE(qla_dfs_tgt_counters);
+static int
+qla_dfs_latency_counters_open(struct inode *inode, struct file *file)
+{
+ struct scsi_qla_host *vha = inode->i_private;
+
+ return single_open(file, qla_dfs_latency_counters_show, vha);
+}
+
+static const struct file_operations dfs_latency_counters_ops = {
+ .open = qla_dfs_latency_counters_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = qla_dfs_latency_counters_write,
+};
+#endif
+
+#ifdef QLA_TRACING
+static const char *trace_help = "\
+# Format:\n\
+# \n\
+#\n\
+# Trace control by writing:\n\
+# 'enable' - to enable this trace\n\
+# 'disable' - to disable this trace\n\
+# 'resize=<num>' - to resize this trace to <num> lines\n\
+#\n";
+
+static int
+qla_dfs_trace_show(struct seq_file *s, void *unused)
+{
+ struct qla_trace *trc = s->private;
+ char *buf;
+ u32 t_ind = 0, i;
+
+ seq_puts(s, trace_help);
+
+ if (qla_trace_get(trc))
+ return 0;
+
+ seq_printf(s, "# Trace max lines = %d, writes = %s\n#\n",
+ trc->num_entries, test_bit(QLA_TRACE_ENABLED,
+ &trc->flags) ? 
"enabled" : "disabled"); + + if (test_bit(QLA_TRACE_WRAPPED, &trc->flags)) + t_ind = qla_trace_cur_ind(trc) + 1; + + for (i = 0; i < qla_trace_len(trc); i++, t_ind++) { + t_ind = qla_trace_ind_norm(trc, t_ind); + buf = qla_trace_record(trc, t_ind); + if (!buf[0]) + continue; + seq_puts(s, buf); + } + + mb(); + qla_trace_put(trc); + return 0; +} + +#define string_is(_buf, _str_val) \ + (strncmp(_str_val, _buf, strlen(_str_val)) == 0) + +static ssize_t +qla_dfs_trace_write(struct file *file, const char __user *buffer, + size_t count, loff_t *pos) +{ + struct seq_file *s = file->private_data; + struct qla_trace *trc = s->private; + char buf[32]; + ssize_t ret = count; + + memset(buf, 0, sizeof(buf)); + if (copy_from_user(buf, buffer, min(sizeof(buf), count))) + return -EFAULT; + + if (string_is(buf, "enable")) { + if (!trc->recs) { + pr_warn("qla2xxx: '%s' is empty, resize before enabling.\n", + trc->name); + return -EINVAL; + } + pr_info("qla2xxx: Enabling trace '%s'\n", trc->name); + set_bit(QLA_TRACE_ENABLED, &trc->flags); + } else if (string_is(buf, "disable")) { + pr_info("qla2xxx: Disabling trace '%s'\n", trc->name); + clear_bit(QLA_TRACE_ENABLED, &trc->flags); + } else if (string_is(buf, "resize")) { + u32 new_len; + if (sscanf(buf, "resize=%u", &new_len) != 1) + return -EINVAL; + if (new_len == trc->num_entries) { + pr_info("qla2xxx: New trace size is same as old.\n"); + return count; + } + pr_info("qla2xxx: Changing trace '%s' size to %d\n", + trc->name, new_len); + if (qla_trace_quiesce(trc)) { + ret = -EBUSY; + goto done; + } + qla_trace_uninit(trc); + /* + * Go through init once again to start creating traces + * based on the respective tunable. + */ + qla_trace_init(trc, trc->name, new_len); + if (!trc->recs) { + pr_warn("qla2xxx: Trace allocation failed for '%s'\n", + trc->name); + ret = -ENOMEM; + } + } +done: + return ret; +} + +static int +qla_dfs_message_trace_show(struct seq_file *s, void *unused) +{ + return qla_dfs_trace_show(s, unused); +} + +static ssize_t +qla_dfs_message_trace_write(struct file *file, const char __user *buffer, + size_t count, loff_t *pos) +{ + return qla_dfs_trace_write(file, buffer, count, pos); +} + +static int +qla_dfs_srb_trace_show(struct seq_file *s, void *unused) +{ + return qla_dfs_trace_show(s, unused); +} + +static ssize_t +qla_dfs_srb_trace_write(struct file *file, const char __user *buffer, + size_t count, loff_t *pos) +{ + return qla_dfs_trace_write(file, buffer, count, pos); +} +#endif /* QLA_TRACING */ + +/* + * Helper macros for setting up debugfs entries. + * _name: The name of the debugfs entry + * _ctx_struct: The context that was passed when creating the debugfs file + * + * QLA_DFS_SETUP_RD could be used when there is only a show function. + * - show function take the name qla_dfs__show + * + * QLA_DFS_SETUP_RW could be used when there are both show and write functions. + * - show function take the name qla_dfs__show + * - write function take the name qla_dfs__write + * + * To have a new debugfs entry, do: + * 1. Create a "struct dentry *" in the appropriate structure in the format + * dfs_ + * 2. Setup debugfs entries using QLA_DFS_SETUP_RD / QLA_DFS_SETUP_RW + * 3. Create debugfs file in qla2x00_dfs_setup() using QLA_DFS_CREATE_FILE + * or QLA_DFS_ROOT_CREATE_FILE + * 4. Remove debugfs file in qla2x00_dfs_remove() using QLA_DFS_REMOVE_FILE + * or QLA_DFS_ROOT_REMOVE_FILE + * + * Example for creating "TEST" sysfs file: + * 1. struct qla_hw_data { ... struct dentry *dfs_TEST; } + * 2. 
QLA_DFS_SETUP_RD(TEST, scsi_qla_host_t); + * 3. In qla2x00_dfs_setup(): + * QLA_DFS_CREATE_FILE(ha, TEST, 0600, ha->dfs_dir, vha); + * 4. In qla2x00_dfs_remove(): + * QLA_DFS_REMOVE_FILE(ha, TEST); + */ +#define QLA_DFS_SETUP_RD(_name, _ctx_struct) \ +static int \ +qla_dfs_##_name##_open(struct inode *inode, struct file *file) \ +{ \ + _ctx_struct *__ctx = inode->i_private; \ + \ + return single_open(file, qla_dfs_##_name##_show, __ctx); \ +} \ + \ +static const struct file_operations qla_dfs_##_name##_ops = { \ + .open = qla_dfs_##_name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +}; + +#define QLA_DFS_SETUP_RW(_name, _ctx_struct) \ +static int \ +qla_dfs_##_name##_open(struct inode *inode, struct file *file) \ +{ \ + _ctx_struct *__ctx = inode->i_private; \ + \ + return single_open(file, qla_dfs_##_name##_show, __ctx); \ +} \ + \ +static const struct file_operations qla_dfs_##_name##_ops = { \ + .open = qla_dfs_##_name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + .write = qla_dfs_##_name##_write, \ +}; + +#define QLA_DFS_ROOT_CREATE_FILE(_name, _perm, _ctx) \ + if (!qla_dfs_##_name) \ + qla_dfs_##_name = debugfs_create_file(#_name, \ + _perm, qla2x00_dfs_root, _ctx, \ + &qla_dfs_##_name##_ops); + +#define QLA_DFS_ROOT_REMOVE_FILE(_name) \ + if (qla_dfs_##_name) { \ + debugfs_remove(qla_dfs_##_name); \ + qla_dfs_##_name = NULL; \ + } + +#define QLA_DFS_CREATE_FILE(_struct, _name, _perm, _parent, _ctx) \ + (_struct)->dfs_##_name = debugfs_create_file(#_name, _perm, \ + _parent, _ctx, &qla_dfs_##_name##_ops); + +#define QLA_DFS_REMOVE_FILE(_struct, _name) \ + if ((_struct)->dfs_##_name) { \ + debugfs_remove((_struct)->dfs_##_name); \ + (_struct)->dfs_##_name = NULL; \ + } + +#ifdef QLA_TRACING +QLA_DFS_SETUP_RW(message_trace, struct qla_trace *); +QLA_DFS_SETUP_RW(srb_trace, struct qla_trace *); +#endif /* QLA_TRACING */ static int qla2x00_dfs_fce_show(struct seq_file *s, void *unused) @@ -554,33 +1035,44 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha) create_nodes: ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count", - S_IRUSR, ha->dfs_dir, vha, &qla_dfs_fw_resource_cnt_fops); + S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops); ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR, - ha->dfs_dir, vha, &qla_dfs_tgt_counters_fops); + ha->dfs_dir, vha, &dfs_tgt_counters_ops); ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database", - S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_port_database_fops); + S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops); ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, &dfs_fce_ops); ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess", - S_IRUSR, ha->dfs_dir, vha, &qla2x00_dfs_tgt_sess_fops); + S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops); + +#ifdef QLA2XXX_LATENCY_MEASURE + vha->dfs_latency_counters = debugfs_create_file("latency_counters", 0400, + ha->dfs_dir, vha, &dfs_latency_counters_ops); +#endif + +#ifdef QLA_TRACING + QLA_DFS_ROOT_CREATE_FILE(message_trace, 0600, &qla_message_trace); + + QLA_DFS_CREATE_FILE(ha, srb_trace, 0600, ha->dfs_dir, &ha->srb_trace); +#endif /* QLA_TRACING */ if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) { ha->tgt.dfs_naqp = debugfs_create_file("naqp", 0400, ha->dfs_dir, vha, &dfs_naqp_ops); if (!ha->tgt.dfs_naqp) { ql_log(ql_log_warn, vha, 0xd011, - "Unable to create debugFS naqp node.\n"); + "Unable to create debugFS naqp node.\n"); goto out; } } vha->dfs_rport_root 
= debugfs_create_dir("rports", ha->dfs_dir); if (!vha->dfs_rport_root) { ql_log(ql_log_warn, vha, 0xd012, - "Unable to create debugFS rports node.\n"); + "Unable to create debugFS rports node.\n"); goto out; } out: @@ -627,6 +1119,19 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha) vha->dfs_rport_root = NULL; } +#ifdef QLA2XXX_LATENCY_MEASURE + if (vha->dfs_latency_counters) { + debugfs_remove(vha->dfs_latency_counters); + vha->dfs_latency_counters = NULL; + } +#endif + +#ifdef QLA_TRACING + QLA_DFS_ROOT_REMOVE_FILE(message_trace); + + QLA_DFS_REMOVE_FILE(ha, srb_trace); +#endif /* QLA_TRACING */ + if (ha->dfs_dir) { debugfs_remove(ha->dfs_dir); ha->dfs_dir = NULL; diff --git a/drivers/scsi/qla2xxx/qla_edif.c b/drivers/scsi/qla2xxx/qla_edif.c new file mode 100644 index 0000000000000..e54beef1f5074 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_edif.c @@ -0,0 +1,3814 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Marvell Fibre Channel HBA Driver + * Copyright (c) 2018- Marvell + */ +#include "qla_def.h" +#include "qla_edif.h" + +#include +#include +#include +#include + +static struct edif_sa_index_entry *qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle, + struct list_head *sa_list); +static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport, + struct qla_sa_update_frame *sa_frame); +static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle, + uint16_t sa_index); + +static int qla_pur_get_pending(scsi_qla_host_t *, fc_port_t *, bsg_job_t *); +extern void qla24xx_process_purex_auth_rjt_iocb(struct scsi_qla_host *, void *); + +struct edb_node { + struct list_head list; + uint32_t ntype; + union { + port_id_t plogi_did; + uint32_t async; + port_id_t els_sid; + struct edif_sa_update_aen sa_aen; + } u; +}; + +static struct els_sub_cmd { + uint16_t cmd; + const char *str; +} sc_str[] = { + {SEND_ELS, "send ELS"}, + {SEND_ELS_REPLY, "send ELS Reply"}, + {PULL_ELS, "retrieve ELS"}, +}; + +const char *sc_to_str(uint16_t cmd) +{ + int i; + struct els_sub_cmd *e; + + for (i = 0; i < ARRAY_SIZE(sc_str); i++) { + e = sc_str + i; + if (cmd == e->cmd) + return e->str; + } + return "unknown"; +} + +static struct auth_msg_code_name { + u8 auth_msg_code; + const char *str; +} auth_msg_code_str[] = { + {AUTH_Reject, "AUTH_Reject"}, + {AUTH_Negotiate, "AUTH_Negotiate"}, + {AUTH_Done, "AUTH_Done"}, + {DHCHAP_Challenge, "DHCHAP_Challenge"}, + {DHCHAP_Reply, "DHCHAP_Reply"}, + {DHCHAP_Success, "DHCHAP_Success"}, + {FCAP_Request, "FCAP_Request"}, + {FCAP_Acknowledge, "FCAP_Acknowledge"}, + {FCAP_Confirm, "FCAP_Confirm"}, + {FCPAP_Init, "FCPAP_Init"}, + {FCPAP_Accept, "FCPAP_Accept"}, + {FCPAP_Complete, "FCPAP_Complete"}, + {IKE_SA_Init , "IKE_SA_Init "}, + {IKE_Auth , "IKE_Auth "}, + {IKE_Create_Child_SA, "IKE_Create_Child_SA"}, + {IKE_Informational, "IKE_Informational"}, + {FCEAP_Request, "FCEAP_Request"}, + {FCEAP_Response, "FCEAP_Response"}, + {FCEAP_Success, "FCEAP_Success"}, + {FCEAP_Failure, "FCEAP_Failure"}, +}; + +static const char* msg_code_to_str(u8 auth_msg_code) +{ + int i; + struct auth_msg_code_name *e; + + for (i = 0; i < ARRAY_SIZE(auth_msg_code_str); i++) { + e = auth_msg_code_str + i; + if (auth_msg_code == e->auth_msg_code) + return e->str; + } + return "unknown"; +} + +void qla_edif_print_auth_hdr(struct scsi_qla_host *vha, u8 *wwpn, + u32 src_pid, u32 dest_pid, u8 *ptr, u32 len, u32 xchgaddr) +{ + auth_els_header_t *h = (auth_els_header_t *)ptr; + u8 lwwpn[8]= {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff}; + + if (h->els_code != ELS_AUTH_ELS) + return; + + 
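+ /*
+ * Log the AUTH ELS header: source/destination port ids, exchange
+ * address, message code, and the message length and transaction id,
+ * which are carried big-endian on the wire. When no WWPN is
+ * supplied, print the all-ff placeholder instead.
+ */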
ql_dbg(ql_dbg_edif, vha, 0x1315, + "auth hdr: %8phC s:%06x d:%06x xchgaddr=%08x msg code=%s length=0x%x|0x%x transaction id=0x%x\n", + wwpn ? wwpn: lwwpn, src_pid, dest_pid, xchgaddr, + msg_code_to_str(h->message_code), len, be32_to_cpu(h->message_length), + be32_to_cpu(h->transaction_identifier)); + +} + + +static struct edb_node * qla_edb_getnext(scsi_qla_host_t *vha) +{ + unsigned long flags; + struct edb_node *edbnode = NULL; + + spin_lock_irqsave(&vha->e_dbell.db_lock, flags); + + /* db nodes are fifo - no qualifications done */ + if (!list_empty(&vha->e_dbell.head)) { + edbnode = list_first_entry(&vha->e_dbell.head, + struct edb_node, list); + list_del_init(&edbnode->list); + } + + spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); + + return edbnode; +} + +static void qla_edb_node_free(scsi_qla_host_t *vha, struct edb_node *node) +{ + list_del_init(&node->list); + kfree(node); +} + +struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport, + uint16_t handle) +{ + struct edif_list_entry *entry; + struct edif_list_entry *tentry; + struct list_head *indx_list = &fcport->edif.edif_indx_list; + + list_for_each_entry_safe(entry, tentry, indx_list, next) { + if (entry->handle == handle) + return entry; + } + return NULL; +} + +/* timeout called when no traffic and delayed rx sa_index delete */ +static void qla2x00_sa_replace_iocb_timeout(qla_timer_arg_t t) +{ + struct edif_list_entry *edif_entry = qla_from_timer(edif_entry, t, timer); + fc_port_t *fcport = edif_entry->fcport; + struct scsi_qla_host *vha = fcport->vha; + struct edif_sa_ctl *sa_ctl; + uint16_t nport_handle; + unsigned long flags = 0; + + ql_dbg(ql_dbg_edif, vha, 0x1300, + "%s: nport_handle 0x%x, SA REPL Delay Timeout, %8phC portid=%06x\n", + __func__, edif_entry->handle, fcport->port_name, fcport->d_id.b24); + + /* + * if delete_sa_index is valid then no one has serviced this + * delayed delete + */ + spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); + + /* + * delete_sa_index is invalidated when we find the new sa_index in + * the incoming data stream. If it is not invalidated then we are + * still looking for the new sa_index because there is no I/O and we + * need to just force the rx delete and move on. Otherwise + * we could get another rekey which will result in an error 66. 
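+ * Forcing the delete here queues the same sa_replace work that the
+ * I/O path would have posted once it saw the new sa_index.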
+ */ + if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) { + + uint16_t delete_sa_index = edif_entry->delete_sa_index; + edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX; + nport_handle = edif_entry->handle; + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + + sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, + delete_sa_index, 0); + + if (sa_ctl) { + ql_dbg(ql_dbg_edif, vha, 0x1301, + "%s: sa_ctl: %p, delete index %d, update index: %d, lid: 0x%x\n", + __func__, sa_ctl, delete_sa_index, edif_entry->update_sa_index, + nport_handle); + + sa_ctl->flags = EDIF_SA_CTL_FLG_DEL; + set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state); + qla_post_sa_replace_work(fcport->vha, fcport, + nport_handle, sa_ctl); + + } else { + ql_dbg(ql_dbg_edif, vha, 0x1302, + "%s: sa_ctl not found for delete_sa_index: %d\n", + __func__, edif_entry->delete_sa_index); + } + } else { + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + } +} + +/* + * create a new list entry for this nport handle and + * add an sa_update index to the list - called for sa_update + */ +static int qla_edif_list_add_sa_update_index(fc_port_t *fcport, + uint16_t sa_index, uint16_t handle) +{ + struct edif_list_entry *entry; + unsigned long flags = 0; + + /* if the entry exists, then just update the sa_index */ + entry = qla_edif_list_find_sa_index(fcport, handle); + if (entry) { + entry->update_sa_index = sa_index; + entry->count = 0; + return 0; + } + + /* + * This is the normal path - there should be no existing entry + * when update is called. The exception is at startup + * when update is called for the first two sa_indexes + * followed by a delete of the first sa_index + */ + entry = kzalloc((sizeof(struct edif_list_entry)), GFP_ATOMIC); + if (!entry) + return -ENOMEM; + + INIT_LIST_HEAD(&entry->next); + entry->handle = handle; + entry->update_sa_index = sa_index; + entry->delete_sa_index = INVALID_EDIF_SA_INDEX; + entry->count = 0; + entry->flags = 0; + qla_timer_setup(&entry->timer, qla2x00_sa_replace_iocb_timeout, + 0, entry); + spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); + list_add_tail(&entry->next, &fcport->edif.edif_indx_list); + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); + return 0; +} + +/* remove an entry from the list */ +static void qla_edif_list_delete_sa_index(fc_port_t *fcport, struct edif_list_entry *entry) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&fcport->edif.indx_list_lock, flags); + list_del(&entry->next); + spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags); +} + + + +int qla_post_sa_replace_work(struct scsi_qla_host *vha, + fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl) +{ + struct qla_work_evt *e; + + e = qla2x00_alloc_work(vha, QLA_EVT_SA_REPLACE); + if (!e) + return QLA_FUNCTION_FAILED; + + e->u.sa_update.fcport = fcport; + e->u.sa_update.sa_ctl = sa_ctl; + e->u.sa_update.nport_handle = nport_handle; + fcport->flags |= FCF_ASYNC_ACTIVE; + return qla2x00_post_work(vha, e); +} + +static void +qla_edif_sa_ctl_init(scsi_qla_host_t *vha, struct fc_port *fcport) +{ + ql_dbg(ql_dbg_edif, vha, 0x1303, + "Init SA_CTL List for fcport - nn %8phN pn %8phN portid=%06x.\n", + fcport->node_name, fcport->port_name, fcport->d_id.b24); + + fcport->edif.tx_rekey_cnt = 0; + fcport->edif.rx_rekey_cnt = 0; + + fcport->edif.tx_bytes = 0; + fcport->edif.rx_bytes = 0; +} + +static int qla_bsg_check(scsi_qla_host_t *vha, bsg_job_t *bsg_job, +fc_port_t *fcport) +{ + struct extra_auth_els *p; + struct qla_bsg_auth_els_request *req = + (struct 
qla_bsg_auth_els_request *)bsg_job->request; + + if (!vha->hw->flags.edif_enabled) { + ql_dbg(ql_dbg_edif, vha, 0x1304, + "%s edif not enabled\n", __func__); + goto done; + } + if (DBELL_INACTIVE(vha)) { + ql_dbg(ql_dbg_edif, vha, 0x1305, + "%s doorbell not enabled\n", __func__); + goto done; + } + + p = &req->e; + + /* Get response */ + if (p->sub_cmd == PULL_ELS) { + struct qla_bsg_auth_els_reply *rpl = + (struct qla_bsg_auth_els_reply *)bsg_job->reply; + + qla_pur_get_pending(vha, fcport, bsg_job); + + ql_dbg(ql_dbg_edif, vha, 0x1306, + "%s %s %8phN sid=%x. xchg %x, nb=%xh bsg ptr %px\n", + __func__, sc_to_str(p->sub_cmd), fcport->port_name, + fcport->d_id.b24, rpl->rx_xchg_address, + rpl->r.reply_payload_rcv_len, bsg_job); + + goto done; + } + return 0; + +done: + + bsg_job_done(bsg_job, ((struct fc_bsg_reply *)bsg_job->reply)->result, + ((struct fc_bsg_reply *)bsg_job->reply)->reply_payload_rcv_len); + return -EIO; +} + +fc_port_t * +qla2x00_find_fcport_by_pid(scsi_qla_host_t *vha, port_id_t *id) +{ + fc_port_t *f, *tf; + + f = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { + if (f->d_id.b24 == id->b24) + return f; + } + return NULL; +} + +int qla2x00_check_rdp_test( uint32_t cmd, uint32_t port) +{ + if (cmd == ELS_COMMAND_RDP && port == 0xFEFFFF) + return 1; + else + return 0; + +} + +/** + * qla_edif_app_check(): check for valid application id. + * @vha: host adapter pointer + * @appid: application id + * + * Return: false = fail, true = pass + */ +static bool +qla_edif_app_check(scsi_qla_host_t *vha, struct app_id appid) +{ + /* check that the app is allow/known to the driver */ + + if (appid.app_vid != EDIF_APP_ID) { + ql_dbg(ql_dbg_edif, vha, 0x1307, "%s app id not ok (%x)", + __func__, appid.app_vid); + return false; + } + + if (appid.version != EDIF_VERSION1) { + ql_dbg(ql_dbg_edif, vha, 0x1308, "%s app version is not ok (%x)", + __func__, appid.version); + return false; + } + + return true; +} + + +static void +qla_edif_free_sa_ctl(fc_port_t *fcport, struct edif_sa_ctl *sa_ctl, + int index) +{ + unsigned long flags = 0; + + spin_lock_irqsave(&fcport->edif.sa_list_lock, flags); + list_del(&sa_ctl->next); + spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags); + if (index >= 512) + fcport->edif.tx_rekey_cnt--; + else + fcport->edif.rx_rekey_cnt--; + kfree(sa_ctl); +} + +/* return an index to the freepool */ +static void qla_edif_add_sa_index_to_freepool(fc_port_t *fcport, int dir, + uint16_t sa_index) +{ + void *sa_id_map; + struct scsi_qla_host *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags = 0; + u16 lsa_index = sa_index; + + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x1309, + "%s: entry\n", __func__); + + if (dir) { + sa_id_map = ha->edif_tx_sa_id_map; + lsa_index -= EDIF_TX_SA_INDEX_BASE; + } else { + sa_id_map = ha->edif_rx_sa_id_map; + } + + spin_lock_irqsave(&ha->sadb_fp_lock, flags); + clear_bit(lsa_index, sa_id_map); + spin_unlock_irqrestore(&ha->sadb_fp_lock, flags); + ql_dbg(ql_dbg_edif, vha, 0x130a, + "%s: index %d added to free pool\n", __func__, sa_index); +} + +static void __qla2x00_release_all_sadb(struct scsi_qla_host *vha, + struct fc_port *fcport, struct edif_sa_index_entry *entry, + int pdir) +{ + struct edif_list_entry *edif_entry; + struct edif_sa_ctl *sa_ctl; + int i, dir; + int key_cnt = 0; + + for (i = 0; i < 2; i++) { + if (entry->sa_pair[i].sa_index == INVALID_EDIF_SA_INDEX) + continue; + + if (fcport->loop_id != entry->handle) { + ql_dbg(ql_dbg_edif, vha, 0x130b, + "%s: ** WARNING %d** entry 
handle: 0x%x, lid: 0x%x, sa_index: %d\n",
+ __func__, i, entry->handle, fcport->loop_id,
+ entry->sa_pair[i].sa_index);
+ }
+
+ /* release the sa_ctl */
+ sa_ctl = qla_edif_find_sa_ctl_by_index(fcport,
+ entry->sa_pair[i].sa_index, pdir);
+ if (sa_ctl &&
+ qla_edif_find_sa_ctl_by_index(fcport, sa_ctl->index, pdir)) {
+ ql_dbg(ql_dbg_edif, vha, 0x130c,
+ "%s: freeing sa_ctl for index %d\n", __func__, sa_ctl->index);
+ qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index);
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x130d,
+ "%s: sa_ctl NOT freed, sa_ctl: %p\n", __func__, sa_ctl);
+ }
+
+ /* Release the index */
+ ql_dbg(ql_dbg_edif, vha, 0x130e,
+ "%s: freeing sa_index %d, nph: 0x%x\n",
+ __func__, entry->sa_pair[i].sa_index, entry->handle);
+
+ dir = (entry->sa_pair[i].sa_index <
+ EDIF_TX_SA_INDEX_BASE) ? 0 : 1;
+ qla_edif_add_sa_index_to_freepool(fcport, dir,
+ entry->sa_pair[i].sa_index);
+
+ /* Delete timer on RX */
+ if (pdir != SAU_FLG_TX) {
+ edif_entry =
+ qla_edif_list_find_sa_index(fcport, entry->handle);
+ if (edif_entry) {
+ ql_dbg(ql_dbg_edif, vha, 0x130f,
+ "%s: remove edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
+ __func__, edif_entry, edif_entry->update_sa_index,
+ edif_entry->delete_sa_index);
+ qla_edif_list_delete_sa_index(fcport, edif_entry);
+ /*
+ * valid delete_sa_index indicates there is an rx
+ * delayed delete queued
+ */
+ if (edif_entry->delete_sa_index !=
+ INVALID_EDIF_SA_INDEX) {
+ del_timer(&edif_entry->timer);
+
+ /* build and send the aen */
+ fcport->edif.rx_sa_set = 1;
+ fcport->edif.rx_sa_pending = 0;
+ qla_edb_eventcreate(vha,
+ VND_CMD_AUTH_STATE_SAUPDATE_COMPL,
+ QL_VND_SA_STAT_SUCCESS,
+ QL_VND_RX_SA_KEY, fcport);
+ }
+ ql_dbg(ql_dbg_edif, vha, 0x1310,
+ "%s: release edif_entry %p, update_sa_index: 0x%x, delete_sa_index: 0x%x\n",
+ __func__, edif_entry, edif_entry->update_sa_index,
+ edif_entry->delete_sa_index);
+
+ kfree(edif_entry);
+ }
+ }
+ key_cnt++;
+ }
+ ql_dbg(ql_dbg_edif, vha, 0x1311,
+ "%s: %d %s keys released\n",
+ __func__, key_cnt, pdir ? "tx" : "rx");
+}
+
+/* find and release all outstanding sadb sa_indices */
+void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport)
+{
+ struct edif_sa_index_entry *entry, *tmp;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x1312,
+ "%s: Starting...\n", __func__);
+
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+
+ list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
+ if (entry->fcport == fcport) {
+ list_del(&entry->next);
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+ __qla2x00_release_all_sadb(vha, fcport, entry, 0);
+ kfree(entry);
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ break;
+ }
+ }
+
+ list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
+ if (entry->fcport == fcport) {
+ list_del(&entry->next);
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+
+ __qla2x00_release_all_sadb(vha, fcport, entry, SAU_FLG_TX);
+
+ kfree(entry);
+ spin_lock_irqsave(&ha->sadb_lock, flags);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->sadb_lock, flags);
+}
+
+/**
+ * qla_delete_n2n_sess_and_wait - search for the N2N session, tear it down and
+ * wait for the tear down to complete. In N2N topology there is only one
+ * session actively tracking the remote device.
+ * @vha: host adapter pointer
+ *
+ * Return: 0 - found the session and completed the tear down.
+ * -EIO - timeout occurred; caller to use link bounce to reset.
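+ * Polls the session state once per millisecond and gives up after
+ * roughly 23 seconds.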
+ */
+static int qla_delete_n2n_sess_and_wait(scsi_qla_host_t *vha)
+{
+ struct fc_port *fcport;
+ int rc = -EIO;
+ ulong expire = jiffies + 23 * HZ;
+
+ if (!N2N_TOPO(vha->hw))
+ return 0;
+
+ fcport = NULL;
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (!fcport->n2n_flag)
+ continue;
+
+ ql_dbg(ql_dbg_disc, fcport->vha, 0x2016,
+ "%s reset sess at app start\n", __func__);
+
+ qla_edif_sa_ctl_init(vha, fcport);
+ qlt_schedule_sess_for_deletion(fcport);
+
+ while (time_before_eq(jiffies, expire)) {
+ if (fcport->disc_state != DSC_DELETE_PEND) {
+ rc = 0;
+ break;
+ }
+ msleep(1);
+ }
+
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * qla_edif_app_start - application has announced its presence
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ *
+ * Set/activate doorbell. Reset current sessions and re-login with
+ * secure flag.
+ */
+static int
+qla_edif_app_start(scsi_qla_host_t *vha, bsg_job_t *bsg_job)
+{
+ int32_t rval = 0;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct app_start appstart;
+ struct app_start_reply appreply;
+ struct fc_port *fcport, *tf;
+
+ ql_log(ql_log_info, vha, 0x1313,
+ "EDIF application registration with driver, FC device connections will be re-established.\n");
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appstart,
+ sizeof(struct app_start));
+
+ ql_dbg(ql_dbg_edif, vha, 0x1314, "%s app_vid=%x app_start_flags %x\n",
+ __func__, appstart.app_info.app_vid, appstart.app_start_flags);
+
+ if (DBELL_INACTIVE(vha)) {
+ /* mark doorbell as active since an app is now present */
+ vha->e_dbell.db_flags |= EDB_ACTIVE;
+ } else {
+ goto out;
+ }
+
+ if (N2N_TOPO(vha->hw)) {
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list)
+ fcport->n2n_link_reset_cnt = 0;
+
+ if (vha->hw->flags.n2n_fw_acc_sec) {
+ bool link_bounce = false;
+
+ /*
+ * While authentication app was not running, remote device
+ * could still try to log in with this local port. Let's
+ * reset the session, reconnect and re-authenticate.
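+ * If the teardown times out, fall back to bouncing the link
+ * (N2N_LINK_RESET) to force a fresh login and authentication.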
+ */
+ if (qla_delete_n2n_sess_and_wait(vha))
+ link_bounce = true;
+
+ /* bounce the link to start login */
+ if (!vha->hw->flags.n2n_bigger || link_bounce) {
+ set_bit(N2N_LINK_RESET, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ } else {
+ qla2x00_wait_for_hba_online(vha);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ qla2x00_wait_for_hba_online(vha);
+ }
+ } else {
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ ql_dbg(ql_dbg_edif, vha, 0x1316,
+ "%s: sess %p %8phC lid %#04x s_id %06x logout %d\n",
+ __func__, fcport, fcport->port_name,
+ fcport->loop_id, fcport->d_id.b24,
+ fcport->logout_on_delete);
+
+ ql_dbg(ql_dbg_edif, vha, 0x1317,
+ "keep %d els_logo %d disc state %d auth state %d stop state %d\n",
+ fcport->keep_nport_handle,
+ fcport->send_els_logo, fcport->disc_state,
+ fcport->edif.auth_state, fcport->edif.app_stop);
+
+ if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+ break;
+
+ fcport->login_retry = vha->hw->login_retry_count;
+ fcport->edif.app_stop = 0;
+ fcport->edif.app_sess_online = 0;
+
+ if (fcport->scan_state != QLA_FCPORT_FOUND)
+ continue;
+
+ if (fcport->port_type == FCT_UNKNOWN && !fcport->fc4_features)
+ rval = qla24xx_async_gffid(vha, fcport, true);
+
+ if (!rval && !(fcport->fc4_features & FC4_FF_TARGET ||
+ fcport->port_type & (FCT_TARGET | FCT_NVME_TARGET)))
+ continue;
+
+ rval = 0;
+
+ ql_dbg(ql_dbg_edif, vha, 0x1318,
+ "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
+ __func__, fcport->port_name);
+ qlt_schedule_sess_for_deletion(fcport);
+ qla_edif_sa_ctl_init(vha, fcport);
+ }
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ }
+
+ if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) {
+ /* mark as active since an app is now present */
+ vha->pur_cinfo.enode_flags = ENODE_ACTIVE;
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x1319, "%s enode already active\n",
+ __func__);
+ }
+
+out:
+ appreply.host_support_edif = vha->hw->flags.edif_enabled;
+ appreply.edif_enode_active = vha->pur_cinfo.enode_flags;
+ appreply.edif_edb_active = vha->e_dbell.db_flags;
+ appreply.version = EDIF_VERSION1;
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+ bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &appreply,
+ sizeof(struct app_start_reply));
+
+ ql_dbg(ql_dbg_edif, vha, 0x131a,
+ "%s app start completed with 0x%x\n",
+ __func__, rval);
+
+ return rval;
+}
+
+/**
+ * qla_edif_app_stop - app has announced it's exiting.
+ * @vha: host adapter pointer
+ * @bsg_job: user space command pointer
+ *
+ * Free any in-flight messages, clear all doorbell events
+ * to application. Reject any message related to security.
+ */
+static int
+qla_edif_app_stop(scsi_qla_host_t *vha, bsg_job_t *bsg_job)
+{
+ int32_t rval = 0;
+ struct app_stop appstop;
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct fc_port *fcport, *tf;
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appstop,
+ sizeof(struct app_stop));
+
+ ql_dbg(ql_dbg_edif, vha, 0x131b, "%s Stopping APP: app_vid=%x\n",
+ __func__, appstop.app_info.app_vid);
+
+ qla_enode_stop(vha);
+ qla_edb_stop(vha);
+
+ list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+ if (!(fcport->flags & FCF_FCSP_DEVICE))
+ continue;
+
+ ql_dbg(ql_dbg_edif, vha, 0x131c,
+ "FCSP - nn %8phN pn %8phN portid=%02x%02x%02x.\n",
+ fcport->node_name, fcport->port_name,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
+ ql_dbg(ql_dbg_edif, vha, 0x131d,
+ "%s: se_sess %px / sess %px from port %8phC loop_id %#04x s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
+ __func__, fcport->se_sess, fcport,
+ fcport->port_name, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa, fcport->logout_on_delete,
+ fcport->keep_nport_handle, fcport->send_els_logo);
+
+ if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+ break;
+
+ fcport->edif.app_stop = APP_STOPPING;
+ ql_dbg(ql_dbg_edif, vha, 0x131e,
+ "%s wwpn %8phC calling qla_edif_reset_auth_wait\n",
+ __func__, fcport->port_name);
+
+ fcport->send_els_logo = 1;
+ qlt_schedule_sess_for_deletion(fcport);
+ }
+
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+ return rval;
+}
+
+static int
+qla_edif_app_chk_sa_update(scsi_qla_host_t *vha, fc_port_t *fcport,
+ struct app_plogi_reply *appplogireply)
+{
+ int ret = 0;
+
+ if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
+ ql_dbg(ql_dbg_edif, vha, 0x131f,
+ "%s: wwpn %8phC Both SA indexes have not been set: TX %d, RX %d.\n",
+ __func__, fcport->port_name, fcport->edif.tx_sa_set,
+ fcport->edif.rx_sa_set);
+ appplogireply->prli_status = 0;
+ ret = 1;
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x1320,
+ "%s wwpn %8phC Both SA(s) updated.\n", __func__,
+ fcport->port_name);
+ fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
+ fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
+ appplogireply->prli_status = 1;
+ }
+ return ret;
+}
+
+/*
+ * event that the app has approved plogi to complete (e.g., finish
+ * up with prli)
+ */
+static int
+qla_edif_app_authok(scsi_qla_host_t *vha, bsg_job_t *bsg_job)
+{
+ int32_t rval = 0;
+ struct auth_complete_cmd appplogiok;
+ struct app_plogi_reply appplogireply = {0};
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ fc_port_t *fcport = NULL;
+ port_id_t portid = {0};
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, &appplogiok,
+ sizeof(struct auth_complete_cmd));
+
+ /* silent unaligned access warning */
+ portid.b.domain = appplogiok.u.d_id.b.domain;
+ portid.b.area = appplogiok.u.d_id.b.area;
+ portid.b.al_pa = appplogiok.u.d_id.b.al_pa;
+
+ appplogireply.version = EDIF_VERSION1;
+ switch (appplogiok.type) {
+ case PL_TYPE_WWPN:
+ fcport = qla2x00_find_fcport_by_wwpn(vha,
+ appplogiok.u.wwpn, 0);
+ if (!fcport)
+ ql_dbg(ql_dbg_edif, vha, 0x1321,
+ "%s wwpn lookup failed: %8phC\n",
+ __func__, appplogiok.u.wwpn);
+ break;
+ case PL_TYPE_DID:
+ fcport = qla2x00_find_fcport_by_pid(vha, &portid);
+ if (!fcport)
+ ql_dbg(ql_dbg_edif, vha, 0x1322,
+ "%s d_id lookup failed: %x\n", 
__func__,
+ portid.b24);
+ break;
+ default:
+ ql_dbg(ql_dbg_edif, vha, 0x1323,
+ "%s undefined type: %x\n", __func__,
+ appplogiok.type);
+ break;
+ }
+
+ if (!fcport) {
+ SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+ goto errstate_exit;
+ }
+
+ /*
+ * if port is online then this is a REKEY operation
+ * Only do sa update checking
+ */
+ if (atomic_read(&fcport->state) == FCS_ONLINE) {
+ ql_dbg(ql_dbg_edif, vha, 0x1324,
+ "%s Skipping PRLI complete based on rekey\n", __func__);
+ appplogireply.prli_status = 1;
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ qla_edif_app_chk_sa_update(vha, fcport, &appplogireply);
+ goto errstate_exit;
+ }
+
+ /* make sure in AUTH_PENDING or else reject */
+ if (fcport->disc_state != DSC_LOGIN_AUTH_PEND) {
+ ql_dbg(ql_dbg_edif, vha, 0x1325,
+ "%s wwpn %8phC is not in auth pending state (%x)\n",
+ __func__, fcport->port_name, fcport->disc_state);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ appplogireply.prli_status = 0;
+ goto errstate_exit;
+ }
+
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ appplogireply.prli_status = 1;
+ fcport->edif.authok = 1;
+ if (!(fcport->edif.rx_sa_set && fcport->edif.tx_sa_set)) {
+ ql_dbg(ql_dbg_edif, vha, 0x1326,
+ "%s: wwpn %8phC Both SA indexes have not been set: TX %d, RX %d.\n",
+ __func__, fcport->port_name, fcport->edif.tx_sa_set,
+ fcport->edif.rx_sa_set);
+ SET_DID_STATUS(bsg_reply->result, DID_OK);
+ appplogireply.prli_status = 0;
+ goto errstate_exit;
+ } else {
+ ql_dbg(ql_dbg_edif, vha, 0x1327,
+ "%s wwpn %8phC Both SA(s) updated.\n", __func__,
+ fcport->port_name);
+ fcport->edif.rx_sa_set = fcport->edif.tx_sa_set = 0;
+ fcport->edif.rx_sa_pending = fcport->edif.tx_sa_pending = 0;
+ }
+
+ if (qla_ini_mode_enabled(vha)) {
+ ql_dbg(ql_dbg_edif, vha, 0x1328,
+ "%s AUTH complete - RESUME with prli for wwpn %8phC\n",
+ __func__, fcport->port_name);
+ qla24xx_post_prli_work(vha, fcport);
+ }
+
+errstate_exit:
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+ bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &appplogireply,
+ sizeof(struct app_plogi_reply));
+
+ return rval;
+}
+
+/**
+ * qla_edif_app_authfail - authentication by app has failed. Driver is given
+ * notice to tear down current session.
+
+/**
+ * qla_edif_app_authfail - authentication by app has failed. Driver is given
+ * notice to tear down the current session.
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ */
+static int
+qla_edif_app_authfail(scsi_qla_host_t *vha, bsg_job_t *bsg_job)
+{
+    int32_t rval = 0;
+    struct auth_complete_cmd appplogifail;
+    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+    fc_port_t *fcport = NULL;
+    port_id_t portid = {0};
+
+    ql_dbg(ql_dbg_edif, vha, 0x1329, "%s app auth fail\n", __func__);
+
+    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+        bsg_job->request_payload.sg_cnt, &appplogifail,
+        sizeof(struct auth_complete_cmd));
+
+    /* silent unaligned access warning */
+    portid.b.domain = appplogifail.u.d_id.b.domain;
+    portid.b.area = appplogifail.u.d_id.b.area;
+    portid.b.al_pa = appplogifail.u.d_id.b.al_pa;
+
+    switch (appplogifail.type) {
+    case PL_TYPE_WWPN:
+        fcport = qla2x00_find_fcport_by_wwpn(vha,
+            appplogifail.u.wwpn, 0);
+        SET_DID_STATUS(bsg_reply->result, DID_OK);
+        break;
+    case PL_TYPE_DID:
+        fcport = qla2x00_find_fcport_by_pid(vha, &portid);
+        if (!fcport)
+            ql_dbg(ql_dbg_edif, vha, 0x132a,
+                "%s d_id lookup failed: %x\n", __func__,
+                portid.b24);
+        SET_DID_STATUS(bsg_reply->result, DID_OK);
+        break;
+    default:
+        ql_dbg(ql_dbg_edif, vha, 0x132b,
+            "%s undefined type: %x\n", __func__,
+            appplogifail.type);
+        bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+        SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+        rval = -1;
+        break;
+    }
+
+    ql_dbg(ql_dbg_edif, vha, 0x132c,
+        "%s fcport is 0x%px\n", __func__, fcport);
+
+    if (fcport) {
+        /* set/reset edif values and flags */
+        ql_dbg(ql_dbg_edif, vha, 0x132d,
+            "%s reset the auth process - %8phC, loopid=%x portid=%06x.\n",
+            __func__, fcport->port_name, fcport->loop_id, fcport->d_id.b24);
+
+        if (qla_ini_mode_enabled(fcport->vha)) {
+            fcport->send_els_logo = 1;
+            qlt_schedule_sess_for_deletion(fcport);
+        }
+    }
+
+    return rval;
+}
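+
+/*
+ * Note: lookup misses above intentionally leave DID_OK in the reply;
+ * only an undefined request type is reported as DID_ERROR, so the app
+ * is not failed for sessions that are already gone.
+ */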
+
+/**
+ * qla_edif_app_getfcinfo - app would like to read session info (wwpn,
+ * nport id, [initiator|target] mode) for a specific session (by nport
+ * id) or for all sessions.
+ * @vha: host adapter pointer
+ * @bsg_job: user request pointer
+ */
+static int
+qla_edif_app_getfcinfo(scsi_qla_host_t *vha, bsg_job_t *bsg_job)
+{
+    int32_t rval = 0;
+    int32_t pcnt = 0;
+    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+    struct app_pinfo_req app_req;
+    struct app_pinfo_reply *app_reply;
+    port_id_t tdid;
+
+    ql_dbg(ql_dbg_edif, vha, 0x132e, "%s app get fcinfo\n", __func__);
+
+    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+        bsg_job->request_payload.sg_cnt, &app_req,
+        sizeof(struct app_pinfo_req));
+
+    app_reply = kzalloc((sizeof(struct app_pinfo_reply) +
+        sizeof(struct app_pinfo) * app_req.num_ports), GFP_KERNEL);
+
+    if (!app_reply) {
+        SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+        rval = -1;
+    } else {
+        struct fc_port *fcport = NULL, *tf;
+
+        app_reply->version = EDIF_VERSION1;
+
+        list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+            if (!(fcport->flags & FCF_FCSP_DEVICE))
+                continue;
+
+            tdid = app_req.remote_pid;
+
+            ql_dbg(ql_dbg_edif, vha, 0x132f,
+                "APP request entry - portid=%06x.\n", tdid.b24);
+
+            /* Ran out of space */
+            if (pcnt >= app_req.num_ports)
+                break;
+
+            if (tdid.b24 != 0 && tdid.b24 != fcport->d_id.b24)
+                continue;
+
+            if (!N2N_TOPO(vha->hw)) {
+                if (fcport->scan_state != QLA_FCPORT_FOUND)
+                    continue;
+
+                if (fcport->port_type == FCT_UNKNOWN && !fcport->fc4_features)
+                    rval = qla24xx_async_gffid(vha, fcport, true);
+
+                if (!rval && !(fcport->fc4_features & FC4_FF_TARGET ||
+                    fcport->port_type & (FCT_TARGET|FCT_NVME_TARGET)))
+                    continue;
+            }
+
+            rval = 0;
+
+            app_reply->ports[pcnt].version = EDIF_VERSION1;
+            app_reply->ports[pcnt].remote_type =
+                VND_CMD_RTYPE_UNKNOWN;
+            if (fcport->port_type & (FCT_NVME_TARGET|FCT_TARGET))
+                app_reply->ports[pcnt].remote_type |=
+                    VND_CMD_RTYPE_TARGET;
+            if (fcport->port_type & (FCT_NVME_INITIATOR|FCT_INITIATOR))
+                app_reply->ports[pcnt].remote_type |=
+                    VND_CMD_RTYPE_INITIATOR;
+
+            app_reply->ports[pcnt].remote_pid = fcport->d_id;
+
+            ql_dbg(ql_dbg_edif, vha, 0x1330,
+                "Found FC_SP fcport - nn %8phN pn %8phN pcnt %d portid=%06x\n",
+                fcport->node_name, fcport->port_name, pcnt, fcport->d_id.b24);
+
+            switch (fcport->edif.auth_state) {
+            case VND_CMD_AUTH_STATE_ELS_RCVD:
+                if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) {
+                    fcport->edif.auth_state = VND_CMD_AUTH_STATE_NEEDED;
+                    app_reply->ports[pcnt].auth_state =
+                        VND_CMD_AUTH_STATE_NEEDED;
+                } else {
+                    app_reply->ports[pcnt].auth_state =
+                        VND_CMD_AUTH_STATE_ELS_RCVD;
+                }
+                break;
+            default:
+                app_reply->ports[pcnt].auth_state = fcport->edif.auth_state;
+                break;
+            }
+
+            memcpy(app_reply->ports[pcnt].remote_wwpn,
+                fcport->port_name, 8);
+
+            app_reply->ports[pcnt].remote_state =
+                (atomic_read(&fcport->state) == FCS_ONLINE ? 1 : 0);
+
+            pcnt++;
+
+            if (tdid.b24 != 0)
+                break;
+        }
+        app_reply->port_count = pcnt;
+        SET_DID_STATUS(bsg_reply->result, DID_OK);
+    }
+
+    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+    bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+        bsg_job->reply_payload.sg_cnt, app_reply,
+        sizeof(struct app_pinfo_reply) + sizeof(struct app_pinfo) * pcnt);
+
+    kfree(app_reply);
+
+    return rval;
+}
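+
+/*
+ * Both the fcinfo and stats replies use the same trailing-array
+ * convention: the app states how many entries it can accept and the
+ * driver bounds its copy-out accordingly. A minimal sizing sketch
+ * (hypothetical count n taken from the request):
+ *
+ *	size_t n = app_req.num_ports;
+ *	size_t sz = sizeof(struct app_pinfo_reply) +
+ *	            n * sizeof(struct app_pinfo);
+ *	// valid slots are ports[0..n-1], hence the "pcnt >= n" bound
+ */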
+
+/**
+ * qla_edif_app_getstats - app would like to read various statistics info
+ * @vha: host adapter pointer
+ * @bsg_job: user request
+ */
+static int32_t
+qla_edif_app_getstats(scsi_qla_host_t *vha, bsg_job_t *bsg_job)
+{
+    int32_t rval = 0;
+    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+    uint32_t size;
+    struct app_sinfo_req app_req;
+    struct app_stats_reply *app_reply;
+    uint32_t pcnt = 0;
+
+    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+        bsg_job->request_payload.sg_cnt, &app_req,
+        sizeof(struct app_sinfo_req));
+    if (app_req.num_ports == 0) {
+        ql_dbg(ql_dbg_async, vha, 0x13a9,
+            "%s app did not indicate number of ports to return\n",
+            __func__);
+        SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+        rval = -1;
+    }
+
+    size = sizeof(struct app_stats_reply) +
+        (sizeof(struct app_sinfo) * app_req.num_ports);
+
+    app_reply = kzalloc(size, GFP_KERNEL);
+    if (!app_reply) {
+        SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+        rval = -1;
+    } else {
+        struct fc_port *fcport = NULL, *tf;
+
+        app_reply->version = EDIF_VERSION1;
+
+        list_for_each_entry_safe(fcport, tf, &vha->vp_fcports, list) {
+            if (fcport->edif.enable) {
+                if (pcnt >= app_req.num_ports)
+                    break;
+
+                app_reply->elem[pcnt].rekey_count =
+                    fcport->edif.rekey_cnt;
+                app_reply->elem[pcnt].tx_bytes =
+                    fcport->edif.tx_bytes;
+                app_reply->elem[pcnt].rx_bytes =
+                    fcport->edif.rx_bytes;
+
+                memcpy(app_reply->elem[pcnt].remote_wwpn,
+                    fcport->port_name, 8);
+
+                pcnt++;
+            }
+        }
+        app_reply->elem_count = pcnt;
+        SET_DID_STATUS(bsg_reply->result, DID_OK);
+    }
+
+    bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+    bsg_reply->reply_payload_rcv_len =
+        sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+        bsg_job->reply_payload.sg_cnt, app_reply,
+        sizeof(struct app_stats_reply) + (sizeof(struct app_sinfo) * pcnt));
+
+    kfree(app_reply);
+
+    return rval;
+}
+
+static int32_t
+qla_edif_ack(scsi_qla_host_t *vha, bsg_job_t *bsg_job)
+{
+    struct fc_port *fcport;
+    struct aen_complete_cmd ack;
+    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+    port_id_t portid;
+
+    sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+        bsg_job->request_payload.sg_cnt, &ack, sizeof(ack));
+
+    ql_dbg(ql_dbg_edif, vha, 0x1331,
+        "%s: %06x event_code %x\n",
+        __func__, ack.port_id.b24, ack.event_code);
+
+    /* silent unaligned access warning */
+    portid.b.domain = ack.port_id.b.domain;
+    portid.b.area = ack.port_id.b.area;
+    portid.b.al_pa = ack.port_id.b.al_pa;
+
+    fcport = qla2x00_find_fcport_by_pid(vha, &portid);
+    SET_DID_STATUS(bsg_reply->result, DID_OK);
+
+    if (!fcport) {
+        ql_dbg(ql_dbg_edif, vha, 0x1332,
+            "%s: unable to find fcport %06x\n",
+            __func__, ack.port_id.b24);
+        return 0;
+    }
+
+    switch (ack.event_code) {
+    case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN:
+        fcport->edif.sess_down_acked = 1;
+        break;
+    default:
+        break;
+    }
+    return 0;
+}
+
+static int qla_edif_consume_dbell(scsi_qla_host_t *vha, bsg_job_t *bsg_job)
+{
+    struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+    u32 sg_skip, reply_payload_len;
+    bool keep;
+    struct edb_node *dbnode = NULL;
+    struct edif_app_dbell ap;
+    int dat_size = 0;
+
+    sg_skip = 0;
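+    /*
+     * The loop below packs doorbell events back to back into the
+     * reply payload: an 8-byte header (event_code + event_data_size)
+     * followed by the event data, advancing sg_skip by the per-event
+     * total so the app can walk the buffer the same way.
+     */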
reply_payload_len = bsg_job->reply_payload.payload_len; + + while ((reply_payload_len - sg_skip) >= sizeof(struct edb_node)) { + + dbnode = qla_edb_getnext(vha); + if (dbnode != NULL) { + keep = true; + dat_size = 0; + ap.event_code = dbnode->ntype; + switch (dbnode->ntype) { + case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN: + case VND_CMD_AUTH_STATE_NEEDED: + ap.port_id = dbnode->u.plogi_did; + dat_size += sizeof(ap.port_id); + break; + case VND_CMD_AUTH_STATE_ELS_RCVD: + ap.port_id = dbnode->u.els_sid; + dat_size += sizeof(ap.port_id); + break; + case VND_CMD_AUTH_STATE_SAUPDATE_COMPL: + ap.port_id = dbnode->u.sa_aen.port_id; + memcpy(&ap.event_data, &dbnode->u, + sizeof(struct edif_sa_update_aen)); + dat_size += sizeof(struct edif_sa_update_aen); + break; + default: + keep = false; + ql_log(ql_log_warn, vha, 0x13a5, + "%s unknown DB type=%d %p\n", + __func__, dbnode->ntype, dbnode); + break; + } + ap.event_data_size = dat_size; + /* 8 = sizeof(ap.event_code + ap.event_data_size)*/ + dat_size += 8; + if (keep) + sg_skip += sg_copy_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + &ap, dat_size, sg_skip, false); + + ql_dbg(ql_dbg_edif, vha, 0x1333, + "%s Doorbell consumed : type=%d %p\n", + __func__, dbnode->ntype, dbnode); + + kfree(dbnode); + } else { + break; + } + } + + SET_DID_STATUS(bsg_reply->result, DID_OK); + bsg_reply->reply_payload_rcv_len = sg_skip; + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + + return 0; +} + + +static void __qla_edif_dbell_bsg_done(scsi_qla_host_t *vha, bsg_job_t *bsg_job, + u32 delay) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + uint32_t result; + uint32_t reply_payload_rcv_len; + + /* small sleep for doorbell events to accumulate */ + if (delay) + msleep(delay); + + qla_edif_consume_dbell(vha, bsg_job); + + result = bsg_reply->result; + reply_payload_rcv_len = bsg_reply->reply_payload_rcv_len; + bsg_job_done(bsg_job, result, reply_payload_rcv_len); +} + +static void qla_edif_dbell_bsg_done(scsi_qla_host_t *vha) +{ + unsigned long flags; + bsg_job_t *prev_bsg_job = NULL; + + spin_lock_irqsave(&vha->e_dbell.db_lock, flags); + if (vha->e_dbell.dbell_bsg_job) { + prev_bsg_job = vha->e_dbell.dbell_bsg_job; + vha->e_dbell.dbell_bsg_job = NULL; + } + spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); + + if (prev_bsg_job) + __qla_edif_dbell_bsg_done(vha, prev_bsg_job, 0); + + return; +} + +static int +qla_edif_dbell_bsg(scsi_qla_host_t *vha, bsg_job_t *bsg_job) +{ + unsigned long flags; + bool return_bsg = false; + + + /* flush previous dbell bsg */ + qla_edif_dbell_bsg_done(vha); + + spin_lock_irqsave(&vha->e_dbell.db_lock, flags); + if (list_empty(&vha->e_dbell.head) && DBELL_ACTIVE(vha)) { + /* + * when the next db event happens, bsg_job will return. + * Otherwise, timer will return it. 
+ */ + vha->e_dbell.dbell_bsg_job = bsg_job; + vha->e_dbell.bsg_expire = jiffies + 10*HZ; + } else { + return_bsg = true; + } + spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); + + if (return_bsg) + __qla_edif_dbell_bsg_done(vha, bsg_job, 1); + + return 0; +} + + +int32_t +qla_edif_app_mgmt(bsg_job_t *bsg_job) +{ + struct fc_bsg_request *bsg_request = bsg_job->request; + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + struct app_id appcheck; + bool done = true; + int32_t rval = 0; + uint32_t vnd_sc = bsg_request->rqst_data.h_vendor.vendor_cmd[1]; + u32 level = ql_dbg_edif; + + /* doorbell is high traffic */ + if (vnd_sc == QL_VND_SC_READ_DBELL) + level = 0; + + ql_dbg(level, vha, 0x13ae, "%s vnd subcmd=%x\n", + __func__, vnd_sc); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &appcheck, + sizeof(struct app_id)); + + if (!vha->hw->flags.edif_enabled || + test_bit(VPORT_DELETE, &vha->dpc_flags)) { + ql_dbg(level, vha, 0x13af, + "%s edif not enabled or vp delete. bsg ptr done %px\n", + __func__, bsg_job); + + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + } + + if (!qla_edif_app_check(vha, appcheck)) { + ql_dbg(level, vha, 0x13b0, + "%s app checked failed.\n", + __func__); + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + } + + switch (vnd_sc) { + case QL_VND_SC_SA_UPDATE: + done = false; + rval = qla24xx_sadb_update(bsg_job); + break; + case QL_VND_SC_APP_START: + rval = qla_edif_app_start(vha, bsg_job); + break; + case QL_VND_SC_APP_STOP: + rval = qla_edif_app_stop(vha, bsg_job); + break; + case QL_VND_SC_AUTH_OK: + rval = qla_edif_app_authok(vha, bsg_job); + break; + case QL_VND_SC_AUTH_FAIL: + rval = qla_edif_app_authfail(vha, bsg_job); + break; + case QL_VND_SC_GET_FCINFO: + rval = qla_edif_app_getfcinfo(vha, bsg_job); + break; + case QL_VND_SC_GET_STATS: + rval = qla_edif_app_getstats(vha, bsg_job); + break; + case QL_VND_SC_AEN_COMPLETE: + rval = qla_edif_ack(vha, bsg_job); + break; + case QL_VND_SC_READ_DBELL: + rval = qla_edif_dbell_bsg(vha, bsg_job); + done = false; + break; + default: + ql_dbg(ql_dbg_edif, vha, 0x1334, "%s unknown cmd=%x\n", + __func__, + bsg_request->rqst_data.h_vendor.vendor_cmd[1]); + rval = EXT_STATUS_INVALID_PARAM; + done = false; + break; + } + +done: + if (done) { + ql_dbg(level, vha, 0x139d, + "%s: %d bsg ptr done %px\n", __func__, __LINE__, bsg_job); + bsg_job_done(bsg_job, bsg_reply->result,bsg_reply->reply_payload_rcv_len); + } + + return rval; +} + + +static struct edif_sa_ctl * +qla_edif_add_sa_ctl(fc_port_t *fcport, struct qla_sa_update_frame *sa_frame, + int dir) +{ + struct edif_sa_ctl *sa_ctl; + struct qla_sa_update_frame *sap; + int index = sa_frame->fast_sa_index; + unsigned long flags = 0; + + sa_ctl = kzalloc(sizeof(*sa_ctl), GFP_KERNEL); + if (!sa_ctl) { + /* couldn't get space */ + ql_dbg(ql_dbg_edif, fcport->vha, 0x1335, + "unable to allocate SA CTL\n"); + return NULL; + } + + /* + * need to allocate sa_index here and save it + * in both sa_ctl->index and sa_frame->fast_sa_index; + * If alloc fails then delete sa_ctl and return NULL + */ + INIT_LIST_HEAD(&sa_ctl->next); + sap = &sa_ctl->sa_frame; + *sap = *sa_frame; + sa_ctl->index = index; + sa_ctl->fcport = fcport; + sa_ctl->flags = 0; + sa_ctl->state = 0L; + ql_dbg(ql_dbg_edif, fcport->vha, 0x1336, + "%s: Added sa_ctl %px, index %d, state 0x%lx\n", + __func__, 
sa_ctl, sa_ctl->index, sa_ctl->state); + spin_lock_irqsave(&fcport->edif.sa_list_lock, flags); + if (dir == SAU_FLG_TX) + list_add_tail(&sa_ctl->next, &fcport->edif.tx_sa_list); + else + list_add_tail(&sa_ctl->next, &fcport->edif.rx_sa_list); + spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags); + + return sa_ctl; +} + +void +qla_edif_flush_sa_ctl_lists(fc_port_t *fcport) +{ + struct edif_sa_ctl *sa_ctl, *tsa_ctl; + unsigned long flags = 0; + + spin_lock_irqsave(&fcport->edif.sa_list_lock, flags); + + list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.tx_sa_list, + next) { + list_del(&sa_ctl->next); + kfree(sa_ctl); + } + + list_for_each_entry_safe(sa_ctl, tsa_ctl, &fcport->edif.rx_sa_list, + next) { + list_del(&sa_ctl->next); + kfree(sa_ctl); + } + + spin_unlock_irqrestore(&fcport->edif.sa_list_lock, flags); +} + +struct edif_sa_ctl * +qla_edif_find_sa_ctl_by_index(fc_port_t *fcport, int index, int dir) +{ + struct edif_sa_ctl *sa_ctl, *tsa_ctl; + struct list_head *sa_list; + + if (dir == SAU_FLG_TX) + sa_list = &fcport->edif.tx_sa_list; + else + sa_list = &fcport->edif.rx_sa_list; + + list_for_each_entry_safe(sa_ctl, tsa_ctl, sa_list, next) { + if (test_bit(EDIF_SA_CTL_USED, &sa_ctl->state) && + sa_ctl->index == index) + return sa_ctl; + } + return NULL; +} + +/* add the sa to the correct list */ +static int +qla24xx_check_sadb_avail_slot(bsg_job_t *bsg_job, fc_port_t *fcport, + struct qla_sa_update_frame *sa_frame) +{ + struct edif_sa_ctl *sa_ctl = NULL; + int dir; + uint16_t sa_index; + + dir = (sa_frame->flags & SAU_FLG_TX); + + /* map the spi to an sa_index */ + sa_index = qla_edif_sadb_get_sa_index(fcport, sa_frame); + if (sa_index == RX_DELETE_NO_EDIF_SA_INDEX) { + /* process rx delete */ + ql_dbg(ql_dbg_edif, fcport->vha, 0x1337, + "%s: rx delete for lid 0x%x, spi 0x%x, no entry found\n", + __func__, fcport->loop_id, sa_frame->spi); + + /* build and send the aen */ + fcport->edif.rx_sa_set = 1; + fcport->edif.rx_sa_pending = 0; + qla_edb_eventcreate(fcport->vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + QL_VND_SA_STAT_SUCCESS, + QL_VND_RX_SA_KEY, fcport); + + /* force a return of good bsg status; */ + return RX_DELETE_NO_EDIF_SA_INDEX; + } else if (sa_index == INVALID_EDIF_SA_INDEX) { + ql_dbg(ql_dbg_edif, fcport->vha, 0x1338, + "%s: Failed to get sa_index for spi 0x%x, dir: %d\n", + __func__, sa_frame->spi, dir); + return INVALID_EDIF_SA_INDEX; + } + + ql_dbg(ql_dbg_edif, fcport->vha, 0x1339, + "%s: index %d allocated to spi 0x%x, dir: %d, nport_handle: 0x%x\n", + __func__, sa_index, sa_frame->spi, dir, fcport->loop_id); + + /* This is a local copy of sa_frame. 
*/ + sa_frame->fast_sa_index = sa_index; + /* create the sa_ctl */ + sa_ctl = qla_edif_add_sa_ctl(fcport, sa_frame, dir); + if (!sa_ctl) { + ql_dbg(ql_dbg_edif, fcport->vha, 0x133a, + "%s: Failed to add sa_ctl for spi 0x%x, dir: %d, sa_index: %d\n", + __func__, sa_frame->spi, dir, sa_index); + return -1; + } + + set_bit(EDIF_SA_CTL_USED, &sa_ctl->state); + + if (dir == SAU_FLG_TX) + fcport->edif.tx_rekey_cnt++; + else + fcport->edif.rx_rekey_cnt++; + + ql_dbg(ql_dbg_edif, fcport->vha, 0x133b, + "%s: Found sa_ctl %px, index %d, state 0x%lx, tx_cnt %d, rx_cnt %d, nport_handle: 0x%x\n", + __func__, sa_ctl, sa_ctl->index, sa_ctl->state, + fcport->edif.tx_rekey_cnt, + fcport->edif.rx_rekey_cnt, fcport->loop_id); + + return 0; +} + +#define QLA_SA_UPDATE_FLAGS_RX_KEY 0x0 +#define QLA_SA_UPDATE_FLAGS_TX_KEY 0x2 +#define EDIF_MSLEEP_INTERVAL 100 +#define EDIF_RETRY_COUNT 50 + +int +qla24xx_sadb_update(bsg_job_t *bsg_job) +{ + struct fc_bsg_reply *bsg_reply = bsg_job->reply; + struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); + scsi_qla_host_t *vha = shost_priv(host); + fc_port_t *fcport = NULL; + srb_t *sp = NULL; + struct edif_list_entry *edif_entry = NULL; + int found = 0; + int rval = 0; + int result = 0, cnt; + struct qla_sa_update_frame sa_frame; + struct srb_iocb *iocb_cmd; + port_id_t portid; + + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x133c, + "%s entered, vha: 0x%px\n", __func__, vha); + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, &sa_frame, + sizeof(struct qla_sa_update_frame)); + + /* Check if host is online */ + if (!vha->flags.online) { + ql_log(ql_log_warn, vha, 0x13a0, "Host is not online\n"); + rval = -EIO; + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + } + + if (DBELL_INACTIVE(vha)) { + ql_log(ql_log_warn, vha, 0x13a1, "App not started\n"); + rval = -EIO; + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + } + + /* silent unaligned access warning */ + portid.b.domain = sa_frame.port_id.b.domain; + portid.b.area = sa_frame.port_id.b.area; + portid.b.al_pa = sa_frame.port_id.b.al_pa; + + fcport = qla2x00_find_fcport_by_pid(vha, &portid); + if (fcport) { + found = 1; + if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_TX_KEY) + fcport->edif.tx_bytes = 0; + if (sa_frame.flags == QLA_SA_UPDATE_FLAGS_RX_KEY) + fcport->edif.rx_bytes = 0; + } + + if (!found) { + ql_dbg(ql_dbg_edif, vha, 0x133d, "Failed to find port= %06x\n", + sa_frame.port_id.b24); + rval = -EINVAL; + SET_DID_STATUS(bsg_reply->result, DID_TARGET_FAILURE); + goto done; + } + + /* make sure the nport_handle is valid */ + if (fcport->loop_id == FC_NO_LOOP_ID) { + ql_dbg(ql_dbg_edif, vha, 0x133e, + "%s: %8phN lid=FC_NO_LOOP_ID, spi: 0x%x, DS %d, returning NO_CONNECT\n", + __func__, fcport->port_name, sa_frame.spi, + fcport->disc_state); + rval = -EINVAL; + SET_DID_STATUS(bsg_reply->result, DID_NO_CONNECT); + goto done; + } + + /* allocate and queue an sa_ctl */ + result = qla24xx_check_sadb_avail_slot(bsg_job, fcport, &sa_frame); + + // failure of bsg + if (result == INVALID_EDIF_SA_INDEX) { + ql_dbg(ql_dbg_edif, vha, 0x133f, + "%s: %8phN, skipping update.\n", + __func__, fcport->port_name); + rval = -EINVAL; + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + + // rx delete failure + } else if (result == RX_DELETE_NO_EDIF_SA_INDEX) { + ql_dbg(ql_dbg_edif, vha, 0x1340, + "%s: %8phN, skipping rx delete.\n", + __func__, fcport->port_name); + SET_DID_STATUS(bsg_reply->result, DID_OK); + goto done; + } + + ql_dbg(ql_dbg_edif, vha, 0x1341, + "%s: 
%8phN, sa_index in sa_frame: %d flags %xh\n", + __func__, fcport->port_name, sa_frame.fast_sa_index, + sa_frame.flags); + + /* looking for rx index and delete */ + if (((sa_frame.flags & SAU_FLG_TX) == 0) && + (sa_frame.flags & SAU_FLG_INV)) { + uint16_t nport_handle = fcport->loop_id; + uint16_t sa_index = sa_frame.fast_sa_index; + + /* + * make sure we have an existing rx key, otherwise just process + * this as a straight delete just like TX + * This is NOT a normal case, it indicates an error recovery or key cleanup + * by the ipsec code above us. + */ + edif_entry = qla_edif_list_find_sa_index(fcport, fcport->loop_id); + if (!edif_entry) { + ql_dbg(ql_dbg_edif, vha, 0x1342, + "%s: WARNING: no active sa_index for nport_handle 0x%x, forcing " + "delete for sa_index 0x%x\n", __func__, fcport->loop_id, sa_index); + goto force_rx_delete; + } + + // + // if we have a forced delete for rx, remove the sa_index from the edif list + // and proceed with normal delete. The rx delay timer should not be running + // + if ( (sa_frame.flags & SAU_FLG_FORCE_DELETE) == SAU_FLG_FORCE_DELETE) { + + qla_edif_list_delete_sa_index(fcport, edif_entry); + ql_dbg(ql_dbg_edif, vha, 0x1343, + "%s: FORCE DELETE flag found for nport_handle 0x%x, sa_index 0x%x, " + "forcing DELETE\n", __func__, fcport->loop_id, sa_index); + kfree(edif_entry); + goto force_rx_delete; + } + + /* + * delayed rx delete + * + * if delete_sa_index is not invalid then there is already + * a delayed index in progress, return bsg bad status + */ + if (edif_entry->delete_sa_index != INVALID_EDIF_SA_INDEX) { + struct edif_sa_ctl *sa_ctl; + + ql_dbg(ql_dbg_edif, vha, 0x1344, + "%s: delete for lid 0x%x, delete_sa_index %d is pending\n", + __func__, edif_entry->handle, edif_entry->delete_sa_index); + + /* free up the sa_ctl that was allocated with the sa_index */ + sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, sa_index, + (sa_frame.flags & SAU_FLG_TX)); + if (sa_ctl) { + ql_dbg(ql_dbg_edif, vha, 0x1345, + "%s: freeing sa_ctl for index %d\n", + __func__, sa_ctl->index); + qla_edif_free_sa_ctl(fcport, sa_ctl, sa_ctl->index); + } + + /* release the sa_index */ + ql_dbg(ql_dbg_edif, vha, 0x1346, + "%s: freeing sa_index %d, nph: 0x%x\n", + __func__, sa_index, nport_handle); + qla_edif_sadb_delete_sa_index(fcport, nport_handle, sa_index); + + rval = -EINVAL; + SET_DID_STATUS(bsg_reply->result, DID_ERROR); + goto done; + } + + fcport->edif.rekey_cnt++; + + /* configure and start the rx delay timer */ + edif_entry->fcport = fcport; + edif_entry->timer.expires = jiffies + RX_DELAY_DELETE_TIMEOUT * HZ; + + ql_dbg(ql_dbg_edif, vha, 0x1347, + "%s: adding timer, entry: %p, delete sa_index %d, lid 0x%x to edif_list\n", + __func__, edif_entry, sa_index, nport_handle); + + /* + * Start the timer when we queue the delayed rx delete. + * This is an activity timer that goes off if we have not + * received packets with the new sa_index + */ + add_timer(&edif_entry->timer); + + /* + * sa_delete for rx key with an active rx key including this one + * add the delete rx sa index to the hash so we can look for it + * in the rsp queue. Do this after making any changes to the + * edif_entry as part of the rx delete. + */ + + ql_dbg(ql_dbg_edif, vha, 0x1348, + "%s: delete sa_index %d, lid 0x%x to edif_list. 
bsg done ptr %p\n", + __func__, sa_index, nport_handle, bsg_job); + + edif_entry->delete_sa_index = sa_index; + + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_reply->result = DID_OK << 16; + + ql_dbg(ql_dbg_edif, vha, 0x1349, + "%s: SA_DELETE vha: 0x%px nport_handle: 0x%x sa_index: %d successfully queued \n", + __func__, vha, fcport->loop_id, sa_index); + goto done; + + /* + * rx index and update + * add the index to the list and continue with normal update + */ + } else if (((sa_frame.flags & SAU_FLG_TX) == 0) && + ((sa_frame.flags & SAU_FLG_INV) == 0)) { + /* sa_update for rx key */ + uint32_t nport_handle = fcport->loop_id; + uint16_t sa_index = sa_frame.fast_sa_index; + int result; + + /* + * add the update rx sa index to the hash so we can look for it + * in the rsp queue and continue normally + */ + + ql_dbg(ql_dbg_edif, vha, 0x134a, + "%s: adding update sa_index %d, lid 0x%x to edif_list\n", + __func__, sa_index, nport_handle); + + result = qla_edif_list_add_sa_update_index(fcport, sa_index, + nport_handle); + if (result) { + ql_dbg(ql_dbg_edif, vha, 0x134b, + "%s: SA_UPDATE failed to add new sa index %d to list for lid 0x%x\n", + __func__, sa_index, nport_handle); + } + } + if (sa_frame.flags & SAU_FLG_GMAC_MODE) + fcport->edif.aes_gmac = 1; + else + fcport->edif.aes_gmac = 0; + +force_rx_delete: + /* + * sa_update for both rx and tx keys, sa_delete for tx key + * immediately process the request + */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) { + rval = -ENOMEM; + SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); + goto done; + } + + + sp->type = SRB_SA_UPDATE; + sp->name = "bsg_sa_update"; + sp->u.bsg_job = bsg_job; + sp->free = qla2x00_rel_sp; + sp->done = qla2x00_bsg_job_done; + iocb_cmd = &sp->u.iocb_cmd; + iocb_cmd->u.sa_update.sa_frame = sa_frame; + cnt = 0; +retry: + + rval = qla2x00_start_sp(sp); + switch (rval) { + case QLA_SUCCESS: + break; + case EAGAIN: + msleep(EDIF_MSLEEP_INTERVAL); + cnt++; + if (cnt < EDIF_RETRY_COUNT) + goto retry; + + fallthrough; + default: + ql_dbg(ql_dbg_edif, vha, 0x134c, + "%s qla2x00_start_sp failed=%d.\n", __func__, rval); + + qla2x00_rel_sp(sp); + rval = -EIO; + SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY); + goto done; + } + + fcport->edif.rekey_cnt++; + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + SET_DID_STATUS(bsg_reply->result, DID_OK); + + return 0; + +/* + * send back error status + */ +done: + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + ql_dbg(ql_dbg_edif, vha, 0x134e, + "%s:status: FAIL, result: 0x%x, bsg ptr done %px\n", + __func__, bsg_reply->result, bsg_job); + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + + return 0; +} + +static void +qla_enode_free(scsi_qla_host_t *vha, struct enode *node) +{ + node->ntype = N_UNDEF; + kfree(node); +} + +/** + * qla_enode_init - initialize enode structs & lock + * @vha: host adapter pointer + * + * should only be called when driver attaching + */ +void +qla_enode_init(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + char name[32]; + + if (vha->pur_cinfo.enode_flags == ENODE_ACTIVE) { + /* list still active - error */ + ql_dbg(ql_dbg_edif, vha, 0x134f, "%s enode still active\n", + __func__); + return; + } + + /* initialize lock which protects pur_core & init list */ + spin_lock_init(&vha->pur_cinfo.pur_lock); + INIT_LIST_HEAD(&vha->pur_cinfo.head); + + snprintf(name, sizeof(name), "%s_%d_purex", QLA2XXX_DRIVER_NAME, + ha->pdev->device); +} + +/** + * qla_enode_stop - stop and clear and enode data + * 
@vha: host adapter pointer + * + * called when app notified it is exiting + */ +void +qla_enode_stop(scsi_qla_host_t *vha) +{ + unsigned long flags; + struct enode *node, *q; + + if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) { + /* doorbell list not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x1350, + "%s enode not active\n", __func__); + return; + } + + /* grab lock so list doesn't move */ + spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); + + vha->pur_cinfo.enode_flags &= ~ENODE_ACTIVE; /* mark it not active */ + + /* hopefully this is a null list at this point */ + list_for_each_entry_safe(node, q, &vha->pur_cinfo.head, list) { + ql_dbg(ql_dbg_edif, vha, 0x1351, "%s freeing enode type=" + "%x, cnt=%x\n", __func__, node->ntype, + node->dinfo.nodecnt); + list_del_init(&node->list); + spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); + qla_enode_free(vha, node); + spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); + } + spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); +} + + +void qla_enode_clear(scsi_qla_host_t *vha, port_id_t portid) +{ + unsigned long flags; + struct enode *e, *tmp; + struct purexevent *purex; + LIST_HEAD(enode_list); + + + if (vha->pur_cinfo.enode_flags != ENODE_ACTIVE) { + ql_dbg(ql_dbg_edif, vha, 0x1352, + "%s enode not active\n", __func__); + return; + } + spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); + list_for_each_entry_safe(e, tmp, &vha->pur_cinfo.head, list) { + purex = &e->u.purexinfo; + if(purex->pur_info.pur_sid.b24 == portid.b24) { + ql_dbg(ql_dbg_edif, vha, 0x1353, + "%s free ELS sid=%x. xchg %x, nb=%xh\n", + __func__, portid.b24, + purex->pur_info.pur_rx_xchg_address, + purex->pur_info.pur_bytes_rcvd); + + list_del_init(&e->list); + list_add_tail(&e->list, &enode_list); + } + } + + spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); + + list_for_each_entry_safe(e, tmp, &enode_list, list) { + list_del_init(&e->list); + qla_enode_free(vha, e); + } + + return; +} + +/* + * allocate enode struct and populate buffer + * returns: enode pointer with buffers + * NULL on error + */ +static struct enode * +qla_enode_alloc(scsi_qla_host_t *vha, uint32_t ntype) +{ + struct enode *node; + struct purexevent *purex; + + node = kzalloc(RX_ELS_SIZE, GFP_ATOMIC); + if (!node) + return NULL; + + purex = &node->u.purexinfo; + purex->msgp = (u8*) (node + 1); + purex->msgp_len = ELS_MAX_PAYLOAD; + + node->ntype = ntype; + INIT_LIST_HEAD(&node->list); + return node; +} + +static void +qla_enode_add(scsi_qla_host_t *vha, struct enode *ptr) +{ + unsigned long flags; + + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x1354, + "%s add enode for type=%x, cnt=%x\n", + __func__, ptr->ntype, ptr->dinfo.nodecnt); + + spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); + list_add_tail(&ptr->list, &vha->pur_cinfo.head); + spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); + + return; +} + +static struct enode * +qla_enode_find(scsi_qla_host_t *vha, uint32_t ntype, uint32_t p1, uint32_t p2) +{ + struct enode *node_rtn = NULL; + struct enode *list_node, *q; + unsigned long flags; + uint32_t sid; + uint32_t rw_flag; + struct purexevent *purex; + + /* secure the list from moving under us */ + spin_lock_irqsave(&vha->pur_cinfo.pur_lock, flags); + + list_for_each_entry_safe(list_node, q, &vha->pur_cinfo.head, list) { + + /* node type determines what p1 and p2 are */ + purex = &list_node->u.purexinfo; + sid = p1; + rw_flag = p2; + + if (purex->pur_info.pur_sid.b24 == sid) { + /* found it and its complete */ + node_rtn = list_node; + list_del(&list_node->list); + 
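+			/*
+			 * Unlinked under pur_lock; ownership of the node
+			 * passes to the caller, which releases it with
+			 * qla_enode_free() once the payload has been
+			 * copied out (see qla_pur_get_pending()).
+			 */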
break; + } + } + + spin_unlock_irqrestore(&vha->pur_cinfo.pur_lock, flags); + + return node_rtn; +} + +/** + * qla_pur_get_pending - read/return authentication message sent + * from remote port + * @vha: host adapter pointer + * @fcport: session pointer + * @bsg_job: user request where the message is copy to. + */ +static int +qla_pur_get_pending(scsi_qla_host_t *vha, fc_port_t *fcport, bsg_job_t *bsg_job) +{ + struct enode *ptr; + struct purexevent *purex; + struct qla_bsg_auth_els_reply *rpl = + (struct qla_bsg_auth_els_reply *)bsg_job->reply; + + bsg_job->reply_len = sizeof(*rpl); + + ptr = qla_enode_find(vha, N_PUREX, fcport->d_id.b24, PUR_GET); + if (!ptr) { + ql_dbg(ql_dbg_edif, vha, 0x1355, + "%s no enode data found for %8phN sid=%06x\n", + __func__, fcport->port_name, fcport->d_id.b24); + SET_DID_STATUS(rpl->r.result, DID_IMM_RETRY); + return -EIO; + } + + /* + * enode is now off the linked list and is ours to deal with + */ + purex = &ptr->u.purexinfo; + + /* Copy info back to caller */ + rpl->rx_xchg_address = purex->pur_info.pur_rx_xchg_address; + + SET_DID_STATUS(rpl->r.result, DID_OK); + rpl->r.reply_payload_rcv_len = + sg_pcopy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, purex->msgp, + purex->pur_info.pur_bytes_rcvd, 0); + + /* data copy / passback completed - destroy enode */ + qla_enode_free(vha, ptr); + + return 0; +} + +/* it is assume qpair lock is held */ +static int +qla_els_reject_iocb(scsi_qla_host_t *vha, struct qla_qpair *qp, + struct qla_els_pt_arg *a) +{ + struct els_entry_24xx *els_iocb; + + els_iocb = __qla2x00_alloc_iocbs(qp, NULL); + if (!els_iocb) { + ql_log(ql_log_warn, vha, 0x139a, + "qla2x00_alloc_iocbs failed.\n"); + return QLA_FUNCTION_FAILED; + } + + qla_els_pt_iocb(vha, els_iocb, a); + + ql_dbg(ql_dbg_edif, vha, 0x1356, + "Sending ELS reject ox_id %04x s:%06x -> d:%06x\n", + a->ox_id, a->sid.b24, a->did.b24); + ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, vha, 0x1357, + vha->hw->elsrej.c, sizeof(*vha->hw->elsrej.c)); + /* flush iocb to mem before notifying hw doorbell */ + wmb(); + qla2x00_start_iocbs(vha, qp->req); + return 0; +} + +void +qla_edb_init(scsi_qla_host_t *vha) +{ + if (DBELL_ACTIVE(vha)) { + /* list already init'd - error */ + ql_dbg(ql_dbg_edif, vha, 0x1358, + "edif db already initialized, cannot reinit\n"); + return; + } + + /* initialize lock which protects doorbell & init list */ + spin_lock_init(&vha->e_dbell.db_lock); + INIT_LIST_HEAD(&vha->e_dbell.head); +} + +void qla_edb_clear(scsi_qla_host_t *vha, port_id_t portid) +{ + unsigned long flags; + struct edb_node *e, *tmp; + port_id_t sid; + LIST_HEAD(edb_list); + + + if (DBELL_INACTIVE(vha)) { + /* doorbell list not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x1359, + "%s doorbell not enabled\n", __func__); + return; + } + /* grab lock so list doesn't move */ + spin_lock_irqsave(&vha->e_dbell.db_lock, flags); + + list_for_each_entry_safe(e, tmp, &vha->e_dbell.head, list) { + switch (e->ntype) { + case VND_CMD_AUTH_STATE_NEEDED: + case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN: + sid = e->u.plogi_did; + break; + case VND_CMD_AUTH_STATE_ELS_RCVD: + sid = e->u.els_sid; + break; + case VND_CMD_AUTH_STATE_SAUPDATE_COMPL: + /* app wants to see this */ + continue; + default: + ql_log(ql_log_warn, vha, 0x13a6, + "%s unknown type: %x\n", __func__, e->ntype); + sid.b24 = 0; + break; + } + if(sid.b24 == portid.b24) { + ql_dbg(ql_dbg_edif, vha, 0x135a, + "%s Doorbell free : type=%x %p\n", + __func__, e->ntype, e); + list_del_init(&e->list); + list_add_tail(&e->list, 
&edb_list); + } + } + + spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); + + list_for_each_entry_safe(e, tmp, &edb_list, list) + qla_edb_node_free(vha, e); +} + +// function called when app is stopping + +void +qla_edb_stop(scsi_qla_host_t *vha) +{ + unsigned long flags; + struct edb_node *node, *q; + + if (DBELL_INACTIVE(vha)) { + /* doorbell list not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x135b, + "%s doorbell not enabled\n", __func__); + return; + } + + /* grab lock so list doesn't move */ + spin_lock_irqsave(&vha->e_dbell.db_lock, flags); + + vha->e_dbell.db_flags &= ~EDB_ACTIVE; /* mark it not active */ + /* hopefully this is a null list at this point */ + list_for_each_entry_safe(node, q, &vha->e_dbell.head, list) { + ql_dbg(ql_dbg_edif, vha, 0x135c, + "%s freeing edb_node type=%x\n", + __func__, node->ntype); + qla_edb_node_free(vha, node); + } + spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); + + qla_edif_dbell_bsg_done(vha); +} + +static struct edb_node * +qla_edb_node_alloc(scsi_qla_host_t *vha, uint32_t ntype) +{ + struct edb_node *node; + + node = kzalloc(sizeof(*node), GFP_ATOMIC); + if (!node) { + /* couldn't get space */ + ql_dbg(ql_dbg_edif, vha, 0x135d, + "edb node unable to be allocated\n"); + return NULL; + } + + node->ntype = ntype; + INIT_LIST_HEAD(&node->list); + return node; +} + +/* adds a already alllocated enode to the linked list */ +static bool +qla_edb_node_add(scsi_qla_host_t *vha, struct edb_node *ptr) +{ + unsigned long flags; + + if (DBELL_INACTIVE(vha)) { + /* doorbell list not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x135e, + "%s doorbell not enabled\n", __func__); + return false; + } + + spin_lock_irqsave(&vha->e_dbell.db_lock, flags); + list_add_tail(&ptr->list, &vha->e_dbell.head); + spin_unlock_irqrestore(&vha->e_dbell.db_lock, flags); + + return true; +} + +/* adds event to doorbell list */ +void +qla_edb_eventcreate(scsi_qla_host_t *vha, uint32_t dbtype, + uint32_t data, uint32_t data2, fc_port_t *sfcport) +{ + struct edb_node *edbnode; + fc_port_t *fcport = sfcport; + port_id_t id; + + if (!vha->hw->flags.edif_enabled) { + /* edif not enabled */ + return; + } + + if (DBELL_INACTIVE(vha)) { + if (fcport) + fcport->edif.auth_state = dbtype; + /* doorbell list not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x135f, + "%s doorbell not enabled (type=%d\n", __func__, dbtype); + return; + } + + edbnode = qla_edb_node_alloc(vha, dbtype); + if (!edbnode) { + ql_dbg(ql_dbg_edif, vha, 0x1360, + "%s unable to alloc db node\n", __func__); + return; + } + + if (!fcport) { + id.b.domain = (data >> 16) & 0xff; + id.b.area = (data >> 8) & 0xff; + id.b.al_pa = data & 0xff; + fcport = qla2x00_find_fcport_by_pid(vha, &id); + if (!fcport) { + ql_dbg(ql_dbg_edif, vha, 0x1362, + "%s can't find fcport for sid= 0x%x - ignoring\n", + __func__, id.b24); + kfree(edbnode); + return; + } + } + + /* populate the edb node */ + switch (dbtype) { + case VND_CMD_AUTH_STATE_NEEDED: + case VND_CMD_AUTH_STATE_SESSION_SHUTDOWN: + edbnode->u.plogi_did.b24 = fcport->d_id.b24; + break; + case VND_CMD_AUTH_STATE_ELS_RCVD: + edbnode->u.els_sid.b24 = fcport->d_id.b24; + break; + case VND_CMD_AUTH_STATE_SAUPDATE_COMPL: + edbnode->u.sa_aen.port_id = fcport->d_id; + edbnode->u.sa_aen.status = data; + edbnode->u.sa_aen.key_type = data2; + edbnode->u.sa_aen.version = EDIF_VERSION1; + break; + default: + ql_dbg(ql_dbg_edif, vha, 0x1363, + "%s unknown type: %x\n", __func__, dbtype); + kfree(edbnode); + edbnode = NULL; + break; + } + + if (edbnode) { + if (!qla_edb_node_add(vha, edbnode)) { + 
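+		/*
+		 * The add can fail if the doorbell was torn down after
+		 * the DBELL_INACTIVE check above (qla_edb_node_add()
+		 * re-checks the doorbell state), so drop the event and
+		 * free the node.
+		 */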
ql_dbg(ql_dbg_edif, vha, 0x1364, + "%s unable to add dbnode\n", __func__); + kfree(edbnode); + return; + } + ql_dbg(ql_dbg_edif, vha, 0x1365, + "%s Doorbell produced : type=%d %p\n", __func__, dbtype, edbnode); + qla_edif_dbell_bsg_done(vha); + if (fcport) + fcport->edif.auth_state = dbtype; + } +} + +void +qla_edif_timer(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (!vha->vp_idx && N2N_TOPO(ha) && ha->flags.n2n_fw_acc_sec) { + if (DBELL_INACTIVE(vha) && + ha->edif_post_stop_cnt_down) { + ha->edif_post_stop_cnt_down--; + + /* turn off auto 'Plogi Acc + secure=1' feature + * Set Add FW option[3] BIT_15, if. + */ + if (ha->edif_post_stop_cnt_down == 0) { + ql_dbg(ql_dbg_async, vha, 0x139b, + "%s chip reset to turn off PLOGI ACC + secure\n", + __func__); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + } + } else { + ha->edif_post_stop_cnt_down = 60; + } + } + + if (vha->e_dbell.dbell_bsg_job && time_after_eq(jiffies, vha->e_dbell.bsg_expire)) + qla_edif_dbell_bsg_done(vha); +} + + +static void qla_noop_sp_done(srb_t *sp, int res) +{ + sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +} + +/* + * Called from work queue + * build and send the sa_update iocb to delete an rx sa_index + */ +int +qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *vha, struct qla_work_evt *e) +{ + srb_t *sp; + fc_port_t *fcport = NULL; + struct srb_iocb *iocb_cmd = NULL; + int rval = QLA_SUCCESS; + struct edif_sa_ctl *sa_ctl = e->u.sa_update.sa_ctl; + uint16_t nport_handle = e->u.sa_update.nport_handle; + + ql_dbg(ql_dbg_edif, vha, 0x1366, + "%s: starting, sa_ctl: %px\n", __func__, sa_ctl); + + if (!sa_ctl) { + ql_dbg(ql_dbg_edif, vha, 0x1367, + "sa_ctl allocation failed\n"); + rval = -ENOMEM; + goto done; + } + + fcport = sa_ctl->fcport; + + /* Alloc SRB structure */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) { + ql_dbg(ql_dbg_edif, vha, 0x1368, + "SRB allocation failed\n"); + rval = -ENOMEM; + goto done; + } + + fcport->flags |= FCF_ASYNC_SENT; + iocb_cmd = &sp->u.iocb_cmd; + iocb_cmd->u.sa_update.sa_ctl = sa_ctl; + + ql_dbg(ql_dbg_edif, vha, 0x1369, + "Enter: SA REPL portid=%06x, sa_ctl %px, index %x, nport_handle: 0x%x\n", + fcport->d_id.b24, sa_ctl, sa_ctl->index, nport_handle); + /* + * if this is a sadb cleanup delete, mark it so the isr can + * take the correct action + */ + if (sa_ctl->flags & EDIF_SA_CTL_FLG_CLEANUP_DEL) { + /* mark this srb as a cleanup delete */ + sp->flags |= SRB_EDIF_CLEANUP_DELETE; + ql_dbg(ql_dbg_edif, vha, 0x136a, + "%s: sp 0x%px flagged as cleanup delete\n", __func__, sp); + } + + sp->type = SRB_SA_REPLACE; + sp->name = "SA_REPLACE"; + sp->fcport = fcport; + sp->free = qla2x00_rel_sp; + sp->done = qla_noop_sp_done; + + rval = qla2x00_start_sp(sp); + + if (rval != QLA_SUCCESS) { + goto done_free_sp; + } + + return rval; +done_free_sp: + kref_put(&sp->cmd_kref, qla2x00_sp_release); + fcport->flags &= ~FCF_ASYNC_SENT; +done: + if (fcport) + fcport->flags &= ~FCF_ASYNC_ACTIVE; + return rval; +} + + +void qla24xx_sa_update_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb) +{ + int itr = 0; + struct scsi_qla_host *vha = sp->vha; + struct qla_sa_update_frame *sa_frame = + &sp->u.iocb_cmd.u.sa_update.sa_frame; + u8 flags = 0; + + switch (sa_frame->flags & (SAU_FLG_INV | SAU_FLG_TX)) { + case 0: + ql_dbg(ql_dbg_edif, vha, 0x136b, + "%s: EDIF SA UPDATE RX IOCB vha: 0x%px index: %d\n", + __func__, vha, sa_frame->fast_sa_index); + break; + case 1: + ql_dbg(ql_dbg_edif, vha, 
0x136c, + "%s: EDIF SA DELETE RX IOCB vha: 0x%px index: %d\n", + __func__, vha, sa_frame->fast_sa_index); + flags |= SA_FLAG_INVALIDATE; + break; + case 2: + ql_dbg(ql_dbg_edif, vha, 0x136d, + "%s: EDIF SA UPDATE TX IOCB vha: 0x%px index: %d\n", + __func__, vha, sa_frame->fast_sa_index); + flags |= SA_FLAG_TX; + break; + case 3: + ql_dbg(ql_dbg_edif, vha, 0x136e, + "%s: EDIF SA DELETE TX IOCB vha: 0x%px index: %d\n", + __func__, vha, sa_frame->fast_sa_index); + flags |= SA_FLAG_TX | SA_FLAG_INVALIDATE; + break; + } + + sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE; + sa_update_iocb->entry_count = 1; + sa_update_iocb->sys_define = 0; + sa_update_iocb->entry_status = 0; + sa_update_iocb->handle = sp->handle; + sa_update_iocb->u.nport_handle = cpu_to_le16(sp->fcport->loop_id); + sa_update_iocb->vp_index = sp->fcport->vha->vp_idx; + sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; + sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area; + sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain; + + sa_update_iocb->flags = flags; + sa_update_iocb->salt = cpu_to_le32(sa_frame->salt); + sa_update_iocb->spi = cpu_to_le32(sa_frame->spi); + sa_update_iocb->sa_index = cpu_to_le16(sa_frame->fast_sa_index); + + sa_update_iocb->sa_control |= SA_CNTL_ENC_FCSP; + if (sp->fcport->edif.aes_gmac) + sa_update_iocb->sa_control |= SA_CNTL_AES_GMAC; + + if (sa_frame->flags & SAU_FLG_KEY256) { + sa_update_iocb->sa_control |= SA_CNTL_KEY256; + for (itr = 0; itr < 32; itr++) + sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr]; + } else { + sa_update_iocb->sa_control |= SA_CNTL_KEY128; + for (itr = 0; itr < 16; itr++) + sa_update_iocb->sa_key[itr] = sa_frame->sa_key[itr]; + } + + ql_dbg(ql_dbg_edif, vha, 0x136f, + "%s SAU Port ID = %02x%02x%02x, flags=%xh, index=%u, ctl=%xh, SPI 0x%x flags 0x%x " + "hdl=%x gmac %d\n", + __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1], + sa_update_iocb->port_id[0], sa_update_iocb->flags, sa_update_iocb->sa_index, + sa_update_iocb->sa_control, sa_update_iocb->spi, sa_frame->flags, sp->handle, + sp->fcport->edif.aes_gmac); + + if (sa_frame->flags & SAU_FLG_TX) + sp->fcport->edif.tx_sa_pending = 1; + else + sp->fcport->edif.rx_sa_pending = 1; + + sp->fcport->vha->qla_stats.control_requests++; +} + +void +qla24xx_sa_replace_iocb(srb_t *sp, struct sa_update_28xx *sa_update_iocb) +{ + struct scsi_qla_host *vha = sp->vha; + struct srb_iocb *srb_iocb = &sp->u.iocb_cmd; + struct edif_sa_ctl *sa_ctl = srb_iocb->u.sa_update.sa_ctl; + uint16_t nport_handle = sp->fcport->loop_id; + + sa_update_iocb->entry_type = SA_UPDATE_IOCB_TYPE; + sa_update_iocb->entry_count = 1; + sa_update_iocb->sys_define = 0; + sa_update_iocb->entry_status = 0; + sa_update_iocb->handle = sp->handle; + + sa_update_iocb->u.nport_handle = cpu_to_le16(nport_handle); + + sa_update_iocb->vp_index = sp->fcport->vha->vp_idx; + sa_update_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; + sa_update_iocb->port_id[1] = sp->fcport->d_id.b.area; + sa_update_iocb->port_id[2] = sp->fcport->d_id.b.domain; + + /* Invalidate the index. 
salt, spi, control & key are ignore */ + sa_update_iocb->flags = SA_FLAG_INVALIDATE; + sa_update_iocb->salt = 0; + sa_update_iocb->spi = 0; + sa_update_iocb->sa_index = cpu_to_le16(sa_ctl->index); + sa_update_iocb->sa_control = 0; + + ql_dbg(ql_dbg_edif, vha, 0x1370, + "%s SAU DELETE RX Port ID = %02x:%02x:%02x, lid %d flags=%xh, index=%u, hdl=%x\n", + __func__, sa_update_iocb->port_id[2], sa_update_iocb->port_id[1], + sa_update_iocb->port_id[0], nport_handle, sa_update_iocb->flags, + sa_update_iocb->sa_index, sp->handle); + + sp->fcport->vha->qla_stats.control_requests++; +} + +void qla24xx_auth_els(scsi_qla_host_t *vha, void **pkt, struct rsp_que **rsp) +{ + struct purex_entry_24xx *p = *pkt; + struct enode *ptr; + int sid; + u16 totlen; + struct purexevent *purex; + struct scsi_qla_host *host = NULL; + int rc; + struct fc_port *fcport; + struct qla_els_pt_arg a; + be_id_t beid; + + memset(&a, 0, sizeof(a)); + + a.els_opcode = ELS_AUTH_ELS; + a.nport_handle = p->nport_handle; + a.rx_xchg_address = p->rx_xchg_addr; + a.did.b.domain = p->s_id[2]; + a.did.b.area = p->s_id[1]; + a.did.b.al_pa = p->s_id[0]; + a.tx_byte_count = a.tx_len = sizeof(struct fc_els_ls_rjt); + a.tx_addr = vha->hw->elsrej.cdma; + a.vp_idx = vha->vp_idx; + a.control_flags = EPD_ELS_RJT; + a.ox_id = p->ox_id; + + sid = p->s_id[0] | (p->s_id[1] << 8) | (p->s_id[2] << 16); + + totlen = (le16_to_cpu(p->frame_size) & 0x0fff) - PURX_ELS_HEADER_SIZE; + if (le16_to_cpu(p->status_flags) & 0x8000) { + totlen = le16_to_cpu(p->trunc_frame_size); + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + __qla_consume_iocb(vha, pkt, rsp); + return; + } + + if (totlen > ELS_MAX_PAYLOAD) { + ql_dbg(ql_dbg_edif, vha, 0x1371, + "%s WARNING: verbose ELS frame received (totlen=%x)\n", + __func__, totlen); + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + __qla_consume_iocb(vha, pkt, rsp); + return; + } + + if (!vha->hw->flags.edif_enabled) { + /* edif support not enabled */ + ql_dbg(ql_dbg_edif, vha, 0x1372, "%s edif not enabled\n", + __func__); + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + __qla_consume_iocb(vha, pkt, rsp); + return; + } + + ptr = qla_enode_alloc(vha, N_PUREX); + if (!ptr) { + ql_dbg(ql_dbg_edif, vha, 0x1373, + "WARNING: enode allloc failed for sid=%x\n", + sid); + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + __qla_consume_iocb(vha, pkt, rsp); + return; + } + + purex = &ptr->u.purexinfo; + purex->pur_info.pur_sid = a.did; + purex->pur_info.pur_bytes_rcvd = totlen; + purex->pur_info.pur_rx_xchg_address = le32_to_cpu(p->rx_xchg_addr); + purex->pur_info.pur_nphdl = le16_to_cpu(p->nport_handle); + purex->pur_info.pur_did.b.domain = p->d_id[2]; + purex->pur_info.pur_did.b.area = p->d_id[1]; + purex->pur_info.pur_did.b.al_pa = p->d_id[0]; + purex->pur_info.vp_idx = p->vp_idx; + + a.sid = purex->pur_info.pur_did; + + rc = __qla_copy_purex_to_buffer(vha, pkt, rsp, purex->msgp, + purex->msgp_len); + if (rc) { + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + qla_enode_free(vha, ptr); + return; + } + beid.al_pa = purex->pur_info.pur_did.b.al_pa; + beid.area = purex->pur_info.pur_did.b.area; + beid.domain = purex->pur_info.pur_did.b.domain; + host = qla_find_host_by_d_id(vha, beid); + if (!host) { + ql_log(ql_log_fatal, vha, 0x13ac, + "%s Drop ELS due to unable to find host %06x\n", + __func__, purex->pur_info.pur_did.b24); + + qla_els_reject_iocb(vha, (*rsp)->qpair, &a); + qla_enode_free(vha, ptr); + return; + } + + fcport = qla2x00_find_fcport_by_pid(host, &purex->pur_info.pur_sid); + + if (DBELL_INACTIVE(vha)) { + ql_dbg(ql_dbg_edif, 
host, 0x1374, "%s e_dbell.db_flags =%x %06x\n", + __func__, host->e_dbell.db_flags, + fcport ? fcport->d_id.b24 : 0); + + qla_els_reject_iocb(host, (*rsp)->qpair, &a); + qla_enode_free(host, ptr); + return; + } + + if (fcport && EDIF_SESSION_DOWN(fcport)) { + ql_dbg(ql_dbg_edif, host, 0x13b6, + "%s terminate exchange. Send logo to 0x%x\n", + __func__, a.did.b24); + + a.tx_byte_count = a.tx_len = 0; + a.tx_addr = 0; + a.control_flags = EPD_RX_XCHG; /* EPD_RX_XCHG = terminate cmd */ + qla_els_reject_iocb(host, (*rsp)->qpair, &a); + qla_enode_free(host, ptr); + /* send logo to let remote port knows to tear down session */ + fcport->send_els_logo = 1; + qlt_schedule_sess_for_deletion(fcport); + return; + } + + /* add the local enode to the list */ + qla_enode_add(host, ptr); + + qla_edif_print_auth_hdr(host, fcport->port_name, purex->pur_info.pur_sid.b24, + purex->pur_info.pur_did.b24, purex->msgp, purex->pur_info.pur_bytes_rcvd, + purex->pur_info.pur_rx_xchg_address); + + + qla_edb_eventcreate(host, VND_CMD_AUTH_STATE_ELS_RCVD, sid, 0, NULL); +} + +static uint16_t qla_edif_get_sa_index_from_freepool(fc_port_t *fcport, int dir) +{ + struct scsi_qla_host *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + void *sa_id_map; + unsigned long flags = 0; + u16 sa_index; + + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x1376, + "%s: entry\n", __func__); + + if (dir) + sa_id_map = ha->edif_tx_sa_id_map; + else + sa_id_map = ha->edif_rx_sa_id_map; + + spin_lock_irqsave(&ha->sadb_fp_lock, flags); + sa_index = find_first_zero_bit(sa_id_map, EDIF_NUM_SA_INDEX); + if (sa_index >= EDIF_NUM_SA_INDEX) { + spin_unlock_irqrestore(&ha->sadb_fp_lock, flags); + return INVALID_EDIF_SA_INDEX; + } + set_bit(sa_index, sa_id_map); + spin_unlock_irqrestore(&ha->sadb_fp_lock, flags); + + if (dir) + sa_index += EDIF_TX_SA_INDEX_BASE; + + ql_dbg(ql_dbg_edif, vha, 0x1377, + "%s: index retrieved from free pool %d\n", __func__, sa_index); + + return sa_index; +} + +/* find an sadb entry for an nport_handle */ +static struct edif_sa_index_entry * +qla_edif_sadb_find_sa_index_entry(uint16_t nport_handle, + struct list_head *sa_list) +{ + struct edif_sa_index_entry *entry; + struct edif_sa_index_entry *tentry; + struct list_head *indx_list = sa_list; + + list_for_each_entry_safe(entry, tentry, indx_list, next) { + if (entry->handle == nport_handle) + return entry; + } + return NULL; +} + +/* remove an sa_index from the nport_handle and return it to the free pool */ +static int qla_edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle, + uint16_t sa_index) +{ + struct edif_sa_index_entry *entry; + struct list_head *sa_list; + int dir = (sa_index < EDIF_TX_SA_INDEX_BASE) ? 0 : 1; + int slot = 0; + int free_slot_count = 0; + scsi_qla_host_t *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags = 0; + + ql_dbg(ql_dbg_edif, vha, 0x1378, + "%s: entry\n", __func__); + + if (dir) + sa_list = &ha->sadb_tx_index_list; + else + sa_list = &ha->sadb_rx_index_list; + + entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list); + if (!entry) { + ql_dbg(ql_dbg_edif, vha, 0x1379, + "%s: no entry found for nport_handle 0x%x\n", + __func__, nport_handle); + return -1; + } + + spin_lock_irqsave(&ha->sadb_lock, flags); + /* + * each tx/rx direction has up to 2 sa indexes/slots. 1 slot for in flight traffic + * the other is use at re-key time. 
+ */ + for (slot = 0; slot < 2; slot++) { + if (entry->sa_pair[slot].sa_index == sa_index) { + entry->sa_pair[slot].sa_index = INVALID_EDIF_SA_INDEX; + entry->sa_pair[slot].spi = 0; + free_slot_count++; + qla_edif_add_sa_index_to_freepool(fcport, dir, sa_index); + } else if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) { + free_slot_count++; + } + } + + if (free_slot_count == 2) { + list_del(&entry->next); + kfree(entry); + } + spin_unlock_irqrestore(&ha->sadb_lock, flags); + + ql_dbg(ql_dbg_edif, vha, 0x137a, + "%s: sa_index %d removed, free_slot_count: %d\n", + __func__, sa_index, free_slot_count); + + return 0; +} + +void +qla28xx_sa_update_iocb_entry(scsi_qla_host_t *v, struct req_que *req, + struct sa_update_28xx *pkt) +{ + const char *func = "SA_UPDATE_RESPONSE_IOCB"; + srb_t *sp; + struct edif_sa_ctl *sa_ctl; + int old_sa_deleted = 1; + uint16_t nport_handle; + struct scsi_qla_host *vha; + const char *action; + + sp = qla2x00_get_sp_from_handle(v, func, req, pkt); + + if (!sp) { + ql_dbg(ql_dbg_edif, v, 0x137b, + "%s: no sp found for pkt\n", __func__); + return; + } + /* use sp->vha due to npiv */ + vha = sp->vha; + + switch (pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) { + case 0: + action = "UPDATE RX key"; + break; + case 1: + action = "DELETE RX key"; + break; + case 2: + action = "UPDATE TX key"; + break; + case 3: + action = "DELETE TX key"; + break; + } + + /* + * dig the nport handle out of the iocb, fcport->loop_id can not be trusted + * to be correct during cleanup sa_update iocbs. + */ + nport_handle = sp->fcport->loop_id; + + if (pkt->u.comp_sts) + ql_dbg(ql_dbg_edif, vha, 0x137f, + "%8phC %s failed status=%x old_sa_info=%x new_sa_info=%x lid %d, index=0x%x " + "pkt_flags %xh hdl=%x\n", + sp->fcport->port_name, action, pkt->u.comp_sts, pkt->old_sa_info, pkt->new_sa_info, + nport_handle, pkt->sa_index, pkt->flags, sp->handle); + else + ql_dbg(ql_dbg_edif, vha, 0x1380, + "%8phC %s success old_sa_info=%x new_sa_info=%x lid %d, index=0x%x " + "pkt_flags %xh hdl=%x\n", + sp->fcport->port_name, action, pkt->old_sa_info, pkt->new_sa_info, + nport_handle, pkt->sa_index, pkt->flags, sp->handle); + + /* if rx delete, remove the timer */ + if ((pkt->flags & (SA_FLAG_INVALIDATE | SA_FLAG_TX)) == SA_FLAG_INVALIDATE) { + struct edif_list_entry *edif_entry; + + sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + + edif_entry = qla_edif_list_find_sa_index(sp->fcport, nport_handle); + if (edif_entry) { + ql_dbg(ql_dbg_edif, vha, 0x1381, + "%s: removing edif_entry %px, new sa_index: 0x%x\n", + __func__, edif_entry, pkt->sa_index); + qla_edif_list_delete_sa_index(sp->fcport, edif_entry); + del_timer(&edif_entry->timer); + + ql_dbg(ql_dbg_edif, vha, 0x1382, + "%s: releasing edif_entry %px, new sa_index: 0x%x\n", + __func__, edif_entry, pkt->sa_index); + + kfree(edif_entry); + } + } + + /* + * if this is a delete for either tx or rx, make sure it succeeded. + * The new_sa_info field should be 0xffff on success + */ + if (pkt->flags & SA_FLAG_INVALIDATE) + old_sa_deleted = (le16_to_cpu(pkt->new_sa_info) == 0xffff) ? 
1 : 0; + + /* Process update and delete the same way */ + + /* If this is an sadb cleanup delete, bypass sending events to IPSEC */ + if (sp->flags & SRB_EDIF_CLEANUP_DELETE) { + sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + ql_dbg(ql_dbg_edif, vha, 0x1383, + "%s: nph 0x%x, sa_index %d removed from fw\n", + __func__, sp->fcport->loop_id, pkt->sa_index); + + } else if ((pkt->entry_status == 0) && (pkt->u.comp_sts == 0) && + old_sa_deleted) { + /* + * Note: Wa are only keeping track of latest SA, + * so we know when we can start enableing encryption per I/O. + * If all SA's get deleted, let FW reject the IOCB. + + * TODO: edif: don't set enabled here I think + * TODO: edif: prli complete is where it should be set + */ + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x1384, + "SA(%x)updated for s_id %02x%02x%02x\n", + pkt->new_sa_info, + pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]); + sp->fcport->edif.enable = 1; + if (pkt->flags & SA_FLAG_TX) { + sp->fcport->edif.tx_sa_set = 1; + sp->fcport->edif.tx_sa_pending = 0; + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + QL_VND_SA_STAT_SUCCESS, + QL_VND_TX_SA_KEY, sp->fcport); + } else { + sp->fcport->edif.rx_sa_set = 1; + sp->fcport->edif.rx_sa_pending = 0; + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + QL_VND_SA_STAT_SUCCESS, + QL_VND_RX_SA_KEY, sp->fcport); + } + } else { + ql_dbg(ql_dbg_edif, vha, 0x1385, + "%s: %8phN SA update FAILED: sa_index: %d, new_sa_info %d, %02x%02x%02x\n", + __func__, sp->fcport->port_name, pkt->sa_index, pkt->new_sa_info, + pkt->port_id[2], pkt->port_id[1], pkt->port_id[0]); + + if (pkt->flags & SA_FLAG_TX) + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED, + QL_VND_TX_SA_KEY, sp->fcport); + else + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SAUPDATE_COMPL, + (le16_to_cpu(pkt->u.comp_sts) << 16) | QL_VND_SA_STAT_FAILED, + QL_VND_RX_SA_KEY, sp->fcport); + } + + /* for delete, release sa_ctl, sa_index */ + if (pkt->flags & SA_FLAG_INVALIDATE) { + /* release the sa_ctl */ + sa_ctl = qla_edif_find_sa_ctl_by_index(sp->fcport, + le16_to_cpu(pkt->sa_index), (pkt->flags & SA_FLAG_TX)); + if (sa_ctl && + qla_edif_find_sa_ctl_by_index(sp->fcport, sa_ctl->index, + (pkt->flags & SA_FLAG_TX)) != NULL) { + ql_dbg(ql_dbg_edif + ql_dbg_verbose, vha, 0x1386, + "%s: freeing sa_ctl for index %d\n", + __func__, sa_ctl->index); + qla_edif_free_sa_ctl(sp->fcport, sa_ctl, sa_ctl->index); + } else { + ql_dbg(ql_dbg_edif, vha, 0x1387, + "%s: sa_ctl NOT freed, sa_ctl: %px\n", + __func__, sa_ctl); + } + ql_dbg(ql_dbg_edif, vha, 0x1388, + "%s: freeing sa_index %d, nph: 0x%x\n", + __func__, le16_to_cpu(pkt->sa_index), nport_handle); + qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle, + le16_to_cpu(pkt->sa_index)); + /* + * check for a failed sa_update and remove + * the sadb entry. + */ + } else if (pkt->u.comp_sts) { + ql_dbg(ql_dbg_edif, vha, 0x1389, + "%s: freeing sa_index %d, nph: 0x%x\n", + __func__, pkt->sa_index, nport_handle); + qla_edif_sadb_delete_sa_index(sp->fcport, nport_handle, + le16_to_cpu(pkt->sa_index)); + switch (le16_to_cpu(pkt->u.comp_sts)) { + case CS_PORT_EDIF_UNAVAIL: + case CS_PORT_EDIF_LOGOUT: + qlt_schedule_sess_for_deletion(sp->fcport); + break; + default: + break; + } + } + + sp->done(sp, 0); +} + +/** + * qla28xx_start_scsi_edif() - Send a SCSI type 6 command to the ISP + * @sp: command to send to the ISP + * + * Returns non-zero if a failure occurred, else zero. 
+ */
+int
+qla28xx_start_scsi_edif(srb_t *sp)
+{
+	int nseg;
+	unsigned long flags;
+	struct scsi_cmnd *cmd;
+	uint32_t *clr_ptr;
+	uint32_t index, i;
+	uint32_t handle;
+	uint16_t cnt;
+	int16_t req_cnt;
+	uint16_t tot_dsds;
+	__be32 *fcp_dl;
+	uint8_t additional_cdb_len;
+	struct ct6_dsd *ctx;
+	struct scsi_qla_host *vha = sp->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct cmd_type_6 *cmd_pkt;
+	struct dsd64 *cur_dsd;
+	uint8_t avail_dsds = 0;
+	struct scatterlist *sg;
+	struct req_que *req = sp->qpair->req;
+	spinlock_t *lock = sp->qpair->qp_lock_ptr;
+
+	/* Setup device pointers. */
+	cmd = GET_CMD_SP(sp);
+
+	/* So we know we haven't pci_map'ed anything yet */
+	tot_dsds = 0;
+
+	/* Send marker if required */
+	if (vha->marker_needed != 0) {
+		if (qla2x00_marker(vha, sp->qpair, 0, 0, MK_SYNC_ALL) !=
+		    QLA_SUCCESS) {
+			ql_log(ql_log_warn, vha, 0x13ad,
+			    "qla2x00_marker failed for cmd=%px.\n", cmd);
+			return QLA_FUNCTION_FAILED;
+		}
+		vha->marker_needed = 0;
+	}
+
+	/* Acquire ring specific lock */
+	spin_lock_irqsave(lock, flags);
+
+	/* Check for room in outstanding command list. */
+	handle = req->current_outstanding_cmd;
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
+		handle++;
+		if (handle == req->num_outstanding_cmds)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
+	}
+	if (index == req->num_outstanding_cmds)
+		goto queuing_error;
+
+	/* Map the sg table so we have an accurate count of sg entries needed */
+	if (scsi_sg_count(cmd)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+		    scsi_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
+			goto queuing_error;
+	} else {
+		nseg = 0;
+	}
+
+	tot_dsds = nseg;
+	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+	sp->iores.res_type = RESOURCE_IOCB | RESOURCE_EXCH;
+	sp->iores.exch_cnt = 1;
+	sp->iores.iocb_cnt = req_cnt;
+	if (qla_get_fw_resources(sp->qpair, &sp->iores))
+		goto queuing_error;
+
+	if (req->cnt < (req_cnt + 2)) {
+		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+		    RD_REG_DWORD_RELAXED(req->req_q_out);
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length -
+			    (req->ring_index - cnt);
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+	}
+
+	if (qla_get_buf(vha, sp->qpair, &sp->u.scmd.buf_dsc)) {
+		ql_log(ql_log_fatal, vha, 0x13b1,
+		    "Failed to allocate buf for fcp_cmnd for cmd=%px.\n", cmd);
+		goto queuing_error;
+	}
+	sp->flags |= SRB_GOT_BUF;
+	ctx = &sp->u.scmd.ct6_ctx;
+	ctx->fcp_cmnd = sp->u.scmd.buf_dsc.buf;
+	ctx->fcp_cmnd_dma = sp->u.scmd.buf_dsc.buf_dma;
+
+	if (cmd->cmd_len > 16) {
+		additional_cdb_len = cmd->cmd_len - 16;
+		if ((cmd->cmd_len % 4) != 0) {
+			/*
+			 * A SCSI command bigger than 16 bytes must be a
+			 * multiple of 4 bytes in length.
+			 */
+			ql_log(ql_log_warn, vha, 0x13b2,
+			    "scsi cmd len %d not multiple of 4 for cmd=%p.\n",
+			    cmd->cmd_len, cmd);
+			goto queuing_error_fcp_cmnd;
+		}
+		ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+	} else {
+		additional_cdb_len = 0;
+		ctx->fcp_cmnd_len = 12 + 16 + 4;
+	}
+
+	cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+	/*
+	 * Zero out remaining portion of packet.
+	 * tagged queuing modifier -- default is TSK_SIMPLE (0).
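+	 *
+	 * The entry header and handle were just written above, so clearing
+	 * starts at dword 2 and covers the remaining REQUEST_ENTRY_SIZE - 8
+	 * bytes of the IOCB.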
+	 */
+	clr_ptr = (uint32_t *)cmd_pkt + 2;
+	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+	/* No data transfer */
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+		cmd_pkt->byte_count = cpu_to_le32(0);
+		goto no_dsds;
+	}
+
+	/* Set transfer direction */
+	if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
+		vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.output_requests++;
+		sp->fcport->edif.tx_bytes += scsi_bufflen(cmd);
+	} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
+		vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+		vha->qla_stats.input_requests++;
+		sp->fcport->edif.rx_bytes += scsi_bufflen(cmd);
+	}
+
+	cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF);
+	cmd_pkt->control_flags &= ~(cpu_to_le16(CF_NEW_SA));
+
+	/* One DSD is available in the Command Type 6 IOCB */
+	avail_dsds = 1;
+	cur_dsd = &cmd_pkt->fcp_dsd;
+
+	/* Load data segments */
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		dma_addr_t sle_dma;
+		cont_a64_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Continuation
+			 * Type 1 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(vha, req);
+			cur_dsd = cont_pkt->dsd;
+			avail_dsds = 5;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		put_unaligned_le64(sle_dma, &cur_dsd->address);
+		cur_dsd->length = cpu_to_le32(sg_dma_len(sg));
+		cur_dsd++;
+		avail_dsds--;
+	}
+
+no_dsds:
+	/* Set NPORT-ID and LUN number */
+	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+	cmd_pkt->vp_index = sp->vha->vp_idx;
+
+	cmd_pkt->entry_type = COMMAND_TYPE_6;
+
+	/* Set total data segment count. */
+	cmd_pkt->entry_count = (uint8_t)req_cnt;
+
+	int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
+	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+	/* build FCP_CMND IU */
+	int_to_scsilun(cmd->device->lun, &ctx->fcp_cmnd->lun);
+	ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+	if (cmd->sc_data_direction == DMA_TO_DEVICE)
+		ctx->fcp_cmnd->additional_cdb_len |= 1;
+	else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+		ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+	/* Populate the FCP_PRIO. */
+	if (ha->flags.fcp_prio_enabled)
+		ctx->fcp_cmnd->task_attribute |=
+		    sp->fcport->fcp_prio << 3;
+
+	memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+	fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 +
+	    additional_cdb_len);
+	*fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+	cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+	put_unaligned_le64(ctx->fcp_cmnd_dma, &cmd_pkt->fcp_cmnd_dseg_address);
+
+	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+	/* Set total data segment count. */
+	cmd_pkt->entry_count = (uint8_t)req_cnt;
+	cmd_pkt->entry_status = 0;
+
+	/* Build command packet. */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
+	cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+	req->cnt -= req_cnt;
+
+	/* Adjust ring index. */
+	wmb();
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else {
+		req->ring_ptr++;
+	}
+
+	/* Set chip new ring index. */
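+	/*
+	 * The wmb() above orders the IOCB stores ahead of this doorbell
+	 * write; once req_q_in is updated the firmware may consume the new
+	 * request-queue entries.
+	 */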
+	WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+	spin_unlock_irqrestore(lock, flags);
+
+#ifdef QLA2XXX_LATENCY_MEASURE
+	ktime_get_real_ts64(&sp->cmd_to_req_q);
+#endif
+	return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+queuing_error:
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
+	qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
+	qla_put_fw_resources(sp->qpair, &sp->iores);
+	spin_unlock_irqrestore(lock, flags);
+
+	return QLA_FUNCTION_FAILED;
+}
+
+/**********************************************
+ * edif update/delete sa_index list functions *
+ **********************************************/
+
+/* clear the edif_indx_list for this port */
+void qla_edif_list_del(fc_port_t *fcport)
+{
+	struct edif_list_entry *indx_lst, *tindx_lst;
+	struct list_head *indx_list = &fcport->edif.edif_indx_list;
+	unsigned long flags = 0;
+
+	list_for_each_entry_safe(indx_lst, tindx_lst, indx_list, next) {
+		spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+		list_del(&indx_lst->next);
+		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+		kfree(indx_lst);
+	}
+}
+
+/******************
+ * SADB functions *
+ ******************/
+
+/* allocate/retrieve an sa_index for a given spi */
+static uint16_t qla_edif_sadb_get_sa_index(fc_port_t *fcport,
+	struct qla_sa_update_frame *sa_frame)
+{
+	struct edif_sa_index_entry *entry;
+	struct list_head *sa_list;
+	uint16_t sa_index;
+	int dir = sa_frame->flags & SAU_FLG_TX;
+	int slot = 0;
+	int free_slot = -1;
+	scsi_qla_host_t *vha = fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags = 0;
+	uint16_t nport_handle = fcport->loop_id;
+
+	ql_dbg(ql_dbg_edif, vha, 0x138a,
+	    "%s: entry fc_port: %px, nport_handle: 0x%x\n",
+	    __func__, fcport, nport_handle);
+
+	if (dir)
+		sa_list = &ha->sadb_tx_index_list;
+	else
+		sa_list = &ha->sadb_rx_index_list;
+
+	entry = qla_edif_sadb_find_sa_index_entry(nport_handle, sa_list);
+	if (!entry) {
+		if ((sa_frame->flags & (SAU_FLG_TX | SAU_FLG_INV)) == SAU_FLG_INV) {
+			ql_dbg(ql_dbg_edif, vha, 0x138b,
+			    "%s: rx delete request with no entry\n", __func__);
+			return RX_DELETE_NO_EDIF_SA_INDEX;
+		}
+
+		/* if there is no entry for this nport, add one */
+		entry = kzalloc((sizeof(struct edif_sa_index_entry)), GFP_ATOMIC);
+		if (!entry)
+			return INVALID_EDIF_SA_INDEX;
+
+		sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
+		if (sa_index == INVALID_EDIF_SA_INDEX) {
+			kfree(entry);
+			return INVALID_EDIF_SA_INDEX;
+		}
+
+		INIT_LIST_HEAD(&entry->next);
+		entry->handle = nport_handle;
+		entry->fcport = fcport;
+		entry->sa_pair[0].spi = sa_frame->spi;
+		entry->sa_pair[0].sa_index = sa_index;
+		entry->sa_pair[1].spi = 0;
+		entry->sa_pair[1].sa_index = INVALID_EDIF_SA_INDEX;
+		spin_lock_irqsave(&ha->sadb_lock, flags);
+		list_add_tail(&entry->next, sa_list);
+		spin_unlock_irqrestore(&ha->sadb_lock, flags);
+		ql_dbg(ql_dbg_edif, vha, 0x138c,
+		    "%s: Created new sadb entry for nport_handle 0x%x, spi 0x%x, returning "
+		    "sa_index %d\n",
+		    __func__, nport_handle, sa_frame->spi, sa_index);
+
+		return sa_index;
+	}
+
+	spin_lock_irqsave(&ha->sadb_lock, flags);
+
+	/* see if we already have an entry for this spi */
+	for (slot = 0; slot < 2; slot++) {
+		if (entry->sa_pair[slot].sa_index == INVALID_EDIF_SA_INDEX) {
+			free_slot = slot;
+		} else {
+			if (entry->sa_pair[slot].spi == sa_frame->spi) {
+				spin_unlock_irqrestore(&ha->sadb_lock, flags);
+				ql_dbg(ql_dbg_edif, vha, 0x138d,
+				    "%s: sadb slot %d entry for lid 0x%x, spi 0x%x found, "
+				    "sa_index %d\n",
+				    __func__, slot, entry->handle, sa_frame->spi,
+				    entry->sa_pair[slot].sa_index);
+				return entry->sa_pair[slot].sa_index;
+			}
+		}
+	}
+	spin_unlock_irqrestore(&ha->sadb_lock, flags);
+
+	/* both slots are used */
+	if (free_slot == -1) {
+		ql_dbg(ql_dbg_edif, vha, 0x138e,
+		    "%s: WARNING: No free slots in sadb for nport_handle 0x%x, spi: 0x%x\n",
+		    __func__, entry->handle, sa_frame->spi);
+		ql_dbg(ql_dbg_edif, vha, 0x138f,
+		    "%s: Slot 0 spi: 0x%x sa_index: %d, Slot 1 spi: 0x%x sa_index: %d\n",
+		    __func__, entry->sa_pair[0].spi, entry->sa_pair[0].sa_index,
+		    entry->sa_pair[1].spi, entry->sa_pair[1].sa_index);
+
+		return INVALID_EDIF_SA_INDEX;
+	}
+
+	/* there is at least one free slot, use it */
+	sa_index = qla_edif_get_sa_index_from_freepool(fcport, dir);
+	if (sa_index == INVALID_EDIF_SA_INDEX) {
+		ql_dbg(ql_dbg_edif, fcport->vha, 0x1390,
+		    "%s: empty freepool!!\n", __func__);
+		return INVALID_EDIF_SA_INDEX;
+	}
+
+	spin_lock_irqsave(&ha->sadb_lock, flags);
+	entry->sa_pair[free_slot].spi = sa_frame->spi;
+	entry->sa_pair[free_slot].sa_index = sa_index;
+	spin_unlock_irqrestore(&ha->sadb_lock, flags);
+	ql_dbg(ql_dbg_edif, fcport->vha, 0x1391,
+	    "%s: sadb slot %d entry for nport_handle 0x%x, spi 0x%x added, returning sa_index %d\n",
+	    __func__, free_slot, entry->handle, sa_frame->spi, sa_index);
+
+	return sa_index;
+}
+
+/* release any sadb entries -- only done at teardown */
+void qla_edif_sadb_release(struct qla_hw_data *ha)
+{
+	struct edif_sa_index_entry *entry, *tmp;
+
+	list_for_each_entry_safe(entry, tmp, &ha->sadb_rx_index_list, next) {
+		list_del(&entry->next);
+		kfree(entry);
+	}
+
+	list_for_each_entry_safe(entry, tmp, &ha->sadb_tx_index_list, next) {
+		list_del(&entry->next);
+		kfree(entry);
+	}
+}
+
+/**************************
+ * sadb freepool functions
+ **************************/
+
+/* build the rx and tx sa_index free pools -- only done at fcport init */
+int qla_edif_sadb_build_free_pool(struct qla_hw_data *ha)
+{
+	ha->edif_tx_sa_id_map =
+	    kzalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX) * sizeof(long), GFP_KERNEL);
+
+	if (!ha->edif_tx_sa_id_map) {
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x13a7,
+		    "Unable to allocate memory for sadb tx.\n");
+		return -ENOMEM;
+	}
+
+	ha->edif_rx_sa_id_map =
+	    kzalloc(BITS_TO_LONGS(EDIF_NUM_SA_INDEX) * sizeof(long), GFP_KERNEL);
+	if (!ha->edif_rx_sa_id_map) {
+		kfree(ha->edif_tx_sa_id_map);
+		ha->edif_tx_sa_id_map = NULL;
+		ql_log_pci(ql_log_fatal, ha->pdev, 0x13a8,
+		    "Unable to allocate memory for sadb rx.\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/* release the free pool - only done during fcport teardown */
+void qla_edif_sadb_release_free_pool(struct qla_hw_data *ha)
+{
+	kfree(ha->edif_tx_sa_id_map);
+	ha->edif_tx_sa_id_map = NULL;
+	kfree(ha->edif_rx_sa_id_map);
+	ha->edif_rx_sa_id_map = NULL;
+}
+
+static void __chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
+	fc_port_t *fcport, uint32_t handle, uint16_t sa_index)
+{
+	struct edif_list_entry *edif_entry;
+	struct edif_sa_ctl *sa_ctl;
+	uint16_t delete_sa_index = INVALID_EDIF_SA_INDEX;
+	unsigned long flags = 0;
+	uint16_t nport_handle = fcport->loop_id;
+	uint16_t cached_nport_handle;
+
+	spin_lock_irqsave(&fcport->edif.indx_list_lock, flags);
+	edif_entry = qla_edif_list_find_sa_index(fcport, nport_handle);
+	if (!edif_entry) {
+		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+		return;	/* no pending delete for this handle */
+	}
+
+	/*
+	 * check for no pending delete for this index or iocb does not
+	 * match rx sa_index
+	 */
+	if (edif_entry->delete_sa_index == INVALID_EDIF_SA_INDEX ||
+	    edif_entry->update_sa_index != sa_index) {
+		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+		return;
+	}
+
+	/*
+	 * wait until we have seen at least EDIF_RX_DELETE_FILTER_COUNT
+	 * transfers before queueing the RX delete
+	 */
+	if (edif_entry->count++ < EDIF_RX_DELETE_FILTER_COUNT) {
+		spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+		return;
+	}
+
+	ql_dbg(ql_dbg_edif, vha, 0x1392,
+	    "%s: invalidating delete_sa_index, update_sa_index: 0x%x sa_index: 0x%x, "
+	    "delete_sa_index: 0x%x\n",
+	    __func__, edif_entry->update_sa_index, sa_index, edif_entry->delete_sa_index);
+
+	delete_sa_index = edif_entry->delete_sa_index;
+	edif_entry->delete_sa_index = INVALID_EDIF_SA_INDEX;
+	cached_nport_handle = edif_entry->handle;
+	spin_unlock_irqrestore(&fcport->edif.indx_list_lock, flags);
+
+	/* sanity check on the nport handle */
+	if (nport_handle != cached_nport_handle) {
+		ql_dbg(ql_dbg_edif, vha, 0x1393,
+		    "%s: POST SA DELETE nport_handle mismatch: lid: 0x%x, edif_entry nph: 0x%x\n",
+		    __func__, nport_handle, cached_nport_handle);
+	}
+
+	/* find the sa_ctl for the delete and schedule the delete */
+	sa_ctl = qla_edif_find_sa_ctl_by_index(fcport, delete_sa_index, 0);
+	if (sa_ctl) {
+		ql_dbg(ql_dbg_edif, vha, 0x1394,
+		    "%s: POST SA DELETE sa_ctl: %p, index recvd %d\n",
+		    __func__, sa_ctl, sa_index);
+		ql_dbg(ql_dbg_edif, vha, 0x1395,
+		    "delete index %d, update index: %d, nport handle: 0x%x, handle: 0x%x\n",
+		    delete_sa_index,
+		    edif_entry->update_sa_index, nport_handle, handle);
+
+		sa_ctl->flags = EDIF_SA_CTL_FLG_DEL;
+		set_bit(EDIF_SA_CTL_REPL, &sa_ctl->state);
+		qla_post_sa_replace_work(fcport->vha, fcport,
+		    nport_handle, sa_ctl);
+	} else {
+		ql_dbg(ql_dbg_edif, vha, 0x1396,
+		    "%s: POST SA DELETE sa_ctl not found for delete_sa_index: %d\n",
+		    __func__, delete_sa_index);
+	}
+}
+
+void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha,
+	srb_t *sp, struct sts_entry_24xx *sts24)
+{
+	fc_port_t *fcport = sp->fcport;
+	/* sa_index used by this iocb */
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	uint32_t handle;
+
+	handle = (uint32_t)LSW(sts24->handle);
+
+	/* find out if this status iosb is for a scsi read */
+	if (cmd->sc_data_direction != DMA_FROM_DEVICE)
+		return;
+
+	return __chk_edif_rx_sa_delete_pending(vha, fcport, handle,
+	    le16_to_cpu(sts24->edif_sa_index));
+}
+
+void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, fc_port_t *fcport,
+	struct ctio7_from_24xx *pkt)
+{
+	__chk_edif_rx_sa_delete_pending(vha, fcport,
+	    pkt->handle, le16_to_cpu(pkt->edif_sa_index));
+}
+
+void qla_parse_auth_els_ctl(struct srb *sp)
+{
+	struct qla_els_pt_arg *a = &sp->u.bsg_cmd.u.els_arg;
+	bsg_job_t *bsg_job = sp->u.bsg_cmd.bsg_job;
+	struct fc_bsg_request *request = bsg_job->request;
+	struct qla_bsg_auth_els_request *p =
+	    (struct qla_bsg_auth_els_request *)bsg_job->request;
+
+	a->tx_len = a->tx_byte_count = sp->remap.req.len;
+	a->tx_addr = sp->remap.req.dma;
+	a->rx_len = a->rx_byte_count = sp->remap.rsp.len;
+	a->rx_addr = sp->remap.rsp.dma;
+
+	if (p->e.sub_cmd == SEND_ELS_REPLY) {
+		a->control_flags = p->e.extra_control_flags << 13;
+		a->rx_xchg_address = cpu_to_le32(p->e.extra_rx_xchg_address);
+		if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_ACC)
+			a->els_opcode = ELS_LS_ACC;
+		else if (p->e.extra_control_flags == BSG_CTL_FLAG_LS_RJT)
+			a->els_opcode = ELS_LS_RJT;
+	}
+	a->did = sp->fcport->d_id;
+	a->els_opcode = request->rqst_data.h_els.command_code;
+	a->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	a->vp_idx = sp->vha->vp_idx;
+}
+
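+/*
+ * qla_edif_process_els() - send an authentication ELS (or a reply to one)
+ * on behalf of the authentication application.  The BSG request payload
+ * is staged in a DMA buffer from the purex pool and issued as an
+ * SRB_ELS_CMD_HST_NOLOGIN command; EAGAIN from qla2x00_start_sp() is
+ * retried up to EDIF_RETRY_COUNT times.
+ */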
+int
+qla_edif_process_els(scsi_qla_host_t *vha, bsg_job_t *bsg_job)
+{
+	struct fc_bsg_request *bsg_request = bsg_job->request;
+	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+	fc_port_t *fcport = NULL;
+	struct qla_hw_data *ha = vha->hw;
+	srb_t *sp;
+	int rval = (DID_ERROR << 16), cnt;
+	port_id_t d_id;
+	struct qla_bsg_auth_els_request *p =
+	    (struct qla_bsg_auth_els_request *)bsg_job->request;
+	struct qla_bsg_auth_els_reply *rpl =
+	    (struct qla_bsg_auth_els_reply *)bsg_job->reply;
+
+	rpl->version = EDIF_VERSION1;
+
+	d_id.b.al_pa = bsg_request->rqst_data.h_els.port_id[2];
+	d_id.b.area = bsg_request->rqst_data.h_els.port_id[1];
+	d_id.b.domain = bsg_request->rqst_data.h_els.port_id[0];
+
+	/* find matching d_id in fcport list */
+	fcport = qla2x00_find_fcport_by_pid(vha, &d_id);
+	if (!fcport) {
+		ql_dbg(ql_dbg_edif, vha, 0x1397,
+		    "%s fcport not found online portid=%06x.\n",
+		    __func__, d_id.b24);
+		SET_DID_STATUS(bsg_reply->result, DID_ERROR);
+		return -EIO;
+	}
+
+	if (qla_bsg_check(vha, bsg_job, fcport))
+		return 0;
+
+	if (EDIF_SESS_DELETE(fcport)) {
+		ql_dbg(ql_dbg_edif, vha, 0x1398,
+		    "%s ELS code %x, no loop id.\n", __func__,
+		    bsg_request->rqst_data.r_els.els_code);
+		SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
+		return -ENXIO;
+	}
+
+	if (!vha->flags.online) {
+		ql_log(ql_log_warn, vha, 0x13a2, "Host not online.\n");
+		SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
+		rval = -EIO;
+		goto done;
+	}
+
+	/* pass through is supported only for ISP 4Gb or higher */
+	if (!IS_FWI2_CAPABLE(ha)) {
+		ql_dbg(ql_dbg_user, vha, 0x13a3,
+		    "ELS passthru not supported for ISP23xx based adapters.\n");
+		SET_DID_STATUS(bsg_reply->result, DID_BAD_TARGET);
+		rval = -EPERM;
+		goto done;
+	}
+
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp) {
+		ql_dbg(ql_dbg_user, vha, 0x13a4,
+		    "Failed get sp pid=%06x\n", fcport->d_id.b24);
+		rval = -ENOMEM;
+		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+		goto done;
+	}
+
+	sp->remap.req.len = bsg_job->request_payload.payload_len;
+	sp->remap.req.buf = dma_pool_alloc(ha->purex_dma_pool,
+	    GFP_KERNEL, &sp->remap.req.dma);
+	if (!sp->remap.req.buf) {
+		ql_dbg(ql_dbg_user, vha, 0x13aa,
+		    "Failed allocate request dma len=%x\n",
+		    bsg_job->request_payload.payload_len);
+		rval = -ENOMEM;
+		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+		goto done_free_sp;
+	}
+
+	sp->remap.rsp.len = bsg_job->reply_payload.payload_len;
+	sp->remap.rsp.buf = dma_pool_alloc(ha->purex_dma_pool,
+	    GFP_KERNEL, &sp->remap.rsp.dma);
+	if (!sp->remap.rsp.buf) {
+		ql_dbg(ql_dbg_user, vha, 0x13ab,
+		    "Failed allocate response dma len=%x\n",
+		    bsg_job->reply_payload.payload_len);
+		rval = -ENOMEM;
+		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+		goto done_free_remap_req;
+	}
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, sp->remap.req.buf,
+	    sp->remap.req.len);
+	sp->remap.remapped = true;
+
+	sp->type = SRB_ELS_CMD_HST_NOLOGIN;
+	sp->name = "SPCN_BSG_HST_NOLOGIN";
+	sp->u.bsg_cmd.bsg_job = bsg_job;
+	qla_parse_auth_els_ctl(sp);
+
+	sp->free = qla2x00_bsg_sp_free;
+	sp->done = qla2x00_bsg_job_done;
+	cnt = 0;
+retry:
+	rval = qla2x00_start_sp(sp);
+
+	switch (rval) {
+	case QLA_SUCCESS:
+		if (p->e.sub_cmd == SEND_ELS)
+			qla_edif_print_auth_hdr(vha, fcport->port_name, vha->d_id.b24,
+			    fcport->d_id.b24, sp->remap.req.buf, sp->remap.req.len, 0xffffffff);
+		else
+			ql_dbg(ql_dbg_edif, vha, 0x1399,
+			    "%s %s %8phN xchg %x ctlflag %x hdl %x reqlen %xh bsg ptr %px\n",
+			    __func__, sc_to_str(p->e.sub_cmd), fcport->port_name,
+			    p->e.extra_rx_xchg_address, p->e.extra_control_flags,
+			    sp->handle, sp->remap.req.len, bsg_job);
+		break;
+	case EAGAIN:
+		msleep(EDIF_MSLEEP_INTERVAL);
+		cnt++;
+		if (cnt < EDIF_RETRY_COUNT)
+			goto retry;
+		fallthrough;
+	default:
+		ql_log(ql_log_warn, vha, 0x13b3,
+		    "%s qla2x00_start_sp failed = %d\n", __func__, rval);
+		SET_DID_STATUS(bsg_reply->result, DID_IMM_RETRY);
+		rval = -EIO;
+		goto done_free_remap_rsp;
+	}
+	return rval;
+
+done_free_remap_rsp:
+	dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
+	    sp->remap.rsp.dma);
+done_free_remap_req:
+	dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
+	    sp->remap.req.dma);
+done_free_sp:
+	qla2x00_rel_sp(sp);
+
+done:
+	return rval;
+}
+
+void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess)
+{
+	u16 cnt = 0;
+
+	if (sess->edif.app_sess_online && DBELL_ACTIVE(vha)) {
+		ql_dbg(ql_dbg_disc, vha, 0x139e,
+		    "%s: sess %8phN send port_offline event\n",
+		    __func__, sess->port_name);
+		sess->edif.app_sess_online = 0;
+		sess->edif.sess_down_acked = 0;
+		qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_SESSION_SHUTDOWN,
+		    sess->d_id.b24, 0, sess);
+		qla2x00_post_aen_work(vha, FCH_EVT_PORT_OFFLINE, sess->d_id.b24);
+
+		while (!READ_ONCE(sess->edif.sess_down_acked) &&
+		    !test_bit(VPORT_DELETE, &vha->dpc_flags)) {
+			msleep(100);
+			cnt++;
+			if (cnt > 100)
+				break;
+		}
+		sess->edif.sess_down_acked = 0;
+		ql_dbg(ql_dbg_disc, vha, 0x139f,
+		    "%s: sess %8phN port_offline event completed\n",
+		    __func__, sess->port_name);
+	}
+}
+
+void qla_edif_clear_appdata(struct scsi_qla_host *vha, struct fc_port *fcport)
+{
+	if (!(fcport->flags & FCF_FCSP_DEVICE))
+		return;
+
+	qla_edb_clear(vha, fcport->d_id);
+	qla_enode_clear(vha, fcport->d_id);
+}
diff --git a/drivers/scsi/qla2xxx/qla_edif.h b/drivers/scsi/qla2xxx/qla_edif.h
new file mode 100644
index 0000000000000..9579188a6e6cf
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_edif.h
@@ -0,0 +1,180 @@
+/*
+ * Marvell Fibre Channel HBA Driver
+ * Copyright (c) 2003-2016 QLogic Corporation
+ * Copyright (C) 2016-2018 Cavium Inc
+ * Copyright (c) 2018- Marvell
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
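+ *
+ * Definitions for Encryption of Data In Flight (EDIF): authentication
+ * message codes, security association (SA) control structures, the
+ * SA update IOCB layout, and the application doorbell used to post
+ * events to the authentication application.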
+ */
+#ifndef __QLA_EDIF_H
+#define __QLA_EDIF_H
+
+struct qla_scsi_host;
+#define EDIF_APP_ID 0x73730001
+
+enum auth_msg_code {
+	AUTH_Reject = 0xa,
+	AUTH_Negotiate = 0xb,
+	AUTH_Done = 0xc,
+	DHCHAP_Challenge = 0x10,
+	DHCHAP_Reply = 0x11,
+	DHCHAP_Success = 0x12,
+	FCAP_Request = 0x13,
+	FCAP_Acknowledge = 0x14,
+	FCAP_Confirm = 0x15,
+	FCPAP_Init = 0x16,
+	FCPAP_Accept = 0x17,
+	FCPAP_Complete = 0x18,
+	IKE_SA_Init = 0x22,
+	IKE_Auth = 0x23,
+	IKE_Create_Child_SA = 0x24,
+	IKE_Informational = 0x25,
+	FCEAP_Request = 0x26,
+	FCEAP_Response = 0x27,
+	FCEAP_Success = 0x28,
+	FCEAP_Failure = 0x29,
+};
+
+#define EDIF_MAX_INDEX	2048
+struct edif_sa_ctl {
+	struct list_head next;
+	uint16_t del_index;
+	uint16_t index;
+	uint16_t slot;
+	uint16_t flags;
+#define EDIF_SA_CTL_FLG_REPL		BIT_0
+#define EDIF_SA_CTL_FLG_DEL		BIT_1
+#define EDIF_SA_CTL_FLG_CLEANUP_DEL	BIT_4
+	/* invalidate index bit; mirrors QLA_SA_UPDATE_FLAGS_DELETE */
+	unsigned long state;
+#define EDIF_SA_CTL_USED	1	/* Active SA update */
+#define EDIF_SA_CTL_PEND	2	/* Waiting for slot */
+#define EDIF_SA_CTL_REPL	3	/* Active Replace and Delete */
+#define EDIF_SA_CTL_DEL		4	/* Delete Pending */
+	struct fc_port *fcport;
+	struct fc_bsg_job *bsg_job;
+	struct qla_sa_update_frame sa_frame;
+};
+
+enum enode_flags_t {
+	ENODE_ACTIVE = 0x1,
+};
+
+struct pur_core {
+	enum enode_flags_t enode_flags;
+	spinlock_t pur_lock;
+	struct list_head head;
+};
+
+enum db_flags_t {
+	EDB_ACTIVE = BIT_0,
+};
+#define DBELL_ACTIVE(_v)	(_v->e_dbell.db_flags & EDB_ACTIVE)
+#define DBELL_INACTIVE(_v)	(!(_v->e_dbell.db_flags & EDB_ACTIVE))
+
+struct edif_dbell {
+	enum db_flags_t db_flags;
+	spinlock_t db_lock;
+	struct list_head head;
+	bsg_job_t *dbell_bsg_job;
+	unsigned long bsg_expire;
+};
+
+#define IS_FAST_CAPABLE(ha) ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) && \
+	((ha)->fw_attributes_ext[0] & BIT_5))
+
+#define SA_UPDATE_IOCB_TYPE	0x71	/* Security Association Update IOCB entry */
+struct sa_update_28xx {
+	uint8_t entry_type;		/* Entry type. */
+	uint8_t entry_count;		/* Entry count. */
+	uint8_t sys_define;		/* System Defined. */
+	uint8_t entry_status;		/* Entry Status. */
+
+	uint32_t handle;		/* IOCB System handle. */
+
+	union {
+		__le16 nport_handle;	/* in: N_PORT handle. */
+		__le16 comp_sts;	/* out: completion status */
+#define CS_PORT_EDIF_UNAVAIL		0x28
+#define CS_PORT_EDIF_LOGOUT		0x29
+#define CS_PORT_EDIF_SUPP_NOT_RDY	0x64
+#define CS_PORT_EDIF_INV_REQ		0x66
+	} u;
+	uint8_t vp_index;
+	uint8_t reserved_1;
+	uint8_t port_id[3];
+	uint8_t flags;
+#define SA_FLAG_INVALIDATE	BIT_0
+#define SA_FLAG_TX		BIT_1	/* 1=tx, 0=rx */
+
+	uint8_t sa_key[32];	/* 256 bit key */
+	__le32 salt;
+	__le32 spi;
+	uint8_t sa_control;
+#define SA_CNTL_ENC_FCSP	(1 << 3)
+#define SA_CNTL_ENC_OPD		(2 << 3)
+#define SA_CNTL_ENC_MSK		(3 << 3)	/* mask bits 4,3 */
+#define SA_CNTL_AES_GMAC	(1 << 2)
+#define SA_CNTL_KEY256		(2 << 0)
+#define SA_CNTL_KEY128		0
+
+	uint8_t reserved_2;
+	__le16 sa_index;	/* bits 11-15 reserved */
+	__le16 old_sa_info;
+	__le16 new_sa_info;
+};
+
+#define NUM_ENTRIES	256
+#define PUR_GET		1
+
+struct dinfo {
+	int nodecnt;
+	int lstate;
+};
+
+struct pur_ninfo {
+	port_id_t pur_sid;
+	port_id_t pur_did;
+	uint8_t vp_idx;
+	short pur_bytes_rcvd;
+	unsigned short pur_nphdl;
+	unsigned int pur_rx_xchg_address;
+};
+
+struct purexevent {
+	struct pur_ninfo pur_info;
+	unsigned char *msgp;
+	u32 msgp_len;
+};
+
+#define N_UNDEF	0
+#define N_PUREX	1
+struct enode {
+	struct list_head list;
+	struct dinfo dinfo;
+	uint32_t ntype;
+	union {
+		struct purexevent purexinfo;
+	} u;
+};
+#define RX_ELS_SIZE (roundup(sizeof(struct enode) + ELS_MAX_PAYLOAD, SMP_CACHE_BYTES))
+
+#define EDIF_SESSION_DOWN(_s) \
+	(qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \
+	 _s->disc_state == DSC_DELETED || \
+	 !_s->edif.app_sess_online))
+
+#define EDIF_NEGOTIATION_PENDING(_fcport) \
+	(DBELL_ACTIVE(_fcport->vha) && \
+	 _fcport->disc_state == DSC_LOGIN_AUTH_PEND)
+
+#define EDIF_SESS_DELETE(_s) \
+	(qla_ini_mode_enabled(_s->vha) && (_s->disc_state == DSC_DELETE_PEND || \
+	 _s->disc_state == DSC_DELETED))
+
+#define EDIF_CAP(_ha) (ql2xsecenable && IS_QLA28XX(_ha))
+
+#endif	/* __QLA_EDIF_H */
diff --git a/drivers/scsi/qla2xxx/qla_edif_bsg.h b/drivers/scsi/qla2xxx/qla_edif_bsg.h
new file mode 100644
index 0000000000000..657bfeca3a3ac
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_edif_bsg.h
@@ -0,0 +1,258 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Marvell Fibre Channel HBA Driver
+ * Copyright (C) 2018- Marvell
+ *
+ */
+#ifndef __QLA_EDIF_BSG_H
+#define __QLA_EDIF_BSG_H
+
+#define EDIF_VERSION1	1
+
+/* BSG Vendor specific commands */
+#define ELS_MAX_PAYLOAD	2112
+#ifndef WWN_SIZE
+#define WWN_SIZE	8
+#endif
+#define VND_CMD_APP_RESERVED_SIZE	28
+#define VND_CMD_PAD_SIZE		3
+enum auth_els_sub_cmd {
+	SEND_ELS = 0,
+	SEND_ELS_REPLY,
+	PULL_ELS,
+};
+
+struct extra_auth_els {
+	enum auth_els_sub_cmd sub_cmd;
+	uint32_t extra_rx_xchg_address;
+	uint8_t extra_control_flags;
+#define BSG_CTL_FLAG_INIT	0
+#define BSG_CTL_FLAG_LS_ACC	1
+#define BSG_CTL_FLAG_LS_RJT	2
+#define BSG_CTL_FLAG_TRM	3
+	uint8_t version;
+	uint8_t pad[2];
+	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __attribute__ ((packed));
+
+struct qla_bsg_auth_els_request {
+	struct fc_bsg_request r;
+	struct extra_auth_els e;
+};
+
+struct qla_bsg_auth_els_reply {
+	struct fc_bsg_reply r;
+	uint32_t rx_xchg_address;
+	uint8_t version;
+	uint8_t pad[VND_CMD_PAD_SIZE];
+	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+};
+
+struct app_id {
+	int app_vid;
+	uint8_t version;
+	uint8_t pad[VND_CMD_PAD_SIZE];
+	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __attribute__ ((packed));
+
+struct app_start_reply {
+	uint32_t host_support_edif;
+	uint32_t edif_enode_active;
+	uint32_t edif_edb_active;
+	uint8_t version;
+	uint8_t pad[VND_CMD_PAD_SIZE];
+	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __attribute__ ((packed));
+
+struct app_start {
+	struct app_id app_info;
+	uint8_t app_start_flags;
+	uint8_t version;
+	uint8_t pad[2];
+	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __attribute__ ((packed));
+
+struct app_stop {
+	struct app_id app_info;
+	uint8_t version;
+	uint8_t pad[VND_CMD_PAD_SIZE];
+	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __attribute__ ((packed));
+
+struct app_plogi_reply {
+	uint32_t prli_status;
+	uint8_t version;
+	uint8_t pad[VND_CMD_PAD_SIZE];
+	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __attribute__ ((packed));
+
+struct app_pinfo_req {
+	struct app_id app_info;
+	uint8_t num_ports;
+	port_id_t remote_pid;
+	uint8_t version;
+	uint8_t pad[VND_CMD_PAD_SIZE];
+	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __attribute__ ((packed));
+
+struct app_pinfo {
+	port_id_t remote_pid;
+	uint8_t remote_wwpn[WWN_SIZE];
+	uint8_t remote_type;
+#define VND_CMD_RTYPE_UNKNOWN	0
+#define VND_CMD_RTYPE_TARGET	1
+#define VND_CMD_RTYPE_INITIATOR	2
+	uint8_t remote_state;
+	uint8_t auth_state;
+	uint8_t version;
+	uint8_t pad[VND_CMD_PAD_SIZE];
+	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __attribute__ ((packed));
+
+/* AUTH States */
+#define VND_CMD_AUTH_STATE_UNDEF		0
+#define VND_CMD_AUTH_STATE_SESSION_SHUTDOWN	1
+#define VND_CMD_AUTH_STATE_NEEDED		2
+#define VND_CMD_AUTH_STATE_ELS_RCVD		3
+#define VND_CMD_AUTH_STATE_SAUPDATE_COMPL	4
+
+struct app_pinfo_reply {
+	uint8_t port_count;
+	uint8_t version;
+	uint8_t pad[VND_CMD_PAD_SIZE];
+	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+	struct app_pinfo ports[0];
+} __attribute__ ((packed));
+
+struct app_sinfo_req {
+	struct app_id app_info;
+	uint8_t num_ports;
+	uint8_t version;
+	uint8_t pad[VND_CMD_PAD_SIZE];
+	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __attribute__ ((packed));
+
+struct app_sinfo {
+	uint8_t remote_wwpn[WWN_SIZE];
+	int64_t rekey_count;
+	uint8_t rekey_mode;
+	int64_t tx_bytes;
+	int64_t rx_bytes;
+} __attribute__ ((packed));
+
+struct app_stats_reply {
+	uint8_t elem_count;
+	uint8_t version;
+	uint8_t pad[VND_CMD_PAD_SIZE];
+	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+	struct app_sinfo elem[0];
+} __attribute__ ((packed));
+
+struct qla_sa_update_frame {
+	struct app_id app_info;
+	uint16_t flags;
+#define SAU_FLG_INV		0x01	/* delete key */
+#define SAU_FLG_TX		0x02	/* 1=tx, 0=rx */
+#define SAU_FLG_FORCE_DELETE	0x08
+#define SAU_FLG_GMAC_MODE	0x20	/* GMAC mode is cleartext for the IO (i.e. NULL encryption) */
+#define SAU_FLG_KEY128		0x40
+#define SAU_FLG_KEY256		0x80
+	uint16_t fast_sa_index:10,
+		 reserved:6;
+	uint32_t salt;
+	uint32_t spi;
+	uint8_t sa_key[32];
+	uint8_t node_name[WWN_SIZE];
+	uint8_t port_name[WWN_SIZE];
+	port_id_t port_id;
+	uint8_t version;
+	uint8_t pad[VND_CMD_PAD_SIZE];
+	uint8_t reserved2[VND_CMD_APP_RESERVED_SIZE];
+} __attribute__ ((packed));
+
+#define QL_VND_SC_UNDEF		0
+#define QL_VND_SC_SA_UPDATE	1
+#define QL_VND_SC_APP_START	2
+#define QL_VND_SC_APP_STOP	3
+#define QL_VND_SC_AUTH_OK	4
+#define QL_VND_SC_AUTH_FAIL	5
+#define QL_VND_SC_REKEY_CONFIG	6
+#define QL_VND_SC_GET_FCINFO	7
+#define QL_VND_SC_GET_STATS	8
+#define QL_VND_SC_AEN_COMPLETE	9
+#define QL_VND_SC_READ_DBELL	10
+
+/*
+ * bsg caller to provide empty buffer for doorbell events.
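+ *
+ * The authentication application polls for events with the
+ * QL_VND_SC_READ_DBELL vendor sub-command; completed events are returned
+ * in the din buffer as struct edif_app_dbell records.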
+ *
+ * sg_io_v4.din_xferp  = empty buffer for door bell events
+ * sg_io_v4.dout_xferp = struct edif_read_dbell *buf
+ */
+struct edif_read_dbell {
+	struct app_id app_info;
+	uint8_t version;
+	uint8_t pad[VND_CMD_PAD_SIZE];
+	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+};
+
+/* Application interface data structure for rtn data */
+#define EXT_DEF_EVENT_DATA_SIZE	64
+struct edif_app_dbell {
+	uint32_t event_code;
+	uint32_t event_data_size;
+	union {
+		port_id_t port_id;
+		uint8_t event_data[EXT_DEF_EVENT_DATA_SIZE];
+	};
+} __attribute__ ((packed));
+
+struct edif_sa_update_aen {
+	port_id_t port_id;
+	uint32_t key_type;	/* Tx (1) or Rx (2) */
+	uint32_t status;	/* 0 success, 1 failed, 2 timeout, 3 error */
+	uint8_t version;
+	uint8_t pad[VND_CMD_PAD_SIZE];
+	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __attribute__ ((packed));
+
+#define QL_VND_SA_STAT_SUCCESS	0
+#define QL_VND_SA_STAT_FAILED	1
+#define QL_VND_SA_STAT_TIMEOUT	2
+#define QL_VND_SA_STAT_ERROR	3
+
+#define QL_VND_RX_SA_KEY	1
+#define QL_VND_TX_SA_KEY	2
+
+/* App defines for plogi auth'd ok and plogi auth bad requests */
+struct auth_complete_cmd {
+	struct app_id app_info;
+#define PL_TYPE_WWPN	1
+#define PL_TYPE_DID	2
+	uint32_t type;
+	union {
+		uint8_t wwpn[WWN_SIZE];
+		port_id_t d_id;
+	} u;
+	uint8_t version;
+	uint8_t pad[VND_CMD_PAD_SIZE];
+	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __attribute__ ((packed));
+
+struct aen_complete_cmd {
+	struct app_id app_info;
+	port_id_t port_id;
+	uint32_t event_code;
+	uint8_t version;
+	uint8_t pad[VND_CMD_PAD_SIZE];
+	uint8_t reserved[VND_CMD_APP_RESERVED_SIZE];
+} __attribute__ ((packed));
+
+#define RX_DELAY_DELETE_TIMEOUT	20
+
+#define FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN	1
+
+#endif	/* QLA_EDIF_BSG_H */
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 12b689e328834..7b9da00677b81 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1,7 +1,8 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * QLogic Fibre Channel HBA Driver
  * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #ifndef __QLA_FW_H
 #define __QLA_FW_H
@@ -83,10 +84,16 @@ struct port_database_24xx {
 	uint8_t node_name[WWN_SIZE];
 
 	uint8_t reserved_3[4];
+#if 0
 	uint16_t prli_nvme_svc_param_word_0;	/* Bits 15-0 of word 0 */
 	uint16_t prli_nvme_svc_param_word_3;	/* Bits 15-0 of word 3 */
 	uint16_t nvme_first_burst_size;
 	uint8_t reserved_4[14];
+#else
+	uint8_t reserved_5[4];
+	uint8_t secure_login;
+	uint8_t reserved_4[14];
+#endif
 };
 
 /*
@@ -133,28 +140,28 @@ struct vp_database_24xx {
 struct nvram_24xx {
 	/* NVRAM header. */
 	uint8_t id[4];
-	__le16 nvram_version;
+	uint16_t nvram_version;
 	uint16_t reserved_0;
 
 	/* Firmware Initialization Control Block. */
-	__le16 version;
+	uint16_t version;
 	uint16_t reserved_1;
-	__le16 frame_payload_size;
-	__le16 execution_throttle;
-	__le16 exchange_count;
-	__le16 hard_address;
+	__le16 frame_payload_size;
+	uint16_t execution_throttle;
+	uint16_t exchange_count;
+	uint16_t hard_address;
 
 	uint8_t port_name[WWN_SIZE];
 	uint8_t node_name[WWN_SIZE];
 
-	__le16 login_retry_count;
-	__le16 link_down_on_nos;
-	__le16 interrupt_delay_timer;
-	__le16 login_timeout;
+	uint16_t login_retry_count;
+	uint16_t link_down_on_nos;
+	uint16_t interrupt_delay_timer;
+	uint16_t login_timeout;
 
-	__le32 firmware_options_1;
-	__le32 firmware_options_2;
-	__le32 firmware_options_3;
+	uint32_t firmware_options_1;
+	uint32_t firmware_options_2;
+	uint32_t firmware_options_3;
 
 	/* Offset 56. */
@@ -177,7 +184,7 @@ struct nvram_24xx {
 	 * BIT 11-13 = Output Emphasis 4G
 	 * BIT 14-15 = Reserved
 	 */
-	__le16 seriallink_options[4];
+	uint16_t seriallink_options[4];
 
 	uint16_t reserved_2[16];
@@ -217,25 +224,25 @@ struct nvram_24xx {
 	 *
 	 * BIT 16-31 =
 	 */
-	__le32 host_p;
+	uint32_t host_p;
 
 	uint8_t alternate_port_name[WWN_SIZE];
 	uint8_t alternate_node_name[WWN_SIZE];
 
 	uint8_t boot_port_name[WWN_SIZE];
-	__le16 boot_lun_number;
+	uint16_t boot_lun_number;
 	uint16_t reserved_8;
 
 	uint8_t alt1_boot_port_name[WWN_SIZE];
-	__le16 alt1_boot_lun_number;
+	uint16_t alt1_boot_lun_number;
 	uint16_t reserved_9;
 
 	uint8_t alt2_boot_port_name[WWN_SIZE];
-	__le16 alt2_boot_lun_number;
+	uint16_t alt2_boot_lun_number;
 	uint16_t reserved_10;
 
 	uint8_t alt3_boot_port_name[WWN_SIZE];
-	__le16 alt3_boot_lun_number;
+	uint16_t alt3_boot_lun_number;
 	uint16_t reserved_11;
 
 	/*
@@ -248,23 +255,23 @@ struct nvram_24xx {
 	 * BIT 6 = Reserved
 	 * BIT 7-31 =
 	 */
-	__le32 efi_parameters;
+	uint32_t efi_parameters;
 
 	uint8_t reset_delay;
 	uint8_t reserved_12;
 	uint16_t reserved_13;
 
-	__le16 boot_id_number;
+	uint16_t boot_id_number;
 	uint16_t reserved_14;
 
-	__le16 max_luns_per_target;
+	uint16_t max_luns_per_target;
 	uint16_t reserved_15;
 
-	__le16 port_down_retry_count;
-	__le16 link_down_timeout;
+	uint16_t port_down_retry_count;
+	uint16_t link_down_timeout;
 
 	/* FCode parameters. */
-	__le16 fcode_parameter;
+	uint16_t fcode_parameter;
 
 	uint16_t reserved_16[3];
@@ -274,13 +281,13 @@ struct nvram_24xx {
 	uint8_t prev_drv_ver_minor;
 	uint8_t prev_drv_ver_subminor;
 
-	__le16 prev_bios_ver_major;
-	__le16 prev_bios_ver_minor;
+	uint16_t prev_bios_ver_major;
+	uint16_t prev_bios_ver_minor;
 
-	__le16 prev_efi_ver_major;
-	__le16 prev_efi_ver_minor;
+	uint16_t prev_efi_ver_major;
+	uint16_t prev_efi_ver_minor;
 
-	__le16 prev_fw_ver_major;
+	uint16_t prev_fw_ver_major;
 	uint8_t prev_fw_ver_minor;
 	uint8_t prev_fw_ver_subminor;
@@ -308,7 +315,7 @@ struct nvram_24xx {
 	uint16_t subsystem_vendor_id;
 	uint16_t subsystem_device_id;
 
-	__le32 checksum;
+	uint32_t checksum;
 };
 
 /*
@@ -317,46 +324,46 @@ struct nvram_24xx {
 */
 #define	ICB_VERSION 1
 struct init_cb_24xx {
-	__le16 version;
+	uint16_t version;
 	uint16_t reserved_1;
 
-	__le16 frame_payload_size;
-	__le16 execution_throttle;
-	__le16 exchange_count;
+	uint16_t frame_payload_size;
+	uint16_t execution_throttle;
+	uint16_t exchange_count;
 
-	__le16 hard_address;
+	uint16_t hard_address;
 
 	uint8_t port_name[WWN_SIZE];		/* Big endian. */
 	uint8_t node_name[WWN_SIZE];		/* Big endian. */
-	__le16 response_q_inpointer;
-	__le16 request_q_outpointer;
+	uint16_t response_q_inpointer;
+	uint16_t request_q_outpointer;
 
-	__le16 login_retry_count;
+	uint16_t login_retry_count;
 
-	__le16 prio_request_q_outpointer;
+	uint16_t prio_request_q_outpointer;
 
-	__le16 response_q_length;
-	__le16 request_q_length;
+	uint16_t response_q_length;
+	uint16_t request_q_length;
 
-	__le16 link_down_on_nos;	/* Milliseconds. */
+	uint16_t link_down_on_nos;	/* Milliseconds. */
 
-	__le16 prio_request_q_length;
+	uint16_t prio_request_q_length;
 
 	__le64 request_q_address __packed;
 	__le64 response_q_address __packed;
 	__le64 prio_request_q_address __packed;
 
-	__le16 msix;
-	__le16 msix_atio;
+	uint16_t msix;
+	uint16_t msix_atio;
 	uint8_t reserved_2[4];
 
-	__le16 atio_q_inpointer;
-	__le16 atio_q_length;
-	__le64 atio_q_address __packed;
+	uint16_t atio_q_inpointer;
+	uint16_t atio_q_length;
+	__le64 atio_q_address __packed;
 
-	__le16 interrupt_delay_timer;	/* 100us increments. */
-	__le16 login_timeout;
+	uint16_t interrupt_delay_timer;	/* 100us increments. */
+	uint16_t login_timeout;
 
 	/*
 	 * BIT 0  = Enable Hard Loop Id
@@ -377,7 +384,7 @@ struct init_cb_24xx {
 	 * BIT 14 = Node Name Option
	 * BIT 15-31 = Reserved
	 */
-	__le32 firmware_options_1;
+	uint32_t firmware_options_1;
 
 	/*
	 * BIT 0  = Operation Mode bit 0
@@ -398,7 +405,7 @@ struct init_cb_24xx {
	 * BIT 14 = Enable Target PRLI Control
	 * BIT 15-31 = Reserved
	 */
-	__le32 firmware_options_2;
+	uint32_t firmware_options_2;
 
 	/*
	 * BIT 0  = Reserved
@@ -424,9 +431,9 @@ struct init_cb_24xx {
	 * BIT 30 = Enable request queue 0 out index shadowing
	 * BIT 31 = Reserved
	 */
-	__le32 firmware_options_3;
-	__le16 qos;
-	__le16 rid;
+	uint32_t firmware_options_3;
+	uint16_t qos;
+	uint16_t rid;
 	uint8_t reserved_3[20];
 };
 
@@ -442,27 +449,27 @@ struct cmd_bidir {
 
 	uint32_t handle;		/* System handle. */
 
-	__le16 nport_handle;		/* N_PORT handle. */
+	uint16_t nport_handle;		/* N_PORT handle. */
 
-	__le16 timeout;			/* Command timeout. */
+	uint16_t timeout;		/* Command timeout. */
 
-	__le16 wr_dseg_count;		/* Write Data segment count. */
-	__le16 rd_dseg_count;		/* Read Data segment count. */
+	uint16_t wr_dseg_count;		/* Write Data segment count. */
+	uint16_t rd_dseg_count;		/* Read Data segment count. */
 
 	struct scsi_lun lun;		/* FCP LUN (BE). */
 
-	__le16 control_flags;		/* Control flags. */
+	uint16_t control_flags;		/* Control flags. */
 #define BD_WRAP_BACK			BIT_3
 #define BD_READ_DATA			BIT_1
 #define BD_WRITE_DATA			BIT_0
 
-	__le16 fcp_cmnd_dseg_len;	/* Data segment length. */
+	uint16_t fcp_cmnd_dseg_len;	/* Data segment length. */
 	__le64 fcp_cmnd_dseg_address __packed;/* Data segment address. */
 
 	uint16_t reserved[2];		/* Reserved */
 
-	__le32 rd_byte_count;		/* Total Byte count Read. */
-	__le32 wr_byte_count;		/* Total Byte count write. */
+	uint32_t rd_byte_count;		/* Total Byte count Read. */
+	uint32_t wr_byte_count;		/* Total Byte count write. */
 
 	uint8_t port_id[3];		/* PortID of destination port. */
 	uint8_t vp_index;
@@ -479,28 +486,31 @@ struct cmd_type_6 {
 
 	uint32_t handle;		/* System handle. */
 
-	__le16 nport_handle;		/* N_PORT handle. */
-	__le16 timeout;			/* Command timeout. */
+	uint16_t nport_handle;		/* N_PORT handle. */
+	uint16_t timeout;		/* Command timeout. */
 
-	__le16 dseg_count;		/* Data segment count. */
+	uint16_t dseg_count;		/* Data segment count. */
 
-	__le16 fcp_rsp_dsd_len;		/* FCP_RSP DSD length. */
+	uint16_t fcp_rsp_dsd_len;	/* FCP_RSP DSD length. */
 
 	struct scsi_lun lun;		/* FCP LUN (BE). */
 
-	__le16 control_flags;		/* Control flags. */
+	uint16_t control_flags;		/* Control flags. */
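+/*
+ * CF_NEW_SA and CF_EN_EDIF are new with EDIF support:
+ * qla28xx_start_scsi_edif() sets CF_EN_EDIF (and clears CF_NEW_SA) on
+ * every Type 6 command once security associations are in place.
+ */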
+#define CF_NEW_SA			BIT_12
+#define CF_EN_EDIF			BIT_9
+#define CF_ADDITIONAL_PARAM_BLK		BIT_8
 #define CF_DIF_SEG_DESCR_ENABLE		BIT_3
 #define CF_DATA_SEG_DESCR_ENABLE	BIT_2
 #define CF_READ_DATA			BIT_1
 #define CF_WRITE_DATA			BIT_0
 
-	__le16 fcp_cmnd_dseg_len;	/* Data segment length. */
+	uint16_t fcp_cmnd_dseg_len;	/* Data segment length. */
 	/* Data segment address. */
 	__le64 fcp_cmnd_dseg_address __packed;
 	/* Data segment address. */
 	__le64 fcp_rsp_dseg_address __packed;
 
-	__le32 byte_count;		/* Total byte count. */
+	uint32_t byte_count;		/* Total byte count. */
 
 	uint8_t port_id[3];		/* PortID of destination port. */
 	uint8_t vp_index;
@@ -517,16 +527,16 @@ struct cmd_type_7 {
 
 	uint32_t handle;		/* System handle. */
 
-	__le16 nport_handle;		/* N_PORT handle. */
-	__le16 timeout;			/* Command timeout. */
+	uint16_t nport_handle;		/* N_PORT handle. */
+	uint16_t timeout;		/* Command timeout. */
 #define FW_MAX_TIMEOUT		0x1999
 
-	__le16 dseg_count;		/* Data segment count. */
+	uint16_t dseg_count;		/* Data segment count. */
 	uint16_t reserved_1;
 
 	struct scsi_lun lun;		/* FCP LUN (BE). */
 
-	__le16 task_mgmt_flags;		/* Task management flags. */
+	uint16_t task_mgmt_flags;	/* Task management flags. */
 #define TMF_CLEAR_ACA		BIT_14
 #define TMF_TARGET_RESET	BIT_13
 #define TMF_LUN_RESET		BIT_12
@@ -546,7 +556,7 @@ struct cmd_type_7 {
 	uint8_t crn;
 
 	uint8_t fcp_cdb[MAX_CMDSZ];	/* SCSI command words. */
-	__le32 byte_count;		/* Total byte count. */
+	uint32_t byte_count;		/* Total byte count. */
 
 	uint8_t port_id[3];		/* PortID of destination port. */
 	uint8_t vp_index;
@@ -564,29 +574,29 @@ struct cmd_type_crc_2 {
 
 	uint32_t handle;		/* System handle. */
 
-	__le16 nport_handle;		/* N_PORT handle. */
-	__le16 timeout;			/* Command timeout. */
+	uint16_t nport_handle;		/* N_PORT handle. */
+	uint16_t timeout;		/* Command timeout. */
 
-	__le16 dseg_count;		/* Data segment count. */
+	uint16_t dseg_count;		/* Data segment count. */
 
-	__le16 fcp_rsp_dseg_len;	/* FCP_RSP DSD length. */
+	uint16_t fcp_rsp_dseg_len;	/* FCP_RSP DSD length. */
 
 	struct scsi_lun lun;		/* FCP LUN (BE). */
 
-	__le16 control_flags;		/* Control flags. */
+	uint16_t control_flags;		/* Control flags. */
 
-	__le16 fcp_cmnd_dseg_len;	/* Data segment length. */
+	uint16_t fcp_cmnd_dseg_len;	/* Data segment length. */
 	__le64 fcp_cmnd_dseg_address __packed;
 					/* Data segment address. */
 	__le64 fcp_rsp_dseg_address __packed;
 
-	__le32 byte_count;		/* Total byte count. */
+	uint32_t byte_count;		/* Total byte count. */
 
 	uint8_t port_id[3];		/* PortID of destination port. */
 	uint8_t vp_index;
 
 	__le64 crc_context_address __packed;	/* Data segment address. */
-	__le16 crc_context_len;		/* Data segment length. */
+	uint16_t crc_context_len;	/* Data segment length. */
 	uint16_t reserved_1;		/* MUST be set to 0. */
 };
 
@@ -603,32 +613,33 @@ struct sts_entry_24xx {
 
 	uint32_t handle;		/* System handle. */
 
-	__le16 comp_status;		/* Completion status. */
-	__le16 ox_id;			/* OX_ID used by the firmware. */
+	uint16_t comp_status;		/* Completion status. */
+	uint16_t ox_id;			/* OX_ID used by the firmware. */
 
-	__le32 residual_len;		/* FW calc residual transfer length. */
+	uint32_t residual_len;		/* FW calc residual transfer length. */
 
 	union {
-		__le16 reserved_1;
-		__le16 nvme_rsp_pyld_len;
+		uint16_t reserved_1;
+		uint16_t nvme_rsp_pyld_len;
+		uint16_t edif_sa_index;	/* edif sa_index used for initiator read data */
 	};
 
-	__le16 state_flags;		/* State flags. */
+	uint16_t state_flags;		/* State flags. */
 #define SF_TRANSFERRED_DATA	BIT_11
 #define SF_NVME_ERSP		BIT_6
 #define SF_FCP_RSP_DMA		BIT_0
 
-	__le16 status_qualifier;
-	__le16 scsi_status;		/* SCSI status. */
+	uint16_t retry_delay;
+	uint16_t scsi_status;		/* SCSI status. */
 #define SS_CONFIRMATION_REQ		BIT_12
 
-	__le32 rsp_residual_count;	/* FCP RSP residual count. */
+	uint32_t rsp_residual_count;	/* FCP RSP residual count. */
 
-	__le32 sense_len;		/* FCP SENSE length. */
+	uint32_t sense_len;		/* FCP SENSE length. */
 
 	union {
 		struct {
-			__le32 rsp_data_len;	/* FCP response data length */
+			uint32_t rsp_data_len;	/* FCP response data length */
 			uint8_t data[28];	/* FCP rsp/sense information */
 		};
 		struct nvme_fc_ersp_iu nvme_ersp;
@@ -671,7 +682,7 @@ struct mrk_entry_24xx {
 
 	uint32_t handle;		/* System handle. */
 
-	__le16 nport_handle;		/* N_PORT handle. */
+	uint16_t nport_handle;		/* N_PORT handle. */
 
 	uint8_t modifier;		/* Modifier (7-0). */
 #define MK_SYNC_ID_LUN	0		/* Synchronize ID/LUN */
@@ -700,30 +711,28 @@ struct ct_entry_24xx {
 
 	uint32_t handle;		/* System handle. */
 
-	__le16 comp_status;		/* Completion status. */
+	uint16_t comp_status;		/* Completion status. */
 
-	__le16 nport_handle;		/* N_PORT handle. */
+	uint16_t nport_handle;		/* N_PORT handle. */
 
-	__le16 cmd_dsd_count;
+	uint16_t cmd_dsd_count;
 
 	uint8_t vp_index;
 	uint8_t reserved_1;
 
-	__le16 timeout;			/* Command timeout. */
+	uint16_t timeout;		/* Command timeout. */
 	uint16_t reserved_2;
 
-	__le16 rsp_dsd_count;
+	uint16_t rsp_dsd_count;
 
 	uint8_t reserved_3[10];
 
-	__le32 rsp_byte_count;
-	__le32 cmd_byte_count;
+	uint32_t rsp_byte_count;
+	uint32_t cmd_byte_count;
 
 	struct dsd64 dsd[2];
 };
 
-#define PURX_ELS_HEADER_SIZE 0x18
-
 /*
  * ISP queue - PUREX IOCB entry structure definition
 */
@@ -734,17 +743,17 @@ struct purex_entry_24xx {
 	uint8_t sys_define;		/* System defined. */
 	uint8_t entry_status;		/* Entry Status. */
 
-	__le16 reserved1;
+	uint16_t reserved1;
 	uint8_t vp_idx;
 	uint8_t reserved2;
 
-	__le16 status_flags;
-	__le16 nport_handle;
+	uint16_t status_flags;
+	uint16_t nport_handle;
 
-	__le16 frame_size;
-	__le16 trunc_frame_size;
+	uint16_t frame_size;
+	uint16_t trunc_frame_size;
 
-	__le32 rx_xchg_addr;
+	uint32_t rx_xchg_addr;
 
 	uint8_t d_id[3];
 	uint8_t r_ctl;
@@ -755,17 +764,20 @@ struct purex_entry_24xx {
 	uint8_t f_ctl[3];
 	uint8_t type;
 
-	__le16 seq_cnt;
+	uint16_t seq_cnt;
 	uint8_t df_ctl;
 	uint8_t seq_id;
 
-	__le16 rx_id;
-	__le16 ox_id;
-	__le32 param;
+	uint16_t rx_id;
+	uint16_t ox_id;
+	uint32_t param;
 
 	uint8_t els_frame_payload[20];
 };
 
+#define PURX_ELS_HEADER_SIZE 0x18
+#define FPIN_ELS_DESCRIPTOR_LIST_OFFSET 8
+
 /*
 * ISP queue - ELS Pass-Through entry structure definition.
 */
@@ -778,18 +790,18 @@ struct els_entry_24xx {
 
 	uint32_t handle;		/* System handle. */
 
-	__le16 comp_status;		/* response only */
-	__le16 nport_handle;
+	uint16_t comp_status;		/* response only */
+	uint16_t nport_handle;
 
-	__le16 tx_dsd_count;
+	uint16_t tx_dsd_count;
 
 	uint8_t vp_index;
 	uint8_t sof_type;
 #define EST_SOFI3		(1 << 4)
 #define EST_SOFI2		(3 << 4)
 
-	__le32 rx_xchg_address;		/* Receive exchange address. */
-	__le16 rx_dsd_count;
+	uint32_t rx_xchg_address;	/* Receive exchange address. */
+	uint16_t rx_dsd_count;
 
 	uint8_t opcode;
 	uint8_t reserved_2;
 
 	uint8_t d_id[3];
 	uint8_t s_id[3];
 
-	__le16 control_flags;		/* Control flags. */
+	uint16_t control_flags;		/* Control flags. */
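+/*
+ * ECF_SEC_LOGIN below is new with EDIF support; it marks an ELS
+ * passthrough as part of a secure (authentication) login exchange.
+ */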
 #define ECF_PAYLOAD_DESCR_MASK	(BIT_15|BIT_14|BIT_13)
 #define EPD_ELS_COMMAND		(0 << 13)
 #define EPD_ELS_ACC		(1 << 13)
 #define EPD_ELS_RJT		(2 << 13)
-#define EPD_RX_XCHG		(3 << 13)
+#define EPD_RX_XCHG		(3 << 13)	/* terminate exchange */
 #define ECF_CLR_PASSTHRU_PEND	BIT_12
 #define ECF_INCL_FRAME_HDR	BIT_11
+#define ECF_SEC_LOGIN		BIT_3
+
 	union {
 		struct {
 			__le32 rx_byte_count;
 			__le32 tx_byte_count;
-
 			__le64 tx_address __packed;	/* DSD 0 address. */
 			__le32 tx_len;			/* DSD 0 length. */
-
 			__le64 rx_address __packed;	/* DSD 1 address. */
 			__le32 rx_len;			/* DSD 1 length. */
 		};
 		struct {
-			__le32 total_byte_count;
-			__le32 error_subcode_1;
-			__le32 error_subcode_2;
-			__le32 error_subcode_3;
-		};
-	};
+			uint32_t total_byte_count;
+			uint32_t error_subcode_1;
+			uint32_t error_subcode_2;
+			uint32_t error_subcode_3;
+		};
+	};
 };
 
 struct els_sts_entry_24xx {
@@ -832,19 +844,19 @@
 	uint8_t sys_define;		/* System Defined. */
 	uint8_t entry_status;		/* Entry Status. */
 
-	__le32 handle;			/* System handle. */
+	uint32_t handle;		/* System handle. */
 
-	__le16 comp_status;
+	uint16_t comp_status;
 
-	__le16 nport_handle;		/* N_PORT handle. */
+	uint16_t nport_handle;		/* N_PORT handle. */
 
-	__le16 reserved_1;
+	uint16_t reserved_1;
 
 	uint8_t vp_index;
 	uint8_t sof_type;
 
-	__le32 rx_xchg_address;		/* Receive exchange address. */
-	__le16 reserved_2;
+	uint32_t rx_xchg_address;	/* Receive exchange address. */
+	uint16_t reserved_2;
 
 	uint8_t opcode;
 	uint8_t reserved_3;
 
 	uint8_t d_id[3];
 	uint8_t s_id[3];
 
-	__le16 control_flags;		/* Control flags. */
-	__le32 total_byte_count;
-	__le32 error_subcode_1;
-	__le32 error_subcode_2;
-	__le32 error_subcode_3;
+	uint16_t control_flags;		/* Control flags. */
+	uint32_t total_byte_count;
+	uint32_t error_subcode_1;
+	uint32_t error_subcode_2;
+	uint32_t error_subcode_3;
 
-	__le32 reserved_4[4];
+	uint32_t reserved_4[4];
 };
 /*
 * ISP queue - Mailbox Command entry structure definition.
@@ -885,17 +897,18 @@ struct logio_entry_24xx {
 
 	uint32_t handle;		/* System handle. */
 
-	__le16 comp_status;		/* Completion status. */
+	uint16_t comp_status;		/* Completion status. */
 #define CS_LOGIO_ERROR		0x31	/* Login/Logout IOCB error. */
 
-	__le16 nport_handle;		/* N_PORT handle. */
+	uint16_t nport_handle;		/* N_PORT handle. */
 
-	__le16 control_flags;		/* Control flags. */
+	uint16_t control_flags;		/* Control flags. */
 					/* Modifiers. */
 #define LCF_INCLUDE_SNS		BIT_10	/* Include SNS (FFFFFC) during LOGO. */
 #define LCF_FCP2_OVERRIDE	BIT_9	/* Set/Reset word 3 of PRLI. */
 #define LCF_CLASS_2		BIT_8	/* Enable class 2 during PLOGI. */
 #define LCF_FREE_NPORT		BIT_7	/* Release NPORT handle after LOGO. */
+#define LCF_COMMON_FEAT		BIT_7	/* PLOGI - Set Common Features Field */
 #define LCF_EXPL_LOGO		BIT_6	/* Perform an explicit LOGO. */
 #define LCF_NVME_PRLI		BIT_6	/* Perform NVME FC4 PRLI */
 #define LCF_SKIP_PRLI		BIT_5	/* Skip PRLI after PLOGI. */
@@ -919,7 +932,9 @@ struct logio_entry_24xx {
 	uint8_t rsp_size;		/* Response size in 32bit words. */
 
-	__le32 io_parameter[11];	/* General I/O parameters. */
+	uint32_t io_parameter[11];	/* General I/O parameters. */
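+/*
+ * Common-features bits carried in io_parameter[0] when LCF_COMMON_FEAT is
+ * set in a PLOGI IOCB; LIO_COMM_FEAT_FCSP advertises FC-SP (EDIF)
+ * capability to the remote port.
+ */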
+#define LIO_COMM_FEAT_FCSP	BIT_21
+#define LIO_COMM_FEAT_CIO	BIT_31
 #define LSC_SCODE_NOLINK	0x01
 #define LSC_SCODE_NOIOCB	0x02
 #define LSC_SCODE_NOXCB		0x03
@@ -937,6 +952,8 @@ struct logio_entry_24xx {
 #define LSC_SCODE_LOGGED_IN	0x1D
 #define LSC_SCODE_NOFLOGI_ACC	0x1F
 };
+#define PRLO_TYPE_CODE_EXT	0x10
+#define PRLO_CMD_LEN		20
 
 #define TSK_MGMT_IOCB_TYPE	0x14
 struct tsk_mgmt_entry {
@@ -947,17 +964,17 @@ struct tsk_mgmt_entry {
 
 	uint32_t handle;		/* System handle. */
 
-	__le16 nport_handle;		/* N_PORT handle. */
+	uint16_t nport_handle;		/* N_PORT handle. */
 
 	uint16_t reserved_1;
 
-	__le16 delay;			/* Activity delay in seconds. */
+	uint16_t delay;			/* Activity delay in seconds. */
 
-	__le16 timeout;			/* Command timeout. */
+	uint16_t timeout;		/* Command timeout. */
 
 	struct scsi_lun lun;		/* FCP LUN (BE). */
 
-	__le32 control_flags;		/* Control Flags. */
+	uint32_t control_flags;		/* Control Flags. */
 #define TCF_NOTMCMD_TO_TARGET	BIT_31
 #define TCF_LUN_RESET		BIT_4
 #define TCF_ABORT_TASK_SET	BIT_3
@@ -982,21 +999,39 @@ struct abort_entry_24xx {
 
 	uint32_t handle;		/* System handle. */
 
-	__le16 nport_handle;		/* N_PORT handle. */
-					/* or Completion status. */
+	union {
+		__le16 nport_handle;	/* N_PORT handle. */
+		__le16 comp_status;	/* Completion status. */
+	};
 
-	__le16 options;			/* Options. */
+	uint16_t options;		/* Options. */
 #define AOF_NO_ABTS		BIT_0	/* Do not send any ABTS. */
+#define AOF_NO_RRQ		BIT_1	/* Do not send RRQ. */
+#define AOF_ABTS_TIMEOUT	BIT_2	/* Disable logout on ABTS timeout. */
+#define AOF_ABTS_RTY_CNT	BIT_3	/* Use driver specified retry count. */
+#define AOF_RSP_TIMEOUT		BIT_4	/* Use specified response timeout. */
 
 	uint32_t handle_to_abort;	/* System handle to abort. */
 
-	__le16 req_que_no;
+	uint16_t req_que_no;
 	uint8_t reserved_1[30];
 
 	uint8_t port_id[3];		/* PortID of destination port. */
 	uint8_t vp_index;
-
-	uint8_t reserved_2[12];
+	uint8_t reserved_2[4];
+	union {
+		struct {
+			__le16 abts_rty_cnt;
+			__le16 rsp_timeout;
+		} drv;
+		struct {
+			uint8_t ba_rjt_vendorUnique;
+			uint8_t ba_rjt_reasonCodeExpl;
+			uint8_t ba_rjt_reasonCode;
+			uint8_t reserved_3;
+		} fw;
+	};
+	uint8_t reserved_4[4];
 };
 
 #define ABTS_RCV_TYPE		0x54
@@ -1007,16 +1042,16 @@ struct abts_entry_24xx {
 	uint8_t handle_count;
 	uint8_t entry_status;
 
-	__le32 handle;			/* type 0x55 only */
+	uint32_t handle;		/* type 0x55 only */
 
-	__le16 comp_status;		/* type 0x55 only */
-	__le16 nport_handle;		/* type 0x54 only */
+	uint16_t comp_status;		/* type 0x55 only */
+	uint16_t nport_handle;		/* type 0x54 only */
 
-	__le16 control_flags;		/* type 0x55 only */
+	uint16_t control_flags;		/* type 0x55 only */
 	uint8_t vp_idx;
 	uint8_t sof_type;		/* sof_type is upper nibble */
 
-	__le32 rx_xch_addr;
+	uint32_t rx_xch_addr;
 
 	uint8_t d_id[3];
 	uint8_t r_ctl;
@@ -1027,30 +1062,30 @@ struct abts_entry_24xx {
 	uint8_t f_ctl[3];
 	uint8_t type;
 
-	__le16 seq_cnt;
+	uint16_t seq_cnt;
 	uint8_t df_ctl;
 	uint8_t seq_id;
 
-	__le16 rx_id;
-	__le16 ox_id;
+	uint16_t rx_id;
+	uint16_t ox_id;
 
-	__le32 param;
+	uint32_t param;
 
 	union {
 		struct {
-			__le32 subcode3;
-			__le32 rsvd;
-			__le32 subcode1;
-			__le32 subcode2;
+			uint32_t subcode3;
+			uint32_t rsvd;
+			uint32_t subcode1;
+			uint32_t subcode2;
 		} error;
 		struct {
-			__le16 rsrvd1;
+			uint16_t rsrvd1;
 			uint8_t last_seq_id;
 			uint8_t seq_id_valid;
-			__le16 aborted_rx_id;
-			__le16 aborted_ox_id;
-			__le16 high_seq_cnt;
-			__le16 low_seq_cnt;
+			uint16_t aborted_rx_id;
+			uint16_t aborted_ox_id;
+			uint16_t high_seq_cnt;
+			uint16_t low_seq_cnt;
 		} ba_acc;
 		struct {
 			uint8_t vendor_unique;
@@ -1059,7 +1094,7 @@ struct abts_entry_24xx {
 		} ba_rjt;
 	} payload;
 
-	__le32 rx_xch_addr_to_abort;
+	uint32_t rx_xch_addr_to_abort;
 } __packed;
 
 /* ABTS payload explanation values */
@@ -1088,7 +1123,7 @@ struct abts_entry_24xx {
 * ISP I/O Register Set structure definitions.
 */
 struct device_reg_24xx {
-	__le32 flash_addr;		/* Flash/NVRAM BIOS address. */
+	uint32_t flash_addr;		/* Flash/NVRAM BIOS address. */
 #define FARX_DATA_FLAG	BIT_31
 #define FARX_ACCESS_FLASH_CONF	0x7FFD0000
 #define FARX_ACCESS_FLASH_DATA	0x7FF00000
@@ -1139,9 +1174,9 @@ struct device_reg_24xx {
 #define HW_EVENT_NVRAM_CHKSUM_ERR	0xF023
 #define HW_EVENT_FLASH_FW_ERR	0xF024
 
-	__le32 flash_data;		/* Flash/NVRAM BIOS data. */
+	uint32_t flash_data;		/* Flash/NVRAM BIOS data. */
 
-	__le32 ctrl_status;		/* Control/Status. */
+	uint32_t ctrl_status;		/* Control/Status. */
 #define CSRX_FLASH_ACCESS_ERROR	BIT_18	/* Flash/NVRAM Access Error. */
 #define CSRX_DMA_ACTIVE		BIT_17	/* DMA Active status. */
 #define CSRX_DMA_SHUTDOWN	BIT_16	/* DMA Shutdown control status. */
@@ -1167,35 +1202,35 @@ struct device_reg_24xx {
 #define CSRX_FLASH_ENABLE	BIT_1	/* Flash BIOS Read/Write enable. */
 #define CSRX_ISP_SOFT_RESET	BIT_0	/* ISP soft reset. */
 
-	__le32 ictrl;			/* Interrupt control. */
+	uint32_t ictrl;			/* Interrupt control. */
 #define ICRX_EN_RISC_INT	BIT_3	/* Enable RISC interrupts on PCI. */
 
-	__le32 istatus;			/* Interrupt status. */
+	uint32_t istatus;		/* Interrupt status. */
 #define ISRX_RISC_INT		BIT_3	/* RISC interrupt. */
 
-	__le32 unused_1[2];		/* Gap. */
+	uint32_t unused_1[2];		/* Gap. */
 
 	/* Request Queue. */
-	__le32 req_q_in;		/* In-Pointer. */
-	__le32 req_q_out;		/* Out-Pointer. */
+	uint32_t req_q_in;		/* In-Pointer. */
+	uint32_t req_q_out;		/* Out-Pointer. */
 
 	/* Response Queue. */
-	__le32 rsp_q_in;		/* In-Pointer. */
-	__le32 rsp_q_out;		/* Out-Pointer. */
*/ + uint32_t rsp_q_in; /* In-Pointer. */ + uint32_t rsp_q_out; /* Out-Pointer. */ /* Priority Request Queue. */ - __le32 preq_q_in; /* In-Pointer. */ - __le32 preq_q_out; /* Out-Pointer. */ + uint32_t preq_q_in; /* In-Pointer. */ + uint32_t preq_q_out; /* Out-Pointer. */ - __le32 unused_2[2]; /* Gap. */ + uint32_t unused_2[2]; /* Gap. */ /* ATIO Queue. */ - __le32 atio_q_in; /* In-Pointer. */ - __le32 atio_q_out; /* Out-Pointer. */ + uint32_t atio_q_in; /* In-Pointer. */ + uint32_t atio_q_out; /* Out-Pointer. */ - __le32 host_status; + uint32_t host_status; #define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */ #define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */ - __le32 hccr; /* Host command & control register. */ + uint32_t hccr; /* Host command & control register. */ /* HCCR statuses. */ #define HCCRX_HOST_INT BIT_6 /* Host to RISC interrupt bit. */ #define HCCRX_RISC_RESET BIT_5 /* RISC Reset mode bit. */ @@ -1217,7 +1252,7 @@ struct device_reg_24xx { /* Clear RISC to PCI interrupt. */ #define HCCRX_CLR_RISC_INT 0xA0000000 - __le32 gpiod; /* GPIO Data register. */ + uint32_t gpiod; /* GPIO Data register. */ /* LED update mask. */ #define GPDX_LED_UPDATE_MASK (BIT_20|BIT_19|BIT_18) @@ -1236,7 +1271,7 @@ struct device_reg_24xx { /* Data in/out. */ #define GPDX_DATA_INOUT (BIT_1|BIT_0) - __le32 gpioe; /* GPIO Enable register. */ + uint32_t gpioe; /* GPIO Enable register. */ /* Enable update mask. */ #define GPEX_ENABLE_UPDATE_MASK (BIT_17|BIT_16) /* Enable update mask. */ @@ -1244,56 +1279,56 @@ struct device_reg_24xx { /* Enable. */ #define GPEX_ENABLE (BIT_1|BIT_0) - __le32 iobase_addr; /* I/O Bus Base Address register. */ - - __le32 unused_3[10]; /* Gap. */ - - __le16 mailbox0; - __le16 mailbox1; - __le16 mailbox2; - __le16 mailbox3; - __le16 mailbox4; - __le16 mailbox5; - __le16 mailbox6; - __le16 mailbox7; - __le16 mailbox8; - __le16 mailbox9; - __le16 mailbox10; - __le16 mailbox11; - __le16 mailbox12; - __le16 mailbox13; - __le16 mailbox14; - __le16 mailbox15; - __le16 mailbox16; - __le16 mailbox17; - __le16 mailbox18; - __le16 mailbox19; - __le16 mailbox20; - __le16 mailbox21; - __le16 mailbox22; - __le16 mailbox23; - __le16 mailbox24; - __le16 mailbox25; - __le16 mailbox26; - __le16 mailbox27; - __le16 mailbox28; - __le16 mailbox29; - __le16 mailbox30; - __le16 mailbox31; - - __le32 iobase_window; - __le32 iobase_c4; - __le32 iobase_c8; - __le32 unused_4_1[6]; /* Gap. */ - __le32 iobase_q; - __le32 unused_5[2]; /* Gap. */ - __le32 iobase_select; - __le32 unused_6[2]; /* Gap. */ - __le32 iobase_sdata; + uint32_t iobase_addr; /* I/O Bus Base Address register. */ + + uint32_t unused_3[10]; /* Gap. */ + + uint16_t mailbox0; + uint16_t mailbox1; + uint16_t mailbox2; + uint16_t mailbox3; + uint16_t mailbox4; + uint16_t mailbox5; + uint16_t mailbox6; + uint16_t mailbox7; + uint16_t mailbox8; + uint16_t mailbox9; + uint16_t mailbox10; + uint16_t mailbox11; + uint16_t mailbox12; + uint16_t mailbox13; + uint16_t mailbox14; + uint16_t mailbox15; + uint16_t mailbox16; + uint16_t mailbox17; + uint16_t mailbox18; + uint16_t mailbox19; + uint16_t mailbox20; + uint16_t mailbox21; + uint16_t mailbox22; + uint16_t mailbox23; + uint16_t mailbox24; + uint16_t mailbox25; + uint16_t mailbox26; + uint16_t mailbox27; + uint16_t mailbox28; + uint16_t mailbox29; + uint16_t mailbox30; + uint16_t mailbox31; + + uint32_t iobase_window; + uint32_t iobase_c4; + uint32_t iobase_c8; + uint32_t unused_4_1[6]; /* Gap. */ + uint32_t iobase_q; + uint32_t unused_5[2]; /* Gap. 
*/
+	uint32_t iobase_select;
+	uint32_t unused_6[2];		/* Gap. */
+	uint32_t iobase_sdata;
 };
 
 /* RISC-RISC semaphore register PCI offset */
 #define RISC_REGISTER_BASE_OFFSET	0x7010
 #define RISC_REGISTER_WINDOW_OFFSET	0x6
 
 /* RISC-RISC semaphore/flag register (risc address 0x7016) */
 
@@ -1355,8 +1390,8 @@ struct mid_conf_entry_24xx {
 struct mid_init_cb_24xx {
 	struct init_cb_24xx init_cb;
 
-	__le16 count;
-	__le16 options;
+	uint16_t count;
+	uint16_t options;
 
 	struct mid_conf_entry_24xx entries[MAX_MULTI_ID_FABRIC];
 };
@@ -1390,27 +1425,27 @@ struct vp_ctrl_entry_24xx {
 
 	uint32_t handle;		/* System handle. */
 
-	__le16 vp_idx_failed;
+	uint16_t vp_idx_failed;
 
-	__le16 comp_status;		/* Completion status. */
+	uint16_t comp_status;		/* Completion status. */
 #define CS_VCE_IOCB_ERROR	0x01	/* Error processing IOCB */
 #define CS_VCE_ACQ_ID_ERROR	0x02	/* Error while acquireing ID. */
 #define CS_VCE_BUSY		0x05	/* Firmware not ready to accept cmd. */
 
-	__le16 command;
+	uint16_t command;
 #define VCE_COMMAND_ENABLE_VPS	0x00	/* Enable VPs. */
 #define VCE_COMMAND_DISABLE_VPS	0x08	/* Disable VPs. */
 #define VCE_COMMAND_DISABLE_VPS_REINIT	0x09	/* Disable VPs and reinit link. */
 #define VCE_COMMAND_DISABLE_VPS_LOGO	0x0a	/* Disable VPs and LOGO ports. */
 #define VCE_COMMAND_DISABLE_VPS_LOGO_ALL	0x0b	/* Disable VPs and LOGO ports. */
 
-	__le16 vp_count;
+	uint16_t vp_count;
 
 	uint8_t vp_idx_map[16];
-	__le16 flags;
-	__le16 id;
+	uint16_t flags;
+	uint16_t id;
 	uint16_t reserved_4;
-	__le16 hopct;
+	uint16_t hopct;
 	uint8_t reserved_5[24];
 };
@@ -1426,12 +1461,12 @@ struct vp_config_entry_24xx {
 
 	uint32_t handle;		/* System handle. */
 
-	__le16 flags;
+	uint16_t flags;
 #define CS_VF_BIND_VPORTS_TO_VF	BIT_0
 #define CS_VF_SET_QOS_OF_VPORTS	BIT_1
 #define CS_VF_SET_HOPS_OF_VPORTS	BIT_2
 
-	__le16 comp_status;		/* Completion status. */
+	uint16_t comp_status;		/* Completion status. */
 #define CS_VCT_STS_ERROR	0x01	/* Specified VPs were not disabled. */
 #define CS_VCT_CNT_ERROR	0x02	/* Invalid VP count. */
 #define CS_VCT_ERROR		0x03	/* Unknown error. */
@@ -1458,9 +1493,9 @@
 	uint16_t reserved_vp2;
 	uint8_t port_name_idx2[WWN_SIZE];
 	uint8_t node_name_idx2[WWN_SIZE];
-	__le16 id;
+	uint16_t id;
 	uint16_t reserved_4;
-	__le16 hopct;
+	uint16_t hopct;
 	uint8_t reserved_5[2];
 };
@@ -1487,7 +1522,7 @@ struct vp_rpt_id_entry_24xx {
 	uint8_t entry_count;		/* Entry count. */
 	uint8_t sys_define;		/* System defined. */
 	uint8_t entry_status;		/* Entry Status. */
-	__le32 resv1;
+	uint32_t resv1;
 	uint8_t vp_acquired;
 	uint8_t vp_setup;
 	uint8_t vp_idx;			/* Format 0=reserved */
@@ -1516,8 +1551,15 @@
 	uint8_t ls_rjt_vendor;
 	uint8_t ls_rjt_explanation;
 	uint8_t ls_rjt_reason;
-	uint8_t rsv3[5];
-
+	uint8_t rsv3;
+	union {
+		uint32_t rsv6;
+		struct {
+			uint16_t rsv8;
+			uint16_t flogi_acc_payload_size:9;
+			uint16_t rsv9:7;
+		};
+	};
 	uint8_t port_name[8];
 	uint8_t node_name[8];
 	uint16_t bbcr;
@@ -1551,15 +1593,15 @@ struct vf_evfp_entry_24xx {
 	uint8_t entry_status;		/* Entry Status. */
 
 	uint32_t handle;		/* System handle. */
 
-	__le16 comp_status;		/* Completion status. */
-	__le16 timeout;			/* timeout */
-	__le16 adim_tagging_mode;
+	uint16_t comp_status;		/* Completion status. */
+	uint16_t timeout;		/* timeout */
+	uint16_t adim_tagging_mode;
 
-	__le16 vfport_id;
+	uint16_t vfport_id;
 	uint32_t exch_addr;
 
-	__le16 nport_handle;		/* N_PORT handle. 
*/ + uint16_t control_flags; uint32_t io_parameter_0; uint32_t io_parameter_1; __le64 tx_address __packed; /* Data segment 0 address. */ @@ -1574,13 +1616,13 @@ struct vf_evfp_entry_24xx { struct qla_fdt_layout { uint8_t sig[4]; - __le16 version; - __le16 len; - __le16 checksum; + uint16_t version; + uint16_t len; + uint16_t checksum; uint8_t unused1[2]; uint8_t model[16]; - __le16 man_id; - __le16 id; + uint16_t man_id; + uint16_t id; uint8_t flags; uint8_t erase_cmd; uint8_t alt_erase_cmd; @@ -1589,15 +1631,15 @@ struct qla_fdt_layout { uint8_t wrt_sts_reg_cmd; uint8_t unprotect_sec_cmd; uint8_t read_man_id_cmd; - __le32 block_size; - __le32 alt_block_size; - __le32 flash_size; - __le32 wrt_enable_data; + uint32_t block_size; + uint32_t alt_block_size; + uint32_t flash_size; + uint32_t wrt_enable_data; uint8_t read_id_addr_len; uint8_t wrt_disable_bits; uint8_t read_dev_id_len; uint8_t chip_erase_cmd; - __le16 read_timeout; + uint16_t read_timeout; uint8_t protect_sec_cmd; uint8_t unused2[65]; }; @@ -1606,11 +1648,18 @@ struct qla_fdt_layout { struct qla_flt_location { uint8_t sig[4]; - __le16 start_lo; - __le16 start_hi; + uint16_t start_lo; + uint16_t start_hi; uint8_t version; uint8_t unused[5]; - __le16 checksum; + uint16_t checksum; +}; + +struct qla_flt_header { + uint16_t version; + uint16_t length; + uint16_t checksum; + uint16_t unused; }; #define FLT_REG_FW 0x01 @@ -1647,6 +1696,7 @@ struct qla_flt_location { #define FLT_REG_VPD_SEC_27XX_1 0x52 #define FLT_REG_VPD_SEC_27XX_2 0xD8 #define FLT_REG_VPD_SEC_27XX_3 0xDA +#define FLT_REG_NVME_PARAMS_27XX 0x21 /* 28xx */ #define FLT_REG_AUX_IMG_PRI_28XX 0x125 @@ -1663,22 +1713,16 @@ struct qla_flt_location { #define FLT_REG_MPI_SEC_28XX 0xF0 #define FLT_REG_PEP_PRI_28XX 0xD1 #define FLT_REG_PEP_SEC_28XX 0xF1 +#define FLT_REG_NVME_PARAMS_PRI_28XX 0x14E +#define FLT_REG_NVME_PARAMS_SEC_28XX 0x179 struct qla_flt_region { - __le16 code; + uint16_t code; uint8_t attribute; uint8_t reserved; - __le32 size; - __le32 start; - __le32 end; -}; - -struct qla_flt_header { - __le16 version; - __le16 length; - __le16 checksum; - __le16 unused; - struct qla_flt_region region[0]; + uint32_t size; + uint32_t start; + uint32_t end; }; #define FLT_REGION_SIZE 16 @@ -1689,18 +1733,18 @@ struct qla_flt_header { struct qla_npiv_header { uint8_t sig[2]; - __le16 version; - __le16 entries; - __le16 unused[4]; - __le16 checksum; + uint16_t version; + uint16_t entries; + uint16_t unused[4]; + uint16_t checksum; }; struct qla_npiv_entry { - __le16 flags; - __le16 vf_id; + uint16_t flags; + uint16_t vf_id; uint8_t q_qos; uint8_t f_qos; - __le16 unused1; + uint16_t unused1; uint8_t port_name[WWN_SIZE]; uint8_t node_name[WWN_SIZE]; }; @@ -1730,7 +1774,7 @@ struct verify_chip_entry_84xx { uint32_t handle; - __le16 options; + uint16_t options; #define VCO_DONT_UPDATE_FW BIT_0 #define VCO_FORCE_UPDATE BIT_1 #define VCO_DONT_RESET_UPDATE BIT_2 @@ -1738,18 +1782,18 @@ struct verify_chip_entry_84xx { #define VCO_END_OF_DATA BIT_14 #define VCO_ENABLE_DSD BIT_15 - __le16 reserved_1; + uint16_t reserved_1; - __le16 data_seg_cnt; - __le16 reserved_2[3]; + uint16_t data_seg_cnt; + uint16_t reserved_2[3]; - __le32 fw_ver; - __le32 exchange_address; + uint32_t fw_ver; + uint32_t exchange_address; - __le32 reserved_3[3]; - __le32 fw_size; - __le32 fw_seq_size; - __le32 relative_offset; + uint32_t reserved_3[3]; + uint32_t fw_size; + uint32_t fw_seq_size; + uint32_t relative_offset; struct dsd64 dsd; }; @@ -1762,22 +1806,22 @@ struct verify_chip_rsp_84xx { uint32_t handle; - 
__le16 comp_status; + uint16_t comp_status; #define CS_VCS_CHIP_FAILURE 0x3 #define CS_VCS_BAD_EXCHANGE 0x8 #define CS_VCS_SEQ_COMPLETEi 0x40 - __le16 failure_code; + uint16_t failure_code; #define VFC_CHECKSUM_ERROR 0x1 #define VFC_INVALID_LEN 0x2 #define VFC_ALREADY_IN_PROGRESS 0x8 - __le16 reserved_1[4]; + uint16_t reserved_1[4]; - __le32 fw_ver; - __le32 exchange_address; + uint32_t fw_ver; + uint32_t exchange_address; - __le32 reserved_2[6]; + uint32_t reserved_2[6]; }; #define ACCESS_CHIP_IOCB_TYPE 0x2B @@ -1789,24 +1833,24 @@ struct access_chip_84xx { uint32_t handle; - __le16 options; + uint16_t options; #define ACO_DUMP_MEMORY 0x0 #define ACO_LOAD_MEMORY 0x1 #define ACO_CHANGE_CONFIG_PARAM 0x2 #define ACO_REQUEST_INFO 0x3 - __le16 reserved1; + uint16_t reserved1; - __le16 dseg_count; - __le16 reserved2[3]; + uint16_t dseg_count; + uint16_t reserved2[3]; - __le32 parameter1; - __le32 parameter2; - __le32 parameter3; + uint32_t parameter1; + uint32_t parameter2; + uint32_t parameter3; - __le32 reserved3[3]; - __le32 total_byte_cnt; - __le32 reserved4; + uint32_t reserved3[3]; + uint32_t total_byte_cnt; + uint32_t reserved4; struct dsd64 dsd; }; @@ -1819,11 +1863,11 @@ struct access_chip_rsp_84xx { uint32_t handle; - __le16 comp_status; - __le16 failure_code; - __le32 residual_count; + uint16_t comp_status; + uint16_t failure_code; + uint32_t residual_count; - __le32 reserved[12]; + uint32_t reserved[12]; }; /* 81XX Support **************************************************************/ @@ -1878,52 +1922,72 @@ struct access_chip_rsp_84xx { struct nvram_81xx { /* NVRAM header. */ uint8_t id[4]; - __le16 nvram_version; - __le16 reserved_0; + uint16_t nvram_version; + uint16_t reserved_0; /* Firmware Initialization Control Block. */ - __le16 version; - __le16 reserved_1; - __le16 frame_payload_size; - __le16 execution_throttle; - __le16 exchange_count; - __le16 reserved_2; + uint16_t version; + uint16_t reserved_1; + uint16_t frame_payload_size; + uint16_t execution_throttle; + uint16_t exchange_count; + uint16_t reserved_2; uint8_t port_name[WWN_SIZE]; uint8_t node_name[WWN_SIZE]; - __le16 login_retry_count; - __le16 reserved_3; - __le16 interrupt_delay_timer; - __le16 login_timeout; + uint16_t login_retry_count; + uint16_t reserved_3; + uint16_t interrupt_delay_timer; + uint16_t login_timeout; - __le32 firmware_options_1; - __le32 firmware_options_2; - __le32 firmware_options_3; + uint32_t firmware_options_1; + uint32_t firmware_options_2; + uint32_t firmware_options_3; - __le16 reserved_4[4]; + uint16_t reserved_4[4]; /* Offset 64. */ uint8_t enode_mac[6]; - __le16 reserved_5[5]; + uint16_t reserved_5[5]; /* Offset 80. */ - __le16 reserved_6[24]; + uint16_t reserved_6[24]; /* Offset 128. */ - __le16 ex_version; + uint16_t ex_version; uint8_t prio_fcf_matching_flags; uint8_t reserved_6_1[3]; - __le16 pri_fcf_vlan_id; + uint16_t pri_fcf_vlan_id; uint8_t pri_fcf_fabric_name[8]; - __le16 reserved_6_2[7]; + uint16_t reserved_6_2[7]; uint8_t spma_mac_addr[6]; - __le16 reserved_6_3[14]; + + uint16_t reserved_6_3[8]; + + /* + * BIT 0 = scmr from app + * BIT 1 = scmr default queue + * BIT 2 = scmr driver throttling + * BIT 3-7= Reserved + */ + uint8_t scmr_control_flags; + + /* Monitor - 0, Conservative - 1, Moderate - 2, Aggressive - 3*/ + uint8_t scmr_throttle_profile; + + uint8_t reserved_6_4[10]; /* Offset 192. 
*/ uint8_t min_supported_speed; - uint8_t reserved_7_0; - __le16 reserved_7[31]; + /* + * BIT 0 = port disabled state + * BIT 1 = fabric priority enabled + * BIT 2 = Use Virtual Lanes + * BIT 3-7= Reserved + */ + uint8_t port_features; + uint16_t reserved_7[31]; /* * BIT 0 = Enable spinup delay @@ -1956,26 +2020,26 @@ struct nvram_81xx { * BIT 25 = Temp WWPN * BIT 26-31 = */ - __le32 host_p; + uint32_t host_p; uint8_t alternate_port_name[WWN_SIZE]; uint8_t alternate_node_name[WWN_SIZE]; uint8_t boot_port_name[WWN_SIZE]; - __le16 boot_lun_number; - __le16 reserved_8; + uint16_t boot_lun_number; + uint16_t reserved_8; uint8_t alt1_boot_port_name[WWN_SIZE]; - __le16 alt1_boot_lun_number; - __le16 reserved_9; + uint16_t alt1_boot_lun_number; + uint16_t reserved_9; uint8_t alt2_boot_port_name[WWN_SIZE]; - __le16 alt2_boot_lun_number; - __le16 reserved_10; + uint16_t alt2_boot_lun_number; + uint16_t reserved_10; uint8_t alt3_boot_port_name[WWN_SIZE]; - __le16 alt3_boot_lun_number; - __le16 reserved_11; + uint16_t alt3_boot_lun_number; + uint16_t reserved_11; /* * BIT 0 = Selective Login @@ -1987,35 +2051,35 @@ struct nvram_81xx { * BIT 6 = Reserved * BIT 7-31 = */ - __le32 efi_parameters; + uint32_t efi_parameters; uint8_t reset_delay; uint8_t reserved_12; - __le16 reserved_13; + uint16_t reserved_13; - __le16 boot_id_number; - __le16 reserved_14; + uint16_t boot_id_number; + uint16_t reserved_14; - __le16 max_luns_per_target; - __le16 reserved_15; + uint16_t max_luns_per_target; + uint16_t reserved_15; - __le16 port_down_retry_count; - __le16 link_down_timeout; + uint16_t port_down_retry_count; + uint16_t link_down_timeout; /* FCode parameters. */ - __le16 fcode_parameter; + uint16_t fcode_parameter; - __le16 reserved_16[3]; + uint16_t reserved_16[3]; /* Offset 352. */ uint8_t reserved_17[4]; - __le16 reserved_18[5]; + uint16_t reserved_18[5]; uint8_t reserved_19[2]; - __le16 reserved_20[8]; + uint16_t reserved_20[8]; /* Offset 384. */ uint8_t reserved_21[16]; - __le16 reserved_22[3]; + uint16_t reserved_22[3]; /* Offset 406 (0x196) Enhanced Features * BIT 0 = Extended BB credits for LR @@ -2030,20 +2094,20 @@ struct nvram_81xx { uint16_t reserved_24[4]; /* Offset 416. */ - __le16 reserved_25[32]; + uint16_t reserved_25[32]; /* Offset 480. */ uint8_t model_name[16]; /* Offset 496. */ - __le16 feature_mask_l; - __le16 feature_mask_h; - __le16 reserved_26[2]; + uint16_t feature_mask_l; + uint16_t feature_mask_h; + uint16_t reserved_26[2]; - __le16 subsystem_vendor_id; - __le16 subsystem_device_id; + uint16_t subsystem_vendor_id; + uint16_t subsystem_device_id; - __le32 checksum; + uint32_t checksum; }; /* @@ -2052,31 +2116,31 @@ struct nvram_81xx { */ #define ICB_VERSION 1 struct init_cb_81xx { - __le16 version; - __le16 reserved_1; + uint16_t version; + uint16_t reserved_1; - __le16 frame_payload_size; - __le16 execution_throttle; - __le16 exchange_count; + uint16_t frame_payload_size; + uint16_t execution_throttle; + uint16_t exchange_count; - __le16 reserved_2; + uint16_t reserved_2; uint8_t port_name[WWN_SIZE]; /* Big endian. */ uint8_t node_name[WWN_SIZE]; /* Big endian. 
*/ - __le16 response_q_inpointer; - __le16 request_q_outpointer; + uint16_t response_q_inpointer; + uint16_t request_q_outpointer; - __le16 login_retry_count; + uint16_t login_retry_count; - __le16 prio_request_q_outpointer; + uint16_t prio_request_q_outpointer; - __le16 response_q_length; - __le16 request_q_length; + uint16_t response_q_length; + uint16_t request_q_length; - __le16 reserved_3; + uint16_t reserved_3; - __le16 prio_request_q_length; + uint16_t prio_request_q_length; __le64 request_q_address __packed; __le64 response_q_address __packed; @@ -2084,12 +2148,12 @@ struct init_cb_81xx { uint8_t reserved_4[8]; - __le16 atio_q_inpointer; - __le16 atio_q_length; + uint16_t atio_q_inpointer; + uint16_t atio_q_length; __le64 atio_q_address __packed; - __le16 interrupt_delay_timer; /* 100us increments. */ - __le16 login_timeout; + uint16_t interrupt_delay_timer; /* 100us increments. */ + uint16_t login_timeout; /* * BIT 0-3 = Reserved @@ -2102,7 +2166,7 @@ struct init_cb_81xx { * BIT 14 = Node Name Option * BIT 15-31 = Reserved */ - __le32 firmware_options_1; + uint32_t firmware_options_1; /* * BIT 0 = Operation Mode bit 0 @@ -2120,7 +2184,7 @@ struct init_cb_81xx { * BIT 14 = Enable Target PRLI Control * BIT 15-31 = Reserved */ - __le32 firmware_options_2; + uint32_t firmware_options_2; /* * BIT 0-3 = Reserved @@ -2141,8 +2205,9 @@ struct init_cb_81xx { * BIT 28 = SPMA selection bit 1 * BIT 30-31 = Reserved */ - __le32 firmware_options_3; - + uint32_t firmware_options_3; +#define FWO3_DATA_RATE_MASK 0x0000E000 +#define FWO3_DATA_RATE_SHIFT 13 uint8_t reserved_5[8]; uint8_t enode_mac[6]; @@ -2219,9 +2284,9 @@ struct qla_fcp_prio_cfg { #define FCP_PRIO_ATTR_ENABLE 0x1 #define FCP_PRIO_ATTR_PERSIST 0x2 uint8_t reserved; /* Reserved for future use */ -#define FCP_PRIO_CFG_HDR_SIZE offsetof(struct qla_fcp_prio_cfg, entry) - struct qla_fcp_prio_entry entry[1023]; /* fcp priority entries */ - uint8_t reserved2[16]; +#define FCP_PRIO_CFG_HDR_SIZE 0x10 + struct qla_fcp_prio_entry entry[1]; /* fcp priority entries */ +#define FCP_PRIO_CFG_ENTRY_SIZE 0x20 }; #define FCP_PRIO_CFG_SIZE (32*1024) /* fcp prio data per port*/ diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 3bc1850273421..4c9873c75401d 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -1,17 +1,20 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. */ #ifndef __QLA_GBL_H #define __QLA_GBL_H #include +#include /* * Global Function Prototypes in qla_init.c source file. 
*/ extern int qla2x00_initialize_adapter(scsi_qla_host_t *); +extern int qla24xx_post_prli_work(struct scsi_qla_host *, fc_port_t *); extern int qla2100_pci_config(struct scsi_qla_host *); extern int qla2300_pci_config(struct scsi_qla_host *); @@ -30,6 +33,8 @@ extern int qla24xx_nvram_config(struct scsi_qla_host *); extern int qla81xx_nvram_config(struct scsi_qla_host *); extern void qla2x00_update_fw_options(struct scsi_qla_host *); extern void qla24xx_update_fw_options(scsi_qla_host_t *); +extern void qla81xx_update_fw_options(scsi_qla_host_t *); +extern void qla83xx_update_fw_options(scsi_qla_host_t *); extern int qla2x00_load_risc(struct scsi_qla_host *, uint32_t *); extern int qla24xx_load_risc(scsi_qla_host_t *, uint32_t *); @@ -68,7 +73,7 @@ extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *); extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *); extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *, uint16_t *); -extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t); +extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint64_t, uint32_t); extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *, uint16_t *); struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *, @@ -108,7 +113,6 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *, int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*, void *, u8); int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *); -int qla24xx_detect_sfp(scsi_qla_host_t *); int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8); extern void qla28xx_get_aux_images(struct scsi_qla_host *, @@ -129,6 +133,30 @@ void qla_rscn_replay(fc_port_t *fcport); void qla24xx_free_purex_item(struct purex_item *item); extern bool qla24xx_risc_firmware_invalid(uint32_t *); void qla_init_iocb_limit(scsi_qla_host_t *); +#ifdef QLA2XXX_LATENCY_MEASURE +void qla_get_scsi_cmd_latency(srb_t *sp); +void qla_get_nvme_cmd_latency(srb_t *sp); +#endif + +struct edif_list_entry; + +struct edif_list_entry *qla_edif_list_find_sa_index(fc_port_t *fcport, uint16_t handle); +void qla_edif_list_del(fc_port_t *fcport); +int edif_sadb_delete_sa_index(fc_port_t *fcport, uint16_t nport_handle, uint16_t sa_index); +void qla_edif_sadb_release(struct qla_hw_data *); +int qla_edif_sadb_build_free_pool(struct qla_hw_data *); +void qla_edif_sadb_release_free_pool(struct qla_hw_data *); +void qla_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *vha, + srb_t *sp, struct sts_entry_24xx *sts24); +void qlt_chk_edif_rx_sa_delete_pending(scsi_qla_host_t *, fc_port_t *, + struct ctio7_from_24xx *); + +void qla2x00_release_all_sadb(struct scsi_qla_host *vha, struct fc_port *fcport); +int qla_edif_process_els(scsi_qla_host_t *, bsg_job_t *); +void qla_edif_sess_down(struct scsi_qla_host *vha, struct fc_port *sess); +void qla_edif_clear_appdata(struct scsi_qla_host *vha, + struct fc_port *fcport); +const char *sc_to_str(uint16_t cmd); /* @@ -148,6 +176,7 @@ extern int ql2xrdpenable; extern int ql2xsmartsan; extern int ql2xallocfwdump; extern int ql2xextended_error_logging; +extern uint32_t ql_dbg_offset; extern int ql2xiidmaenable; extern int ql2xmqsupport; extern int ql2xfwloadbin; @@ -175,7 +204,23 @@ extern int qla2xuseresexchforels; extern int ql2xexlogins; extern int ql2xdifbundlinginternalbuffers; extern int ql2xfulldump_on_mpifail; +extern int ql2xsecenable; extern int ql2xenforce_iocb_limit; +extern int ql2xabts_wait_nvme; +extern int ql2x_scmr_drop_pct; 
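+/*
+ * The ql2x_scmr_* module parameters declared here (more follow just
+ * below) tune the driver's SCM congestion throttling.  A minimal sketch
+ * of how one such knob is conventionally wired up on the definition
+ * side -- the file, default value, and description are assumptions, not
+ * taken from this patch:
+ *
+ *	// in qla_os.c (assumed location)
+ *	int ql2x_scmr_drop_pct = 10;
+ *	module_param(ql2x_scmr_drop_pct, int, 0644);
+ *	MODULE_PARM_DESC(ql2x_scmr_drop_pct,
+ *	    "Assumed example: percentage step used when throttling down I/O");
+ */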
+extern int ql2x_scmr_drop_pct_low_wm;
+extern int ql2x_scmr_up_pct;
+extern int ql2x_scmr_flow_ctl_tgt;
+extern int ql2x_scmr_flow_ctl_host;
+extern int ql2x_scmr_throttle_mode;
+extern int ql2x_scmr_profile;
+extern int ql2x_scmr_use_slow_queue;
+extern int ql2xrspq_follow_inptr;
+extern int ql2xrspq_follow_inptr_legacy;
+extern int ql2xcontrol_edc_rdf;
+extern u64 ql2xdebug;
+extern u32 ql2xnvme_queues;
+extern int ql2xvirtuallane;
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -222,6 +267,7 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32);
 
 extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
+extern void qla_eeh_work(struct work_struct *);
 extern void qla2x00_sp_compl(srb_t *sp, int);
 extern void qla2xxx_qpair_sp_free_dma(srb_t *sp);
 extern void qla2xxx_qpair_sp_compl(srb_t *sp, int);
@@ -231,15 +277,50 @@ void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
 int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
 int qla24xx_post_relogin_work(struct scsi_qla_host *vha);
 void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *);
-void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
-	struct purex_item *pkt);
+struct edif_sa_ctl *qla_edif_find_sa_ctl_by_index(fc_port_t *, int, int);
+void qla2xxx_update_scm_fcport(scsi_qla_host_t *vha);
+bool qla2xxx_throttle_req(srb_t *sp, struct qla_hw_data *ha, fc_port_t *fcport, uint8_t dir);
+void qla2xxx_scmr_clear_throttle(struct qla_scmr_flow_control *sfc);
+void qla2xxx_scmr_clear_congn(struct qla_scmr_flow_control *sfc);
+void qla2xxx_scmr_manage_qdepth(srb_t *sp, struct fc_port *fcport, bool inc);
+void qla2xxx_update_sfc_ios(srb_t *sp, struct qla_hw_data *ha, fc_port_t *fcport,
+	int new);
+void qla_pci_set_eeh_busy(struct scsi_qla_host *);
+void qla_schedule_eeh_work(struct scsi_qla_host *);
+int qla2xxx_scm_get_features(struct scsi_qla_host *vha);
+int qla2xxx_scm_send_rdf_els(scsi_qla_host_t *vha);
+int qla2xxx_scm_send_edc_els(scsi_qla_host_t *vha);
+void qla2xxx_send_uscm_els(scsi_qla_host_t *vha);
+void qla2xx_scm_process_purex_edc(struct scsi_qla_host *vha,
+	struct purex_item *item);
+void qla2xxx_scm_process_purex_rdf(struct scsi_qla_host *vha,
+	struct purex_item *item);
+void qla2xxx_scm_alloc_rdf_payload(struct scsi_qla_host *vha);
+void qla2xxx_scm_free_rdf_payload(struct scsi_qla_host *vha);
+void qla2xxx_clear_scm_stats(struct scsi_qla_host *vha);
+void qla2xxx_clear_scmr_stats(struct scsi_qla_host *vha);
+void qla2xxx_scmr_init_deltas(struct qla_scmr_flow_control *sfc);
+void qla_scm_clear_previous_event(struct scsi_qla_host *vha);
+void qla_scm_clear_session(fc_port_t *fcport);
+void qla_scm_clear_host(struct scsi_qla_host *vha);
+bool qla_scm_chk_throttle_cmd_opcode(srb_t *sp);
+int qla2xxx_set_vl(fc_port_t *fcport, uint8_t vl);
+void qla_scm_host_clear_vl_state(struct scsi_qla_host *vha);
+void qla_scm_tgt_clear_vl_state(fc_port_t *fcport);
+uint8_t qla_get_throttling_state(struct qla_scmr_flow_control *sfc);
+bool qla2xxx_switch_vl(struct qla_scmr_flow_control *sfc, uint8_t vl);
+void qla27xx_process_purex_fpin(struct scsi_qla_host *vha,
+	struct purex_item *item);
+void qla_scm_clear_all_tgt_sess(struct scsi_qla_host *vha);
 
 /*
  * Global Functions in qla_mid.c source file. 
*/
+extern void qla_update_vp_map(struct scsi_qla_host *, int);
 extern struct scsi_host_template qla2xxx_driver_template;
 extern struct scsi_transport_template *qla2xxx_transport_vport_template;
-extern void qla2x00_timer(struct timer_list *);
+extern void qla2x00_timer(qla_timer_arg_t timer_arg);
+extern void qla2xxx_perf_timer(qla_timer_arg_t timer_arg);
 extern void qla2x00_start_timer(scsi_qla_host_t *, unsigned long);
 extern void qla24xx_deallocate_vp_id(scsi_qla_host_t *);
 extern int qla24xx_disable_vp (scsi_qla_host_t *);
@@ -277,7 +358,10 @@ extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
 /*
  * Global Function Prototypes in qla_iocb.c source file.
  */
-
+void qla_els_pt_iocb(struct scsi_qla_host *,
+	struct els_entry_24xx *, struct qla_els_pt_arg *);
+cont_a64_entry_t *qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *,
+	struct req_que *);
 extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
 extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
 extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@@ -293,7 +377,8 @@ extern int qla2x00_start_sp(srb_t *);
 extern int qla24xx_dif_start_scsi(srb_t *);
 extern int qla2x00_start_bidir(srb_t *, struct scsi_qla_host *, uint32_t);
 extern int qla2xxx_dif_start_scsi_mq(srb_t *);
-extern void qla2x00_init_timer(srb_t *sp, unsigned long tmo);
+extern void qla2x00_init_async_sp(srb_t *sp, unsigned long tmo,
+	void (*done)(struct srb *, int));
 extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
 extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
@@ -307,6 +392,10 @@ extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
 	struct dsd64 *, uint16_t, struct qla_tgt_cmd *);
 extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
 extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
+extern int qla24xx_issue_sa_replace_iocb(scsi_qla_host_t *,
+	struct qla_work_evt *);
+void qla2x00_sp_release(struct kref *kref);
+void qla2x00_els_dcmd2_iocb_timeout(void *data);
 
 /*
  * Global Function Prototypes in qla_mbx.c source file. 
@@ -405,7 +494,8 @@ extern int qla2x00_get_resource_cnts(scsi_qla_host_t *); extern int -qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map); +qla2x00_get_fcal_position_map(scsi_qla_host_t *ha, char *pos_map, + u8 *num_entries); extern int qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *, @@ -466,7 +556,7 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *); extern int qla24xx_get_buffer_credits(scsi_qla_host_t *, struct buffer_credit_24xx *, - dma_addr_t); + dma_addr_t); extern int qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *, @@ -526,6 +616,10 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t); extern int qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint); +extern int qla26xx_dport_diagnostics_v2(scsi_qla_host_t *, + struct qla_dport_diag_v2 *, mbx_cmd_t *); + +int qla2xxx_set_scm_params(fc_port_t *fcport, bool congested); int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *); int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8); int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t, @@ -546,6 +640,9 @@ extern int qla2xxx_read_remote_register(scsi_qla_host_t *, uint32_t, uint32_t *); extern int qla2xxx_write_remote_register(scsi_qla_host_t *, uint32_t, uint32_t); +int qla_get_features(scsi_qla_host_t *, dma_addr_t, u16); +int qla_mpipt_get_status(scsi_qla_host_t *, u16 , u16 *, u16); +void qla_no_op_mb(struct scsi_qla_host *vha); /* * Global Function Prototypes in qla_isr.c source file. @@ -569,11 +666,12 @@ qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *, uint32_t); extern irqreturn_t qla2xxx_msix_rsp_q(int irq, void *dev_id); -extern irqreturn_t -qla2xxx_msix_rsp_q_hs(int irq, void *dev_id); fc_port_t *qla2x00_find_fcport_by_loopid(scsi_qla_host_t *, uint16_t); fc_port_t *qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *, u8 *, u8); fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8); +void qla24xx_queue_purex_item(scsi_qla_host_t *, struct purex_item *, + void (*process_item)(struct scsi_qla_host *, struct purex_item *)); +void __qla_consume_iocb(struct scsi_qla_host *, void **, struct rsp_que **); /* * Global Function Prototypes in qla_sup.c source file. @@ -636,20 +734,26 @@ extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t); extern void qla2xxx_flash_npiv_conf(scsi_qla_host_t *); extern int qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *); +extern int qla2x00_mailbox_passthru(bsg_job_t *bsg_job); +int qla2x00_sys_ld_info(bsg_job_t *bsg_job); +int __qla_copy_purex_to_buffer(struct scsi_qla_host *, void **, + struct rsp_que **, u8 *, u32); + +int qla_mailbox_passthru(scsi_qla_host_t *vha, uint16_t *mbx_in, + uint16_t *mbx_out); /* * Global Function Prototypes in qla_dbg.c source file. 
*/ -void qla2xxx_dump_fw(scsi_qla_host_t *vha); -void qla2100_fw_dump(scsi_qla_host_t *vha); -void qla2300_fw_dump(scsi_qla_host_t *vha); -void qla24xx_fw_dump(scsi_qla_host_t *vha); -void qla25xx_fw_dump(scsi_qla_host_t *vha); -void qla81xx_fw_dump(scsi_qla_host_t *vha); -void qla82xx_fw_dump(scsi_qla_host_t *vha); -void qla8044_fw_dump(scsi_qla_host_t *vha); - -void qla27xx_fwdump(scsi_qla_host_t *vha); +extern void qla2100_fw_dump(scsi_qla_host_t *, int); +extern void qla2300_fw_dump(scsi_qla_host_t *, int); +extern void qla24xx_fw_dump(scsi_qla_host_t *, int); +extern void qla25xx_fw_dump(scsi_qla_host_t *, int); +extern void qla81xx_fw_dump(scsi_qla_host_t *, int); +extern void qla82xx_fw_dump(scsi_qla_host_t *, int); +extern void qla8044_fw_dump(scsi_qla_host_t *, int); + +extern void qla27xx_fwdump(scsi_qla_host_t *, int); extern void qla27xx_mpi_fwdump(scsi_qla_host_t *, int); extern ulong qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *, void *); extern int qla27xx_fwdt_template_valid(void *); @@ -658,6 +762,8 @@ extern ulong qla27xx_fwdt_template_size(void *); extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int); extern void ql_dump_regs(uint, scsi_qla_host_t *, uint); extern void ql_dump_buffer(uint, scsi_qla_host_t *, uint, const void *, uint); +extern void ql_scm_dump_buffer(uint level, scsi_qla_host_t *vha, + uint id, void *buf, uint size); /* * Global Function Prototypes in qla_gs.c source file. */ @@ -684,22 +790,15 @@ extern void qla2x00_async_iocb_timeout(void *data); extern void qla2x00_free_fcport(fc_port_t *); -extern int qla24xx_post_gpnid_work(struct scsi_qla_host *, port_id_t *); -extern int qla24xx_async_gpnid(scsi_qla_host_t *, port_id_t *); -void qla24xx_handle_gpnid_event(scsi_qla_host_t *, struct event_arg *); - int qla24xx_post_gpsc_work(struct scsi_qla_host *, fc_port_t *); int qla24xx_async_gpsc(scsi_qla_host_t *, fc_port_t *); void qla24xx_handle_gpsc_event(scsi_qla_host_t *, struct event_arg *); int qla2x00_mgmt_svr_login(scsi_qla_host_t *); void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea); -int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport); +int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool); int qla24xx_async_gpnft(scsi_qla_host_t *, u8, srb_t *); void qla24xx_async_gpnft_done(scsi_qla_host_t *, srb_t *); void qla24xx_async_gnnft_done(scsi_qla_host_t *, srb_t *); -int qla24xx_async_gnnid(scsi_qla_host_t *, fc_port_t *); -void qla24xx_handle_gnnid_event(scsi_qla_host_t *, struct event_arg *); -int qla24xx_post_gnnid_work(struct scsi_qla_host *, fc_port_t *); int qla24xx_post_gfpnid_work(struct scsi_qla_host *, fc_port_t *); int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *); void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *); @@ -777,7 +876,7 @@ extern int qlafx00_fw_ready(scsi_qla_host_t *); extern int qlafx00_configure_devices(scsi_qla_host_t *); extern int qlafx00_reset_initialize(scsi_qla_host_t *); extern int qlafx00_fx_disc(scsi_qla_host_t *, fc_port_t *, uint16_t); -extern void qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *); +extern int qlafx00_process_aen(struct scsi_qla_host *, struct qla_work_evt *); extern int qlafx00_post_aenfx_work(struct scsi_qla_host *, uint32_t, uint32_t *, int); extern uint32_t qlafx00_fw_state_show(struct device *, @@ -819,7 +918,7 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *); /* IOCB related functions */ extern int qla82xx_start_scsi(srb_t *); extern void qla2x00_sp_free(srb_t *sp); 
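+/*
+ * qla_timer_arg_t (used by qla2x00_sp_timeout below and qla2x00_timer
+ * earlier in this header) comes from the new qla_compat.h and papers
+ * over the v4.15 kernel timer-callback API change.  A minimal sketch of
+ * what such a shim looks like -- the helper-macro name and the exact
+ * version cutoff are assumptions, not taken from this patch:
+ *
+ *	#include <linux/version.h>
+ *	#include <linux/timer.h>
+ *	#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)
+ *	typedef struct timer_list *qla_timer_arg_t;
+ *	#define qla_from_timer(var, arg, field) \
+ *		container_of((arg), typeof(*(var)), field)
+ *	#else
+ *	typedef unsigned long qla_timer_arg_t;
+ *	#define qla_from_timer(var, arg, field) ((typeof(var))(arg))
+ *	#endif
+ */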
-extern void qla2x00_sp_timeout(struct timer_list *);
+extern void qla2x00_sp_timeout(qla_timer_arg_t timer_arg);
 extern void qla2x00_bsg_job_done(srb_t *sp, int);
 extern void qla2x00_bsg_sp_free(srb_t *sp);
 extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
@@ -869,16 +968,19 @@ extern int qla2x00_read_sfp_dev(struct scsi_qla_host *, char *, int);
 extern int ql26xx_led_config(scsi_qla_host_t *, uint16_t, uint16_t *);
 
 /* BSG related functions */
-extern int qla24xx_bsg_request(struct bsg_job *);
-extern int qla24xx_bsg_timeout(struct bsg_job *);
+extern int qla24xx_bsg_request(bsg_job_t *);
+extern int qla24xx_bsg_timeout(bsg_job_t *);
 extern int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t);
 extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *, dma_addr_t,
 	size_t, uint32_t);
 extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t,
 	uint16_t *, uint16_t *);
+extern int qla24xx_sadb_update(bsg_job_t *bsg_job);
+extern int qla_post_sa_replace_work(struct scsi_qla_host *vha,
+	fc_port_t *fcport, uint16_t nport_handle, struct edif_sa_ctl *sa_ctl);
 
 /* 83xx related functions */
-void qla83xx_fw_dump(scsi_qla_host_t *vha);
+extern void qla83xx_fw_dump(scsi_qla_host_t *, int);
 
 /* Minidump related functions */
 extern int qla82xx_md_get_template_size(scsi_qla_host_t *);
@@ -920,6 +1022,7 @@ extern int qla_set_exchoffld_mem_cfg(scsi_qla_host_t *);
 extern void qlt_handle_abts_recv(struct scsi_qla_host *, struct rsp_que *,
 	response_t *);
 
+struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *, be_id_t d_id);
 int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *,
 	struct imm_ntfy_from_isp *, int);
 void qla24xx_do_nack_work(struct scsi_qla_host *, struct qla_work_evt *);
@@ -931,16 +1034,74 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
 	uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
 void qla24xx_delete_sess_fn(struct work_struct *);
 void qlt_unknown_atio_work_fn(struct work_struct *);
-void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
-void qlt_remove_target_resources(struct qla_hw_data *);
+void qla_update_host_map(struct scsi_qla_host *, port_id_t);
+void qla_remove_hostmap(struct qla_hw_data *);
 void qlt_clr_qp_table(struct scsi_qla_host *vha);
 void qlt_set_mode(struct scsi_qla_host *);
 int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
-extern void qla24xx_process_purex_list(struct purex_list *);
 extern void qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp);
 extern void qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp);
+extern void qla_wait_nvme_release_cmd_kref(srb_t *sp);
+extern void qla_nvme_abort_set_option(
+	struct abort_entry_24xx *abt, srb_t *sp);
+extern void qla_nvme_abort_process_comp_status(
+	struct abort_entry_24xx *abt, srb_t *sp);
+void qlt_handle_fast_error(struct scsi_qla_host *, struct qla_tgt_cmd *,
+	void *);
+struct scsi_qla_host *qla_find_host_by_vp_idx(struct scsi_qla_host *vha,
+	uint16_t vp_idx);
 
 /* nvme.c */
 void qla_nvme_unregister_remote_port(struct fc_port *fcport);
+
+/* qla_edif.c */
+fc_port_t *qla2x00_find_fcport_by_pid(scsi_qla_host_t *, port_id_t *);
+void qla_edb_eventcreate(scsi_qla_host_t *, uint32_t, uint32_t, uint32_t,
+	fc_port_t *);
+void qla_edb_stop(scsi_qla_host_t *);
+int32_t qla_edif_app_mgmt(bsg_job_t *);
+int qla2x00_check_rdp_test(uint32_t cmd, uint32_t port);
+void qla_enode_init(scsi_qla_host_t *);
+void qla_enode_stop(scsi_qla_host_t *);
+void 
qla_edif_flush_sa_ctl_lists(fc_port_t *fcport);
+void qla_edb_init(scsi_qla_host_t *);
+void qla_edif_timer(scsi_qla_host_t *vha);
+int qla28xx_start_scsi_edif(srb_t *);
+void qla24xx_sa_update_iocb(srb_t *, struct sa_update_28xx *);
+void qla24xx_sa_replace_iocb(srb_t *, struct sa_update_28xx *);
+void qla24xx_auth_els(scsi_qla_host_t *, void **, struct rsp_que **);
+void qla24xx_purex_cont_entry(scsi_qla_host_t *, struct rsp_que *,
+	sts_cont_entry_t *);
+void qla28xx_sa_update_iocb_entry(scsi_qla_host_t *, struct req_que *,
+	struct sa_update_28xx *);
+void qla_parse_auth_els_ctl(struct srb *);
+
+void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
+
+#define QLA2XX_HW_ERROR		BIT_0
+#define QLA2XX_SHT_LNK_DWN	BIT_1
+#define QLA2XX_INT_ERR		BIT_2
+#define QLA2XX_CMD_TIMEOUT	BIT_3
+#define QLA2XX_RESET_CMD_ERR	BIT_4
+#define QLA2XX_TGT_SHT_LNK_DOWN	BIT_17
+
+#define QLA2XX_MAX_LINK_DOWN_TIME	100
+
+int qla2xxx_start_stats(struct Scsi_Host *, uint32_t flags);
+int qla2xxx_stop_stats(struct Scsi_Host *, uint32_t flags);
+int qla2xxx_reset_stats(struct Scsi_Host *, uint32_t flags);
+
+int qla2xxx_get_ini_stats(struct Scsi_Host *, uint32_t flags, void *data, uint64_t size);
+int qla2xxx_get_tgt_stats(struct Scsi_Host *, uint32_t flags, struct fc_rport *rport, void *data, uint64_t size);
+int qla2xxx_disable_port(struct Scsi_Host *);
+int qla2xxx_enable_port(struct Scsi_Host *);
+
+uint64_t qla2x00_get_num_tgts(scsi_qla_host_t *vha);
+uint64_t qla2x00_count_set_bits(uint32_t num);
+int qla_create_buf_pool(struct scsi_qla_host *, struct qla_qpair *);
+void qla_free_buf_pool(struct qla_qpair *);
+int qla_get_buf(struct scsi_qla_host *, struct qla_qpair *, struct qla_buf_dsc *);
+void qla_put_buf(struct qla_qpair *, struct qla_buf_dsc *);
+
 #endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 73015c69b5e89..419a0e97980c3 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1,7 +1,8 @@
-// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic Fibre Channel HBA Driver
  * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details. 
*/ #include "qla_def.h" #include "qla_target.h" @@ -529,7 +530,6 @@ static void qla2x00_async_sns_sp_done(srb_t *sp, int rc) if (!e) goto err2; - del_timer(&sp->u.iocb_cmd.timer); e->u.iosb.sp = sp; qla2x00_post_work(vha, e); return; @@ -556,8 +556,8 @@ static void qla2x00_async_sns_sp_done(srb_t *sp, int rc) sp->u.iocb_cmd.u.ctarg.rsp = NULL; } - sp->free(sp); - + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return; } @@ -592,13 +592,15 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) if (!vha->flags.online) goto done; + /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_CT_PTHRU_CMD; sp->name = "rft_id"; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_sns_sp_done); sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, @@ -638,8 +640,6 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - sp->done = qla2x00_async_sns_sp_done; ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - hdl=%x portid %06x.\n", @@ -653,7 +653,8 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) } return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } @@ -687,13 +688,15 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, srb_t *sp; struct ct_sns_pkt *ct_sns; + /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_CT_PTHRU_CMD; sp->name = "rff_id"; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_sns_sp_done); sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, @@ -731,8 +734,6 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - sp->done = qla2x00_async_sns_sp_done; ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - hdl=%x portid %06x feature %x type %x.\n", @@ -748,7 +749,8 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } @@ -778,13 +780,15 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, srb_t *sp; struct ct_sns_pkt *ct_sns; + /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_CT_PTHRU_CMD; sp->name = "rnid"; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_sns_sp_done); sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, @@ -822,9 +826,6 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - 
sp->done = qla2x00_async_sns_sp_done; - ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - hdl=%x portid %06x\n", sp->name, sp->handle, d_id->b24); @@ -839,7 +840,8 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } @@ -885,13 +887,15 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha) srb_t *sp; struct ct_sns_pkt *ct_sns; + /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_CT_PTHRU_CMD; sp->name = "rsnn_nn"; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_sns_sp_done); sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, @@ -935,9 +939,6 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha) sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - sp->done = qla2x00_async_sns_sp_done; - ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - hdl=%x.\n", sp->name, sp->handle); @@ -952,7 +953,8 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha) return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } @@ -1536,7 +1538,8 @@ qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha) } if (IS_QLA2031(ha)) { if ((ha->pdev->subsystem_vendor == 0x103C) && - (ha->pdev->subsystem_device == 0x8002)) { + ((ha->pdev->subsystem_device == 0x8002) || + (ha->pdev->subsystem_device == 0x8086))) { speeds = FDMI_PORT_SPEED_16GB; } else { speeds = FDMI_PORT_SPEED_16GB|FDMI_PORT_SPEED_8GB| @@ -1544,7 +1547,7 @@ qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha) } return speeds; } - if (IS_QLA25XX(ha) || IS_QLAFX00(ha)) + if (IS_QLA25XX(ha)) return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB| FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB; if (IS_QLA24XX_TYPE(ha)) @@ -1579,27 +1582,14 @@ qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha) return FDMI_PORT_SPEED_UNKNOWN; } } - -/** - * qla2x00_hba_attributes() perform HBA attributes registration - * @vha: HA context - * @entries: number of entries to use - * @callopt: Option to issue extended or standard FDMI - * command parameter - * - * Returns 0 on success. - */ -static unsigned long -qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries, - unsigned int callopt) +static ulong +qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries, uint callopt) { struct qla_hw_data *ha = vha->hw; - struct init_cb_24xx *icb24 = (void *)ha->init_cb; struct new_utsname *p_sysid = utsname(); struct ct_fdmi_hba_attr *eiter; uint16_t alen; - unsigned long size = 0; - + ulong size = 0; /* Nodename. 
*/ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME); @@ -1615,7 +1605,7 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries, eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER); alen = scnprintf( eiter->a.manufacturer, sizeof(eiter->a.manufacturer), - "%s", "QLogic Corporation"); + "%s", QLA2XXX_MANUFACTURER); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); @@ -1729,8 +1719,6 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries, size += alen; ql_dbg(ql_dbg_disc, vha, 0x20a8, "FIRMWARE VERSION = %s.\n", eiter->a.fw_version); - if (callopt == CALLOPT_FDMI1) - goto done; /* OS Name and Version */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION); @@ -1753,11 +1741,13 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries, size += alen; ql_dbg(ql_dbg_disc, vha, 0x20a9, "OS VERSION = %s.\n", eiter->a.os_version); + if (callopt == CALLOPT_FDMI1) + goto done; /* MAX CT Payload Length */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH); - eiter->a.max_ct_len = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ? - icb24->frame_payload_size : ha->init_cb->frame_payload_size)); + eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size >> 2); + alen = sizeof(eiter->a.max_ct_len); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); @@ -1834,29 +1824,16 @@ qla2x00_hba_attributes(scsi_qla_host_t *vha, void *entries, done: return size; } - -/** - * qla2x00_port_attributes() perform Port attributes registration - * @vha: HA context - * @entries: number of entries to use - * @callopt: Option to issue extended or standard FDMI - * command parameter - * - * Returns 0 on success. - */ -static unsigned long -qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, - unsigned int callopt) +static ulong +qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, uint callopt) { struct qla_hw_data *ha = vha->hw; - struct init_cb_24xx *icb24 = (void *)ha->init_cb; struct new_utsname *p_sysid = utsname(); char *hostname = p_sysid ? p_sysid->nodename : fc_host_system_hostname(vha->host); struct ct_fdmi_port_attr *eiter; uint16_t alen; - unsigned long size = 0; - + ulong size = 0; /* FC4 types. */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES); @@ -1901,8 +1878,7 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, /* Max frame size. */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE); - eiter->a.max_frame_size = cpu_to_be32(le16_to_cpu(IS_FWI2_CAPABLE(ha) ? 
- icb24->frame_payload_size : ha->init_cb->frame_payload_size)); + eiter->a.max_frame_size = cpu_to_be32(ha->frame_payload_size); alen = sizeof(eiter->a.max_frame_size); alen += FDMI_ATTR_TYPELEN(eiter); eiter->len = cpu_to_be16(alen); @@ -1935,10 +1911,8 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, size += alen; ql_dbg(ql_dbg_disc, vha, 0x20c5, "HOSTNAME = %s.\n", eiter->a.host_name); - if (callopt == CALLOPT_FDMI1) - goto done; - + goto done; /* Node Name */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME); @@ -1949,7 +1923,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, size += alen; ql_dbg(ql_dbg_disc, vha, 0x20c6, "NODENAME = %016llx.\n", wwn_to_u64(eiter->a.node_name)); - /* Port Name */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_NAME); @@ -1960,7 +1933,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, size += alen; ql_dbg(ql_dbg_disc, vha, 0x20c7, "PORTNAME = %016llx.\n", wwn_to_u64(eiter->a.port_name)); - /* Port Symbolic Name */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME); @@ -1972,7 +1944,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, size += alen; ql_dbg(ql_dbg_disc, vha, 0x20c8, "PORT SYMBOLIC NAME = %s\n", eiter->a.port_sym_name); - /* Port Type */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_TYPE); @@ -1983,7 +1954,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, size += alen; ql_dbg(ql_dbg_disc, vha, 0x20c9, "PORT TYPE = %x.\n", be32_to_cpu(eiter->a.port_type)); - /* Supported Class of Service */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS); @@ -1994,7 +1964,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, size += alen; ql_dbg(ql_dbg_disc, vha, 0x20ca, "SUPPORTED COS = %08x\n", be32_to_cpu(eiter->a.port_supported_cos)); - /* Port Fabric Name */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME); @@ -2006,7 +1975,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, size += alen; ql_dbg(ql_dbg_disc, vha, 0x20cb, "FABRIC NAME = %016llx.\n", wwn_to_u64(eiter->a.fabric_name)); - /* FC4_type */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE); @@ -2021,7 +1989,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, ql_dbg(ql_dbg_disc, vha, 0x20cc, "PORT ACTIVE FC4 TYPE = %016llx.\n", *(uint64_t *)eiter->a.port_fc4_type); - /* Port State */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_STATE); @@ -2032,7 +1999,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, size += alen; ql_dbg(ql_dbg_disc, vha, 0x20cd, "PORT_STATE = %x.\n", be32_to_cpu(eiter->a.port_state)); - /* Number of Ports */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_COUNT); @@ -2043,7 +2009,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, size += alen; ql_dbg(ql_dbg_disc, vha, 0x20ce, "PORT COUNT = %x.\n", be32_to_cpu(eiter->a.num_ports)); - /* Port Identifier */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_PORT_IDENTIFIER); @@ -2054,10 +2019,8 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, size += alen; ql_dbg(ql_dbg_disc, vha, 0x20cf, "PORT ID = %x.\n", be32_to_cpu(eiter->a.port_id)); - if (callopt == CALLOPT_FDMI2 || !ql2xsmartsan) goto done; - /* Smart SAN Service Category (Populate Smart SAN Initiator)*/ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_SMARTSAN_SERVICE); @@ -2070,7 +2033,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void 
*entries, size += alen; ql_dbg(ql_dbg_disc, vha, 0x20d0, "SMARTSAN SERVICE CATEGORY = %s.\n", eiter->a.smartsan_service); - /* Smart SAN GUID (NWWN+PWWN) */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_SMARTSAN_GUID); @@ -2084,7 +2046,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, "Smart SAN GUID = %016llx-%016llx\n", wwn_to_u64(eiter->a.smartsan_guid), wwn_to_u64(eiter->a.smartsan_guid + WWN_SIZE)); - /* Smart SAN Version (populate "Smart SAN Version 1.0") */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_SMARTSAN_VERSION); @@ -2097,12 +2058,11 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, size += alen; ql_dbg(ql_dbg_disc, vha, 0x20d2, "SMARTSAN VERSION = %s\n", eiter->a.smartsan_version); - /* Smart SAN Product Name (Specify Adapter Model No) */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_SMARTSAN_PROD_NAME); - alen = scnprintf(eiter->a.smartsan_prod_name, - sizeof(eiter->a.smartsan_prod_name), + alen = scnprintf( + eiter->a.smartsan_prod_name, sizeof(eiter->a.smartsan_prod_name), "ISP%04x", ha->pdev->device); alen += FDMI_ATTR_ALIGNMENT(alen); alen += FDMI_ATTR_TYPELEN(eiter); @@ -2110,7 +2070,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, size += alen; ql_dbg(ql_dbg_disc, vha, 0x20d3, "SMARTSAN PRODUCT NAME = %s\n", eiter->a.smartsan_prod_name); - /* Smart SAN Port Info (specify: 1=Physical, 2=NPIV, 3=SRIOV) */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_SMARTSAN_PORT_INFO); @@ -2121,7 +2080,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, size += alen; ql_dbg(ql_dbg_disc, vha, 0x20d4, "SMARTSAN PORT INFO = %x\n", eiter->a.smartsan_port_info); - /* Smart SAN Security Support */ eiter = entries + size; eiter->type = cpu_to_be16(FDMI_SMARTSAN_SECURITY_SUPPORT); @@ -2133,7 +2091,6 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, ql_dbg(ql_dbg_disc, vha, 0x20d6, "SMARTSAN SECURITY SUPPORT = %d\n", be32_to_cpu(eiter->a.smartsan_security_support)); - done: return size; } @@ -2141,23 +2098,23 @@ qla2x00_port_attributes(scsi_qla_host_t *vha, void *entries, /** * qla2x00_fdmi_rhba() - perform RHBA FDMI registration * @vha: HA context - * @callopt: Option to issue FDMI registration * * Returns 0 on success. */ static int -qla2x00_fdmi_rhba(scsi_qla_host_t *vha, unsigned int callopt) +qla2x00_fdmi_rhba(scsi_qla_host_t *vha, uint callopt) { struct qla_hw_data *ha = vha->hw; - unsigned long size = 0; - unsigned int rval, count; + ulong size = 0; + uint rval, count; ms_iocb_entry_t *ms_pkt; struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; void *entries; - count = callopt != CALLOPT_FDMI1 ? - FDMI2_HBA_ATTR_COUNT : FDMI1_HBA_ATTR_COUNT; + count = + callopt != CALLOPT_FDMI1 ? + FDMI2_HBA_ATTR_COUNT : FDMI1_HBA_ATTR_COUNT; size = RHBA_RSP_SIZE; @@ -2215,8 +2172,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha, unsigned int callopt) rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA"); if (rval) { if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && - ct_rsp->header.explanation_code == - CT_EXPL_ALREADY_REGISTERED) { + ct_rsp->header.explanation_code == CT_EXPL_ALREADY_REGISTERED) { ql_dbg(ql_dbg_disc, vha, 0x20e4, "RHBA already registered.\n"); return QLA_ALREADY_REGISTERED; @@ -2269,16 +2225,7 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha) } return rval; } - -/** - * qla2x00_fdmi_rprt() perform RPRT registration - * @vha: HA context - * @callopt: Option to issue extended or standard FDMI - * command parameter - * - * Returns 0 on success. 
- */ -static int +int qla2x00_fdmi_rprt(scsi_qla_host_t *vha, int callopt) { struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev); @@ -2334,8 +2281,7 @@ qla2x00_fdmi_rprt(scsi_qla_host_t *vha, int callopt) rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPRT"); if (rval) { if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && - ct_rsp->header.explanation_code == - CT_EXPL_ALREADY_REGISTERED) { + ct_rsp->header.explanation_code == CT_EXPL_ALREADY_REGISTERED) { ql_dbg(ql_dbg_disc, vha, 0x20ec, "RPRT already registered.\n"); return QLA_ALREADY_REGISTERED; @@ -2354,7 +2300,6 @@ qla2x00_fdmi_rprt(scsi_qla_host_t *vha, int callopt) /** * qla2x00_fdmi_rpa() - perform RPA registration * @vha: HA context - * @callopt: Option to issue FDMI registration * * Returns 0 on success. */ @@ -2424,8 +2369,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha, uint callopt) rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA"); if (rval) { if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM && - ct_rsp->header.explanation_code == - CT_EXPL_ALREADY_REGISTERED) { + ct_rsp->header.explanation_code == CT_EXPL_ALREADY_REGISTERED) { ql_dbg(ql_dbg_disc, vha, 0x20f4, "RPA already registered.\n"); return QLA_ALREADY_REGISTERED; @@ -2825,6 +2769,10 @@ void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea) if (fcport->disc_state == DSC_DELETE_PEND) return; + /* We will figure-out what happen after AUTH completes */ + if (fcport->disc_state == DSC_LOGIN_AUTH_PEND) + return; + if (ea->sp->gen2 != fcport->login_gen) { /* target side must have changed it. */ ql_dbg(ql_dbg_disc, vha, 0x20d3, @@ -2887,7 +2835,8 @@ static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res) qla24xx_handle_gpsc_event(vha, &ea); done: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) @@ -2899,6 +2848,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) return rval; + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; @@ -2907,8 +2857,8 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) sp->name = "gpsc"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; - - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla24xx_async_gpsc_sp_done); /* CT_IU preamble */ ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD, @@ -2926,9 +2876,6 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id; - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - sp->done = qla24xx_async_gpsc_sp_done; - ql_dbg(ql_dbg_disc, vha, 0x205e, "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n", sp->name, fcport->port_name, sp->handle, @@ -2941,27 +2888,12 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } -int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id) -{ - struct qla_work_evt *e; - - if (test_bit(UNLOADING, &vha->dpc_flags) || - (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags))) - return 0; - - e = qla2x00_alloc_work(vha, QLA_EVT_GPNID); - if (!e) - return QLA_FUNCTION_FAILED; - - e->u.gpnid.id = *id; - return 
qla2x00_post_work(vha, e); -} - void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp) { struct srb_iocb *c = &sp->u.iocb_cmd; @@ -2970,6 +2902,26 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp) case SRB_ELS_DCMD: qla2x00_els_dcmd2_free(vha, &c->u.els_plogi); break; + case SRB_ELS_RDF: + case SRB_ELS_EDC: + if (sp->u.iocb_cmd.u.drv_els.dma_addr.cmd_buf) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.drv_els.dma_addr.cmd_len, + sp->u.iocb_cmd.u.drv_els.dma_addr.cmd_buf, + sp->u.iocb_cmd.u.drv_els.dma_addr.cmd_dma); + sp->u.iocb_cmd.u.drv_els.dma_addr.cmd_buf = NULL; + } + if (sp->u.iocb_cmd.u.drv_els.dma_addr.rsp_buf) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.drv_els.dma_addr.rsp_len, + sp->u.iocb_cmd.u.drv_els.dma_addr.rsp_buf, + sp->u.iocb_cmd.u.drv_els.dma_addr.rsp_dma); + sp->u.iocb_cmd.u.drv_els.dma_addr.rsp_buf = NULL; + } + qla2x00_free_fcport(sp->fcport); + qla2x00_rel_sp(sp); + return; + case SRB_CT_PTHRU_CMD: default: if (sp->u.iocb_cmd.u.ctarg.req) { @@ -2990,291 +2942,8 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp) break; } - sp->free(sp); -} - -void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) -{ - fc_port_t *fcport, *conflict, *t; - u16 data[2]; - - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d port_id: %06x\n", - __func__, __LINE__, ea->id.b24); - - if (ea->rc) { - /* cable is disconnected */ - list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) { - if (fcport->d_id.b24 == ea->id.b24) - fcport->scan_state = QLA_FCPORT_SCAN; - - qlt_schedule_sess_for_deletion(fcport); - } - } else { - /* cable is connected */ - fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1); - if (fcport) { - list_for_each_entry_safe(conflict, t, &vha->vp_fcports, - list) { - if ((conflict->d_id.b24 == ea->id.b24) && - (fcport != conflict)) - /* - * 2 fcports with conflict Nport ID or - * an existing fcport is having nport ID - * conflict with new fcport. - */ - - conflict->scan_state = QLA_FCPORT_SCAN; - - qlt_schedule_sess_for_deletion(conflict); - } - - fcport->scan_needed = 0; - fcport->rscn_gen++; - fcport->scan_state = QLA_FCPORT_FOUND; - fcport->flags |= FCF_FABRIC_DEVICE; - if (fcport->login_retry == 0) { - fcport->login_retry = - vha->hw->login_retry_count; - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Port login retry %8phN, lid 0x%04x cnt=%d.\n", - fcport->port_name, fcport->loop_id, - fcport->login_retry); - } - switch (fcport->disc_state) { - case DSC_LOGIN_COMPLETE: - /* recheck session is still intact. */ - ql_dbg(ql_dbg_disc, vha, 0x210d, - "%s %d %8phC revalidate session with ADISC\n", - __func__, __LINE__, fcport->port_name); - data[0] = data[1] = 0; - qla2x00_post_async_adisc_work(vha, fcport, - data); - break; - case DSC_DELETED: - ql_dbg(ql_dbg_disc, vha, 0x210d, - "%s %d %8phC login\n", __func__, __LINE__, - fcport->port_name); - fcport->d_id = ea->id; - qla24xx_fcport_handle_login(vha, fcport); - break; - case DSC_DELETE_PEND: - fcport->d_id = ea->id; - break; - default: - fcport->d_id = ea->id; - break; - } - } else { - list_for_each_entry_safe(conflict, t, &vha->vp_fcports, - list) { - if (conflict->d_id.b24 == ea->id.b24) { - /* 2 fcports with conflict Nport ID or - * an existing fcport is having nport ID - * conflict with new fcport. 
- */ - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC DS %d\n", - __func__, __LINE__, - conflict->port_name, - conflict->disc_state); - - conflict->scan_state = QLA_FCPORT_SCAN; - qlt_schedule_sess_for_deletion(conflict); - } - } - - /* create new fcport */ - ql_dbg(ql_dbg_disc, vha, 0x2065, - "%s %d %8phC post new sess\n", - __func__, __LINE__, ea->port_name); - qla24xx_post_newsess_work(vha, &ea->id, - ea->port_name, NULL, NULL, 0); - } - } -} - -static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res) -{ - struct scsi_qla_host *vha = sp->vha; - struct ct_sns_req *ct_req = - (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; - struct ct_sns_rsp *ct_rsp = - (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp; - struct event_arg ea; - struct qla_work_evt *e; - unsigned long flags; - - if (res) - ql_dbg(ql_dbg_disc, vha, 0x2066, - "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n", - sp->name, res, sp->gen1, &ct_req->req.port_id.port_id, - ct_rsp->rsp.gpn_id.port_name); - else - ql_dbg(ql_dbg_disc, vha, 0x2066, - "Async done-%s good rscn gen %d ID %3phC. %8phC\n", - sp->name, sp->gen1, &ct_req->req.port_id.port_id, - ct_rsp->rsp.gpn_id.port_name); - - memset(&ea, 0, sizeof(ea)); - memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); - ea.sp = sp; - ea.id = be_to_port_id(ct_req->req.port_id.port_id); - ea.rc = res; - - spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); - list_del(&sp->elem); - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - - if (res) { - if (res == QLA_FUNCTION_TIMEOUT) { - qla24xx_post_gpnid_work(sp->vha, &ea.id); - sp->free(sp); - return; - } - } else if (sp->gen1) { - /* There was another RSCN for this Nport ID */ - qla24xx_post_gpnid_work(sp->vha, &ea.id); - sp->free(sp); - return; - } - - qla24xx_handle_gpnid_event(vha, &ea); - - e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); - if (!e) { - /* please ignore kernel warning. otherwise, we have mem leak. */ - dma_free_coherent(&vha->hw->pdev->dev, - sp->u.iocb_cmd.u.ctarg.req_allocated_size, - sp->u.iocb_cmd.u.ctarg.req, - sp->u.iocb_cmd.u.ctarg.req_dma); - sp->u.iocb_cmd.u.ctarg.req = NULL; - - dma_free_coherent(&vha->hw->pdev->dev, - sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, - sp->u.iocb_cmd.u.ctarg.rsp, - sp->u.iocb_cmd.u.ctarg.rsp_dma); - sp->u.iocb_cmd.u.ctarg.rsp = NULL; - - sp->free(sp); - return; - } - - e->u.iosb.sp = sp; - qla2x00_post_work(vha, e); -} - -/* Get WWPN with Nport ID. 
*/ -int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) -{ - int rval = QLA_FUNCTION_FAILED; - struct ct_sns_req *ct_req; - srb_t *sp, *tsp; - struct ct_sns_pkt *ct_sns; - unsigned long flags; - - if (!vha->flags.online) - goto done; - - sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); - if (!sp) - goto done; - - sp->type = SRB_CT_PTHRU_CMD; - sp->name = "gpnid"; - sp->u.iocb_cmd.u.ctarg.id = *id; - sp->gen1 = 0; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); - - spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); - list_for_each_entry(tsp, &vha->gpnid_list, elem) { - if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) { - tsp->gen1++; - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - sp->free(sp); - goto done; - } - } - list_add_tail(&sp->elem, &vha->gpnid_list); - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - - sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma, - GFP_KERNEL); - sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); - if (!sp->u.iocb_cmd.u.ctarg.req) { - ql_log(ql_log_warn, vha, 0xd041, - "Failed to allocate ct_sns request.\n"); - goto done_free_sp; - } - - sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma, - GFP_KERNEL); - sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); - if (!sp->u.iocb_cmd.u.ctarg.rsp) { - ql_log(ql_log_warn, vha, 0xd042, - "Failed to allocate ct_sns request.\n"); - goto done_free_sp; - } - - ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp; - memset(ct_sns, 0, sizeof(*ct_sns)); - - ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req; - /* CT_IU preamble */ - ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE); - - /* GPN_ID req */ - ct_req->req.port_id.port_id = port_id_to_be_id(*id); - - sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE; - sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE; - sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - sp->done = qla2x00_async_gpnid_sp_done; - - ql_dbg(ql_dbg_disc, vha, 0x2067, - "Async-%s hdl=%x ID %3phC.\n", sp->name, - sp->handle, &ct_req->req.port_id.port_id); - - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) - goto done_free_sp; - - return rval; - -done_free_sp: - spin_lock_irqsave(&vha->hw->vport_slock, flags); - list_del(&sp->elem); - spin_unlock_irqrestore(&vha->hw->vport_slock, flags); - - if (sp->u.iocb_cmd.u.ctarg.req) { - dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), - sp->u.iocb_cmd.u.ctarg.req, - sp->u.iocb_cmd.u.ctarg.req_dma); - sp->u.iocb_cmd.u.ctarg.req = NULL; - } - if (sp->u.iocb_cmd.u.ctarg.rsp) { - dma_free_coherent(&vha->hw->pdev->dev, - sizeof(struct ct_sns_pkt), - sp->u.iocb_cmd.u.ctarg.rsp, - sp->u.iocb_cmd.u.ctarg.rsp_dma); - sp->u.iocb_cmd.u.ctarg.rsp = NULL; - } - - sp->free(sp); -done: - return rval; -} - -void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea) -{ - fc_port_t *fcport = ea->fcport; - - qla24xx_post_gnl_work(vha, fcport); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } void qla24xx_async_gffid_sp_done(srb_t *sp, int res) @@ -3282,7 +2951,6 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res) struct scsi_qla_host *vha = sp->vha; fc_port_t *fcport = sp->fcport; struct ct_sns_rsp *ct_rsp; - struct event_arg ea; uint8_t fc4_scsi_feat; uint8_t fc4_nvme_feat; @@ -3290,10 +2958,10 @@ void 
qla24xx_async_gffid_sp_done(srb_t *sp, int res) "Async done-%s res %x ID %x. %8phC\n", sp->name, res, fcport->d_id.b24, fcport->port_name); - fcport->flags &= ~FCF_ASYNC_SENT; - ct_rsp = &fcport->ct_desc.ct_sns->p.rsp; + ct_rsp = sp->u.iocb_cmd.u.ctarg.rsp; fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; + sp->rc = res; /* * FC-GS-7, 5.2.3.12 FC-4 Features - format @@ -3314,68 +2982,126 @@ void qla24xx_async_gffid_sp_done(srb_t *sp, int res) } } - memset(&ea, 0, sizeof(ea)); - ea.sp = sp; - ea.fcport = sp->fcport; - ea.rc = res; + if (sp->flags & SRB_WAKEUP_ON_COMP) { + complete(sp->comp); + } else { + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.req_allocated_size, + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } - qla24xx_handle_gffid_event(vha, &ea); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + /* we should not be here */ + dump_stack(); + } } /* Get FC4 Feature with Nport ID. */ -int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport) +int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport, bool wait) { int rval = QLA_FUNCTION_FAILED; struct ct_sns_req *ct_req; srb_t *sp; + DECLARE_COMPLETION_ONSTACK(comp); - if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) + /* this routine does not have handling for no wait */ + if (!vha->flags.online || !wait) return rval; + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) return rval; - fcport->flags |= FCF_ASYNC_SENT; sp->type = SRB_CT_PTHRU_CMD; sp->name = "gffid"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla24xx_async_gffid_sp_done); + sp->comp = &comp; + sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout; + + if (wait) + sp->flags = SRB_WAKEUP_ON_COMP; + + sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt); + sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.req_allocated_size, &sp->u.iocb_cmd.u.ctarg.req_dma, + GFP_KERNEL); + if (!sp->u.iocb_cmd.u.ctarg.req) { + ql_log(ql_log_warn, vha, 0xd041, + "%s: Failed to allocate ct_sns request.\n", + __func__); + goto done_free_sp; + } - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt); + sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, &sp->u.iocb_cmd.u.ctarg.rsp_dma, + GFP_KERNEL); + if (!sp->u.iocb_cmd.u.ctarg.rsp) { + ql_log(ql_log_warn, vha, 0xd041, + "%s: Failed to allocate ct_sns response.\n", + __func__); + goto done_free_sp; + } /* CT_IU preamble */ - ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD, - GFF_ID_RSP_SIZE); + ct_req = qla2x00_prep_ct_req(sp->u.iocb_cmd.u.ctarg.req, GFF_ID_CMD, GFF_ID_RSP_SIZE); ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain; ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area; ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa; - sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
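+	/* GFF_ID now uses the per-command DMA buffers allocated above instead of the shared fcport->ct_desc CT buffer, avoiding reuse of that shared buffer while the command is in flight. */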
- sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; - sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; - sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->done = qla24xx_async_gffid_sp_done; - - ql_dbg(ql_dbg_disc, vha, 0x2132, - "Async-%s hdl=%x %8phC.\n", sp->name, - sp->handle, fcport->port_name); - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + rval = QLA_FUNCTION_FAILED; goto done_free_sp; + } else { + ql_dbg(ql_dbg_disc, vha, 0x3074, + "Async-%s hdl=%x portid %06x\n", sp->name, sp->handle, fcport->d_id.b24); + } + + wait_for_completion(sp->comp); + rval = sp->rc; - return rval; done_free_sp: - sp->free(sp); - fcport->flags &= ~FCF_ASYNC_SENT; + if (sp->u.iocb_cmd.u.ctarg.req) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.req_allocated_size, + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + } + + if (sp->u.iocb_cmd.u.ctarg.rsp) { + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; + } + + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return rval; } @@ -3442,6 +3168,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) list_for_each_entry(fcport, &vha->vp_fcports, list) { if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) { fcport->scan_state = QLA_FCPORT_SCAN; + if (fcport->loop_id == FC_NO_LOOP_ID) + fcport->logout_on_delete = 0; + else + fcport->logout_on_delete = 1; } } goto login_logout; @@ -3493,7 +3223,16 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) continue; fcport->scan_state = QLA_FCPORT_FOUND; fcport->last_rscn_gen = fcport->rscn_gen; + fcport->fc4_type = rp->fc4type; found = true; + + if (fcport->scan_needed) { + if (NVME_PRIORITY(vha->hw, fcport)) + fcport->do_prli_nvme = 1; + else + fcport->do_prli_nvme = 0; + } + /* * If device was not a fabric device before. */ @@ -3501,9 +3240,8 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) qla2x00_clear_loop_id(fcport); fcport->flags |= FCF_FABRIC_DEVICE; } else if (fcport->d_id.b24 != rp->id.b24 || - (fcport->scan_needed && - fcport->port_type != FCT_INITIATOR && - fcport->port_type != FCT_NVME_INITIATOR)) { + (fcport->scan_needed && + atomic_read(&fcport->state) == FCS_ONLINE)) { qlt_schedule_sess_for_deletion(fcport); } fcport->d_id.b24 = rp->id.b24; @@ -3538,30 +3276,30 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) if (fcport->scan_state != QLA_FCPORT_FOUND) { bool do_delete = false; - if (fcport->scan_needed && fcport->disc_state == DSC_LOGIN_PEND) { - /* Cable got disconnected after we sent - * a login. Do delete to prevent timeout. - */ + if (fcport->scan_needed && fcport->disc_state == DSC_LOGIN_PEND) { + /* The cable was disconnected after we + * sent a login. Do delete to prevent + * a timeout. */
fcport->logout_on_delete = 1; do_delete = true; } fcport->scan_needed = 0; if (((qla_dual_mode_enabled(vha) || - qla_ini_mode_enabled(vha)) && + qla_ini_mode_enabled(vha)) && atomic_read(&fcport->state) == FCS_ONLINE) || do_delete) { if (fcport->loop_id != FC_NO_LOOP_ID) { if (fcport->flags & FCF_FCP2_DEVICE) - fcport->logout_on_delete = 0; + continue; - ql_dbg(ql_dbg_disc, vha, 0x20f0, + ql_log(ql_log_warn, vha, 0x20f0, "%s %d %8phC post del sess\n", __func__, __LINE__, fcport->port_name); + fcport->tgt_link_down_time = 0; qlt_schedule_sess_for_deletion(fcport); continue; } @@ -3747,7 +3485,6 @@ static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) "Async done-%s res %x FC4Type %x\n", sp->name, res, sp->gen2); - del_timer(&sp->u.iocb_cmd.timer); sp->rc = res; if (res) { unsigned long flags; @@ -3851,7 +3588,7 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) { ql_log(ql_log_warn, vha, 0xffff, - "%s: req %p rsp %p are not setup\n", + "%s: req %px rsp %px are not setup\n", __func__, sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.rsp); spin_lock_irqsave(&vha->work_lock, flags); @@ -3872,9 +3609,8 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, sp->name = "gnnft"; sp->gen1 = vha->hw->base_qpair->chip_reset; sp->gen2 = fc4_type; - - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_gpnft_gnnft_sp_done); memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size); @@ -3890,8 +3626,6 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->done = qla2x00_async_gpnft_gnnft_sp_done; - ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s hdl=%x FC4Type %x.\n", sp->name, sp->handle, ct_req->req.gpn_ft.port_type); @@ -3918,8 +3652,8 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, sp->u.iocb_cmd.u.ctarg.rsp_dma); sp->u.iocb_cmd.u.ctarg.rsp = NULL; } - - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; @@ -3971,9 +3705,12 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, "%s: Performing FCP Scan\n", __func__); - if (sp) - sp->free(sp); /* should not happen */ + if (sp) { + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + } + /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) { spin_lock_irqsave(&vha->work_lock, flags); @@ -4018,6 +3755,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) sp->u.iocb_cmd.u.ctarg.req, sp->u.iocb_cmd.u.ctarg.req_dma); sp->u.iocb_cmd.u.ctarg.req = NULL; + /* ref: INIT */ qla2x00_rel_sp(sp); return rval; } @@ -4037,9 +3775,8 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) sp->name = "gpnft"; sp->gen1 = vha->hw->base_qpair->chip_reset; sp->gen2 = fc4_type; - - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_gpnft_gnnft_sp_done); rspsz = 
sp->u.iocb_cmd.u.ctarg.rsp_size; memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size); @@ -4054,8 +3791,6 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->done = qla2x00_async_gpnft_gnnft_sp_done; - ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s hdl=%x FC4Type %x.\n", sp->name, sp->handle, ct_req->req.gpn_ft.port_type); @@ -4083,7 +3818,8 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) sp->u.iocb_cmd.u.ctarg.rsp = NULL; } - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); spin_lock_irqsave(&vha->work_lock, flags); vha->scan.scan_flags &= ~SF_SCANNING; @@ -4117,116 +3853,6 @@ void qla_scan_work_fn(struct work_struct *work) spin_unlock_irqrestore(&vha->work_lock, flags); } -/* GNN_ID */ -void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea) -{ - qla24xx_post_gnl_work(vha, ea->fcport); -} - -static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res) -{ - struct scsi_qla_host *vha = sp->vha; - fc_port_t *fcport = sp->fcport; - u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name; - struct event_arg ea; - u64 wwnn; - - fcport->flags &= ~FCF_ASYNC_SENT; - wwnn = wwn_to_u64(node_name); - if (wwnn) - memcpy(fcport->node_name, node_name, WWN_SIZE); - - memset(&ea, 0, sizeof(ea)); - ea.fcport = fcport; - ea.sp = sp; - ea.rc = res; - - ql_dbg(ql_dbg_disc, vha, 0x204f, - "Async done-%s res %x, WWPN %8phC %8phC\n", - sp->name, res, fcport->port_name, fcport->node_name); - - qla24xx_handle_gnnid_event(vha, &ea); - - sp->free(sp); -} - -int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport) -{ - int rval = QLA_FUNCTION_FAILED; - struct ct_sns_req *ct_req; - srb_t *sp; - - if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) - return rval; - - qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID); - sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); - if (!sp) - goto done; - - fcport->flags |= FCF_ASYNC_SENT; - sp->type = SRB_CT_PTHRU_CMD; - sp->name = "gnnid"; - sp->gen1 = fcport->rscn_gen; - sp->gen2 = fcport->login_gen; - - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); - - /* CT_IU preamble */ - ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD, - GNN_ID_RSP_SIZE); - - /* GNN_ID req */ - ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id); - - - /* req & rsp use the same buffer */ - sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns; - sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma; - sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns; - sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma; - sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE; - sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE; - sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - - sp->done = qla2x00_async_gnnid_sp_done; - - ql_dbg(ql_dbg_disc, vha, 0xffff, - "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n", - sp->name, fcport->port_name, - sp->handle, fcport->loop_id, fcport->d_id.b24); - - rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) - goto done_free_sp; - return rval; - -done_free_sp: - sp->free(sp); - fcport->flags &= ~FCF_ASYNC_SENT; -done: - return rval; -} - -int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport) -{ - struct qla_work_evt *e; - int ls; - - ls = atomic_read(&vha->loop_state); - if (((ls != LOOP_READY) && (ls != LOOP_UP)) || - test_bit(UNLOADING, &vha->dpc_flags)) - return 0; - - e = 
qla2x00_alloc_work(vha, QLA_EVT_GNNID); - if (!e) - return QLA_FUNCTION_FAILED; - - e->u.fcport.fcport = fcport; - return qla2x00_post_work(vha, e); -} - /* GPFN_ID */ void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) { @@ -4277,7 +3903,8 @@ static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res) qla24xx_handle_gfpnid_event(vha, &ea); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) @@ -4289,6 +3916,7 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) return rval; + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); if (!sp) goto done; @@ -4297,9 +3925,8 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) sp->name = "gfpnid"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; - - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_gfpnid_sp_done); /* CT_IU preamble */ ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD, @@ -4318,8 +3945,6 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE; sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS; - sp->done = qla2x00_async_gfpnid_sp_done; - ql_dbg(ql_dbg_disc, vha, 0xffff, "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n", sp->name, fcport->port_name, @@ -4332,7 +3957,8 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 9452848ede3f8..8c3f1ffd4bf7f 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1,7 +1,8 @@ -// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. 
*/ #include "qla_def.h" #include "qla_gbl.h" @@ -34,7 +35,6 @@ static int qla2x00_restart_isp(scsi_qla_host_t *); static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); static int qla84xx_init_chip(scsi_qla_host_t *); static int qla25xx_init_queues(struct qla_hw_data *); -static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *); static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea); static void qla24xx_handle_prli_done_event(struct scsi_qla_host *, @@ -44,14 +44,24 @@ static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *); /* SRB Extensions ---------------------------------------------------------- */ void -qla2x00_sp_timeout(struct timer_list *t) +qla2x00_sp_timeout(qla_timer_arg_t t) { - srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer); + srb_t *sp = qla_from_timer(sp, t, u.iocb_cmd.timer); struct srb_iocb *iocb; + scsi_qla_host_t *vha = sp->vha; WARN_ON(irqs_disabled()); iocb = &sp->u.iocb_cmd; iocb->timeout(sp); + + /* ref: TMR */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); + + if (vha && qla2x00_isp_reg_stat(vha->hw)) { + ql_log(ql_log_info, vha, 0x9008, + "PCI/Register disconnect.\n"); + qla_pci_set_eeh_busy(vha); + } } void qla2x00_sp_free(srb_t *sp) @@ -62,16 +72,6 @@ void qla2x00_sp_free(srb_t *sp) qla2x00_rel_sp(sp); } -void qla2xxx_rel_done_warning(srb_t *sp, int res) -{ - WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp); -} - -void qla2xxx_rel_free_warning(srb_t *sp) -{ - WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp); -} - /* Asynchronous Login/Logout Routines -------------------------------------- */ unsigned long @@ -126,22 +126,30 @@ static void qla24xx_abort_iocb_timeout(void *data) } spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); - if (sp->cmd_sp) + if (sp->cmd_sp) { + /* This done function should take care of + * original command ref: INIT + */ sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED); + } - abt->u.abt.comp_status = cpu_to_le16(CS_TIMEOUT); + abt->u.abt.comp_status = CS_TIMEOUT; sp->done(sp, QLA_OS_TIMER_EXPIRED); } static void qla24xx_abort_sp_done(srb_t *sp, int res) { struct srb_iocb *abt = &sp->u.iocb_cmd; + srb_t *orig_sp = sp->cmd_sp; + + if (orig_sp) + qla_wait_nvme_release_cmd_kref(orig_sp); - del_timer(&sp->u.iocb_cmd.timer); if (sp->flags & SRB_WAKEUP_ON_COMP) complete(&abt->u.abt.comp); else - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) @@ -150,45 +158,49 @@ int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) struct srb_iocb *abt_iocb; srb_t *sp; int rval = QLA_FUNCTION_FAILED; + int8_t bail; + /* ref: INIT for ABTS command */ sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport, GFP_ATOMIC); if (!sp) - return rval; + return QLA_MEMORY_ALLOC_FAILED; + QLA_VHA_MARK_BUSY(vha, bail); abt_iocb = &sp->u.iocb_cmd; sp->type = SRB_ABT_CMD; sp->name = "abort"; sp->qpair = cmd_sp->qpair; - sp->cmd_sp = cmd_sp; + if (cmd_sp->type != SRB_MB_IOCB) + sp->cmd_sp = cmd_sp; if (wait) sp->flags = SRB_WAKEUP_ON_COMP; - abt_iocb->timeout = qla24xx_abort_iocb_timeout; init_completion(&abt_iocb->u.abt.comp); /* FW can send 2 x ABTS's timeout/20s */ - qla2x00_init_timer(sp, 42); + qla2x00_init_async_sp(sp, 42, qla24xx_abort_sp_done); + sp->u.iocb_cmd.timeout = qla24xx_abort_iocb_timeout; abt_iocb->u.abt.cmd_hndl = cmd_sp->handle; abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id); - sp->done = qla24xx_abort_sp_done; 
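+	/* qla2x00_init_async_sp() above wires up the default done handler and the 42 second timer; the ABTS-specific qla24xx_abort_iocb_timeout then replaces the default timeout callback. */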
- ql_dbg(ql_dbg_async, vha, 0x507c, "Abort command issued - hdl=%x, type=%x\n", cmd_sp->handle, cmd_sp->type); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return rval; } if (wait) { wait_for_completion(&abt_iocb->u.abt.comp); rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? - QLA_SUCCESS : QLA_FUNCTION_FAILED; - sp->free(sp); + QLA_SUCCESS : QLA_ERR_FROM_FW; + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } return rval; @@ -246,7 +258,7 @@ qla2x00_async_iocb_timeout(void *data) case SRB_CTRL_VP: default: rc = qla24xx_async_abort_cmd(sp, false); - if (rc) { + if (rc || (sp->type == SRB_MB_IOCB)) { spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { @@ -283,26 +295,13 @@ static void qla2x00_async_login_sp_done(srb_t *sp, int res) ea.iop[0] = lio->u.logio.iop[0]; ea.iop[1] = lio->u.logio.iop[1]; ea.sp = sp; + if (res) + ea.data[0] = MBS_COMMAND_ERROR; qla24xx_handle_plogi_done_event(vha, &ea); } - sp->free(sp); -} - -static inline bool -fcport_is_smaller(fc_port_t *fcport) -{ - if (wwn_to_u64(fcport->port_name) < - wwn_to_u64(fcport->vha->port_name)) - return true; - else - return false; -} - -static inline bool -fcport_is_bigger(fc_port_t *fcport) -{ - return !fcport_is_smaller(fcport); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int @@ -316,14 +315,17 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) || fcport->loop_id == FC_NO_LOOP_ID) { ql_log(ql_log_warn, vha, 0xffff, - "%s: %8phC - not sending command.\n", - __func__, fcport->port_name); - return rval; + "%s: %8phC online %d flags %x - not sending command.\n", + __func__, fcport->port_name, vha->flags.online, + fcport->flags); + goto done; } + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); - if (!sp) + if (!sp) { goto done; + } qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); fcport->flags |= FCF_ASYNC_SENT; @@ -333,27 +335,33 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, sp->name = "login"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_login_sp_done); lio = &sp->u.iocb_cmd; - lio->timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); - - sp->done = qla2x00_async_login_sp_done; - if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) + if (N2N_TOPO(fcport->vha->hw) && fcport_is_bigger(fcport)) { lio->u.logio.flags |= SRB_LOGIN_PRLI_ONLY; - else - lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; + } else { + if (vha->hw->flags.edif_enabled && + DBELL_ACTIVE(vha)) { + lio->u.logio.flags |= + (SRB_LOGIN_FCSP | SRB_LOGIN_SKIP_PRLI); + } else { + lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; + } + } if (NVME_TARGET(vha->hw, fcport)) lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI; - ql_dbg(ql_dbg_disc, vha, 0x2072, - "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x " - "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id, - fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, - fcport->login_retry); - rval = qla2x00_start_sp(sp); + + ql_log(ql_log_warn, vha, 0x2072, + "Async-login - %8phC hdl=%x, loopid=%x portid=%06x retries=%d %s.\n", + fcport->port_name, sp->handle, fcport->loop_id, + fcport->d_id.b24, fcport->login_retry, + lio->u.logio.flags & SRB_LOGIN_FCSP ? 
"FCSP" : "" ); + if (rval != QLA_SUCCESS) { fcport->flags |= FCF_LOGIN_NEEDED; set_bit(RELOGIN_NEEDED, &vha->dpc_flags); @@ -363,10 +371,16 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); fcport->flags &= ~FCF_ASYNC_SENT; done: fcport->flags &= ~FCF_ASYNC_ACTIVE; + /* + * async login failed. Could be due to iocb/exchange resource being low. + * Set state DELETED for re-login process to start again. + */ + qla2x00_set_fcport_disc_state(fcport, DSC_DELETED); return rval; } @@ -374,36 +388,33 @@ static void qla2x00_async_logout_sp_done(srb_t *sp, int res) { sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); sp->fcport->login_gen++; - qlt_logo_completion_handler(sp->fcport, res); - sp->free(sp); + qlt_logo_completion_handler(sp->fcport, sp->u.iocb_cmd.u.logio.data[0]); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) { srb_t *sp; - struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; fcport->flags |= FCF_ASYNC_SENT; + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_LOGOUT_CMD; sp->name = "logout"; - - lio = &sp->u.iocb_cmd; - lio->timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); - - sp->done = qla2x00_async_logout_sp_done; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_logout_sp_done), ql_dbg(ql_dbg_disc, vha, 0x2070, - "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n", + "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC explicit %d.\n", sp->handle, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, - fcport->port_name); + fcport->port_name, fcport->explicit_logout); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) @@ -411,7 +422,8 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport) return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); return rval; @@ -425,6 +437,7 @@ qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport, /* Don't re-login in target mode */ if (!fcport->tgt_session) qla2x00_mark_device_lost(vha, fcport, 1); + fcport->prlo_rc = data[0]; qlt_logo_completion_handler(fcport, data[0]); } @@ -437,29 +450,26 @@ static void qla2x00_async_prlo_sp_done(srb_t *sp, int res) if (!test_bit(UNLOADING, &vha->dpc_flags)) qla2x00_post_async_prlo_done_work(sp->fcport->vha, sp->fcport, lio->u.logio.data); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport) { srb_t *sp; - struct srb_iocb *lio; int rval; rval = QLA_FUNCTION_FAILED; + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_PRLO_CMD; sp->name = "prlo"; - - lio = &sp->u.iocb_cmd; - lio->timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); - - sp->done = qla2x00_async_prlo_sp_done; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_prlo_sp_done); ql_dbg(ql_dbg_disc, vha, 0x2070, "Async-prlo - hdl=%x loop-id=%x portid=%02x%02x%02x.\n", @@ -473,9 +483,10 @@ qla2x00_async_prlo(struct scsi_qla_host *vha, fc_port_t *fcport) return rval; done_free_sp: - sp->free(sp); + 
/* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: - fcport->flags &= ~FCF_ASYNC_ACTIVE; + fcport->flags &= ~(FCF_ASYNC_ACTIVE); return rval; } @@ -532,7 +543,6 @@ static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport) e->u.fcport.fcport = fcport; fcport->flags |= FCF_ASYNC_ACTIVE; - qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); return qla2x00_post_work(vha, e); } @@ -556,10 +566,12 @@ static void qla2x00_async_adisc_sp_done(srb_t *sp, int res) ea.iop[1] = lio->u.logio.iop[1]; ea.fcport = sp->fcport; ea.sp = sp; + if (res) + ea.data[0] = MBS_COMMAND_ERROR; qla24xx_handle_adisc_event(vha, &ea); - - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int @@ -570,34 +582,39 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; - if (IS_SESSION_DELETED(fcport)) { + if (SESSION_DELETE(fcport)) { ql_log(ql_log_warn, vha, 0xffff, - "%s: %8phC is being delete - not sending command.\n", - __func__, fcport->port_name); + "%s: %8phC is being delete - not sending command.\n", + __func__, fcport->port_name); fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } - if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) - return rval; + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC online %d flags %x - not sending command.\n", + __func__, fcport->port_name, vha->flags.online, + fcport->flags); + goto done; + } fcport->flags |= FCF_ASYNC_SENT; + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_ADISC_CMD; sp->name = "adisc"; - - lio = &sp->u.iocb_cmd; - lio->timeout = qla2x00_async_iocb_timeout; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_adisc_sp_done); - sp->done = qla2x00_async_adisc_sp_done; - if (data[1] & QLA_LOGIO_LOGIN_RETRIED) + if (data[1] & QLA_LOGIO_LOGIN_RETRIED) { + lio = &sp->u.iocb_cmd; lio->u.logio.flags |= SRB_LOGIN_RETRIED; + } ql_dbg(ql_dbg_disc, vha, 0x206f, "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n", @@ -610,7 +627,8 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport, return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); qla2x00_post_async_adisc_work(vha, fcport, data); @@ -660,7 +678,7 @@ static int qla2x00_find_new_loop_id(scsi_qla_host_t *vha, fc_port_t *dev) spin_unlock_irqrestore(&ha->vport_slock, flags); if (rval == QLA_SUCCESS) - ql_dbg(ql_dbg_disc, dev->vha, 0x2086, + ql_dbg(ql_dbg_disc + ql_dbg_verbose, dev->vha, 0x2086, "Assigning new loopid=%x, portid=%x.\n", dev->loop_id, dev->d_id.b24); else @@ -696,11 +714,11 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, fcport = ea->fcport; ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d\n", + "%s %8phC DS %d LS rc %d %d login %d|%d rscn %d|%d lid %d edif %d\n", __func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state, ea->rc, fcport->login_gen, fcport->last_login_gen, - fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id); + fcport->rscn_gen, fcport->last_rscn_gen, vha->loop_id, fcport->edif.enable); if (fcport->disc_state == DSC_DELETE_PEND) return; @@ -722,6 +740,7 @@ static void 
qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, ql_dbg(ql_dbg_disc, vha, 0x20e0, "%s %8phC login gen changed\n", __func__, fcport->port_name); + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); return; } @@ -813,7 +832,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, default: switch (current_login_state) { case DSC_LS_PRLI_COMP: - ql_dbg(ql_dbg_disc + ql_dbg_verbose, + ql_dbg(ql_dbg_disc, vha, 0x20e4, "%s %d %8phC post gpdb\n", __func__, __LINE__, fcport->port_name); @@ -825,7 +844,15 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, qla2x00_post_async_adisc_work(vha, fcport, data); break; + case DSC_LS_PLOGI_COMP: + if (vha->hw->flags.edif_enabled) { + /* Check whether the remote port supports Secure login. */ + qla24xx_post_gpdb_work(vha, fcport, 0); + break; + } + fallthrough; case DSC_LS_PORT_UNAVAIL: + fallthrough; default: if (fcport->loop_id == FC_NO_LOOP_ID) { qla2x00_find_new_loop_id(vha, fcport); @@ -850,20 +877,25 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, * with GNL. Push disc_state back to DELETED * so GNL can go out again */ - qla2x00_set_fcport_disc_state(fcport, - DSC_DELETED); + fcport->disc_state = DSC_DELETED; + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); break; case DSC_LS_PRLI_COMP: if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) fcport->port_type = FCT_INITIATOR; else fcport->port_type = FCT_TARGET; - data[0] = data[1] = 0; qla2x00_post_async_adisc_work(vha, fcport, data); break; case DSC_LS_PLOGI_COMP: + if (vha->hw->flags.edif_enabled && + DBELL_ACTIVE(vha)) { + /* Check whether the remote port supports Secure login. */ + qla24xx_post_gpdb_work(vha, fcport, 0); + break; + } if (fcport_is_bigger(fcport)) { /* local adapter is smaller */ if (fcport->loop_id != FC_NO_LOOP_ID) @@ -874,6 +906,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, fcport); break; } + fallthrough; default: if (fcport_is_smaller(fcport)) { @@ -1010,7 +1043,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) ql_dbg(ql_dbg_disc, vha, 0x20e8, "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n", - __func__, &wwn, e->port_id[2], e->port_id[1], + __func__, (void *)&wwn, e->port_id[2], e->port_id[1], e->port_id[0], e->current_login_state, e->last_login_state, (loop_id & 0x7fff)); } @@ -1070,7 +1103,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) if (!list_empty(&vha->gnl.fcports)) { /* retrigger gnl */ list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports, - gnl_entry) { + gnl_entry) { list_del_init(&fcport->gnl_entry); fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS) @@ -1079,7 +1112,8 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) @@ -1110,6 +1144,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) vha->gnl.sent = 1; spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; @@ -1118,11 +1153,10 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) sp->name = "gnlist"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla24xx_async_gnl_sp_done); mbx = &sp->u.iocb_cmd; - mbx->timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2);
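+	/* The timer and done callback now come from qla2x00_init_async_sp(); what follows builds the MBC_PORT_NODE_NAME_LIST mailbox command, whose response lands in the vha->gnl DMA buffer. */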
- mb = sp->u.iocb_cmd.u.mbx.out_mb; mb[0] = MBC_PORT_NODE_NAME_LIST; mb[1] = BIT_2 | BIT_3; @@ -1133,8 +1167,6 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) mb[8] = vha->gnl.size; mb[9] = vha->vp_idx; - sp->done = qla24xx_async_gnl_sp_done; - ql_dbg(ql_dbg_disc, vha, 0x20da, "Async-%s - OUT WWPN %8phC hndl %x\n", sp->name, fcport->port_name, sp->handle); @@ -1146,7 +1178,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT); return rval; @@ -1192,16 +1225,13 @@ static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res) dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, sp->u.iocb_cmd.u.mbx.in_dma); - sp->free(sp); + kref_put(&sp->cmd_kref, qla2x00_sp_release); } -static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport) +int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport) { struct qla_work_evt *e; - if (vha->host->active_mode == MODE_TARGET) - return QLA_FUNCTION_FAILED; - e = qla2x00_alloc_work(vha, QLA_EVT_PRLI); if (!e) return QLA_FUNCTION_FAILED; @@ -1218,7 +1248,7 @@ static void qla2x00_async_prli_sp_done(srb_t *sp, int res) struct event_arg ea; ql_dbg(ql_dbg_disc, vha, 0x2129, - "%s %8phC res %d \n", __func__, + "%s %8phC res %x \n", __func__, sp->fcport->port_name, res); sp->fcport->flags &= ~FCF_ASYNC_SENT; @@ -1231,11 +1261,15 @@ static void qla2x00_async_prli_sp_done(srb_t *sp, int res) ea.iop[0] = lio->u.logio.iop[0]; ea.iop[1] = lio->u.logio.iop[1]; ea.sp = sp; + if (res == QLA_OS_TIMER_EXPIRED) + ea.data[0] = QLA_OS_TIMER_EXPIRED; + else if (res) + ea.data[0] = MBS_COMMAND_ERROR; qla24xx_handle_prli_done_event(vha, &ea); } - sp->free(sp); + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int @@ -1246,16 +1280,20 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) int rval = QLA_FUNCTION_FAILED; if (!vha->flags.online) { - ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n", - __func__, __LINE__, fcport->port_name); + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC exit\n", + __func__, __LINE__, + fcport->port_name); return rval; } if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND || fcport->fw_login_state == DSC_LS_PRLI_PEND) && qla_dual_mode_enabled(vha)) { - ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n", - __func__, __LINE__, fcport->port_name); + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s %d %8phC exit\n", + __func__, __LINE__, + fcport->port_name); return rval; } @@ -1268,12 +1306,10 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) sp->type = SRB_PRLI_CMD; sp->name = "prli"; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_prli_sp_done); lio = &sp->u.iocb_cmd; - lio->timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); - - sp->done = qla2x00_async_prli_sp_done; lio->u.logio.flags = 0; if (NVME_TARGET(vha->hw, fcport)) @@ -1295,7 +1331,8 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) return rval; done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); fcport->flags &= ~FCF_ASYNC_SENT; return rval; } @@ -1324,10 +1361,10 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) struct port_database_24xx *pd; struct qla_hw_data *ha = vha->hw; - if (IS_SESSION_DELETED(fcport)) { + if (SESSION_DELETE(fcport)) { 
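+		/* The session is being torn down; a GPDB issued now would race with the deletion. */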
ql_log(ql_log_warn, vha, 0xffff, - "%s: %8phC is being delete - not sending command.\n", - __func__, fcport->port_name); + "%s: %8phC is being deleted - not sending command.\n", + __func__, fcport->port_name); fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } @@ -1350,10 +1387,8 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) sp->name = "gpdb"; sp->gen1 = fcport->rscn_gen; sp->gen2 = fcport->login_gen; - - mbx = &sp->u.iocb_cmd; - mbx->timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla24xx_async_gpdb_sp_done); pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); if (pd == NULL) { @@ -1372,11 +1407,10 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) mb[9] = vha->vp_idx; mb[10] = opt; - mbx->u.mbx.in = pd; + mbx = &sp->u.iocb_cmd; + mbx->u.mbx.in = (void *)pd; mbx->u.mbx.in_dma = pd_dma; - sp->done = qla24xx_async_gpdb_sp_done; - ql_dbg(ql_dbg_disc, vha, 0x20dc, "Async-%s %8phC hndl %x opt %x\n", sp->name, fcport->port_name, sp->handle, opt); @@ -1390,7 +1424,7 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) if (pd) dma_pool_free(ha->s_dma_pool, pd, pd_dma); - sp->free(sp); + kref_put(&sp->cmd_kref, qla2x00_sp_release); fcport->flags &= ~FCF_ASYNC_SENT; done: fcport->flags &= ~FCF_ASYNC_ACTIVE; @@ -1408,6 +1442,12 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) ea->fcport->deleted = 0; ea->fcport->logout_on_delete = 1; + ql_dbg(ql_dbg_disc, vha, 0x20dc, + "%s: %8phC login_succ %x, loop_id=%x, portid=%02x%02x%02x.\n", + __func__, ea->fcport->port_name, ea->fcport->login_succ, + ea->fcport->loop_id, ea->fcport->d_id.b.domain, + ea->fcport->d_id.b.area, ea->fcport->d_id.b.al_pa); + if (!ea->fcport->login_succ && !IS_SW_RESV_ADDR(ea->fcport->d_id)) { vha->fcport_count++; ea->fcport->login_succ = 1; @@ -1429,6 +1469,54 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } +static int qla_chk_secure_login(scsi_qla_host_t *vha, fc_port_t *fcport, + struct port_database_24xx *pd) +{ + if (pd->secure_login) { + ql_dbg(ql_dbg_disc, vha, 0x104d, + "Secure Login established on %8phC\n", + fcport->port_name); + fcport->flags |= FCF_FCSP_DEVICE; + } else { + ql_dbg(ql_dbg_disc, vha, 0x104d, + "non-Secure Login %8phC\n", + fcport->port_name); + fcport->flags &= ~FCF_FCSP_DEVICE; + } + if (vha->hw->flags.edif_enabled) { + if (fcport->flags & FCF_FCSP_DEVICE) { + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_AUTH_PEND); + /* Start edif prli timer & ring doorbell for app */ + fcport->edif.rx_sa_set = 0; + fcport->edif.tx_sa_set = 0; + fcport->edif.rx_sa_pending = 0; + fcport->edif.tx_sa_pending = 0; + + qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, + fcport->d_id.b24); + + if (DBELL_ACTIVE(vha)) { + ql_dbg(ql_dbg_disc, vha, 0x20ef, + "%s %d %8phC EDIF: post DB_AUTH: AUTH needed\n", + __func__, __LINE__, fcport->port_name); + fcport->edif.app_sess_online = 1; + + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, fcport->d_id.b24, + 0, fcport); + } + + } else if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) { + ql_dbg(ql_dbg_disc, vha, 0x2117, + "%s %d %8phC post prli\n", + __func__, __LINE__, fcport->port_name); + qla24xx_post_prli_work(vha, fcport); + } + return 1; + } + return 0; +} +
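+/* qla_chk_secure_login() returns 1 when eDIF is enabled and the login has been routed to the AUTH-pending (secure) or PRLI (non-secure) path, telling the GPDB handler below to stop further PLOGI-complete processing; it returns 0 otherwise. */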
static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) { @@ -1442,12 +1530,16 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) fcport->flags &= ~FCF_ASYNC_SENT; ql_dbg(ql_dbg_disc, vha, 0x20d2, - "%s %8phC DS %d LS %d fc4_type %x rc %d\n", __func__, - fcport->port_name, fcport->disc_state, pd->current_login_state, - fcport->fc4_type, ea->rc); + "%s %8phC DS %d LS %x fc4_type %x rc %x\n", + __func__, fcport->port_name, + fcport->disc_state, pd->current_login_state, fcport->fc4_type, + ea->rc); - if (fcport->disc_state == DSC_DELETE_PEND) + if (fcport->disc_state == DSC_DELETE_PEND) { + ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC\n", + __func__, __LINE__, fcport->port_name); return; + } if (NVME_TARGET(vha->hw, fcport)) ls = pd->current_login_state >> 4; @@ -1464,6 +1556,8 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) } else if (ea->sp->gen1 != fcport->rscn_gen) { qla_rscn_replay(fcport); qlt_schedule_sess_for_deletion(fcport); + ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n", + __func__, __LINE__, fcport->port_name, ls); return; } @@ -1471,8 +1565,14 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) case PDS_PRLI_COMPLETE: __qla24xx_parse_gpdb(vha, fcport, pd); break; - case PDS_PLOGI_PENDING: case PDS_PLOGI_COMPLETE: + if (qla_chk_secure_login(vha, fcport, pd)) { + ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n", + __func__, __LINE__, fcport->port_name, ls); + return; + } + fallthrough; + case PDS_PLOGI_PENDING: case PDS_PRLI_PENDING: case PDS_PRLI2_PENDING: /* Set discovery state back to GNL to Relogin attempt */ @@ -1481,9 +1581,13 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) qla2x00_set_fcport_disc_state(fcport, DSC_GNL); set_bit(RELOGIN_NEEDED, &vha->dpc_flags); } + ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC, ls %x\n", + __func__, __LINE__, fcport->port_name, ls); return; case PDS_LOGO_PENDING: + fallthrough; case PDS_PORT_UNAVAILABLE: + fallthrough; default: ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC post del sess\n", __func__, __LINE__, fcport->port_name); @@ -1554,36 +1658,55 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) u16 sec; ql_dbg(ql_dbg_disc, vha, 0x20d8, - "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n", + "%s %8phC DS %d LS %d P %d fl %x confl %px rscn %d|%d login %d lid %d scan %d fc4type %x\n", __func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state, fcport->login_pause, fcport->flags, fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen, - fcport->login_gen, fcport->loop_id, fcport->scan_state); + fcport->login_gen, fcport->loop_id, fcport->scan_state, + fcport->fc4_type); if (fcport->scan_state != QLA_FCPORT_FOUND || - fcport->disc_state == DSC_DELETE_PEND) + fcport->disc_state == DSC_DELETE_PEND) return 0; if ((fcport->loop_id != FC_NO_LOOP_ID) && qla_dual_mode_enabled(vha) && ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || - (fcport->fw_login_state == DSC_LS_PRLI_PEND))) + (fcport->fw_login_state == DSC_LS_PRLI_PEND))) { + ql_dbg(ql_dbg_disc, vha, 0x20d8, + "%s %d %8phC exit\n", + __func__, __LINE__, + fcport->port_name); return 0; + } if (fcport->fw_login_state == DSC_LS_PLOGI_COMP && !N2N_TOPO(vha->hw)) { if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) { set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + ql_dbg(ql_dbg_disc, vha, 0x20d8, + "%s %d %8phC exit\n", + __func__, __LINE__, + fcport->port_name); return 0; } }
present */ - if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw)) + /* for pure Target Mode. Login will not be initiated */ + if (vha->host->active_mode == MODE_TARGET) { + ql_dbg(ql_dbg_disc, vha, 0x20d8, + "%s %d %8phC exit\n", + __func__, __LINE__, + fcport->port_name); return 0; + } - if (fcport->flags & (FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE)) { + if (fcport->flags & (FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE)) { set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + ql_dbg(ql_dbg_disc, vha, 0x20d8, + "%s %d %8phC exit\n", + __func__, __LINE__, + fcport->port_name); return 0; } @@ -1613,12 +1736,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) } break; default: - if (wwn == 0) { - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s %d %8phC post GNNID\n", - __func__, __LINE__, fcport->port_name); - qla24xx_post_gnnid_work(vha, fcport); - } else if (fcport->loop_id == FC_NO_LOOP_ID) { + if (fcport->loop_id == FC_NO_LOOP_ID) { ql_dbg(ql_dbg_disc, vha, 0x20bd, "%s %d %8phC post gnl\n", __func__, __LINE__, fcport->port_name); @@ -1679,8 +1797,17 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) break; case DSC_LOGIN_PEND: - if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) + if (vha->hw->flags.edif_enabled) + break; + + if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { + ql_dbg(ql_dbg_disc, vha, 0x2118, + "%s %d %8phC post %s PRLI\n", + __func__, __LINE__, fcport->port_name, + NVME_TARGET(vha->hw, fcport) ? "NVME" : + "FC"); qla24xx_post_prli_work(vha, fcport); + } break; case DSC_UPD_FCPORT: @@ -1734,19 +1861,39 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) case RSCN_PORT_ADDR: fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1); if (fcport) { - if (fcport->flags & FCF_FCP2_DEVICE) { + if (fcport->flags & FCF_FCP2_DEVICE && + atomic_read(&fcport->state) == FCS_ONLINE) { ql_dbg(ql_dbg_disc, vha, 0x2115, - "Delaying session delete for FCP2 portid=%06x %8phC ", - fcport->d_id.b24, fcport->port_name); + "Delaying session delete for FCP2 portid=%06x " + "%8phC ", fcport->d_id.b24, fcport->port_name); return; } - fcport->scan_needed = 1; - fcport->rscn_gen++; + + if (vha->hw->flags.edif_enabled && DBELL_ACTIVE(vha)) { + /* + * On ipsec start by remote port, Target port may use RSCN to + * trigger initiator to relogin. If driver is already in the process + * of a relogin, then ignore the RSCN and allow the current relogin + * to continue. This reduces thrashing of the connection. + */ + if (atomic_read(&fcport->state) == FCS_ONLINE) { + /* + * If state = online, then set scan_needed=1 to do relogin. 
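
Two gates in the RSCN handling above point in opposite directions: an online FCP2 (tape-class) session is deliberately left untouched so an RSCN cannot tear it down, while under eDIF only an online session is marked for rescan, since an offline one is already mid-relogin and extra RSCN processing would only thrash the connection. The eDIF-side gate in isolation (sketch; fc_port_t and FCS_ONLINE are the driver's own definitions):

static bool rscn_mark_for_rescan(fc_port_t *fcport)
{
	if (atomic_read(&fcport->state) != FCS_ONLINE)
		return false;	/* relogin in flight; let it finish */

	fcport->scan_needed = 1;
	fcport->rscn_gen++;
	return true;
}
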
+ * Otherwise we're already in the middle of a relogin + */ + fcport->scan_needed = 1; + fcport->rscn_gen++; + } + } else { + fcport->scan_needed = 1; + fcport->rscn_gen++; + } } break; case RSCN_AREA_ADDR: list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (fcport->flags & FCF_FCP2_DEVICE) + if (fcport->flags & FCF_FCP2_DEVICE && + atomic_read(&fcport->state) == FCS_ONLINE) continue; if ((ea->id.b24 & 0xffff00) == (fcport->d_id.b24 & 0xffff00)) { @@ -1757,7 +1904,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) break; case RSCN_DOM_ADDR: list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (fcport->flags & FCF_FCP2_DEVICE) + if (fcport->flags & FCF_FCP2_DEVICE && + atomic_read(&fcport->state) == FCS_ONLINE) continue; if ((ea->id.b24 & 0xff0000) == (fcport->d_id.b24 & 0xff0000)) { @@ -1769,7 +1917,8 @@ void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) case RSCN_FAB_ADDR: default: list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (fcport->flags & FCF_FCP2_DEVICE) + if (fcport->flags & FCF_FCP2_DEVICE && + atomic_read(&fcport->state) == FCS_ONLINE) continue; fcport->scan_needed = 1; @@ -1796,7 +1945,7 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, return; ql_dbg(ql_dbg_disc, vha, 0x2102, - "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n", + "%s %8phC DS %d LS %d P %d del %d cnfl %px rscn %d|%d login %d|%d fl %x\n", __func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state, fcport->login_pause, fcport->deleted, fcport->conflict, @@ -1817,9 +1966,12 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea) { - /* for pure Target Mode, PRLI will not be initiated */ - if (vha->host->active_mode == MODE_TARGET) + if (N2N_TOPO(vha->hw) && fcport_is_smaller(ea->fcport) && + vha->hw->flags.edif_enabled) { + /* check to see if he support Secure */ + qla24xx_post_gpdb_work(vha, ea->fcport, 0); return; + } ql_dbg(ql_dbg_disc, vha, 0x2118, "%s %d %8phC post PRLI\n", @@ -1868,51 +2020,127 @@ qla2x00_tmf_iocb_timeout(void *data) } } spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); - tmf->u.tmf.comp_status = cpu_to_le16(CS_TIMEOUT); + tmf->u.tmf.comp_status = CS_TIMEOUT; tmf->u.tmf.data = QLA_FUNCTION_FAILED; complete(&tmf->u.tmf.comp); } } -static void qla2x00_tmf_sp_done(srb_t *sp, int res) +static void qla_marker_sp_done(srb_t *sp, int res) { struct srb_iocb *tmf = &sp->u.iocb_cmd; + if (res != QLA_SUCCESS) + ql_dbg(ql_dbg_taskm, sp->vha, 0x8004, + "Async-marker fail hdl=%x portid=%06x ctrl=%x lun=%lld qp=%d.\n", + sp->handle, sp->fcport->d_id.b24, sp->u.iocb_cmd.u.tmf.flags, + sp->u.iocb_cmd.u.tmf.lun, sp->qpair->id ); + complete(&tmf->u.tmf.comp); } +#define START_SP_W_RETRIES(_sp, _rval) \ +{\ + int cnt = 5; \ + do { \ + _rval = qla2x00_start_sp(_sp); \ + if (_rval == EAGAIN) \ + msleep(1); \ + else \ + break; \ + cnt--; \ + } while (cnt); \ +} + int -qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, - uint32_t tag) +qla26xx_marker(struct tmf_arg *arg) { - struct scsi_qla_host *vha = fcport->vha; + struct scsi_qla_host *vha = arg->vha; struct srb_iocb *tm_iocb; srb_t *sp; int rval = QLA_FUNCTION_FAILED; + fc_port_t *fcport = arg->fcport; - sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + /* ref: INIT */ + sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL); if (!sp) goto done; + sp->type = SRB_MARKER; + sp->name = "marker"; + qla2x00_init_async_sp(sp, 
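
START_SP_W_RETRIES() above bounds IOCB submission at five attempts, sleeping 1 ms whenever the request ring is momentarily full. A function-shaped equivalent (sketch; whether the ring-full return here is EAGAIN or -EAGAIN depends on the compat layer, so both are treated as retryable):

#include <linux/delay.h>
#include <linux/errno.h>

static int start_sp_with_retries(srb_t *sp)
{
	int cnt = 5, rval;

	do {
		rval = qla2x00_start_sp(sp);
		if (rval != EAGAIN && rval != -EAGAIN)
			break;		/* submitted, or a hard failure */
		msleep(1);		/* ring full; give the IRQ path a tick */
	} while (--cnt);

	return rval;
}
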
qla2x00_get_async_timeout(vha), qla_marker_sp_done); + sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout; + tm_iocb = &sp->u.iocb_cmd; + init_completion(&tm_iocb->u.tmf.comp); + tm_iocb->u.tmf.modifier = arg->modifier; + tm_iocb->u.tmf.lun = arg->lun; + tm_iocb->u.tmf.loop_id = fcport->loop_id; + tm_iocb->u.tmf.vp_index = vha->vp_idx; + + START_SP_W_RETRIES(sp, rval); + + ql_dbg(ql_dbg_taskm, vha, 0x8006, + "Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n", + sp->handle, fcport->loop_id, fcport->d_id.b24, + arg->modifier, arg->lun, sp->qpair->id, rval); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x8031, + "Marker IOCB failed (%x).\n", rval); + goto done_free_sp; + } + + wait_for_completion(&tm_iocb->u.tmf.comp); + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + return rval; +} + +static void qla2x00_tmf_sp_done(srb_t *sp, int res) +{ + struct srb_iocb *tmf = &sp->u.iocb_cmd; + + complete(&tmf->u.tmf.comp); +} + +int +__qla2x00_async_tm_cmd(struct tmf_arg *arg) +{ + struct scsi_qla_host *vha = arg->vha; + struct srb_iocb *tm_iocb; + srb_t *sp; + int rval = QLA_FUNCTION_FAILED; + fc_port_t *fcport = arg->fcport; + uint8_t bail; + + /* ref: INIT */ + sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL); + if (!sp) + goto done; + + QLA_VHA_MARK_BUSY(vha, bail); sp->type = SRB_TM_CMD; sp->name = "tmf"; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), + qla2x00_tmf_sp_done); + sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout; - tm_iocb->timeout = qla2x00_tmf_iocb_timeout; + tm_iocb = &sp->u.iocb_cmd; init_completion(&tm_iocb->u.tmf.comp); - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)); + tm_iocb->u.tmf.flags = arg->flags; + tm_iocb->u.tmf.lun = arg->lun; - tm_iocb->u.tmf.flags = flags; - tm_iocb->u.tmf.lun = lun; - tm_iocb->u.tmf.data = tag; - sp->done = qla2x00_tmf_sp_done; + START_SP_W_RETRIES(sp, rval); ql_dbg(ql_dbg_taskm, vha, 0x802f, - "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n", - sp->handle, fcport->loop_id, fcport->d_id.b.domain, - fcport->d_id.b.area, fcport->d_id.b.al_pa); + "Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n", + sp->handle, fcport->loop_id, fcport->d_id.b24, + arg->flags, arg->lun, sp->qpair->id, rval); - rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) goto done_free_sp; wait_for_completion(&tm_iocb->u.tmf.comp); @@ -1924,23 +2152,107 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, "TM IOCB failed (%x).\n", rval); } - if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) { - flags = tm_iocb->u.tmf.flags; - lun = (uint16_t)tm_iocb->u.tmf.lun; - - /* Issue Marker IOCB */ - qla2x00_marker(vha, vha->hw->base_qpair, - fcport->loop_id, lun, - flags == TCF_LUN_RESET ? 
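
Both qla26xx_marker() and __qla2x00_async_tm_cmd() above follow the same "ref: INIT" lifetime rule that replaces the old sp->free(sp): the creator of the sp owns exactly one reference and drops that same reference on every exit path, success or failure. The shape of it (sketch):

/* ref: INIT: qla2xxx_get_qpair_sp() returns an sp holding one reference. */
sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL);
if (!sp)
	return QLA_FUNCTION_FAILED;

rval = qla2x00_start_sp(sp);
if (rval == QLA_SUCCESS)
	wait_for_completion(&sp->u.iocb_cmd.u.tmf.comp);

/* drop the creator's reference in every case */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
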
MK_SYNC_ID_LUN : MK_SYNC_ID); - } + if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) + rval = qla26xx_marker(arg); done_free_sp: - sp->free(sp); - fcport->flags &= ~FCF_ASYNC_SENT; + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } +void qla_put_tmf(fc_port_t *fcport) +{ + struct scsi_qla_host *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + fcport->active_tmf--; + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); +} + +static +int qla_get_tmf(fc_port_t *fcport) +{ + struct scsi_qla_host *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + int rc = 0; + LIST_HEAD(tmf_elem); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + list_add_tail(&tmf_elem, &fcport->tmf_pending); + + while (fcport->active_tmf >= MAX_ACTIVE_TMF) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + msleep(1); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + if (fcport->deleted ) { + rc = EIO; + break; + } + if (fcport->active_tmf < MAX_ACTIVE_TMF && + list_is_first(&tmf_elem, &fcport->tmf_pending)) + break; + } + + list_del(&tmf_elem); + + if (!rc) + fcport->active_tmf++; + + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + return rc; +} + +int +qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun, + uint32_t tag) +{ + struct scsi_qla_host *vha = fcport->vha; + struct qla_qpair *qpair; + struct tmf_arg a; + int i, rval; + + a.vha = fcport->vha; + a.fcport = fcport; + a.lun = lun; + if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET| TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) { + a.modifier = MK_SYNC_ID_LUN; + + if (qla_get_tmf(fcport)) + return QLA_FUNCTION_FAILED; + } else + a.modifier = MK_SYNC_ID; + + if (vha->hw->mqenable) { + for (i=0; i < vha->hw->num_qpairs; i++) { + qpair = vha->hw->queue_pair_map[i]; + if (!qpair) + continue; + a.qpair = qpair; + a.flags = flags|TCF_NOTMCMD_TO_TARGET; + rval = __qla2x00_async_tm_cmd(&a); + if (rval) + break; + } + } + + a.qpair = vha->hw->base_qpair; + a.flags = flags; + rval = __qla2x00_async_tm_cmd(&a); + + if (a.modifier == MK_SYNC_ID_LUN) + qla_put_tmf(fcport); + + return rval; +} + int qla24xx_async_abort_command(srb_t *sp) { @@ -1961,7 +2273,7 @@ qla24xx_async_abort_command(srb_t *sp) if (handle == req->num_outstanding_cmds) { /* Command not found. */ - return QLA_FUNCTION_FAILED; + return QLA_ERR_NOT_FOUND; } if (sp->type == SRB_FXIOCB_DCMD) return qlafx00_fx_disc(vha, &vha->hw->mr.fcport, @@ -1973,6 +2285,7 @@ qla24xx_async_abort_command(srb_t *sp) static void qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) { + struct srb *sp; WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", ea->data[0]); @@ -1993,60 +2306,45 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) qla24xx_post_gpdb_work(vha, ea->fcport, 0); break; default: - if ((ea->iop[0] == LSC_SCODE_ELS_REJECT) && - (ea->iop[1] == 0x50000)) { /* reson 5=busy expl:0x0 */ - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP; - break; - } - + sp = ea->sp; ql_dbg(ql_dbg_disc, vha, 0x2118, - "%s %d %8phC priority %s, fc4type %x\n", - __func__, __LINE__, ea->fcport->port_name, - vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ? - "FCP" : "NVMe", ea->fcport->fc4_type); + "%s %d %8phC priority %s, fc4type %x prev try %s\n", + __func__, __LINE__, ea->fcport->port_name, + vha->hw->fc4_type_priority == FC4_PRIORITY_FCP ? 
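
qla_get_tmf()/qla_put_tmf() above cap a port at MAX_ACTIVE_TMF concurrent task-management commands, with waiters served in FIFO order off fcport->tmf_pending. The pairing rule, reduced to its skeleton (sketch; error handling trimmed):

/* A slot is taken only for LUN-scoped TMFs, and every successful
 * qla_get_tmf() is matched by qla_put_tmf() after the TMF and its
 * trailing marker complete. qla_get_tmf() may sleep and returns
 * nonzero if the port is deleted while waiting. */
if (a.modifier == MK_SYNC_ID_LUN && qla_get_tmf(fcport))
	return QLA_FUNCTION_FAILED;

rval = __qla2x00_async_tm_cmd(&a);

if (a.modifier == MK_SYNC_ID_LUN)
	qla_put_tmf(fcport);
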
"FCP" : "NVMe", + ea->fcport->fc4_type, + (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) ? "NVME" : "FCP"); - if (N2N_TOPO(vha->hw)) { - if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME) { - ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME; - ea->fcport->fc4_type |= FS_FC4TYPE_FCP; - } else { - ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP; - ea->fcport->fc4_type |= FS_FC4TYPE_NVME; + if (NVME_FCP_TARGET(ea->fcport)) { + if (sp->u.iocb_cmd.u.logio.flags & SRB_LOGIN_NVME_PRLI) + ea->fcport->do_prli_nvme = 0; + else + ea->fcport->do_prli_nvme = 1; + } else + ea->fcport->do_prli_nvme = 0; + + + if (N2N_TOPO(vha->hw)){ + if (ea->fcport->n2n_link_reset_cnt == vha->hw->login_retry_count && + ea->fcport->flags & FCF_FCSP_DEVICE) { + /* remote authentication app just started */ + ea->fcport->n2n_link_reset_cnt = 0; } - if (ea->fcport->n2n_link_reset_cnt < 3) { + if (ea->fcport->n2n_link_reset_cnt < vha->hw->login_retry_count) { ea->fcport->n2n_link_reset_cnt++; - vha->relogin_jif = jiffies + 2 * HZ; - /* - * PRLI failed. Reset link to kick start - * state machine - */ + vha->relogin_jif = jiffies + 2*HZ; + /* PRLI failed. Reset + link to kick start his state machine */ set_bit(N2N_LINK_RESET, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); } else { ql_log(ql_log_warn, vha, 0x2119, - "%s %d %8phC Unable to reconnect\n", - __func__, __LINE__, - ea->fcport->port_name); + "%s %d %8phC Unable to reconnect\n", + __func__, __LINE__, ea->fcport->port_name); } } else { - /* - * switch connect. login failed. Take connection down - * and allow relogin to retrigger - */ - if (NVME_FCP_TARGET(ea->fcport)) { - ql_dbg(ql_dbg_disc, vha, 0x2118, - "%s %d %8phC post %s prli\n", - __func__, __LINE__, - ea->fcport->port_name, - (ea->fcport->fc4_type & FS_FC4TYPE_NVME) - ? "NVMe" : "FCP"); - if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME) - ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME; - else - ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP; - } - + /* switch connect. login failed. Take connection down + and allow relogin to retrigger */ ea->fcport->flags &= ~FCF_ASYNC_SENT; ea->fcport->keep_nport_handle = 0; ea->fcport->logout_on_delete = 1; @@ -2112,26 +2410,38 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) * force a relogin attempt via implicit LOGO, PLOGI, and PRLI * requests. */ - if (NVME_TARGET(vha->hw, ea->fcport)) { - ql_dbg(ql_dbg_disc, vha, 0x2117, - "%s %d %8phC post prli\n", - __func__, __LINE__, ea->fcport->port_name); - qla24xx_post_prli_work(vha, ea->fcport); - } else { - ql_dbg(ql_dbg_disc, vha, 0x20ea, - "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n", - __func__, __LINE__, ea->fcport->port_name, - ea->fcport->loop_id, ea->fcport->d_id.b24); - + if (vha->hw->flags.edif_enabled) { set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; ea->fcport->logout_on_delete = 1; ea->fcport->send_els_logo = 0; - ea->fcport->fw_login_state = DSC_LS_PRLI_COMP; + ea->fcport->fw_login_state = DSC_LS_PLOGI_COMP; spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); qla24xx_post_gpdb_work(vha, ea->fcport, 0); + } else { + if (NVME_TARGET(vha->hw, fcport)) { + ql_dbg(ql_dbg_disc, vha, 0x2117, + "%s %d %8phC post prli\n", + __func__, __LINE__, fcport->port_name); + qla24xx_post_prli_work(vha, fcport); + } else { + ql_dbg(ql_dbg_disc, vha, 0x20ea, + "%s %d %8phC LoopID 0x%x in use with %06x. 
post gpdb\n", + __func__, __LINE__, fcport->port_name, + fcport->loop_id, fcport->d_id.b24); + + set_bit(fcport->loop_id, vha->hw->loop_id_map); + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + fcport->chip_reset = vha->hw->base_qpair->chip_reset; + fcport->logout_on_delete = 1; + fcport->send_els_logo = 0; + fcport->fw_login_state = DSC_LS_PRLI_COMP; + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + + qla24xx_post_gpdb_work(vha, fcport, 0); + } } break; case MBS_COMMAND_ERROR: @@ -2172,7 +2482,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) ea->fcport->login_pause = 1; ql_dbg(ql_dbg_disc, vha, 0x20ed, - "%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n", + "%s %d %8phC NPortId %06x inuse with loopid 0x%x.\n", __func__, __LINE__, ea->fcport->port_name, ea->fcport->d_id.b24, lid); } else { @@ -2323,7 +2633,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) /* Check for secure flash support */ if (IS_QLA28XX(ha)) { - if (rd_reg_word(®->mailbox12) & BIT_0) + if (RD_REG_DWORD(®->mailbox12) & BIT_0) ha->flags.secure_adapter = 1; ql_log(ql_log_info, vha, 0xffff, "Secure Adapter: %s\n", (ha->flags.secure_adapter) ? "Yes" : "No"); @@ -2461,7 +2771,7 @@ qla2100_pci_config(scsi_qla_host_t *vha) /* Get PCI bus information. */ spin_lock_irqsave(&ha->hardware_lock, flags); - ha->pci_attr = rd_reg_word(®->ctrl_status); + ha->pci_attr = RD_REG_WORD(®->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; @@ -2503,17 +2813,17 @@ qla2300_pci_config(scsi_qla_host_t *vha) spin_lock_irqsave(&ha->hardware_lock, flags); /* Pause RISC. */ - wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); + WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); for (cnt = 0; cnt < 30000; cnt++) { - if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) != 0) + if ((RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) != 0) break; udelay(10); } /* Select FPM registers. */ - wrt_reg_word(®->ctrl_status, 0x20); - rd_reg_word(®->ctrl_status); + WRT_REG_WORD(®->ctrl_status, 0x20); + RD_REG_WORD(®->ctrl_status); /* Get the fb rev level */ ha->fb_rev = RD_FB_CMD_REG(ha, reg); @@ -2522,13 +2832,13 @@ qla2300_pci_config(scsi_qla_host_t *vha) pci_clear_mwi(ha->pdev); /* Deselect FPM registers. */ - wrt_reg_word(®->ctrl_status, 0x0); - rd_reg_word(®->ctrl_status); + WRT_REG_WORD(®->ctrl_status, 0x0); + RD_REG_WORD(®->ctrl_status); /* Release RISC module. */ - wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); + WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); for (cnt = 0; cnt < 30000; cnt++) { - if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) == 0) + if ((RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) == 0) break; udelay(10); @@ -2543,7 +2853,7 @@ qla2300_pci_config(scsi_qla_host_t *vha) /* Get PCI bus information. */ spin_lock_irqsave(&ha->hardware_lock, flags); - ha->pci_attr = rd_reg_word(®->ctrl_status); + ha->pci_attr = RD_REG_WORD(®->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; @@ -2587,7 +2897,7 @@ qla24xx_pci_config(scsi_qla_host_t *vha) /* Get PCI bus information. */ spin_lock_irqsave(&ha->hardware_lock, flags); - ha->pci_attr = rd_reg_dword(®->ctrl_status); + ha->pci_attr = RD_REG_DWORD(®->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; @@ -2691,36 +3001,36 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) if (!IS_QLA2100(ha)) { /* Pause RISC. 
*/ - wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); + WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); if (IS_QLA2200(ha) || IS_QLA2300(ha)) { for (cnt = 0; cnt < 30000; cnt++) { - if ((rd_reg_word(®->hccr) & + if ((RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) != 0) break; udelay(100); } } else { - rd_reg_word(®->hccr); /* PCI Posting. */ + RD_REG_WORD(®->hccr); /* PCI Posting. */ udelay(10); } /* Select FPM registers. */ - wrt_reg_word(®->ctrl_status, 0x20); - rd_reg_word(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->ctrl_status, 0x20); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ /* FPM Soft Reset. */ - wrt_reg_word(®->fpm_diag_config, 0x100); - rd_reg_word(®->fpm_diag_config); /* PCI Posting. */ + WRT_REG_WORD(®->fpm_diag_config, 0x100); + RD_REG_WORD(®->fpm_diag_config); /* PCI Posting. */ /* Toggle Fpm Reset. */ if (!IS_QLA2200(ha)) { - wrt_reg_word(®->fpm_diag_config, 0x0); - rd_reg_word(®->fpm_diag_config); /* PCI Posting. */ + WRT_REG_WORD(®->fpm_diag_config, 0x0); + RD_REG_WORD(®->fpm_diag_config); /* PCI Posting. */ } /* Select frame buffer registers. */ - wrt_reg_word(®->ctrl_status, 0x10); - rd_reg_word(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->ctrl_status, 0x10); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ /* Reset frame buffer FIFOs. */ if (IS_QLA2200(ha)) { @@ -2738,23 +3048,23 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) } /* Select RISC module registers. */ - wrt_reg_word(®->ctrl_status, 0); - rd_reg_word(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->ctrl_status, 0); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ /* Reset RISC processor. */ - wrt_reg_word(®->hccr, HCCR_RESET_RISC); - rd_reg_word(®->hccr); /* PCI Posting. */ + WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); + RD_REG_WORD(®->hccr); /* PCI Posting. */ /* Release RISC processor. */ - wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); - rd_reg_word(®->hccr); /* PCI Posting. */ + WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); + RD_REG_WORD(®->hccr); /* PCI Posting. */ } - wrt_reg_word(®->hccr, HCCR_CLR_RISC_INT); - wrt_reg_word(®->hccr, HCCR_CLR_HOST_INT); + WRT_REG_WORD(®->hccr, HCCR_CLR_RISC_INT); + WRT_REG_WORD(®->hccr, HCCR_CLR_HOST_INT); /* Reset ISP chip. */ - wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); + WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); /* Wait for RISC to recover from reset. */ if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { @@ -2765,7 +3075,7 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) */ udelay(20); for (cnt = 30000; cnt; cnt--) { - if ((rd_reg_word(®->ctrl_status) & + if ((RD_REG_WORD(®->ctrl_status) & CSR_ISP_SOFT_RESET) == 0) break; udelay(100); @@ -2774,13 +3084,13 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) udelay(10); /* Reset RISC processor. */ - wrt_reg_word(®->hccr, HCCR_RESET_RISC); + WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); - wrt_reg_word(®->semaphore, 0); + WRT_REG_WORD(®->semaphore, 0); /* Release RISC processor. */ - wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); - rd_reg_word(®->hccr); /* PCI Posting. */ + WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); + RD_REG_WORD(®->hccr); /* PCI Posting. */ if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { for (cnt = 0; cnt < 30000; cnt++) { @@ -2798,8 +3108,8 @@ qla2x00_reset_chip(scsi_qla_host_t *vha) /* Disable RISC pause on FPM parity error. */ if (!IS_QLA2100(ha)) { - wrt_reg_word(®->hccr, HCCR_DISABLE_PARITY_PAUSE); - rd_reg_word(®->hccr); /* PCI Posting. */ + WRT_REG_WORD(®->hccr, HCCR_DISABLE_PARITY_PAUSE); + RD_REG_WORD(®->hccr); /* PCI Posting. 
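
Nearly every register write in the reset sequence above is followed by a read of the same register with a "PCI Posting" comment. The read is not for the value: it flushes the posted PCI write so the following udelay() really begins after the device has seen the write. The idiom in isolation, with the stock kernel accessors:

#include <linux/io.h>

/* Write-then-flush (sketch): the dummy read forces the posted write out
 * to the adapter before any delay or dependent write is started. */
static inline void wrt_flush16(u16 val, void __iomem *reg)
{
	writew(val, reg);
	(void)readw(reg);	/* PCI posting */
}
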
*/ } spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -2824,6 +3134,49 @@ qla81xx_reset_mpi(scsi_qla_host_t *vha) return qla81xx_write_mpi_register(vha, mb); } +static int +qla_chk_risc_recovery(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + uint16_t __iomem *mbptr = (uint16_t __iomem *)®->mailbox0; + int i; + u16 mb[32]; + int rc = QLA_SUCCESS; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return rc; + + // this check is only valid after RISC reset. + mb[0] = RD_REG_WORD(mbptr); + mbptr++; + if (mb[0] == 0xf) { + rc = QLA_FUNCTION_FAILED; + + for (i=1; i < 32; i++){ + mb[i] = RD_REG_WORD(mbptr); + mbptr++; + } + + ql_log(ql_log_warn, vha, 0x1015, + "RISC reset failed. mb[0-7] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", + mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6], mb[7]); + ql_log(ql_log_warn, vha, 0x1015, + "RISC reset failed. mb[8-15] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", + mb[8], mb[9], mb[10], mb[11], mb[12], mb[13], mb[14], + mb[15]); + ql_log(ql_log_warn, vha, 0x1015, + "RISC reset failed. mb[16-23] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", + mb[16], mb[17], mb[18], mb[19], mb[20], mb[21], mb[22], + mb[23]); + ql_log(ql_log_warn, vha, 0x1015, + "RISC reset failed. mb[24-31] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n", + mb[24], mb[25], mb[26], mb[27], mb[28], mb[29], mb[30], + mb[31]); + } + return rc; +} + /** * qla24xx_reset_risc() - Perform full reset of ISP24xx RISC. * @vha: HA context @@ -2840,36 +3193,37 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) uint16_t wd; static int abts_cnt; /* ISP abort retry counts */ int rval = QLA_SUCCESS; + int print = 1; spin_lock_irqsave(&ha->hardware_lock, flags); /* Reset RISC. */ - wrt_reg_dword(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); + WRT_REG_DWORD(®->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); for (cnt = 0; cnt < 30000; cnt++) { - if ((rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) + if ((RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE) == 0) break; udelay(10); } - if (!(rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE)) + if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE)) set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags); ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017e, "HCCR: 0x%x, Control Status %x, DMA active status:0x%x\n", - rd_reg_dword(®->hccr), - rd_reg_dword(®->ctrl_status), - (rd_reg_dword(®->ctrl_status) & CSRX_DMA_ACTIVE)); + RD_REG_DWORD(®->hccr), + RD_REG_DWORD(®->ctrl_status), + (RD_REG_DWORD(®->ctrl_status) & CSRX_DMA_ACTIVE)); - wrt_reg_dword(®->ctrl_status, + WRT_REG_DWORD(®->ctrl_status, CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES); pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); udelay(100); /* Wait for firmware to complete NVRAM accesses. */ - rd_reg_word(®->mailbox0); - for (cnt = 10000; rd_reg_word(®->mailbox0) != 0 && + RD_REG_WORD(®->mailbox0); + for (cnt = 10000; RD_REG_WORD(®->mailbox0) != 0 && rval == QLA_SUCCESS; cnt--) { barrier(); if (cnt) @@ -2883,26 +3237,26 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x017f, "HCCR: 0x%x, MailBox0 Status 0x%x\n", - rd_reg_dword(®->hccr), - rd_reg_word(®->mailbox0)); + RD_REG_DWORD(®->hccr), + RD_REG_DWORD(®->mailbox0)); /* Wait for soft-reset to complete. 
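
qla_chk_risc_recovery() above treats mailbox0 == 0xf after a RISC reset as a failed reset on ISP27xx/28xx parts and dumps all 32 mailboxes across four fixed log lines. The same dump folds into a loop (sketch; assumes the mailboxes are consecutive 16-bit registers starting at mailbox0):

#include <linux/io.h>

static void dump_mailboxes(struct scsi_qla_host *vha, void __iomem *mb0)
{
	u16 mb[32];
	int i;

	for (i = 0; i < 32; i++)
		mb[i] = readw(mb0 + 2 * i);

	for (i = 0; i < 32; i += 8)
		ql_log(ql_log_warn, vha, 0x1015,
		    "RISC reset failed. mb[%d-%d] %04xh %04xh %04xh %04xh %04xh %04xh %04xh %04xh\n",
		    i, i + 7, mb[i], mb[i + 1], mb[i + 2], mb[i + 3],
		    mb[i + 4], mb[i + 5], mb[i + 6], mb[i + 7]);
}
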
*/ - rd_reg_dword(®->ctrl_status); + RD_REG_DWORD(®->ctrl_status); for (cnt = 0; cnt < 60; cnt++) { barrier(); - if ((rd_reg_dword(®->ctrl_status) & + if ((RD_REG_DWORD(®->ctrl_status) & CSRX_ISP_SOFT_RESET) == 0) break; udelay(5); } - if (!(rd_reg_dword(®->ctrl_status) & CSRX_ISP_SOFT_RESET)) + if (!(RD_REG_DWORD(®->ctrl_status) & CSRX_ISP_SOFT_RESET)) set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags); ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015d, "HCCR: 0x%x, Soft Reset status: 0x%x\n", - rd_reg_dword(®->hccr), - rd_reg_dword(®->ctrl_status)); + RD_REG_DWORD(®->hccr), + RD_REG_DWORD(®->ctrl_status)); /* If required, do an MPI FW reset now */ if (test_and_clear_bit(MPI_RESET_NEEDED, &vha->dpc_flags)) { @@ -2921,31 +3275,40 @@ qla24xx_reset_risc(scsi_qla_host_t *vha) } } - wrt_reg_dword(®->hccr, HCCRX_SET_RISC_RESET); - rd_reg_dword(®->hccr); + WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET); + RD_REG_DWORD(®->hccr); - wrt_reg_dword(®->hccr, HCCRX_REL_RISC_PAUSE); - rd_reg_dword(®->hccr); + WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE); + RD_REG_DWORD(®->hccr); - wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_RESET); - rd_reg_dword(®->hccr); + WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_RESET); + mdelay(10); + RD_REG_DWORD(®->hccr); - rd_reg_word(®->mailbox0); - for (cnt = 60; rd_reg_word(®->mailbox0) != 0 && - rval == QLA_SUCCESS; cnt--) { + wd = RD_REG_WORD(®->mailbox0); + for (cnt = 300; wd != 0 && rval == QLA_SUCCESS; cnt--) { barrier(); - if (cnt) - udelay(5); - else + if (cnt) { + mdelay(1); + if (print && qla_chk_risc_recovery(vha)) + print = 0; + + wd = RD_REG_WORD(®->mailbox0); + } else { rval = QLA_FUNCTION_TIMEOUT; + + ql_log(ql_log_warn, vha, 0x015e, + "RISC reset timeout\n"); + } } + if (rval == QLA_SUCCESS) set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags); ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x015e, "Host Risc 0x%x, mailbox0 0x%x\n", - rd_reg_dword(®->hccr), - rd_reg_word(®->mailbox0)); + RD_REG_DWORD(®->hccr), + RD_REG_WORD(®->mailbox0)); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -2964,8 +3327,9 @@ qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data) { struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; - wrt_reg_dword(®->iobase_addr, RISC_REGISTER_BASE_OFFSET); - *data = rd_reg_dword(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET); + WRT_REG_DWORD(®->iobase_addr, RISC_REGISTER_BASE_OFFSET); + *data = RD_REG_DWORD(®->iobase_window + RISC_REGISTER_WINDOW_OFFET); + } static void @@ -2973,8 +3337,8 @@ qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data) { struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24; - wrt_reg_dword(®->iobase_addr, RISC_REGISTER_BASE_OFFSET); - wrt_reg_dword(®->iobase_window + RISC_REGISTER_WINDOW_OFFSET, data); + WRT_REG_DWORD(®->iobase_addr, RISC_REGISTER_BASE_OFFSET); + WRT_REG_DWORD(®->iobase_window + RISC_REGISTER_WINDOW_OFFET, data); } static void @@ -2990,7 +3354,7 @@ qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha) vha->hw->pdev->subsystem_device != 0x0240) return; - wrt_reg_dword(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE); + WRT_REG_DWORD(&vha->hw->iobase->isp24.hccr, HCCRX_SET_RISC_PAUSE); udelay(100); attempt: @@ -3086,13 +3450,13 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) /* Assume a failed state */ rval = QLA_FUNCTION_FAILED; - ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %p.\n", + ql_dbg(ql_dbg_init, vha, 0x007b, "Testing device at %px.\n", ®->flash_address); spin_lock_irqsave(&ha->hardware_lock, flags); /* Reset ISP chip. 
*/ - wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); + WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); /* * We need to have a delay here since the card will not respond while @@ -3102,7 +3466,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) data = qla2x00_debounce_register(®->ctrl_status); for (cnt = 6000000 ; cnt && (data & CSR_ISP_SOFT_RESET); cnt--) { udelay(5); - data = rd_reg_word(®->ctrl_status); + data = RD_REG_WORD(®->ctrl_status); barrier(); } @@ -3113,8 +3477,8 @@ qla2x00_chip_diag(scsi_qla_host_t *vha) "Reset register cleared by chip reset.\n"); /* Reset RISC processor. */ - wrt_reg_word(®->hccr, HCCR_RESET_RISC); - wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); + WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); + WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); /* Workaround for QLA2312 PCI parity error */ if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { @@ -3563,7 +3927,7 @@ qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req) if (!req->outstanding_cmds) { ql_log(ql_log_fatal, NULL, 0x0126, "Failed to allocate memory for " - "outstanding_cmds for req_que %p.\n", req); + "outstanding_cmds for req_que %px.\n", req); req->num_outstanding_cmds = 0; return QLA_FUNCTION_FAILED; } @@ -3593,6 +3957,20 @@ static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha) u8 str[STR_LEN], *ptr, p; int leftover, len; + ql_dbg(ql_dbg_init, vha, 0x015a, + "SFP: %.*s -> %.*s ->%s%s%s%s%s%s\n", + (int)sizeof(a0->vendor_name), a0->vendor_name, + (int)sizeof(a0->vendor_pn), a0->vendor_pn, + a0->fc_sp_cc10 & FC_SP_32 ? " 32G" : "", + a0->fc_sp_cc10 & FC_SP_16 ? " 16G" : "", + a0->fc_sp_cc10 & FC_SP_8 ? " 8G" : "", + a0->fc_sp_cc10 & FC_SP_4 ? " 4G" : "", + a0->fc_sp_cc10 & FC_SP_2 ? " 2G" : "", + a0->fc_sp_cc10 & FC_SP_1 ? " 1G" : ""); + + if (!(ql2xextended_error_logging & ql_dbg_verbose)) + return; + memset(str, 0, STR_LEN); snprintf(str, SFF_VEN_NAME_LEN+1, a0->vendor_name); ql_dbg(ql_dbg_init, vha, 0x015a, @@ -3665,19 +4043,19 @@ static void qla2xxx_print_sfp_info(struct scsi_qla_host *vha) /** - * qla24xx_detect_sfp() - * - * @vha: adapter state pointer. - * - * @return - * 0 -- Configure firmware to use short-range settings -- normal - * buffer-to-buffer credits. - * - * 1 -- Configure firmware to use long-range settings -- extra - * buffer-to-buffer credits should be allocated with - * ha->lr_distance containing distance settings from NVRAM or SFP - * (if supported). - */ + * qla24xx_detect_sfp() + * + * @param vha + * + * @return + * 0 -- Configure firmware to use short-range settings -- normal + * buffer-to-buffer credits. + * + * 1 -- Configure firmware to use long-range settings -- extra + * buffer-to-buffer credits should be allocated with + * ha->lr_distance containing distance settings from NVRAM or SFP + * (if supported). + */ int qla24xx_detect_sfp(scsi_qla_host_t *vha) { @@ -3686,8 +4064,8 @@ qla24xx_detect_sfp(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct nvram_81xx *nv = ha->nvram; #define LR_DISTANCE_UNKNOWN 2 - static const char * const types[] = { "Short", "Long" }; - static const char * const lengths[] = { "(10km)", "(5km)", "" }; + static char *types[] = { "Short", "Long" }; + static char *lengths[] = { "(10km)", "(5km)", "" }; u8 ll = 0; /* Seed with NVRAM settings. 
*/ @@ -3725,11 +4103,13 @@ qla24xx_detect_sfp(scsi_qla_host_t *vha) ha->lr_distance = LR_DISTANCE_5K; } + if (!vha->flags.init_done) + rc = QLA_SUCCESS; out: ql_dbg(ql_dbg_async, vha, 0x507b, "SFP detect: %s-Range SFP %s (nvr=%x ll=%x lr=%x lrd=%x).\n", types[ha->flags.lr_detected], - ha->flags.lr_detected ? lengths[ha->lr_distance] : + ha->flags.lr_detected ? lengths[ha->lr_distance]: lengths[LR_DISTANCE_UNKNOWN], used_nvram, ll, ha->flags.lr_detected, ha->lr_distance); return ha->flags.lr_detected; @@ -3746,15 +4126,24 @@ void qla_init_iocb_limit(scsi_qla_host_t *vha) ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count; ha->base_qpair->fwres.iocbs_limit = limit; - ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps; + ha->base_qpair->fwres.iocbs_qp_limit = limit/num_qps; ha->base_qpair->fwres.iocbs_used = 0; - for (i = 0; i < ha->max_qpairs; i++) { + + ha->base_qpair->fwres.exch_total = ha->orig_fw_xcb_count; + ha->base_qpair->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT)/100; + ha->base_qpair->fwres.exch_used = 0; + + for (i=0; i < ha->max_qpairs; i++) { if (ha->queue_pair_map[i]) { ha->queue_pair_map[i]->fwres.iocbs_total = ha->orig_fw_iocb_count; ha->queue_pair_map[i]->fwres.iocbs_limit = limit; ha->queue_pair_map[i]->fwres.iocbs_qp_limit = - limit / num_qps; + limit/num_qps; + ha->queue_pair_map[i]->fwres.iocbs_used = 0; + ha->queue_pair_map[i]->fwres.exch_total = ha->orig_fw_xcb_count; + ha->queue_pair_map[i]->fwres.exch_limit = + (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT)/100; ha->queue_pair_map[i]->fwres.iocbs_used = 0; } } @@ -3789,8 +4178,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) { /* Disable SRAM, Instruction RAM and GP RAM parity. */ spin_lock_irqsave(&ha->hardware_lock, flags); - wrt_reg_word(®->hccr, (HCCR_ENABLE_PARITY + 0x0)); - rd_reg_word(®->hccr); + WRT_REG_WORD(®->hccr, (HCCR_ENABLE_PARITY + 0x0)); + RD_REG_WORD(®->hccr); spin_unlock_irqrestore(&ha->hardware_lock, flags); } @@ -3828,7 +4217,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) goto execute_fw_with_lr; } - if (IS_ZIO_THRESHOLD_CAPABLE(ha)) + if (IS_ZIO_THRESHOLD_CAPABLE(ha) && + (ha->zio_mode == QLA_ZIO_MODE_6)) qla27xx_set_zio_threshold(vha, ha->last_zio_threshold); @@ -3886,8 +4276,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) } /* Enable PUREX PASSTHRU */ - if (ql2xrdpenable || ha->flags.scm_supported_f) - qla25xx_set_els_cmds_supported(vha); + qla25xx_set_els_cmds_supported(vha); } else goto failed; @@ -3896,11 +4285,11 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) spin_lock_irqsave(&ha->hardware_lock, flags); if (IS_QLA2300(ha)) /* SRAM parity */ - wrt_reg_word(®->hccr, HCCR_ENABLE_PARITY + 0x1); + WRT_REG_WORD(®->hccr, HCCR_ENABLE_PARITY + 0x1); else /* SRAM, Instruction RAM and GP RAM parity */ - wrt_reg_word(®->hccr, HCCR_ENABLE_PARITY + 0x7); - rd_reg_word(®->hccr); + WRT_REG_WORD(®->hccr, HCCR_ENABLE_PARITY + 0x7); + RD_REG_WORD(®->hccr); spin_unlock_irqrestore(&ha->hardware_lock, flags); } @@ -3914,11 +4303,9 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) ha->flags.fac_supported = 1; ha->fdt_block_size = size << 2; } else { - ql_log(ql_log_warn, vha, 0x00ce, - "Unsupported FAC firmware (%d.%02d.%02d).\n", - ha->fw_major_version, ha->fw_minor_version, - ha->fw_subminor_version); - + ql_log(ql_log_info, vha, 0x00ce, + "Flash Access Control MB cmd failed (%d).\n", + rval); if (IS_QLA83XX(ha)) { ha->flags.fac_supported = 0; rval = QLA_SUCCESS; @@ -4024,6 +4411,9 @@ qla2x00_update_fw_options(scsi_qla_host_t *vha) 
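
qla_init_iocb_limit() above now budgets exchanges as well as IOCBs: each pool's limit is a fixed percentage of the firmware-reported total, and the IOCB budget is additionally split evenly across queue pairs. The arithmetic (sketch; assumes QLA_IOCB_PCT_LIMIT is a whole-number percentage such as 85):

static inline u32 fw_res_limit(u32 fw_total, u32 pct)
{
	return fw_total * pct / 100;
}

/* e.g. iocbs_limit    = fw_res_limit(ha->orig_fw_iocb_count, QLA_IOCB_PCT_LIMIT);
 *      iocbs_qp_limit = iocbs_limit / num_qps;
 *      exch_limit     = fw_res_limit(ha->orig_fw_xcb_count, QLA_IOCB_PCT_LIMIT);
 */
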
(tx_sens & (BIT_1 | BIT_0)); } + /* Enable PUREX */ + ha->fw_options[1] |= FO1_ENABLE_PURE_IOCB; + /* FCP2 options. */ /* Return command IOCBs without waiting for an ABTS to complete. */ ha->fw_options[3] |= BIT_13; @@ -4070,7 +4460,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) } /* Move PUREX, ABTS RX & RIDA to ATIOQ */ - if (ql2xmvasynctoatio && + if (ql2xmvasynctoatio && !ha->flags.edif_enabled && (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) { if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) @@ -4089,17 +4479,33 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) qla_dual_mode_enabled(vha)) ha->fw_options[2] |= BIT_4; else - ha->fw_options[2] &= ~BIT_4; + ha->fw_options[2] &= ~(BIT_4); /* Reserve 1/2 of emergency exchanges for ELS.*/ if (qla2xuseresexchforels) ha->fw_options[2] |= BIT_8; else ha->fw_options[2] &= ~BIT_8; + + /* N2N: set Secure=1 for PLOGI ACC and + * fw shal not send PRLI after PLOGI Acc + */ + if (ha->flags.edif_enabled && + DBELL_ACTIVE(vha)) { + ha->fw_options[3] |= BIT_15; + ha->flags.n2n_fw_acc_sec = 1; + } else { + ha->fw_options[3] &= ~BIT_15; + ha->flags.n2n_fw_acc_sec = 0; + } + } - if (ql2xrdpenable || ha->flags.scm_supported_f) + if (ha->flags.edif_enabled || ql2xrdpenable || ha->flags.scm_supported_f) { ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB; + ha->fw_options[3] |= ADD_FO3_COPY_FLOGI_ACC_PL; + ha->flags.flogi_acc_enabled = 1; + } /* Enable Async 8130/8131 events -- transceiver insertion/removal */ if (IS_BPM_RANGE_CAPABLE(ha)) @@ -4115,7 +4521,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha) /* Update Serial Link options. */ if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0) - return; + return; rval = qla2x00_set_serdes_params(vha, le16_to_cpu(ha->fw_seriallink_options24[1]), @@ -4143,11 +4549,11 @@ qla2x00_config_rings(struct scsi_qla_host *vha) put_unaligned_le64(req->dma, &ha->init_cb->request_q_address); put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address); - wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0); - wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0); - wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0); - wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0); - rd_reg_word(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */ + WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), 0); + WRT_REG_WORD(ISP_REQ_Q_OUT(ha, reg), 0); + WRT_REG_WORD(ISP_RSP_Q_IN(ha, reg), 0); + WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), 0); + RD_REG_WORD(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */ } void @@ -4161,6 +4567,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha) uint16_t rid = 0; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; + u32 temp; /* Setup ring parameters in initialization control block. 
*/ icb = (struct init_cb_24xx *)ha->init_cb; @@ -4209,15 +4616,15 @@ qla24xx_config_rings(struct scsi_qla_host *vha) } icb->firmware_options_2 |= cpu_to_le32(BIT_23); - wrt_reg_dword(®->isp25mq.req_q_in, 0); - wrt_reg_dword(®->isp25mq.req_q_out, 0); - wrt_reg_dword(®->isp25mq.rsp_q_in, 0); - wrt_reg_dword(®->isp25mq.rsp_q_out, 0); + WRT_REG_DWORD(®->isp25mq.req_q_in, 0); + WRT_REG_DWORD(®->isp25mq.req_q_out, 0); + WRT_REG_DWORD(®->isp25mq.rsp_q_in, 0); + WRT_REG_DWORD(®->isp25mq.rsp_q_out, 0); } else { - wrt_reg_dword(®->isp24.req_q_in, 0); - wrt_reg_dword(®->isp24.req_q_out, 0); - wrt_reg_dword(®->isp24.rsp_q_in, 0); - wrt_reg_dword(®->isp24.rsp_q_out, 0); + WRT_REG_DWORD(®->isp24.req_q_in, 0); + WRT_REG_DWORD(®->isp24.req_q_out, 0); + WRT_REG_DWORD(®->isp24.rsp_q_in, 0); + WRT_REG_DWORD(®->isp24.rsp_q_out, 0); } qlt_24xx_config_rings(vha); @@ -4227,11 +4634,14 @@ qla24xx_config_rings(struct scsi_qla_host *vha) ql_dbg(ql_dbg_init, vha, 0x00fd, "Speed set by user : %s Gbps \n", qla2x00_get_link_speed_str(ha, ha->set_data_rate)); - icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13); + temp = le32_to_cpu(icb->firmware_options_3); + temp &= ~FWO3_DATA_RATE_MASK; + temp |= (ha->set_data_rate << FWO3_DATA_RATE_SHIFT); + icb->firmware_options_3 |= cpu_to_le32(temp); } /* PCI posting */ - rd_reg_word(&ioreg->hccr); + RD_REG_DWORD(&ioreg->hccr); } /** @@ -4262,7 +4672,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) req = ha->req_q_map[que]; if (!req || !test_bit(que, ha->req_qid_map)) continue; - req->out_ptr = (uint16_t *)(req->ring + req->length); + req->out_ptr = (void *)(req->ring + req->length); *req->out_ptr = 0; for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) req->outstanding_cmds[cnt] = NULL; @@ -4279,7 +4689,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) rsp = ha->rsp_q_map[que]; if (!rsp || !test_bit(que, ha->rsp_qid_map)) continue; - rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length); + rsp->in_ptr = (void *)(rsp->ring + rsp->length); *rsp->in_ptr = 0; /* Initialize response queue entries */ if (IS_QLAFX00(ha)) @@ -4297,8 +4707,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha) spin_unlock_irqrestore(&ha->hardware_lock, flags); - ql_dbg(ql_dbg_init, vha, 0x00d1, "Issue init firmware.\n"); - if (IS_QLAFX00(ha)) { rval = qlafx00_init_firmware(vha, ha->init_cb_size); goto next_check; @@ -4307,6 +4715,13 @@ qla2x00_init_rings(scsi_qla_host_t *vha) /* Update any ISP specific firmware options before initialization. */ ha->isp_ops->update_fw_options(vha); + ql_dbg(ql_dbg_init, vha, 0x00d1, + "Issue init firmware FW opt 1-3= %08x %08x %08x.\n", + le32_to_cpu(mid_init_cb->init_cb.firmware_options_1), + le32_to_cpu(mid_init_cb->init_cb.firmware_options_2), + le32_to_cpu(mid_init_cb->init_cb.firmware_options_3)); + + if (ha->flags.npiv_supported) { if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha)) ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1; @@ -4318,18 +4733,22 @@ qla2x00_init_rings(scsi_qla_host_t *vha) mid_init_cb->init_cb.execution_throttle = cpu_to_le16(ha->cur_fw_xcb_count); ha->flags.dport_enabled = - (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) & - BIT_7) != 0; + (mid_init_cb->init_cb.firmware_options_1 & BIT_7) != 0; ql_dbg(ql_dbg_init, vha, 0x0191, "DPORT Support: %s.\n", (ha->flags.dport_enabled) ? 
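
The data-rate update above is a read-modify-write on firmware_options_3: read the LE32 word, clear the rate field, insert the new rate. For a mask-and-shift field the final store is ordinarily a plain assignment, since OR-ing temp back in lets rate bits that temp cleared survive in the ICB copy. The conventional shape (sketch; mask and shift names as in the hunk):

u32 temp = le32_to_cpu(icb->firmware_options_3);

temp &= ~FWO3_DATA_RATE_MASK;				/* clear the old rate */
temp |= ha->set_data_rate << FWO3_DATA_RATE_SHIFT;	/* insert the new one */
icb->firmware_options_3 = cpu_to_le32(temp);		/* assign, don't OR */
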
"enabled" : "disabled"); /* FA-WWPN Status */ ha->flags.fawwpn_enabled = - (le32_to_cpu(mid_init_cb->init_cb.firmware_options_1) & - BIT_6) != 0; + (mid_init_cb->init_cb.firmware_options_1 & BIT_6) != 0; ql_dbg(ql_dbg_init, vha, 0x00bc, "FA-WWPN Support: %s.\n", (ha->flags.fawwpn_enabled) ? "enabled" : "disabled"); + /* Init_cb will be reused for other command(s). Save a backup copy of port_name */ + memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE); } + /* ELS pass through payload is limit by frame size. */ + if (ha->flags.edif_enabled) + mid_init_cb->init_cb.frame_payload_size = cpu_to_le16(ELS_MAX_PAYLOAD); + rval = qla2x00_init_firmware(vha, ha->init_cb_size); next_check: if (rval) { @@ -4358,7 +4777,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) unsigned long wtime, mtime, cs84xx_time; uint16_t min_wait; /* Minimum wait time if loop is down */ uint16_t wait_time; /* Wait time if loop is coming ready */ - uint16_t state[6]; + uint16_t state[16]; struct qla_hw_data *ha = vha->hw; if (IS_QLAFX00(vha->hw)) @@ -4541,11 +4960,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) /* initialize */ ha->min_external_loopid = SNS_FIRST_LOOP_ID; ha->operating_mode = LOOP; - ha->switch_cap = 0; switch (topo) { case 0: ql_dbg(ql_dbg_disc, vha, 0x200b, "HBA in NL topology.\n"); + ha->switch_cap = 0; ha->current_topology = ISP_CFG_NL; strcpy(connect_type, "(Loop)"); break; @@ -4559,6 +4978,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) case 2: ql_dbg(ql_dbg_disc, vha, 0x200d, "HBA in N P2P topology.\n"); + ha->switch_cap = 0; ha->operating_mode = P2P; ha->current_topology = ISP_CFG_N; strcpy(connect_type, "(N_Port-to-N_Port)"); @@ -4575,6 +4995,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) default: ql_dbg(ql_dbg_disc, vha, 0x200f, "HBA in unknown topology %x, using NL.\n", topo); + ha->switch_cap = 0; ha->current_topology = ISP_CFG_NL; strcpy(connect_type, "(Loop)"); break; @@ -4587,8 +5008,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) id.b.al_pa = al_pa; id.b.rsvd_1 = 0; spin_lock_irqsave(&ha->hardware_lock, flags); - if (!(topo == 2 && ha->flags.n2n_bigger)) - qlt_update_host_map(vha, id); + if (vha->hw->flags.edif_enabled) { + if (topo != 2) + qla_update_host_map(vha, id); + } else if (!(topo == 2 && ha->flags.n2n_bigger)) + qla_update_host_map(vha, id); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (!vha->flags.init_done) @@ -4704,7 +5128,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) ha->nvram_size = sizeof(*nv); ha->nvram_base = 0; if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) - if ((rd_reg_word(®->ctrl_status) >> 14) == 1) + if ((RD_REG_WORD(®->ctrl_status) >> 14) == 1) ha->nvram_base = 0x80; /* Get NVRAM data and calculate checksum. 
*/ @@ -4739,18 +5163,18 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) nv->firmware_options[1] = BIT_7 | BIT_5; nv->add_firmware_options[0] = BIT_5; nv->add_firmware_options[1] = BIT_5 | BIT_4; - nv->frame_payload_size = cpu_to_le16(2048); + nv->frame_payload_size = 2048; nv->special_options[1] = BIT_7; } else if (IS_QLA2200(ha)) { nv->firmware_options[0] = BIT_2 | BIT_1; nv->firmware_options[1] = BIT_7 | BIT_5; nv->add_firmware_options[0] = BIT_5; nv->add_firmware_options[1] = BIT_5 | BIT_4; - nv->frame_payload_size = cpu_to_le16(1024); + nv->frame_payload_size = 1024; } else if (IS_QLA2100(ha)) { nv->firmware_options[0] = BIT_3 | BIT_1; nv->firmware_options[1] = BIT_5; - nv->frame_payload_size = cpu_to_le16(1024); + nv->frame_payload_size = 1024; } nv->max_iocb_allocation = cpu_to_le16(256); @@ -4983,32 +5407,17 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) return (rval); } -static void -qla2x00_rport_del(void *data) -{ - fc_port_t *fcport = data; - struct fc_rport *rport; - unsigned long flags; - - spin_lock_irqsave(fcport->vha->host->host_lock, flags); - rport = fcport->drport ? fcport->drport : fcport->rport; - fcport->drport = NULL; - spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); - if (rport) { - ql_dbg(ql_dbg_disc, fcport->vha, 0x210b, - "%s %8phN. rport %p roles %x\n", - __func__, fcport->port_name, rport, - rport->roles); - - fc_remote_port_delete(rport); - } -} - void qla2x00_set_fcport_state(fc_port_t *fcport, int state) { int old_state; old_state = atomic_read(&fcport->state); + + if (state == FCS_ONLINE) + fcport->online_time = jiffies; + else if (old_state == FCS_ONLINE) + fcport->offline_time = jiffies; + atomic_set(&fcport->state, state); /* Don't print state transitions during initial allocation of fcport */ @@ -5049,6 +5458,21 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) /* Setup fcport template structure. 
*/ fcport->vha = vha; + fcport->sfc.vha = vha; + fcport->sfc.mode = ql2x_scmr_throttle_mode; + memcpy(&fcport->sfc.profile, &vha->hw->sfc.profile, + sizeof(struct qla_scmr_port_profile)); + fcport->scm.last_event_timestamp = qla_get_real_seconds(); + qla_scmr_set_tgt(&fcport->sfc); + fcport->sfc.fcport = fcport; + fcport->sfc.rstats = &fcport->scm.rstats; + fcport->vl.v_lane = VL_NORMAL; + fcport->vl.prio_hi = vha->hw->flogi_acc.rx_vl[VL_NORMAL].prio_hi; + fcport->vl.prio_lo = vha->hw->flogi_acc.rx_vl[VL_NORMAL].prio_lo; + memcpy(fcport->sfc.scmr_down_delta, vha->hw->sfc.scmr_down_delta, + sizeof(fcport->sfc.scmr_down_delta)); + memcpy(fcport->sfc.scmr_up_delta, vha->hw->sfc.scmr_up_delta, + sizeof(fcport->sfc.scmr_up_delta)); fcport->port_type = FCT_UNKNOWN; fcport->loop_id = FC_NO_LOOP_ID; qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); @@ -5061,19 +5485,22 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) fcport->login_retry = vha->hw->login_retry_count; fcport->chip_reset = vha->hw->base_qpair->chip_reset; fcport->logout_on_delete = 1; - - if (!fcport->ct_desc.ct_sns) { - ql_log(ql_log_warn, vha, 0xd049, - "Failed to allocate ct_sns request.\n"); - kfree(fcport); - return NULL; - } + fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; + fcport->tgt_short_link_down_cnt = 0; + fcport->dev_loss_tmo = 0; INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); - INIT_WORK(&fcport->free_work, qlt_free_session_done); INIT_WORK(&fcport->reg_work, qla_register_fcport_fn); INIT_LIST_HEAD(&fcport->gnl_entry); INIT_LIST_HEAD(&fcport->list); + INIT_LIST_HEAD(&fcport->tmf_pending); + + spin_lock_init(&fcport->edif.sa_list_lock); + INIT_LIST_HEAD(&fcport->edif.tx_sa_list); + INIT_LIST_HEAD(&fcport->edif.rx_sa_list); + + spin_lock_init(&fcport->edif.indx_list_lock); + INIT_LIST_HEAD(&fcport->edif.edif_indx_list); return fcport; } @@ -5088,8 +5515,13 @@ qla2x00_free_fcport(fc_port_t *fcport) fcport->ct_desc.ct_sns = NULL; } + + qla_edif_flush_sa_ctl_lists(fcport); list_del(&fcport->list); qla2x00_clear_loop_id(fcport); + + qla_edif_list_del(fcport); + kfree(fcport); } @@ -5098,24 +5530,25 @@ static void qla_get_login_template(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; int rval; u32 *bp, sz; - __be32 *q; memset(ha->init_cb, 0, ha->init_cb_size); - sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size); + sz = min_t(int, sizeof(struct fc_els_flogi), + ha->init_cb_size); rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma, - ha->init_cb, sz); - if (rval != QLA_SUCCESS) { + ha->init_cb, sz); + if (rval == QLA_SUCCESS) { + __be32 *q = (__be32 *)&ha->plogi_els_payld.fl_csp; + + bp = (uint32_t *)ha->init_cb; + cpu_to_be32_array(q, bp, sz / 4); + ha->flags.plogi_template_valid = 1; + } else { ql_dbg(ql_dbg_init, vha, 0x00d1, - "PLOGI ELS param read fail.\n"); - return; + "PLOGI ELS param read fail.\n"); } - q = (__be32 *)&ha->plogi_els_payld.fl_csp; - - bp = (uint32_t *)ha->init_cb; - cpu_to_be32_array(q, bp, sz / 4); - ha->flags.plogi_template_valid = 1; } + /* * qla2x00_configure_loop * Updates Fibre Channel Device Database with what is actually on loop. 
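
qla_get_login_template() above reads the login template into init_cb as little-endian 32-bit words and converts it into the big-endian ELS payload with cpu_to_be32_array(). Open-coded, that conversion is just a per-word byte swap (sketch):

#include <linux/types.h>
#include <asm/byteorder.h>

/* What cpu_to_be32_array(dst, src, words) does, one word at a time. */
static void login_templ_to_be32(__be32 *dst, const u32 *src, size_t words)
{
	size_t i;

	for (i = 0; i < words; i++)
		dst[i] = cpu_to_be32(src[i]);
}

Note that sz / 4 in the caller is the word count, matching the last argument here.
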
@@ -5163,14 +5596,20 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) /* Determine what we need to do */ if ((ha->current_topology == ISP_CFG_FL || - ha->current_topology == ISP_CFG_F) && + ha->current_topology == ISP_CFG_F) && (test_bit(LOCAL_LOOP_UPDATE, &flags))) { set_bit(RSCN_UPDATE, &flags); clear_bit(LOCAL_LOOP_UPDATE, &flags); - } else if (ha->current_topology == ISP_CFG_NL || - ha->current_topology == ISP_CFG_N) { + } else if (ha->current_topology == ISP_CFG_N) { + clear_bit(RSCN_UPDATE, &flags); + if (qla_tgt_mode_enabled(vha)) { + /* allow the other side to start the login */ + clear_bit(LOCAL_LOOP_UPDATE, &flags); + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + } + } else if (ha->current_topology == ISP_CFG_NL) { clear_bit(RSCN_UPDATE, &flags); set_bit(LOCAL_LOOP_UPDATE, &flags); } else if (!vha->flags.online || @@ -5208,6 +5647,12 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) "LOOP READY.\n"); ha->flags.fw_init_done = 1; + /* + * use link up to wake up app to get ready for authentication. + */ + if (ha->flags.edif_enabled && DBELL_INACTIVE(vha)) + qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); + /* * Process any ATIO queue entries that came in * while we weren't online. @@ -5227,7 +5672,8 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) "%s *** FAILED ***.\n", __func__); } else { ql_dbg(ql_dbg_disc, vha, 0x206b, - "%s: exiting normally.\n", __func__); + "%s: exiting normally. local port wwpn %8phN id %06x)\n", + __func__, vha->port_name, vha->d_id.b24); } /* Restore state if a resync event occurred during processing */ @@ -5242,11 +5688,14 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) return (rval); } + static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha) { unsigned long flags; fc_port_t *fcport; + ql_dbg(ql_dbg_disc, vha, 0x206a, "%s %d.\n", __func__, __LINE__); + if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) set_bit(RELOGIN_NEEDED, &vha->dpc_flags); @@ -5268,6 +5717,22 @@ static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha) return QLA_FUNCTION_FAILED; } +static void +qla_reinitialize_link(scsi_qla_host_t *vha) +{ + int rval; + + atomic_set(&vha->loop_state, LOOP_DOWN); + atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); + rval = qla2x00_full_login_lip(vha); + if (rval == QLA_SUCCESS) { + ql_dbg(ql_dbg_disc, vha, 0xd050, "Link reinitialized\n"); + } else { + ql_dbg(ql_dbg_disc, vha, 0xd051, + "Link reinitialization failed (%d)\n", rval); + } +} + /* * qla2x00_configure_local_loop * Updates Fibre Channel Device Database with local loop devices. @@ -5319,6 +5784,19 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) spin_unlock_irqrestore(&vha->work_lock, flags); if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { + u8 loop_map_entries = 0; + int rc; + + rc = qla2x00_get_fcal_position_map(vha, NULL, + &loop_map_entries); + if (rc == QLA_SUCCESS && loop_map_entries > 1) { + /* + * There are devices that are still not logged + * in. Reinitialize to give them a chance. 
+ */ + qla_reinitialize_link(vha); + return QLA_FUNCTION_FAILED; + } set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } @@ -5547,8 +6025,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) if (atomic_read(&fcport->state) == FCS_ONLINE) return; - qla2x00_set_fcport_state(fcport, FCS_ONLINE); - rport_ids.node_name = wwn_to_u64(fcport->node_name); rport_ids.port_name = wwn_to_u64(fcport->port_name); rport_ids.port_id = fcport->d_id.b.domain << 16 | @@ -5564,6 +6040,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) spin_lock_irqsave(fcport->vha->host->host_lock, flags); *((fc_port_t **)rport->dd_data) = fcport; spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); + fcport->dev_loss_tmo = rport->dev_loss_tmo; rport->supported_classes = fcport->supported_classes; @@ -5579,13 +6056,14 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) if (fcport->port_type & FCT_NVME_DISCOVERY) rport_ids.roles |= FC_PORT_ROLE_NVME_DISCOVERY; + fc_remote_port_rolechg(rport, rport_ids.roles); + ql_dbg(ql_dbg_disc, vha, 0x20ee, - "%s %8phN. rport %p is %s mode\n", - __func__, fcport->port_name, rport, + "%s: %8phN. rport %ld:0:%d (%px) is %s mode\n", + __func__, fcport->port_name, vha->host_no, + rport->scsi_target_id, rport, (fcport->port_type == FCT_TARGET) ? "tgt" : ((fcport->port_type & FCT_NVME) ? "nvme" : "ini")); - - fc_remote_port_rolechg(rport, rport_ids.roles); } /* @@ -5622,6 +6100,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) fcport->logout_on_delete = 1; fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0; + if (fcport->tgt_link_down_time < fcport->dev_loss_tmo) { + fcport->tgt_short_link_down_cnt++; + fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; + } + switch (vha->hw->current_topology) { case ISP_CFG_N: case ISP_CFG_NL: @@ -5635,13 +6118,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) qla2x00_dfs_create_rport(vha, fcport); - if (NVME_TARGET(vha->hw, fcport)) { - qla_nvme_register_remote(vha, fcport); - qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE); - qla2x00_set_fcport_state(fcport, FCS_ONLINE); - return; - } - qla24xx_update_fcport_fcp_prio(vha, fcport); switch (vha->host->active_mode) { @@ -5649,7 +6125,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) qla2x00_reg_remote_port(vha, fcport); break; case MODE_TARGET: - qla2x00_set_fcport_state(fcport, FCS_ONLINE); if (!vha->vha_tgt.qla_tgt->tgt_stop && !vha->vha_tgt.qla_tgt->tgt_stopped) qlt_fc_port_added(vha, fcport); @@ -5664,6 +6139,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) break; } + if (NVME_TARGET(vha->hw, fcport)) + qla_nvme_register_remote(vha, fcport); + + qla2x00_set_fcport_state(fcport, FCS_ONLINE); + if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) { if (fcport->id_changed) { fcport->id_changed = 0; @@ -5695,6 +6175,10 @@ void qla_register_fcport_fn(struct work_struct *work) qla2x00_update_fcport(fcport->vha, fcport); + ql_dbg(ql_dbg_disc, fcport->vha, 0x911e, + "%s rscn gen %d/%d next DS %d\n", __func__, + rscn_gen, fcport->rscn_gen, fcport->next_disc_state); + if (rscn_gen != fcport->rscn_gen) { /* RSCN(s) came in while registration */ switch (fcport->next_disc_state) { @@ -5777,6 +6261,17 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha) return rval; } + /* + * Check if FW has support for SCM and if driver has to send EDC and RDF + * Get the features from firmware and use that to build payload + */ + if 
(QLA_DRV_SEND_ELS(ha)) { + ql_dbg(ql_dbg_scm, vha, 0xffff, + "Adapter and Firmware support SCM, send EDC and RDF \n"); + if (!qla2xxx_scm_get_features(vha)) + qla2xxx_send_uscm_els(vha); + } + /* FDMI support. */ if (ql2xfdmienable && test_and_clear_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags)) @@ -6092,7 +6587,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) break; } - if (found && NVME_TARGET(vha->hw, fcport)) { + if (NVME_TARGET(vha->hw, fcport)) { if (fcport->disc_state == DSC_DELETE_PEND) { qla2x00_set_fcport_disc_state(fcport, DSC_GNL); vha->fcport_count--; @@ -6445,43 +6940,16 @@ int qla2x00_perform_loop_resync(scsi_qla_host_t *ha) atomic_set(&ha->loop_state, LOOP_UP); set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags); set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags); - set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); - - rval = qla2x00_loop_resync(ha); - } else - atomic_set(&ha->loop_state, LOOP_DEAD); - - clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags); - } - - return rval; -} - -void -qla2x00_update_fcports(scsi_qla_host_t *base_vha) -{ - fc_port_t *fcport; - struct scsi_qla_host *vha; - struct qla_hw_data *ha = base_vha->hw; - unsigned long flags; - - spin_lock_irqsave(&ha->vport_slock, flags); - /* Go with deferred removal of rport references. */ - list_for_each_entry(vha, &base_vha->hw->vp_list, list) { - atomic_inc(&vha->vref_count); - list_for_each_entry(fcport, &vha->vp_fcports, list) { - if (fcport->drport && - atomic_read(&fcport->state) != FCS_UNCONFIGURED) { - spin_unlock_irqrestore(&ha->vport_slock, flags); - qla2x00_rport_del(fcport); + set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags); - spin_lock_irqsave(&ha->vport_slock, flags); - } - } - atomic_dec(&vha->vref_count); - wake_up(&vha->vref_waitq); + rval = qla2x00_loop_resync(ha); + } else + atomic_set(&ha->loop_state, LOOP_DEAD); + + clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags); } - spin_unlock_irqrestore(&ha->vport_slock, flags); + + return rval; } /* Assumes idc_lock always held on entry */ @@ -6778,7 +7246,7 @@ qla2xxx_mctp_dump(scsi_qla_host_t *vha) "Failed to capture mctp dump\n"); } else { ql_log(ql_log_info, vha, 0x5070, - "Mctp dump capture for host (%ld/%p).\n", + "Mctp dump capture for host (%ld/%px).\n", vha->host_no, ha->mctp_dump); ha->mctp_dumped = 1; } @@ -6812,17 +7280,28 @@ void qla2x00_quiesce_io(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; - struct scsi_qla_host *vp; + struct scsi_qla_host *vp, *tvp; + unsigned long flags; ql_dbg(ql_dbg_dpc, vha, 0x401d, - "Quiescing I/O - ha=%p.\n", ha); + "Quiescing I/O - ha=%px.\n", ha); atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); qla2x00_mark_all_devices_lost(vha); - list_for_each_entry(vp, &ha->vp_list, list) + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { + atomic_inc(&vp->vref_count); + spin_unlock_irqrestore(&ha->vport_slock, flags); + qla2x00_mark_all_devices_lost(vp); + + spin_lock_irqsave(&ha->vport_slock, flags); + atomic_dec(&vp->vref_count); + } + spin_unlock_irqrestore(&ha->vport_slock, flags); } else { if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, @@ -6837,7 +7316,7 @@ void qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; - struct scsi_qla_host *vp; + struct scsi_qla_host *vp, *tvp; unsigned long flags; fc_port_t *fcport; u16 i; @@ -6852,7 +7331,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) vha->qla_stats.total_isp_aborts++; 
ql_log(ql_log_info, vha, 0x00af, - "Performing ISP error recovery - ha=%p.\n", ha); + "Performing ISP error recovery - ha=%px.\n", ha); ha->flags.purge_mbox = 1; /* For ISP82XX, reset_chip is just disabling interrupts. @@ -6862,6 +7341,17 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) if (!(IS_P3P_TYPE(ha))) ha->isp_ops->reset_chip(vha); + /* Clear SCM stats and throttling, if SCM is enabled */ + qla_scm_clear_host(vha); + qla_scm_clear_all_tgt_sess(vha); + ha->flags.scm_enabled = 0; + ha->flags.conn_fabric_cisco_er_rdy = 0; + ha->flags.conn_fabric_brocade = 0; + ha->flags.flogi_acc_enabled = 0; + ha->flags.flogi_acc_pl_in_cont_iocb = 0; + ha->flags.scm_supported_vl = 0; + memset(&ha->flogi_acc, 0, sizeof(struct flogi_acc_payld)); + ha->link_data_rate = PORT_SPEED_UNKNOWN; SAVE_TOPO(ha); ha->flags.rida_fmt2 = 0; @@ -6872,10 +7362,16 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) ha->flags.fw_init_done = 0; ha->chip_reset++; ha->base_qpair->chip_reset = ha->chip_reset; + ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0; + ha->base_qpair->prev_completion_cnt = 0; for (i = 0; i < ha->max_qpairs; i++) { - if (ha->queue_pair_map[i]) + if (ha->queue_pair_map[i]) { ha->queue_pair_map[i]->chip_reset = ha->base_qpair->chip_reset; + ha->queue_pair_map[i]->cmd_cnt = + ha->queue_pair_map[i]->cmd_completion_cnt = 0; + ha->base_qpair->prev_completion_cnt = 0; + } } /* purge MBox commands */ @@ -6901,7 +7397,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) qla2x00_mark_all_devices_lost(vha); spin_lock_irqsave(&ha->vport_slock, flags); - list_for_each_entry(vp, &ha->vp_list, list) { + list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); @@ -6923,7 +7419,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) fcport->scan_state = 0; } spin_lock_irqsave(&ha->vport_slock, flags); - list_for_each_entry(vp, &ha->vp_list, list) { + list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); @@ -6935,22 +7431,20 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) } spin_unlock_irqrestore(&ha->vport_slock, flags); - if (!ha->flags.eeh_busy) { - /* Make sure for ISP 82XX IO DMA is complete */ - if (IS_P3P_TYPE(ha)) { - qla82xx_chip_reset_cleanup(vha); - ql_log(ql_log_info, vha, 0x00b4, - "Done chip reset cleanup.\n"); - - /* Done waiting for pending commands. - * Reset the online flag. - */ - vha->flags.online = 0; - } + /* Make sure for ISP 82XX IO DMA is complete */ + if (IS_P3P_TYPE(ha)) { + qla82xx_chip_reset_cleanup(vha); + ql_log(ql_log_info, vha, 0x00b4, + "Done chip reset cleanup.\n"); - /* Requeue all commands in outstanding command list. */ - qla2x00_abort_all_cmds(vha, DID_RESET << 16); + /* Done waiting for pending commands. + * Reset the online flag. + */ + vha->flags.online = 0; } + + /* Requeue all commands in outstanding command list. 
*/ + qla2x00_abort_all_cmds(vha, DID_RESET << 16); /* memory barrier */ wmb(); } @@ -6971,13 +7465,25 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) int rval; uint8_t status = 0; struct qla_hw_data *ha = vha->hw; - struct scsi_qla_host *vp; + struct scsi_qla_host *vp, *tvp; struct req_que *req = ha->req_q_map[0]; unsigned long flags; if (vha->flags.online) { qla2x00_abort_isp_cleanup(vha); + vha->dport_status |= DPORT_DIAG_CHIP_RESET_IN_PROGRESS; + vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS; + + if (vha->hw->flags.port_isolated) + return status; + + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x803f, + "PCI/Register disconnect 1, exiting.\n"); + return status; + } + if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) { ha->flags.chip_reset_done = 1; vha->flags.online = 1; @@ -7007,8 +7513,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) return 0; break; case QLA2XXX_INI_MODE_DUAL: - if (!qla_dual_mode_enabled(vha) && - !qla_ini_mode_enabled(vha)) + if (!qla_dual_mode_enabled(vha)) return 0; break; case QLA2XXX_INI_MODE_ENABLED: @@ -7018,8 +7523,18 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) ha->isp_ops->get_flash_version(vha, req->ring); + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x803f, + "PCI/Register disconnect 2, exiting.\n"); + return status; + } ha->isp_ops->nvram_config(vha); + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, vha, 0x803f, + "PCI/Register disconnect 3, exiting.\n"); + return status; + } if (!qla2x00_restart_isp(vha)) { clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); @@ -7100,11 +7615,16 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) } + if (vha->hw->flags.port_isolated) { + qla2x00_abort_isp_cleanup(vha); + return status; + } + if (!status) { ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__); qla2x00_configure_hba(vha); spin_lock_irqsave(&ha->vport_slock, flags); - list_for_each_entry(vp, &ha->vp_list, list) { + list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { if (vp->vp_idx) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); @@ -7145,41 +7665,36 @@ qla2x00_abort_isp(scsi_qla_host_t *vha) static int qla2x00_restart_isp(scsi_qla_host_t *vha) { - int status; + int status = 0; struct qla_hw_data *ha = vha->hw; /* If firmware needs to be loaded */ if (qla2x00_isp_firmware(vha)) { vha->flags.online = 0; status = ha->isp_ops->chip_diag(vha); - if (status) - return status; - status = qla2x00_setup_chip(vha); - if (status) - return status; + if (!status) + status = qla2x00_setup_chip(vha); } - status = qla2x00_init_rings(vha); - if (status) - return status; + if (!status && !(status = qla2x00_init_rings(vha))) { + clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); + ha->flags.chip_reset_done = 1; - clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags); - ha->flags.chip_reset_done = 1; + /* Initialize the queues in use */ + qla25xx_init_queues(ha); - /* Initialize the queues in use */ - qla25xx_init_queues(ha); + status = qla2x00_fw_ready(vha); + if (!status) { + /* Issue a marker after FW becomes ready. */ + qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + } - status = qla2x00_fw_ready(vha); - if (status) { /* if no cable then assume it's good */ - return vha->device_flags & DFLG_NO_CABLE ? 0 : status; + if ((vha->device_flags & DFLG_NO_CABLE)) + status = 0; } - - /* Issue a marker after FW becomes ready. 
*/ - qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL); - set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - - return 0; + return (status); } static int @@ -7243,10 +7758,10 @@ qla2x00_reset_adapter(scsi_qla_host_t *vha) ha->isp_ops->disable_intrs(ha); spin_lock_irqsave(&ha->hardware_lock, flags); - wrt_reg_word(®->hccr, HCCR_RESET_RISC); - rd_reg_word(®->hccr); /* PCI Posting. */ - wrt_reg_word(®->hccr, HCCR_RELEASE_RISC); - rd_reg_word(®->hccr); /* PCI Posting. */ + WRT_REG_WORD(®->hccr, HCCR_RESET_RISC); + RD_REG_WORD(®->hccr); /* PCI Posting. */ + WRT_REG_WORD(®->hccr, HCCR_RELEASE_RISC); + RD_REG_WORD(®->hccr); /* PCI Posting. */ spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; @@ -7258,24 +7773,25 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha) unsigned long flags = 0; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + int rval = QLA_SUCCESS; if (IS_P3P_TYPE(ha)) - return QLA_SUCCESS; + return rval; vha->flags.online = 0; ha->isp_ops->disable_intrs(ha); spin_lock_irqsave(&ha->hardware_lock, flags); - wrt_reg_dword(®->hccr, HCCRX_SET_RISC_RESET); - rd_reg_dword(®->hccr); - wrt_reg_dword(®->hccr, HCCRX_REL_RISC_PAUSE); - rd_reg_dword(®->hccr); + WRT_REG_DWORD(®->hccr, HCCRX_SET_RISC_RESET); + RD_REG_DWORD(®->hccr); + WRT_REG_DWORD(®->hccr, HCCRX_REL_RISC_PAUSE); + RD_REG_DWORD(®->hccr); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (IS_NOPOLLING_TYPE(ha)) ha->isp_ops->enable_intrs(ha); - return QLA_SUCCESS; + return rval; } /* On sparc systems, obtain port and node WWN from firmware @@ -7307,7 +7823,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) int rval; struct init_cb_24xx *icb; struct nvram_24xx *nv; - __le32 *dptr; + uint32_t *dptr; uint8_t *dptr1, *dptr2; uint32_t chksum; uint16_t cnt; @@ -7335,7 +7851,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4); /* Get NVRAM data into cache and calculate checksum. */ - dptr = (__force __le32 *)nv; + dptr = (uint32_t *)nv; ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size); for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) chksum += le32_to_cpu(*dptr); @@ -7363,7 +7879,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) memset(nv, 0, ha->nvram_size); nv->nvram_version = cpu_to_le16(ICB_VERSION); nv->version = cpu_to_le16(ICB_VERSION); - nv->frame_payload_size = cpu_to_le16(2048); + nv->frame_payload_size = 2048; nv->execution_throttle = cpu_to_le16(0xFFFF); nv->exchange_count = cpu_to_le16(0); nv->hard_address = cpu_to_le16(124); @@ -7531,7 +8047,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha) ha->login_retry_count = ql2xloginretrycount; /* N2N: driver will initiate Login instead of FW */ - icb->firmware_options_3 |= cpu_to_le32(BIT_8); + icb->firmware_options_3 |= BIT_8; /* Enable ZIO. 
*/ if (!vha->flags.init_done) { @@ -7599,7 +8115,7 @@ qla27xx_check_image_status_signature(struct qla27xx_image_status *image_status) static ulong qla27xx_image_status_checksum(struct qla27xx_image_status *image_status) { - __le32 *p = (__force __le32 *)image_status; + uint32_t *p = (void *)image_status; uint n = sizeof(*image_status) / sizeof(*p); uint32_t sum = 0; @@ -7631,6 +8147,9 @@ qla28xx_component_status( active_regions->aux.npiv_config_2_3 = qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NPIV_CONFIG_2_3); + + active_regions->aux.nvme_params = + qla28xx_component_bitmask(aux, QLA28XX_AUX_IMG_NVME_PARAMS); } static int @@ -7662,7 +8181,7 @@ qla28xx_get_aux_images( goto check_sec_image; } - qla24xx_read_flash_data(vha, (uint32_t *)&pri_aux_image_status, + qla24xx_read_flash_data(vha, (void *)&pri_aux_image_status, ha->flt_region_aux_img_status_pri, sizeof(pri_aux_image_status) >> 2); qla27xx_print_image(vha, "Primary aux image", &pri_aux_image_status); @@ -7695,7 +8214,7 @@ qla28xx_get_aux_images( goto check_valid_image; } - qla24xx_read_flash_data(vha, (uint32_t *)&sec_aux_image_status, + qla24xx_read_flash_data(vha, (void *)&sec_aux_image_status, ha->flt_region_aux_img_status_sec, sizeof(sec_aux_image_status) >> 2); qla27xx_print_image(vha, "Secondary aux image", &sec_aux_image_status); @@ -7739,11 +8258,12 @@ qla28xx_get_aux_images( } ql_dbg(ql_dbg_init, vha, 0x018f, - "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u\n", + "aux images active: BCFG=%u VPD/NVR=%u NPIV0/1=%u NPIV2/3=%u, NVME=%u\n", active_regions->aux.board_config, active_regions->aux.vpd_nvram, active_regions->aux.npiv_config_0_1, - active_regions->aux.npiv_config_2_3); + active_regions->aux.npiv_config_2_3, + active_regions->aux.nvme_params); } void @@ -7760,7 +8280,7 @@ qla27xx_get_active_image(struct scsi_qla_host *vha, goto check_sec_image; } - if (qla24xx_read_flash_data(vha, (uint32_t *)&pri_image_status, + if (qla24xx_read_flash_data(vha, (void *)(&pri_image_status), ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) != QLA_SUCCESS) { WARN_ON_ONCE(true); @@ -7867,7 +8387,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, ql_dbg(ql_dbg_init, vha, 0x008b, "FW: Loading firmware from flash (%x).\n", faddr); - dcode = (uint32_t *)req->ring; + dcode = (void *)req->ring; qla24xx_read_flash_data(vha, dcode, faddr, 8); if (qla24xx_risc_firmware_invalid(dcode)) { ql_log(ql_log_fatal, vha, 0x008c, @@ -7880,18 +8400,18 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, return QLA_FUNCTION_FAILED; } - dcode = (uint32_t *)req->ring; + dcode = (void *)req->ring; *srisc_addr = 0; segments = FA_RISC_CODE_SEGMENTS; for (j = 0; j < segments; j++) { ql_dbg(ql_dbg_init, vha, 0x008d, "-> Loading segment %u...\n", j); qla24xx_read_flash_data(vha, dcode, faddr, 10); - risc_addr = be32_to_cpu((__force __be32)dcode[2]); - risc_size = be32_to_cpu((__force __be32)dcode[3]); + risc_addr = be32_to_cpu(dcode[2]); + risc_size = be32_to_cpu(dcode[3]); if (!*srisc_addr) { *srisc_addr = risc_addr; - risc_attr = be32_to_cpu((__force __be32)dcode[9]); + risc_attr = be32_to_cpu(dcode[9]); } dlen = ha->fw_transfer_size >> 2; @@ -7931,9 +8451,9 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr, fwdt->template = NULL; fwdt->length = 0; - dcode = (uint32_t *)req->ring; + dcode = (void *)req->ring; qla24xx_read_flash_data(vha, dcode, faddr, 7); - risc_size = be32_to_cpu((__force __be32)dcode[2]); + risc_size = be32_to_cpu(dcode[2]); ql_dbg(ql_dbg_init, vha, 0x0161, 
"-> fwdt%u template array at %#x (%#x dwords)\n", j, faddr, risc_size); @@ -8002,8 +8522,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) { int rval; int i, fragment; - uint16_t *wcode; - __be16 *fwcode; + uint16_t *wcode, *fwcode; uint32_t risc_addr, risc_size, fwclen, wlen, *seg; struct fw_blob *blob; struct qla_hw_data *ha = vha->hw; @@ -8023,7 +8542,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) wcode = (uint16_t *)req->ring; *srisc_addr = 0; - fwcode = (__force __be16 *)blob->fw->data; + fwcode = (uint16_t *)blob->fw->data; fwclen = 0; /* Validate firmware image by checking version. */ @@ -8071,7 +8590,7 @@ qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) "words 0x%x.\n", risc_addr, wlen); for (i = 0; i < wlen; i++) - wcode[i] = swab16((__force u32)fwcode[i]); + wcode[i] = swab16(fwcode[i]); rval = qla2x00_load_ram(vha, req->dma, risc_addr, wlen); @@ -8108,7 +8627,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) ulong i; uint j; struct fw_blob *blob; - __be32 *fwcode; + uint32_t *fwcode; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct fwdt *fwdt = ha->fwdt; @@ -8124,8 +8643,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) return QLA_FUNCTION_FAILED; } - fwcode = (__force __be32 *)blob->fw->data; - dcode = (__force uint32_t *)fwcode; + fwcode = (void *)blob->fw->data; + dcode = fwcode; if (qla24xx_risc_firmware_invalid(dcode)) { ql_log(ql_log_fatal, vha, 0x0093, "Unable to verify integrity of firmware image (%zd).\n", @@ -8136,7 +8655,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) return QLA_FUNCTION_FAILED; } - dcode = (uint32_t *)req->ring; + dcode = (void *)req->ring; *srisc_addr = 0; segments = FA_RISC_CODE_SEGMENTS; for (j = 0; j < segments; j++) { @@ -8162,7 +8681,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) dlen); for (i = 0; i < dlen; i++) - dcode[i] = swab32((__force u32)fwcode[i]); + dcode[i] = swab32(fwcode[i]); rval = qla2x00_load_ram(vha, req->dma, risc_addr, dlen); if (rval) { @@ -8216,7 +8735,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr) dcode = fwdt->template; for (i = 0; i < risc_size; i++) - dcode[i] = (__force u32)fwcode[i]; + dcode[i] = fwcode[i]; if (!qla27xx_fwdt_template_valid(dcode)) { ql_log(ql_log_warn, vha, 0x0175, @@ -8479,15 +8998,16 @@ qla84xx_init_chip(scsi_qla_host_t *vha) QLA_SUCCESS; } +DECLARE_ENUM2STR_LOOKUP(qla_get_profile_type, ql_scm_profile_type, + QL_SCM_PROFILE_TYPES_INIT); /* 81XX Support **************************************************************/ - int qla81xx_nvram_config(scsi_qla_host_t *vha) { int rval; struct init_cb_81xx *icb; struct nvram_81xx *nv; - __le32 *dptr; + uint32_t *dptr; uint8_t *dptr1, *dptr2; uint32_t chksum; uint16_t cnt; @@ -8534,7 +9054,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) "primary" : "secondary"); ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size); - dptr = (__force __le32 *)nv; + dptr = (uint32_t *)nv; for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) chksum += le32_to_cpu(*dptr); @@ -8561,7 +9081,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) memset(nv, 0, ha->nvram_size); nv->nvram_version = cpu_to_le16(ICB_VERSION); nv->version = cpu_to_le16(ICB_VERSION); - nv->frame_payload_size = cpu_to_le16(2048); + nv->frame_payload_size = 2048; nv->execution_throttle = cpu_to_le16(0xFFFF); nv->exchange_count = cpu_to_le16(0); nv->port_name[0] = 0x21; @@ -8601,11 +9121,15 
@@ qla81xx_nvram_config(scsi_qla_host_t *vha) nv->enode_mac[4] = 0x05; nv->enode_mac[5] = 0x06 + ha->port_no + 1; + if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { + nv->scmr_throttle_profile = 1; /* Conservative */ + nv->scmr_control_flags = 0; /* Driver default */ + } rval = 1; } if (IS_T10_PI_CAPABLE(ha)) - nv->frame_payload_size &= cpu_to_le16(~7); + nv->frame_payload_size &= ~7; qlt_81xx_config_nvram_stage1(vha, nv); @@ -8667,10 +9191,24 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) icb->node_name[0] &= 0xF0; } - if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { - if ((nv->enhanced_features & BIT_7) == 0) - ha->flags.scm_supported_a = 1; - } + /* Reset the flag - in case SCM/VL is disabled via CLI */ + ha->flags.scm_supported_a = 0; + ha->flags.scm_supported_vl = 0; + + /* SCM Enabled in NVRAM */ + if ((le16_to_cpu(nv->enhanced_features) & BIT_7) == 0) { + ql_log(ql_log_info, vha, 0x0072, + "USCM enabled in NVRAM \n"); + ha->flags.scm_supported_a = 1; + /* Check if VL is enabled in NVRAM or mod param */ + if ((nv->port_features & BIT_2) || (ql2xvirtuallane)) { + ql_log(ql_log_info, vha, 0x0073, + "Virtual Lane Support enabled via NVRAM:%u" + " and/or Mod Param:%d\n", (nv->port_features >> 2), ql2xvirtuallane); + ha->flags.scm_supported_vl = 1; + icb->firmware_options_3 |= cpu_to_le32(BIT_2); + } + } /* Set host adapter parameters. */ ha->flags.disable_risc_code_load = 0; @@ -8781,6 +9319,22 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) /* Determine NVMe/FCP priority for target ports */ ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha); + /* Set USCM profile */ + if (nv->scmr_control_flags & QLA_USE_NVRAM_CONFIG) { + /* Use NVRAM values */ + ha->sfc.profile.scmr_profile = nv->scmr_throttle_profile; + ql_log(ql_log_info, vha, 0x0077, + "SCM profile from NVRAM:%s\n", + qla_get_profile_type(ha->sfc.profile.scmr_profile)); + } else { /* Use module param */ + ha->sfc.profile.scmr_profile = ql2x_scmr_profile; + ql_log(ql_log_info, vha, 0x0078, + "SCM profile from driver:%s\n", + qla_get_profile_type(ha->sfc.profile.scmr_profile)); + } + /* We might choose to get these values from NVRAM in the future */ + qla2xxx_scmr_init_deltas(&ha->sfc); + if (rval) { ql_log(ql_log_warn, vha, 0x0076, "NVRAM configuration failed.\n"); @@ -8793,7 +9347,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) { int status, rval; struct qla_hw_data *ha = vha->hw; - struct scsi_qla_host *vp; + struct scsi_qla_host *vp, *tvp; unsigned long flags; status = qla2x00_init_rings(vha); @@ -8865,7 +9419,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) "qla82xx_restart_isp succeeded.\n"); spin_lock_irqsave(&ha->vport_slock, flags); - list_for_each_entry(vp, &ha->vp_list, list) { + list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { if (vp->vp_idx) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); @@ -8886,6 +9440,72 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) return status; } +void +qla83xx_update_fw_options(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (ql2xrdpenable) + ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB; + + qla2x00_set_fw_options(vha, ha->fw_options); +} + +void +qla81xx_update_fw_options(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + /* Hold status IOCBs until ABTS response received. 
*/ + if (ql2xfwholdabts) + ha->fw_options[3] |= BIT_12; + + /* Set Retry FLOGI in case of P2P connection */ + if (ha->operating_mode == P2P) { + ha->fw_options[2] |= BIT_3; + ql_dbg(ql_dbg_disc, vha, 0x2103, + "(%s): Setting FLOGI retry BIT in fw_options[2]: 0x%x\n", + __func__, ha->fw_options[2]); + } + + /* Move PUREX, ABTS RX & RIDA to ATIOQ */ + if (ql2xmvasynctoatio) { + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) + ha->fw_options[2] |= BIT_11; + else + ha->fw_options[2] &= ~BIT_11; + } + + if (qla_tgt_mode_enabled(vha) || + qla_dual_mode_enabled(vha)) { + /* FW auto send SCSI status during */ + ha->fw_options[1] |= BIT_8; + ha->fw_options[10] |= (u16)SAM_STAT_BUSY << 8; + + /* FW perform Exchange validation */ + ha->fw_options[2] |= BIT_4; + } else { + ha->fw_options[1] &= ~BIT_8; + ha->fw_options[10] &= 0x00ff; + + ha->fw_options[2] &= ~BIT_4; + } + + if (ql2xetsenable) { + /* Enable ETS Burst. */ + memset(ha->fw_options, 0, sizeof(ha->fw_options)); + ha->fw_options[2] |= BIT_9; + } + + ql_dbg(ql_dbg_init, vha, 0x00e9, + "%s, add FW options 1-3 = 0x%04x 0x%04x 0x%04x mode %x\n", + __func__, ha->fw_options[1], ha->fw_options[2], + ha->fw_options[3], vha->host->active_mode); + + qla2x00_set_fw_options(vha, ha->fw_options); +} + /* * qla24xx_get_fcp_prio * Gets the fcp cmd priority value for the logged in port. @@ -9114,6 +9734,9 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, qpair->enable_explicit_conf = ha->base_qpair->enable_explicit_conf; + if (qos == 1) + ha->slow_queue_id = qpair_id; + for (i = 0; i < ha->msix_count; i++) { msix = &ha->msix_entries[i]; if (msix->in_use) @@ -9159,8 +9782,6 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos, qpair->req = ha->req_q_map[req_id]; qpair->rsp->req = qpair->req; qpair->rsp->qpair = qpair; - /* init qpair to this cpu. Will adjust at run time. 
 */
-	qla_cpu_update(qpair, raw_smp_processor_id());
 
 	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
 		if (ha->fw_attributes & BIT_4)
@@ -9175,6 +9796,13 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
 		goto fail_mempool;
 	}
 
+	if (qla_create_buf_pool(vha, qpair)) {
+		ql_log(ql_log_warn, vha, 0xd036,
+		    "Failed to initialize buf pool for qpair %d\n",
+		    qpair->id);
+		goto fail_bufpool;
+	}
+
 	/* Mark as online */
 	qpair->online = 1;
 
@@ -9190,7 +9818,10 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
 	}
 	return qpair;
 
+fail_bufpool:
+	mempool_destroy(qpair->srb_mempool);
 fail_mempool:
+	qla25xx_delete_req_que(vha, qpair->req);
 fail_req:
 	qla25xx_delete_rsp_que(vha, qpair->rsp);
 fail_rsp:
@@ -9200,6 +9831,8 @@ struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *vha, int qos,
 	if (list_empty(&vha->qp_list))
 		vha->flags.qpairs_available = 0;
 fail_msix:
+	if (qos == 1)
+		ha->slow_queue_id = 0;
 	ha->queue_pair_map[qpair_id] = NULL;
 	clear_bit(qpair_id, ha->qpair_qid_map);
 	ha->num_qpairs--;
@@ -9216,6 +9849,8 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
 
 	qpair->delete_in_progress = 1;
 
+	qla_free_buf_pool(qpair);
+
 	ret = qla25xx_delete_req_que(vha, qpair->req);
 	if (ret != QLA_SUCCESS)
 		goto fail;
@@ -9242,3 +9877,348 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
 fail:
 	return ret;
 }
+
+uint64_t
+qla2x00_count_set_bits(uint32_t num)
+{
+	/* Brian Kernighan's algorithm */
+	uint64_t count = 0;
+
+	while (num) {
+		num &= (num - 1);
+		count++;
+	}
+	return count;
+}
+
+uint64_t
+qla2x00_get_num_tgts(scsi_qla_host_t *vha)
+{
+	fc_port_t *f, *tf;
+	uint64_t count = 0;
+
+	f = tf = NULL;
+
+	list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
+		if (f->port_type != FCT_TARGET)
+			continue;
+		count++;
+	}
+	return count;
+}
+
+int qla2xxx_reset_stats(struct Scsi_Host *host, uint32_t flags)
+{
+	scsi_qla_host_t *vha = shost_priv(host);
+	fc_port_t *fcport = NULL;
+	unsigned long int_flags;
+
+	if (flags & QLA2XX_HW_ERROR)
+		vha->hw_err_cnt = 0;
+	if (flags & QLA2XX_SHT_LNK_DWN)
+		vha->short_link_down_cnt = 0;
+	if (flags & QLA2XX_INT_ERR)
+		vha->interface_err_cnt = 0;
+	if (flags & QLA2XX_CMD_TIMEOUT)
+		vha->cmd_timeout_cnt = 0;
+	if (flags & QLA2XX_RESET_CMD_ERR)
+		vha->reset_cmd_err_cnt = 0;
+	if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
+		spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			fcport->tgt_short_link_down_cnt = 0;
+			fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
+		}
+		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
+	}
+	vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
+	return 0;
+}
+
+int qla2xxx_start_stats(struct Scsi_Host *host, uint32_t flags)
+{
+	return qla2xxx_reset_stats(host, flags);
+}
+
+int qla2xxx_stop_stats(struct Scsi_Host *host, uint32_t flags)
+{
+	return qla2xxx_reset_stats(host, flags);
+}
+
+int qla2xxx_get_ini_stats(struct Scsi_Host *host, uint32_t flags,
+    void *data, uint64_t size)
+{
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct ql_vnd_host_stats_resp *resp = (struct ql_vnd_host_stats_resp *)data;
+	struct ql_vnd_stats *rsp_data = &resp->stats;
+	uint64_t ini_entry_count = 0;
+	uint64_t i = 0;
+	uint64_t entry_count = 0;
+	uint64_t num_tgt = 0;
+	uint32_t tmp_stat_type = 0;
+	fc_port_t *fcport = NULL;
+	unsigned long int_flags;
+
+	/* Copy stat type to work on it */
+	tmp_stat_type = flags;
+
+	if (tmp_stat_type & BIT_17) {
+		num_tgt = qla2x00_get_num_tgts(vha);
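+		/*
+		 * Sizing sketch (reviewer illustration; assumes
+		 * QLA2XX_TGT_SHT_LNK_DOWN is BIT_17, as the mask
+		 * manipulation here suggests): every other set bit in
+		 * tmp_stat_type contributes exactly one entry below, while
+		 * BIT_17 contributes one entry per discovered target. E.g.
+		 * QLA2XX_HW_ERROR | QLA2XX_INT_ERR | BIT_17 with four
+		 * targets yields entry_count = 2 + 4 = 6.
+		 */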
+		/* unset BIT_17 */
+		tmp_stat_type &= ~(1 << 17);
+	}
+	ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);
+
+	entry_count = ini_entry_count + num_tgt;
+
+	rsp_data->entry_count = entry_count;
+
+	i = 0;
+	if (flags & QLA2XX_HW_ERROR) {
+		rsp_data->entry[i].stat_type = QLA2XX_HW_ERROR;
+		rsp_data->entry[i].tgt_num = 0x0;
+		rsp_data->entry[i].cnt = vha->hw_err_cnt;
+		i++;
+	}
+	if (flags & QLA2XX_SHT_LNK_DWN) {
+		rsp_data->entry[i].stat_type = QLA2XX_SHT_LNK_DWN;
+		rsp_data->entry[i].tgt_num = 0x0;
+		rsp_data->entry[i].cnt = vha->short_link_down_cnt;
+		i++;
+	}
+	if (flags & QLA2XX_INT_ERR) {
+		rsp_data->entry[i].stat_type = QLA2XX_INT_ERR;
+		rsp_data->entry[i].tgt_num = 0x0;
+		rsp_data->entry[i].cnt = vha->interface_err_cnt;
+		i++;
+	}
+	if (flags & QLA2XX_CMD_TIMEOUT) {
+		rsp_data->entry[i].stat_type = QLA2XX_CMD_TIMEOUT;
+		rsp_data->entry[i].tgt_num = 0x0;
+		rsp_data->entry[i].cnt = vha->cmd_timeout_cnt;
+		i++;
+	}
+	if (flags & QLA2XX_RESET_CMD_ERR) {
+		rsp_data->entry[i].stat_type = QLA2XX_RESET_CMD_ERR;
+		rsp_data->entry[i].tgt_num = 0x0;
+		rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt;
+		i++;
+	}
+
+	/* i will continue from previous loop, as target
+	 * entries are after initiator
+	 */
+	if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
+		spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (fcport->port_type != FCT_TARGET)
+				continue;
+			if (!fcport->rport)
+				continue;
+			rsp_data->entry[i].stat_type = QLA2XX_TGT_SHT_LNK_DOWN;
+			rsp_data->entry[i].tgt_num = fcport->rport->number;
+			rsp_data->entry[i].cnt = fcport->tgt_short_link_down_cnt;
+			i++;
+		}
+		spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
+	}
+	resp->status = EXT_STATUS_OK;
+
+	return 0;
+}
+
+int qla2xxx_get_tgt_stats(struct Scsi_Host *host, uint32_t flags,
+    struct fc_rport *rport, void *data, uint64_t size)
+{
+	struct ql_vnd_tgt_stats_resp *tgt_data = data;
+	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+
+	tgt_data->status = 0;
+	tgt_data->stats.entry_count = 1;
+	tgt_data->stats.entry[0].stat_type = flags;
+	tgt_data->stats.entry[0].tgt_num = rport->number;
+	tgt_data->stats.entry[0].cnt = fcport->tgt_short_link_down_cnt;
+
+	return 0;
+}
+
+int qla2xxx_disable_port(struct Scsi_Host *host)
+{
+	scsi_qla_host_t *vha = shost_priv(host);
+
+	vha->hw->flags.port_isolated = 1;
+
+	if (qla2x00_isp_reg_stat(vha->hw)) {
+		ql_log(ql_log_info, vha, 0x9006,
+		    "PCI/Register disconnect, exiting.\n");
+		qla_pci_set_eeh_busy(vha);
+		return FAILED;
+	}
+	if (qla2x00_chip_is_down(vha))
+		return 0;
+
+	if (vha->flags.online) {
+		qla2x00_abort_isp_cleanup(vha);
+		qla2x00_wait_for_sess_deletion(vha);
+	}
+
+	return 0;
+}
+
+int qla2xxx_enable_port(struct Scsi_Host *host)
+{
+	scsi_qla_host_t *vha = shost_priv(host);
+
+	if (qla2x00_isp_reg_stat(vha->hw)) {
+		ql_log(ql_log_info, vha, 0x9001,
+		    "PCI/Register disconnect, exiting.\n");
+		qla_pci_set_eeh_busy(vha);
+		return FAILED;
+	}
+
+	vha->hw->flags.port_isolated = 0;
+	/* Set the flag to 1, so that isp_abort can proceed */
+	vha->flags.online = 1;
+	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+	qla2xxx_wake_dpc(vha);
+
+	return 0;
+}
+#ifdef QLA2XXX_LATENCY_MEASURE
+
+#define QLA_NANO 1000000000L
+uint64_t ts_to_ns(struct timespec64 *ts)
+{
+	uint64_t ns;
+
+	ns = ts->tv_sec;
+	ns *= QLA_NANO;	/* sec to nanosec */
+	ns += ts->tv_nsec;
+
+	return ns;
+}
+
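+/*
+ * Usage sketch for ts_to_ns() (reviewer illustration, not part of the
+ * vendor change):
+ *
+ *	struct timespec64 d = { .tv_sec = 1, .tv_nsec = 500000000 };
+ *	uint64_t ns = ts_to_ns(&d);	(ns == 1500000000)
+ *
+ * The result is unsigned, so callers are expected to pass only the
+ * non-negative deltas produced by timespec64_sub() below.
+ */
+void qla_get_scsi_cmd_latency(srb_t *sp)
+{
+	struct scsi_qla_host *vha = sp->vha;
+	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+	struct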
timespec64 ts1, ts2, ts3, ts_tot; + uint8_t *c = cmd->cmnd; + uint64_t ns_tot, ns1, ns2, ns3; + int op; + int type; + + if (c) + op = c[0]; + else + return; + + switch (op) { + case READ_6: + type = read6; + break; + case WRITE_6: + type = write6; + break; + case READ_10: + type = read10; + break; + case WRITE_10: + type = write10; + break; + case READ_12: + type = read12; + break; + case WRITE_12: + type = write12; + break; + case READ_16: + type = read16; + break; + case WRITE_16: + type = write16; + break; + default: + return; + } + + ts_tot = timespec64_sub(sp->cmd_to_ml, sp->q_cmd); + ts1 = timespec64_sub(sp->cmd_to_req_q, sp->q_cmd); + ts2 = timespec64_sub(sp->cmd_from_rsp_q, sp->cmd_to_req_q); + ts3 = timespec64_sub(sp->cmd_to_ml, sp->cmd_from_rsp_q); + + ns_tot = ts_to_ns(&ts_tot); + ns1 = ts_to_ns(&ts1); + ns2 = ts_to_ns(&ts2); + ns3 = ts_to_ns(&ts3); + + /* weed out outliers, use only entries with time < ~4s */ + if (ns_tot >= 0xffffffff || ns1 >= 0xffffffff || + ns2 >= 0xffffffff || ns3 >= 0xffffffff) + vha->qla_stats.latency_outliers++; + ns_tot = ns_tot < 0xffffffff ? ns_tot : 0; + ns1 = ns1 < 0xffffffff ? ns1 : 0; + ns2 = ns2 < 0xffffffff ? ns2 : 0; + ns3 = ns3 < 0xffffffff ? ns3 : 0; + + vha->latency_counters.qla_tot_cmds[type] += 1; + vha->latency_counters.qla_time_qcmd_to_ml[type] += ns_tot; + vha->latency_counters.qla_time_qcmd_to_req_q[type] += ns1; + vha->latency_counters.qla_time_req_q_to_rsp_q[type] += ns2; + vha->latency_counters.qla_time_rsq_q_to_ml[type] += ns3; + +} + +void qla_get_nvme_cmd_latency(srb_t *sp) +{ + struct scsi_qla_host *vha = sp->vha; + struct timespec64 ts1, ts2, ts3, ts_tot; + uint64_t ns_tot, ns1, ns2, ns3; + int type; + struct srb_iocb *nvme = &sp->u.iocb_cmd; + struct nvmefc_fcp_req *fd = nvme->u.nvme.desc; + + /* For NVMe use read16 and write16 for READ and WRITE */ + if (fd->io_dir == NVMEFC_FCP_READ) + type = ql_nvme_read; + else if (fd->io_dir == NVMEFC_FCP_WRITE) + type = ql_nvme_write; + else + return; + + ts_tot = timespec64_sub(sp->cmd_to_ml, sp->q_cmd); + ts1 = timespec64_sub(sp->cmd_to_req_q, sp->q_cmd); + ts2 = timespec64_sub(sp->cmd_from_rsp_q, sp->cmd_to_req_q); + ts3 = timespec64_sub(sp->cmd_to_ml, sp->cmd_from_rsp_q); + + ns_tot = ts_to_ns(&ts_tot); + ns1 = ts_to_ns(&ts1); + ns2 = ts_to_ns(&ts2); + ns3 = ts_to_ns(&ts3); + + /* weed out outliers, use only entries with time < ~4s */ + if (ns_tot >= 0xffffffff || ns1 >= 0xffffffff || + ns2 >= 0xffffffff || ns3 >= 0xffffffff) + vha->qla_stats.latency_outliers++; + ns_tot = ns_tot < 0xffffffff ? ns_tot : 0; + ns1 = ns1 < 0xffffffff ? ns1 : 0; + ns2 = ns2 < 0xffffffff ? ns2 : 0; + ns3 = ns3 < 0xffffffff ? ns3 : 0; + + vha->latency_counters.qla_nvme_tot_cmds[type] += 1; + vha->latency_counters.qla_nvme_qcmd_to_ml[type] += ns_tot; + vha->latency_counters.qla_nvme_qcmd_to_req_q[type] += ns1; + vha->latency_counters.qla_nvme_req_q_to_rsp_q[type] += ns2; + vha->latency_counters.qla_nvme_rsp_q_to_ml[type] += ns3; + +} + +#endif diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index e80e41b6c9e1d..7bb39eb60c6f6 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_target.h" @@ -10,7 +11,7 @@ * Continuation Type 1 IOCBs to allocate. 
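+ * (Worked example, assuming the usual sizing of one DSD in the command
+ * IOCB itself and five DSDs per Continuation Type 1 IOCB: 11 DSDs need
+ * 1 + (11 - 1) / 5 = 3 IOCB entries.)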
 *
 * @vha: HA context
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
@@ -28,6 +29,46 @@ qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
 	return iocbs;
 }
 
+static inline void
+qla2xxx_atomic_sub(atomic_t *v, int new)
+{
+	int old;
+
+	do
+		old = atomic_read(v);
+	while (old != atomic_cmpxchg(v, old, old - new));
+}
+
+static inline void
+qla2xxx_atomic_add(atomic_t *v, int new)
+{
+	int old;
+
+	do
+		old = atomic_read(v);
+	while (old != atomic_cmpxchg(v, old, old + new));
+}
+
+static inline void
+qla2xxx_atomic64_sub(atomic64_t *v, int new)
+{
+	uint64_t old;
+
+	do
+		old = atomic64_read(v);
+	while (old != atomic64_cmpxchg(v, old, old - new));
+}
+
+static inline void
+qla2xxx_atomic64_add(atomic64_t *v, int new)
+{
+	uint64_t old;
+
+	do
+		old = atomic64_read(v);
+	while (old != atomic64_cmpxchg(v, old, old + new));
+}
+
 /*
  * qla2x00_debounce_register
  *      Debounce register.
@@ -39,16 +80,16 @@ qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
  *      register value.
  */
 static __inline__ uint16_t
-qla2x00_debounce_register(volatile __le16 __iomem *addr)
+qla2x00_debounce_register(volatile uint16_t __iomem *addr)
 {
 	volatile uint16_t first;
 	volatile uint16_t second;
 
 	do {
-		first = rd_reg_word(addr);
+		first = RD_REG_WORD(addr);
 		barrier();
 		cpu_relax();
-		second = rd_reg_word(addr);
+		second = RD_REG_WORD(addr);
 	} while (first != second);
 
 	return (first);
@@ -109,11 +150,13 @@ qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
 {
 	int old_val;
 	uint8_t shiftbits, mask;
+	uint8_t port_dstate_str_sz;
 
 	/* This will have to change when the max no. of states > 16 */
 	shiftbits = 4;
 	mask = (1 << shiftbits) - 1;
 
+	port_dstate_str_sz = sizeof(port_dstate_str) / sizeof(char *);
 	fcport->disc_state = state;
 	while (1) {
 		old_val = atomic_read(&fcport->shadow_disc_state);
@@ -121,7 +164,8 @@ qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state)
 		    old_val, (old_val << shiftbits) | state)) {
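+			/*
+			 * Encoding sketch (inferred from the shift/mask
+			 * arithmetic above, reviewer illustration):
+			 * shadow_disc_state packs a 4-bit-per-entry history
+			 * of disc_state values, so after the transitions
+			 * 0x1 -> 0x3 -> 0x5 it reads 0x135; at the 0x5
+			 * transition old_val is 0x13 and (old_val & mask)
+			 * recovers the previous state 0x3 for the message
+			 * below.
+			 */
 			ql_dbg(ql_dbg_disc, fcport->vha, 0x2134,
 			    "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n",
-			    fcport->port_name, port_dstate_str[old_val & mask],
+			    fcport->port_name, ((old_val & mask) < port_dstate_str_sz) ?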
+ port_dstate_str[old_val & mask] : "Unknown", port_dstate_str[state], fcport->d_id.b24); return; } @@ -184,6 +228,9 @@ static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha, sp->vha = vha; sp->qpair = qpair; sp->cmd_type = TYPE_SRB; + sp->start_jiffies = jiffies; + /* ref : INIT - normal flow */ + kref_init(&sp->cmd_kref); INIT_LIST_HEAD(&sp->elem); } @@ -199,6 +246,10 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair, return NULL; sp = mempool_alloc(qpair->srb_mempool, flag); + /* Avoid trace for calls from qla2x00_get_sp */ + if (vha->hw->base_qpair != qpair) + ql_srb_trace_ext(ql_dbg_io, vha, fcport, + "sp=%px", sp); if (sp) qla2xxx_init_sp(sp, vha, qpair, fcport); else @@ -206,15 +257,15 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair, return sp; } -void qla2xxx_rel_done_warning(srb_t *sp, int res); -void qla2xxx_rel_free_warning(srb_t *sp); - static inline void qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp) { + /* Avoid trace for calls from qla2x00_get_sp */ + if (qpair->vha->hw->base_qpair != qpair) + ql_srb_trace_ext(ql_dbg_io, sp->vha, sp->fcport, + "sp=%px type=%d", sp, sp->type); sp->qpair = NULL; - sp->done = qla2xxx_rel_done_warning; - sp->free = qla2xxx_rel_free_warning; + sp->done_jiffies = jiffies; /* for crash debugging */ mempool_free(sp, qpair->srb_mempool); QLA_QPAIR_MARK_NOT_BUSY(qpair); } @@ -232,6 +283,7 @@ qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag) qpair = vha->hw->base_qpair; sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag); + ql_srb_trace_ext(ql_dbg_disc, vha, fcport, "sp=%px", sp); if (!sp) goto done; @@ -245,6 +297,13 @@ qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag) static inline void qla2x00_rel_sp(srb_t *sp) { + /* + * Some paths (like EDC/RDF) uses temporary fcport that would be + * freed before this call, so do not use sp->fcport here blindly. + */ + ql_srb_trace_ext(ql_dbg_disc, sp->vha, NULL, + "sp=%px type=%d", sp, sp->type); + QLA_VHA_MARK_NOT_BUSY(sp->vha); qla2xxx_rel_qpair_sp(sp->qpair, sp); } @@ -270,41 +329,11 @@ qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status) } static inline void -qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t sts_qual) +qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay) { - u8 scope; - u16 qual; -#define SQ_SCOPE_MASK 0xc000 /* SAM-6 rev5 5.3.2 */ -#define SQ_SCOPE_SHIFT 14 -#define SQ_QUAL_MASK 0x3fff - -#define SQ_MAX_WAIT_SEC 60 /* Max I/O hold off time in seconds. */ -#define SQ_MAX_WAIT_TIME (SQ_MAX_WAIT_SEC * 10) /* in 100ms. */ - - if (!sts_qual) /* Common case. */ - return; - - scope = (sts_qual & SQ_SCOPE_MASK) >> SQ_SCOPE_SHIFT; - /* Handle only scope 1 or 2, which is for I-T nexus. */ - if (scope != 1 && scope != 2) - return; - - /* Skip processing, if retry delay timer is already in effect. */ - if (fcport->retry_delay_timestamp && - time_before(jiffies, fcport->retry_delay_timestamp)) - return; - - qual = sts_qual & SQ_QUAL_MASK; - if (qual < 1 || qual > 0x3fef) - return; - qual = min(qual, (u16)SQ_MAX_WAIT_TIME); - - /* qual is expressed in 100ms increments. 
*/ - fcport->retry_delay_timestamp = jiffies + (qual * HZ / 10); - - ql_log(ql_log_warn, fcport->vha, 0x5101, - "%8phC: I/O throttling requested (status qualifier = %04xh), holding off I/Os for %ums.\n", - fcport->port_name, sts_qual, qual * 100); + if (retry_delay) + fcport->retry_delay_timestamp = jiffies + + (retry_delay * HZ / 10); } static inline bool @@ -363,7 +392,7 @@ qla_83xx_start_iocbs(struct qla_qpair *qpair) } else req->ring_ptr++; - wrt_reg_dword(req->req_q_in, req->ring_index); + WRT_REG_DWORD(req->req_q_in, req->ring_index); } static inline int @@ -380,55 +409,192 @@ qla2xxx_get_fc4_priority(struct scsi_qla_host *vha) enum { RESOURCE_NONE, - RESOURCE_INI, + RESOURCE_IOCB = BIT_0, + RESOURCE_EXCH = BIT_1, /* exchange */ + RESOURCE_FORCE = BIT_2, }; static inline int -qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores) +qla_get_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores) { u16 iocbs_used, i; + u16 exch_used; struct qla_hw_data *ha = qp->vha->hw; if (!ql2xenforce_iocb_limit) { iores->res_type = RESOURCE_NONE; return 0; } + if (iores->res_type & RESOURCE_FORCE) + goto force; - if ((iores->iocb_cnt + qp->fwres.iocbs_used) < qp->fwres.iocbs_qp_limit) { - qp->fwres.iocbs_used += iores->iocb_cnt; - return 0; - } else { + if ((iores->iocb_cnt + qp->fwres.iocbs_used) >= qp->fwres.iocbs_qp_limit) { /* no need to acquire qpair lock. It's just rough calculation */ iocbs_used = ha->base_qpair->fwres.iocbs_used; - for (i = 0; i < ha->max_qpairs; i++) { - if (ha->queue_pair_map[i]) + for (i=0; i < ha->max_qpairs; i++) { + if (ha->queue_pair_map[i]) { iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used; + } } - if ((iores->iocb_cnt + iocbs_used) < qp->fwres.iocbs_limit) { - qp->fwres.iocbs_used += iores->iocb_cnt; - return 0; - } else { + if ((iores->iocb_cnt + iocbs_used) >= qp->fwres.iocbs_limit) { + iores->res_type = RESOURCE_NONE; + return -ENOSPC; + } + } + + if (iores->res_type & RESOURCE_EXCH) { + exch_used = ha->base_qpair->fwres.exch_used; + for (i=0; i < ha->max_qpairs; i++) { + if (ha->queue_pair_map[i]) { + exch_used += ha->queue_pair_map[i]->fwres.exch_used; + } + } + + if ((exch_used + iores->exch_cnt) >= qp->fwres.exch_limit) { iores->res_type = RESOURCE_NONE; return -ENOSPC; } } +force: + qp->fwres.iocbs_used += iores->iocb_cnt; + qp->fwres.exch_used += iores->exch_cnt; + return 0; } static inline void -qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores) +qla_put_fw_resources(struct qla_qpair *qp, struct iocb_resource *iores) { - switch (iores->res_type) { - case RESOURCE_NONE: - break; - default: + if (iores->res_type & RESOURCE_IOCB) { if (qp->fwres.iocbs_used >= iores->iocb_cnt) { qp->fwres.iocbs_used -= iores->iocb_cnt; } else { // should not happen qp->fwres.iocbs_used = 0; } - break; + } + + if (iores->res_type & RESOURCE_EXCH) { + if (qp->fwres.exch_used >= iores->exch_cnt) { + qp->fwres.exch_used -= iores->exch_cnt; + } else { + // should not happen + qp->fwres.exch_used = 0; + } } iores->res_type = RESOURCE_NONE; } + +#define ISP_REG_DISCONNECT 0xffffffffU +/************************************************************************** +* qla2x00_isp_reg_stat +* +* Description: +* Read the host status register of ISP before aborting the command. +* +* Input: +* ha = pointer to host adapter structure. +* +* +* Returns: +* Either true or false. +* +* Note: Return true if there is register disconnect. 
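+*
+* (Background: an MMIO read from a surprise-removed PCIe function
+* completes with all ones, which is why ISP_REG_DISCONNECT is the
+* all-ones pattern 0xffffffffU.)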
+**************************************************************************/ +static inline +uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) +{ + struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; + + if (IS_P3P_TYPE(ha)) + return ((RD_REG_DWORD(®82->host_int)) == ISP_REG_DISCONNECT); + else + return ((RD_REG_DWORD(®->host_status)) == + ISP_REG_DISCONNECT); +} + +static inline +bool qla_pci_disconnected(struct scsi_qla_host *vha, + struct device_reg_24xx __iomem *reg) +{ + uint32_t stat; + bool ret = false; + + stat = RD_REG_DWORD(®->host_status); + if (stat == 0xffffffff) { + ql_log(ql_log_info, vha, 0x8041, + "detected PCI disconnect.\n"); + qla_schedule_eeh_work(vha); + ret = true; + } + return ret; +} + +static inline bool +fcport_is_smaller(fc_port_t *fcport) +{ + if (wwn_to_u64(fcport->port_name) < + wwn_to_u64(fcport->vha->port_name)) + return true; + else + return false; +} + +static inline bool +fcport_is_bigger(fc_port_t *fcport) +{ + return !fcport_is_smaller(fcport); +} + +static inline struct qla_qpair * +qla_mapq_nvme_select_qpair(struct qla_hw_data *ha, struct qla_qpair *qpair) +{ + int cpuid = smp_processor_id(); + if ((qpair->cpuid != cpuid) && + ha->qp_cpu_map[cpuid]) { + qpair = ha->qp_cpu_map[cpuid]; + } + return qpair; +} + +static inline void +qla_mapq_init_qp_cpu_map(struct qla_hw_data *ha, + struct qla_msix_entry *msix, struct qla_qpair *qpair) +{ + const struct cpumask *mask; + unsigned int cpu; + if (!ha->qp_cpu_map) { + return; + } + mask = pci_irq_get_affinity(ha->pdev, msix->vector_base0); + qpair->cpuid = cpumask_first(mask); + for_each_cpu(cpu, mask) { + ha->qp_cpu_map[cpu] = qpair; + } + msix->cpuid = qpair->cpuid; +} + +static inline void +qla_mapq_free_qp_cpu_map(struct qla_hw_data *ha) +{ + if (ha->qp_cpu_map) { + kfree(ha->qp_cpu_map); + ha->qp_cpu_map = NULL; + } +} + +static inline int qla_mapq_alloc_qp_cpu_map(struct qla_hw_data *ha) +{ + scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); + if (!ha->qp_cpu_map) { + ha->qp_cpu_map = kcalloc(NR_CPUS, sizeof(struct qla_qpair *), + GFP_KERNEL); + if (!ha->qp_cpu_map) { + ql_log(ql_log_fatal, vha, 0x0180, + "Unable to allocate memory for qp_cpu_map ptrs.\n"); + return -1; + } + } + return 0; +} diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index e54cc2a761dd4..892a465b9f8f1 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -1,7 +1,8 @@ -// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" #include "qla_target.h" @@ -39,11 +40,12 @@ qla2x00_get_cmd_direction(srb_t *sp) return (cflags); } + /** * qla2x00_calc_iocbs_32() - Determine number of Command Type 2 and * Continuation Type 0 IOCBs to allocate. * - * @dsds: number of data segment descriptors needed + * @dsds: number of data segment decriptors needed * * Returns the number of IOCB entries needed to store @dsds. */ @@ -65,7 +67,7 @@ qla2x00_calc_iocbs_32(uint16_t dsds) * qla2x00_calc_iocbs_64() - Determine number of Command Type 3 and * Continuation Type 1 IOCBs to allocate. * - * @dsds: number of data segment descriptors needed + * @dsds: number of data segment decriptors needed * * Returns the number of IOCB entries needed to store @dsds. 
*/ @@ -118,7 +120,7 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha) * * Returns a pointer to the continuation type 1 IOCB packet. */ -static inline cont_a64_entry_t * +cont_a64_entry_t * qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req) { cont_a64_entry_t *cont_pkt; @@ -375,7 +377,7 @@ qla2x00_start_scsi(srb_t *sp) /* Calculate the number of request entries needed. */ req_cnt = ha->isp_ops->calc_req_entries(tot_dsds); if (req->cnt < (req_cnt + 2)) { - cnt = rd_reg_word_relaxed(ISP_REQ_Q_OUT(ha, reg)); + cnt = RD_REG_WORD_RELAXED(ISP_REQ_Q_OUT(ha, reg)); if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -427,8 +429,8 @@ qla2x00_start_scsi(srb_t *sp) sp->flags |= SRB_DMA_VALID; /* Set chip new ring index. */ - wrt_reg_word(ISP_REQ_Q_IN(ha, reg), req->ring_index); - rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ + WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index); + RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, reg)); /* PCI Posting. */ /* Manage unprocessed RIO/ZIO commands in response queue. */ if (vha->flags.process_response_queue && @@ -471,21 +473,21 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) /* Set chip new ring index. */ if (ha->mqenable || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { - wrt_reg_dword(req->req_q_in, req->ring_index); + WRT_REG_DWORD(req->req_q_in, req->ring_index); } else if (IS_QLA83XX(ha)) { - wrt_reg_dword(req->req_q_in, req->ring_index); - rd_reg_dword_relaxed(&ha->iobase->isp24.hccr); + WRT_REG_DWORD(req->req_q_in, req->ring_index); + RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr); } else if (IS_QLAFX00(ha)) { - wrt_reg_dword(®->ispfx00.req_q_in, req->ring_index); - rd_reg_dword_relaxed(®->ispfx00.req_q_in); + WRT_REG_DWORD(®->ispfx00.req_q_in, req->ring_index); + RD_REG_DWORD_RELAXED(®->ispfx00.req_q_in); QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code); } else if (IS_FWI2_CAPABLE(ha)) { - wrt_reg_dword(®->isp24.req_q_in, req->ring_index); - rd_reg_dword_relaxed(®->isp24.req_q_in); + WRT_REG_DWORD(®->isp24.req_q_in, req->ring_index); + RD_REG_DWORD_RELAXED(®->isp24.req_q_in); } else { - wrt_reg_word(ISP_REQ_Q_IN(ha, ®->isp), + WRT_REG_WORD(ISP_REQ_Q_IN(ha, ®->isp), req->ring_index); - rd_reg_word_relaxed(ISP_REQ_Q_IN(ha, ®->isp)); + RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, ®->isp)); } } } @@ -520,21 +522,25 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair, return (QLA_FUNCTION_FAILED); } + mrk24 = (struct mrk_entry_24xx *) mrk; + mrk->entry_type = MARKER_TYPE; mrk->modifier = type; if (type != MK_SYNC_ALL) { if (IS_FWI2_CAPABLE(ha)) { - mrk24 = (struct mrk_entry_24xx *) mrk; mrk24->nport_handle = cpu_to_le16(loop_id); int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun); host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); mrk24->vp_index = vha->vp_idx; - mrk24->handle = make_handle(req->id, mrk24->handle); } else { SET_TARGET_ID(ha, mrk->target, loop_id); mrk->lun = cpu_to_le16((uint16_t)lun); } } + + if (IS_FWI2_CAPABLE(ha)) + mrk24->handle = QLA_SKIP_HANDLE; + wmb(); qla2x00_start_iocbs(vha, req); @@ -621,7 +627,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, } cur_seg = scsi_sglist(cmd); - ctx = sp->u.scmd.ct6_ctx; + ctx = &sp->u.scmd.ct6_ctx; while (tot_dsds) { avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ? 
@@ -658,10 +664,12 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
 	}
 
 	/* Null termination */
-	cur_dsd->address = 0;
-	cur_dsd->length = 0;
-	cur_dsd++;
-	cmd_pkt->control_flags |= cpu_to_le16(CF_DATA_SEG_DESCR_ENABLE);
+	if (cur_dsd) {
+		cur_dsd->address = 0;
+		cur_dsd->length = 0;
+		cur_dsd++;
+	}
+	cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
 	return 0;
 }
 
@@ -669,7 +677,7 @@
 * qla24xx_calc_dsd_lists() - Determine number of DSD list required
 * for Command Type 6.
 *
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of dsd list needed to store @dsds.
 */
@@ -756,8 +764,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 }
 
 struct fw_dif_context {
-	__le32 ref_tag;
-	__le16 app_tag;
+	uint32_t ref_tag;
+	uint16_t app_tag;
 	uint8_t ref_tag_mask[4];	/* Validation/Replacement Mask */
 	uint8_t app_tag_mask[2];	/* Validation/Replacement Mask */
 };
@@ -1101,7 +1109,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
 		difctx = sp->u.scmd.crc_ctx;
 		direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
 		ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
-		  "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
+		  "%s: scsi_cmnd: %px, crc_ctx: %px, sp: %px\n",
 			__func__, cmd, difctx, sp);
 	} else if (tc) {
 		vha = tc->vha;
@@ -1390,7 +1398,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
     uint16_t tot_dsds, uint16_t tot_prot_dsds, uint16_t fw_prot_opts)
 {
 	struct dsd64		*cur_dsd;
-	__be32			*fcp_dl;
+	uint32_t		*fcp_dl;
 	scsi_qla_host_t		*vha;
 	struct scsi_cmnd	*cmd;
 	uint32_t		total_bytes = 0;
@@ -1457,7 +1467,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
 	    &crc_ctx_pkt->ref_tag, tot_prot_dsds);
 
 	put_unaligned_le64(crc_ctx_dma, &cmd_pkt->crc_context_address);
-	cmd_pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW);
+	cmd_pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
 
 	/* Determine SCSI command length -- align to 4 byte boundary */
 	if (cmd->cmd_len > 16) {
@@ -1486,7 +1496,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
 	put_unaligned_le64(crc_ctx_dma + CRC_CONTEXT_FCPCMND_OFF,
 	    &cmd_pkt->fcp_cmnd_dseg_address);
 	fcp_cmnd->task_management = 0;
-	fcp_cmnd->task_attribute = TSK_SIMPLE;
+	fcp_cmnd->task_attribute = qla_scsi_get_task_attr(cmd);
 
 	cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
 
@@ -1546,7 +1556,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
 	crc_ctx_pkt->guard_seed = cpu_to_le16(0);
 	/* Fibre channel byte count */
 	cmd_pkt->byte_count = cpu_to_le32(total_bytes);
-	fcp_dl = (__be32 *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
+	fcp_dl = (uint32_t *)(crc_ctx_pkt->fcp_cmnd.cdb + 16 +
 	    additional_fcpcdb_len);
 	*fcp_dl = htonl(total_bytes);
 
@@ -1600,12 +1610,17 @@ qla24xx_start_scsi(srb_t *sp)
 	uint16_t	req_cnt;
 	uint16_t	tot_dsds;
 	struct req_que *req = NULL;
+	struct rsp_que *rsp;
 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 	struct scsi_qla_host *vha = sp->vha;
 	struct qla_hw_data *ha = vha->hw;
 
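+	/*
+	 * Routing note (reviewer illustration): frames for FC-SP
+	 * (EDIF) sessions must be built by the encryption-aware path,
+	 * so such commands are diverted to qla28xx_start_scsi_edif()
+	 * instead of the plain Command Type 7 path below.
+	 */
+	if (sp->fcport->edif.enable && (sp->fcport->flags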
& FCF_FCSP_DEVICE)) + return qla28xx_start_scsi_edif(sp); + /* Setup device pointers. */ req = vha->req; + rsp = req->rsp; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; @@ -1637,14 +1652,21 @@ qla24xx_start_scsi(srb_t *sp) tot_dsds = nseg; req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); - sp->iores.res_type = RESOURCE_INI; + sp->iores.res_type = RESOURCE_IOCB|RESOURCE_EXCH; + sp->iores.exch_cnt = 1; sp->iores.iocb_cnt = req_cnt; - if (qla_get_iocbs(sp->qpair, &sp->iores)) + if (qla_get_fw_resources(sp->qpair, &sp->iores)) goto queuing_error; if (req->cnt < (req_cnt + 2)) { - cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : - rd_reg_dword_relaxed(req->req_q_out); + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = RD_REG_DWORD_RELAXED(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } + if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -1662,7 +1684,7 @@ qla24xx_start_scsi(srb_t *sp) req->cnt -= req_cnt; cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; - cmd_pkt->handle = make_handle(req->id, handle); + cmd_pkt->handle = MAKE_HANDLE(req->id, handle); /* Zero out remaining portion of packet. */ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ @@ -1680,7 +1702,7 @@ qla24xx_start_scsi(srb_t *sp) int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); - cmd_pkt->task = TSK_SIMPLE; + cmd_pkt->task = qla_scsi_get_task_attr(cmd); /* Load SCSI command packet. */ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); @@ -1702,19 +1724,29 @@ qla24xx_start_scsi(srb_t *sp) } else req->ring_ptr++; + sp->qpair->cmd_cnt++; sp->flags |= SRB_DMA_VALID; /* Set chip new ring index. */ - wrt_reg_dword(req->req_q_in, req->ring_index); + WRT_REG_DWORD(req->req_q_in, req->ring_index); + + /* Manage unprocessed RIO/ZIO commands in response queue. */ + if (vha->flags.process_response_queue && + rsp->ring_ptr->signature != RESPONSE_PROCESSED) + qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&ha->hardware_lock, flags); + +#ifdef QLA2XXX_LATENCY_MEASURE + ktime_get_real_ts64(&sp->cmd_to_req_q); +#endif return QLA_SUCCESS; queuing_error: if (tot_dsds) scsi_dma_unmap(cmd); - qla_put_iocbs(sp->qpair, &sp->iores); + qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_FUNCTION_FAILED; @@ -1829,14 +1861,20 @@ qla24xx_dif_start_scsi(srb_t *sp) tot_prot_dsds = nseg; tot_dsds += nseg; - sp->iores.res_type = RESOURCE_INI; + sp->iores.res_type = RESOURCE_IOCB|RESOURCE_EXCH; + sp->iores.exch_cnt = 1; sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds); - if (qla_get_iocbs(sp->qpair, &sp->iores)) + if (qla_get_fw_resources(sp->qpair, &sp->iores)) goto queuing_error; if (req->cnt < (req_cnt + 2)) { - cnt = IS_SHADOW_REG_CAPABLE(ha) ? 
*req->out_ptr : - rd_reg_dword_relaxed(req->req_q_out); + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = RD_REG_DWORD_RELAXED(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -1857,7 +1895,7 @@ qla24xx_dif_start_scsi(srb_t *sp) /* Fill-in common area */ cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; - cmd_pkt->handle = make_handle(req->id, handle); + cmd_pkt->handle = MAKE_HANDLE(req->id, handle); clr_ptr = (uint32_t *)cmd_pkt + 2; memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); @@ -1894,11 +1932,21 @@ qla24xx_dif_start_scsi(srb_t *sp) } else req->ring_ptr++; + sp->qpair->cmd_cnt++; /* Set chip new ring index. */ - wrt_reg_dword(req->req_q_in, req->ring_index); + WRT_REG_DWORD(req->req_q_in, req->ring_index); + + /* Manage unprocessed RIO/ZIO commands in response queue. */ + if (vha->flags.process_response_queue && + rsp->ring_ptr->signature != RESPONSE_PROCESSED) + qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&ha->hardware_lock, flags); +#ifdef QLA2XXX_LATENCY_MEASURE + ktime_get_real_ts64(&sp->cmd_to_req_q); +#endif + return QLA_SUCCESS; queuing_error: @@ -1908,8 +1956,9 @@ qla24xx_dif_start_scsi(srb_t *sp) } /* Cleanup will be performed by the caller (queuecommand) */ - qla_put_iocbs(sp->qpair, &sp->iores); + qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(&ha->hardware_lock, flags); + return QLA_FUNCTION_FAILED; } @@ -1931,16 +1980,22 @@ qla2xxx_start_scsi_mq(srb_t *sp) uint16_t req_cnt; uint16_t tot_dsds; struct req_que *req = NULL; + struct rsp_que *rsp; struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct scsi_qla_host *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; struct qla_qpair *qpair = sp->qpair; + + if (sp->fcport->edif.enable && (sp->fcport->flags & FCF_FCSP_DEVICE)) + return qla28xx_start_scsi_edif(sp); + /* Acquire qpair specific lock */ spin_lock_irqsave(&qpair->qp_lock, flags); /* Setup qpair pointers */ req = qpair->req; + rsp = qpair->rsp; /* So we know we haven't pci_map'ed anything yet */ tot_dsds = 0; @@ -1971,14 +2026,21 @@ qla2xxx_start_scsi_mq(srb_t *sp) tot_dsds = nseg; req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); - sp->iores.res_type = RESOURCE_INI; + sp->iores.res_type = RESOURCE_IOCB|RESOURCE_EXCH; + sp->iores.exch_cnt = 1; sp->iores.iocb_cnt = req_cnt; - if (qla_get_iocbs(sp->qpair, &sp->iores)) + if (qla_get_fw_resources(sp->qpair, &sp->iores)) goto queuing_error; if (req->cnt < (req_cnt + 2)) { - cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : - rd_reg_dword_relaxed(req->req_q_out); + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = RD_REG_DWORD_RELAXED(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } + if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -1996,7 +2058,7 @@ qla2xxx_start_scsi_mq(srb_t *sp) req->cnt -= req_cnt; cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; - cmd_pkt->handle = make_handle(req->id, handle); + cmd_pkt->handle = MAKE_HANDLE(req->id, handle); /* Zero out remaining portion of packet. */ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ @@ -2014,7 +2076,7 @@ qla2xxx_start_scsi_mq(srb_t *sp) int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); - cmd_pkt->task = TSK_SIMPLE; + cmd_pkt->task = qla_scsi_get_task_attr(cmd); /* Load SCSI command packet. 
*/ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len); @@ -2036,19 +2098,28 @@ qla2xxx_start_scsi_mq(srb_t *sp) } else req->ring_ptr++; + sp->qpair->cmd_cnt++; sp->flags |= SRB_DMA_VALID; /* Set chip new ring index. */ - wrt_reg_dword(req->req_q_in, req->ring_index); + WRT_REG_DWORD(req->req_q_in, req->ring_index); + + /* Manage unprocessed RIO/ZIO commands in response queue. */ + if (vha->flags.process_response_queue && + rsp->ring_ptr->signature != RESPONSE_PROCESSED) + qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&qpair->qp_lock, flags); +#ifdef QLA2XXX_LATENCY_MEASURE + ktime_get_real_ts64(&sp->cmd_to_req_q); +#endif return QLA_SUCCESS; queuing_error: if (tot_dsds) scsi_dma_unmap(cmd); - qla_put_iocbs(sp->qpair, &sp->iores); + qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(&qpair->qp_lock, flags); return QLA_FUNCTION_FAILED; @@ -2178,14 +2249,21 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp) tot_prot_dsds = nseg; tot_dsds += nseg; - sp->iores.res_type = RESOURCE_INI; + sp->iores.res_type = RESOURCE_IOCB|RESOURCE_EXCH; + sp->iores.exch_cnt = 1; sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds); - if (qla_get_iocbs(sp->qpair, &sp->iores)) + if (qla_get_fw_resources(sp->qpair, &sp->iores)) goto queuing_error; if (req->cnt < (req_cnt + 2)) { - cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : - rd_reg_dword_relaxed(req->req_q_out); + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = RD_REG_DWORD_RELAXED(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } + if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -2206,7 +2284,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp) /* Fill-in common area */ cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; - cmd_pkt->handle = make_handle(req->id, handle); + cmd_pkt->handle = MAKE_HANDLE(req->id, handle); clr_ptr = (uint32_t *)cmd_pkt + 2; memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8); @@ -2241,8 +2319,9 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp) } else req->ring_ptr++; + sp->qpair->cmd_cnt++; /* Set chip new ring index. */ - wrt_reg_dword(req->req_q_in, req->ring_index); + WRT_REG_DWORD(req->req_q_in, req->ring_index); /* Manage unprocessed RIO/ZIO commands in response queue. 
*/ if (vha->flags.process_response_queue && @@ -2251,6 +2330,9 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp) spin_unlock_irqrestore(&qpair->qp_lock, flags); +#ifdef QLA2XXX_LATENCY_MEASURE + ktime_get_real_ts64(&sp->cmd_to_req_q); +#endif return QLA_SUCCESS; queuing_error: @@ -2260,8 +2342,9 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp) } /* Cleanup will be performed by the caller (queuecommand) */ - qla_put_iocbs(sp->qpair, &sp->iores); + qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(&qpair->qp_lock, flags); + return QLA_FUNCTION_FAILED; } @@ -2295,17 +2378,22 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) cnt = *req->out_ptr; else if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) - cnt = rd_reg_dword(&reg->isp25mq.req_q_out); + cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out); else if (IS_P3P_TYPE(ha)) - cnt = rd_reg_dword(reg->isp82.req_q_out); + cnt = RD_REG_DWORD(&reg->isp82.req_q_out); else if (IS_FWI2_CAPABLE(ha)) - cnt = rd_reg_dword(&reg->isp24.req_q_out); + cnt = RD_REG_DWORD(&reg->isp24.req_q_out); else if (IS_QLAFX00(ha)) - cnt = rd_reg_dword(&reg->ispfx00.req_q_out); + cnt = RD_REG_DWORD(&reg->ispfx00.req_q_out); else cnt = qla2x00_debounce_register( ISP_REQ_Q_OUT(ha, &reg->isp)); + if (!qpair->use_shadow_reg && cnt == ISP_REG16_DISCONNECT) { + qla_schedule_eeh_work(vha); + return NULL; + } + if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -2334,8 +2422,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) pkt = req->ring_ptr; memset(pkt, 0, REQUEST_ENTRY_SIZE); if (IS_QLAFX00(ha)) { - wrt_reg_byte((u8 __force __iomem *)&pkt->entry_count, req_cnt); - wrt_reg_dword((__le32 __force __iomem *)&pkt->handle, handle); + WRT_REG_BYTE((void __iomem *)&pkt->entry_count, req_cnt); + WRT_REG_WORD((void __iomem *)&pkt->handle, handle); } else { pkt->entry_count = req_cnt; pkt->handle = handle; } @@ -2373,17 +2461,16 @@ qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; logio->control_flags = cpu_to_le16(LCF_COMMAND_PRLI); if (lio->u.logio.flags & SRB_LOGIN_NVME_PRLI) { - logio->control_flags |= cpu_to_le16(LCF_NVME_PRLI); + logio->control_flags |= LCF_NVME_PRLI; if (sp->vha->flags.nvme_first_burst) - logio->io_parameter[0] = - cpu_to_le32(NVME_PRLI_SP_FIRST_BURST); + logio->io_parameter[0] = NVME_PRLI_SP_FIRST_BURST; if (sp->vha->flags.nvme2_enabled) { - /* Set service parameter BIT_8 for SLER support */ - logio->io_parameter[0] |= - cpu_to_le32(NVME_PRLI_SP_SLER); - /* Set service parameter BIT_9 for PI control support */ - logio->io_parameter[0] |= - cpu_to_le32(NVME_PRLI_SP_PI_CTRL); + /* Set service parameter BIT_7 for NVME CONF support */ + logio->io_parameter[0] |= NVME_PRLI_SP_CONF; + /* Set service parameter BIT_8 for SLER support */ + logio->io_parameter[0] |= NVME_PRLI_SP_SLER; + /* Set service parameter BIT_9 for PI control support */ + logio->io_parameter[0] |= NVME_PRLI_SP_PI_CTRL; } } @@ -2410,6 +2497,12 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI); if (lio->u.logio.flags & SRB_LOGIN_SKIP_PRLI) logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI); + if (lio->u.logio.flags & SRB_LOGIN_FCSP) { + logio->control_flags |= + cpu_to_le16(LCF_COMMON_FEAT | LCF_SKIP_PRLI); + logio->io_parameter[0] = + cpu_to_le32(LIO_COMM_FEAT_FCSP | LIO_COMM_FEAT_CIO); + } } logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->port_id[0] = sp->fcport->d_id.b.al_pa; @@ -2447,7 +2540,6 @@ qla24xx_logout_iocb(srb_t *sp, struct
logio_entry_24xx *logio) { u16 control_flags = LCF_COMMAND_LOGO; logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; - if (sp->fcport->explicit_logout) { control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT; } else { @@ -2522,14 +2614,14 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) scsi_qla_host_t *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; struct srb_iocb *iocb = &sp->u.iocb_cmd; - struct req_que *req = vha->req; + struct req_que *req = sp->qpair->req; flags = iocb->u.tmf.flags; lun = iocb->u.tmf.lun; tsk->entry_type = TSK_MGMT_IOCB_TYPE; tsk->entry_count = 1; - tsk->handle = make_handle(req->id, tsk->handle); + tsk->handle = MAKE_HANDLE(req->id, tsk->handle); tsk->nport_handle = cpu_to_le16(fcport->loop_id); tsk->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); tsk->control_flags = cpu_to_le32(flags); @@ -2538,18 +2630,45 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) tsk->port_id[2] = fcport->d_id.b.domain; tsk->vp_index = fcport->vha->vp_idx; - if (flags == TCF_LUN_RESET) { + if (flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET| + TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) { int_to_scsilun(lun, &tsk->lun); host_to_fcp_swap((uint8_t *)&tsk->lun, sizeof(tsk->lun)); } } -void qla2x00_init_timer(srb_t *sp, unsigned long tmo) +static void +qla2x00_async_done(struct srb *sp, int res) { - timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0); - sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; + if (del_timer(&sp->u.iocb_cmd.timer)) { + /* + * Successfully cancelled the timeout handler + * ref: TMR + */ + if (kref_put(&sp->cmd_kref, qla2x00_sp_release)) + return; + } + sp->async_done(sp, res); +} + +void +qla2x00_sp_release(struct kref *kref) +{ + struct srb *sp = container_of(kref, struct srb, cmd_kref); + sp->free(sp); +} + +void +qla2x00_init_async_sp(srb_t *sp, unsigned long tmo, + void (*done)(struct srb *sp, int res)) +{ + qla_timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0, sp); + sp->done = qla2x00_async_done; + sp->async_done = done; sp->free = qla2x00_sp_free; + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; + sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD) init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); sp->start_timer = 1; @@ -2636,7 +2755,9 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, return -ENOMEM; } - /* Alloc SRB structure */ + /* Alloc SRB structure + * ref: INIT + */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) { kfree(fcport); @@ -2657,18 +2778,19 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, sp->type = SRB_ELS_DCMD; sp->name = "ELS_DCMD"; sp->fcport = fcport; - elsio->timeout = qla2x00_els_dcmd_iocb_timeout; - qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT); - init_completion(&sp->u.iocb_cmd.u.els_logo.comp); - sp->done = qla2x00_els_dcmd_sp_done; + qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT, + qla2x00_els_dcmd_sp_done); sp->free = qla2x00_els_dcmd_sp_free; + sp->u.iocb_cmd.timeout = qla2x00_els_dcmd_iocb_timeout; + init_completion(&sp->u.iocb_cmd.u.els_logo.comp); elsio->u.els_logo.els_logo_pyld = dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE, &elsio->u.els_logo.els_logo_pyld_dma, GFP_KERNEL); if (!elsio->u.els_logo.els_logo_pyld) { - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return QLA_FUNCTION_FAILED; } @@ -2684,14 +2806,11 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld, sizeof(struct els_logo_payload)); - ql_dbg(ql_dbg_disc +
ql_dbg_buffer, vha, 0x3075, "LOGO buffer:"); - ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a, - elsio->u.els_logo.els_logo_pyld, - sizeof(*elsio->u.els_logo.els_logo_pyld)); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return QLA_FUNCTION_FAILED; } @@ -2702,7 +2821,8 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, wait_for_completion(&elsio->u.els_logo.comp); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return rval; } @@ -2718,7 +2838,7 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->entry_status = 0; els_iocb->handle = sp->handle; els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); - els_iocb->tx_dsd_count = cpu_to_le16(1); + els_iocb->tx_dsd_count = 1; els_iocb->vp_index = vha->vp_idx; els_iocb->sof_type = EST_SOFI3; els_iocb->rx_dsd_count = 0; @@ -2733,12 +2853,16 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->s_id[0] = vha->d_id.b.domain; if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) { - els_iocb->control_flags = 0; + if (vha->hw->flags.edif_enabled) + els_iocb->control_flags = cpu_to_le16(ECF_SEC_LOGIN); + else + els_iocb->control_flags = 0; + els_iocb->tx_byte_count = els_iocb->tx_len = cpu_to_le32(sizeof(struct els_plogi_payload)); put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma, &els_iocb->tx_address); - els_iocb->rx_dsd_count = cpu_to_le16(1); + els_iocb->rx_dsd_count = 1; els_iocb->rx_byte_count = els_iocb->rx_len = cpu_to_le32(sizeof(struct els_plogi_payload)); put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma, @@ -2747,10 +2871,8 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI ELS IOCB:\n"); ql_dump_buffer(ql_log_info, vha, 0x0109, - (uint8_t *)els_iocb, - sizeof(*els_iocb)); + (uint8_t *)els_iocb, 0x70); } else { - els_iocb->control_flags = cpu_to_le16(1 << 13); els_iocb->tx_byte_count = cpu_to_le32(sizeof(struct els_logo_payload)); put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma, @@ -2760,17 +2882,12 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->rx_byte_count = 0; els_iocb->rx_address = 0; els_iocb->rx_len = 0; - ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076, - "LOGO ELS IOCB:"); - ql_dump_buffer(ql_log_info, vha, 0x010b, - els_iocb, - sizeof(*els_iocb)); } sp->vha->qla_stats.control_requests++; } -static void +void qla2x00_els_dcmd2_iocb_timeout(void *data) { srb_t *sp = data; @@ -2825,7 +2942,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) struct qla_work_evt *e; struct fc_port *conflict_fcport; port_id_t cid; /* conflict Nport id */ - const __le32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; + u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; u16 lid; ql_dbg(ql_dbg_disc, vha, 0x3072, @@ -2833,12 +2950,14 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) sp->name, res, sp->handle, fcport->d_id.b24, fcport->port_name); fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE); - del_timer(&sp->u.iocb_cmd.timer); + /* For edif, set logout on delete to ensure any residual key from FW is flushed.*/ + fcport->logout_on_delete = 1; + fcport->chip_reset = vha->hw->base_qpair->chip_reset; if (sp->flags & SRB_WAKEUP_ON_COMP) complete(&lio->u.els_plogi.comp); else { - switch (le32_to_cpu(fw_status[0])) { + switch (fw_status[0]) { case CS_DATA_UNDERRUN: case CS_COMPLETE: memset(&ea, 0, sizeof(ea)); @@ 
-2848,9 +2967,10 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) break; case CS_IOCB_ERROR: - switch (le32_to_cpu(fw_status[1])) { + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED); + switch (fw_status[1]) { case LSC_SCODE_PORTID_USED: - lid = le32_to_cpu(fw_status[2]) & 0xffff; + lid = fw_status[2] & 0xffff; qlt_find_sess_invalidate_other(vha, wwn_to_u64(fcport->port_name), fcport->d_id, lid, &conflict_fcport); @@ -2865,7 +2985,7 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) conflict_fcport->conflict = fcport; fcport->login_pause = 1; ql_dbg(ql_dbg_disc, vha, 0x20ed, - "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n", + "%s %d %8phC pid %06x inuse with lid %#x.\n", __func__, __LINE__, fcport->port_name, fcport->d_id.b24, lid); @@ -2884,11 +3004,9 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) break; case LSC_SCODE_NPORT_USED: - cid.b.domain = (le32_to_cpu(fw_status[2]) >> 16) - & 0xff; - cid.b.area = (le32_to_cpu(fw_status[2]) >> 8) - & 0xff; - cid.b.al_pa = le32_to_cpu(fw_status[2]) & 0xff; + cid.b.domain = (fw_status[2] >> 16) & 0xff; + cid.b.area = (fw_status[2] >> 8) & 0xff; + cid.b.al_pa = fw_status[2] & 0xff; cid.b.rsvd_1 = 0; ql_dbg(ql_dbg_disc, vha, 0x20ec, @@ -2941,7 +3059,8 @@ static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) struct srb_iocb *elsio = &sp->u.iocb_cmd; qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return; } e->u.iosb.sp = sp; @@ -2959,7 +3078,9 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, int rval = QLA_SUCCESS; void *ptr, *resp_ptr; - /* Alloc SRB structure */ + /* Alloc SRB structure + * ref: INIT + */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) { ql_log(ql_log_info, vha, 0x70e6, @@ -2972,19 +3093,18 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); elsio = &sp->u.iocb_cmd; ql_dbg(ql_dbg_io, vha, 0x3073, - "Enter: PLOGI portid=%06x\n", fcport->d_id.b24); - - sp->type = SRB_ELS_DCMD; - sp->name = "ELS_DCMD"; - sp->fcport = fcport; + "%s Enter: PLOGI portid=%06x\n", __func__, fcport->d_id.b24); - elsio->timeout = qla2x00_els_dcmd2_iocb_timeout; if (wait) sp->flags = SRB_WAKEUP_ON_COMP; - qla2x00_init_timer(sp, ELS_DCMD_TIMEOUT + 2); + sp->type = SRB_ELS_DCMD; + sp->name = "ELS_DCMD"; + sp->fcport = fcport; + qla2x00_init_async_sp(sp, ELS_DCMD_TIMEOUT + 2, + qla2x00_els_dcmd2_sp_done); + sp->u.iocb_cmd.timeout = qla2x00_els_dcmd2_iocb_timeout; - sp->done = qla2x00_els_dcmd2_sp_done; elsio->u.els_plogi.tx_size = elsio->u.els_plogi.rx_size = DMA_POOL_SIZE; ptr = elsio->u.els_plogi.els_plogi_pyld = @@ -3005,7 +3125,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, goto out; } - ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %p %p\n", ptr, resp_ptr); + ql_dbg(ql_dbg_io, vha, 0x3073, "PLOGI %px %px\n", ptr, resp_ptr); memset(ptr, 0, sizeof(struct els_plogi_payload)); memset(resp_ptr, 0, sizeof(struct els_plogi_payload)); @@ -3015,10 +3135,17 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, elsio->u.els_plogi.els_cmd = els_opcode; elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode; + if (els_opcode == ELS_DCMD_PLOGI && DBELL_ACTIVE(vha)) { + struct fc_els_flogi *p = ptr; + + //ql_dump_buffer(ql_dbg_disc, vha, 0x3074, p, LOGIN_TEMPLATE_SIZE); + p->fl_csp.sp_features |= cpu_to_be16(FC_SP_FT_SEC); + } + + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n"); ql_dump_buffer(ql_dbg_disc + 
ql_dbg_buffer, vha, 0x0109, - (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, - sizeof(*elsio->u.els_plogi.els_plogi_pyld)); + (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70); init_completion(&elsio->u.els_plogi.comp); rval = qla2x00_start_sp(sp); @@ -3043,28 +3170,67 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, out: fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } +/* it is assumed the qpair lock is held */ +void qla_els_pt_iocb(struct scsi_qla_host *vha, + struct els_entry_24xx *els_iocb, + struct qla_els_pt_arg *a) +{ + els_iocb->entry_type = ELS_IOCB_TYPE; + els_iocb->entry_count = 1; + els_iocb->sys_define = 0; + els_iocb->entry_status = 0; + els_iocb->handle = QLA_SKIP_HANDLE; + els_iocb->nport_handle = a->nport_handle; + els_iocb->rx_xchg_address = a->rx_xchg_address; + els_iocb->tx_dsd_count = cpu_to_le16(1); + els_iocb->vp_index = a->vp_idx; + els_iocb->sof_type = EST_SOFI3; + els_iocb->rx_dsd_count = cpu_to_le16(0); + els_iocb->opcode = a->els_opcode; + + els_iocb->d_id[0] = a->did.b.al_pa; + els_iocb->d_id[1] = a->did.b.area; + els_iocb->d_id[2] = a->did.b.domain; + /* For SID the byte order is different than DID */ + els_iocb->s_id[1] = vha->d_id.b.al_pa; + els_iocb->s_id[2] = vha->d_id.b.area; + els_iocb->s_id[0] = vha->d_id.b.domain; + + els_iocb->control_flags = cpu_to_le16(a->control_flags); + + els_iocb->tx_byte_count = cpu_to_le32(a->tx_byte_count); + els_iocb->tx_len = cpu_to_le32(a->tx_len); + put_unaligned_le64(a->tx_addr, &els_iocb->tx_address); + + els_iocb->rx_byte_count = cpu_to_le32(a->rx_byte_count); + els_iocb->rx_len = cpu_to_le32(a->rx_len); + put_unaligned_le64(a->rx_addr, &els_iocb->rx_address); + +} + static void qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) { - struct bsg_job *bsg_job = sp->u.bsg_job; + bsg_job_t *bsg_job = sp->u.bsg_job; struct fc_bsg_request *bsg_request = bsg_job->request; - els_iocb->entry_type = ELS_IOCB_TYPE; - els_iocb->entry_count = 1; - els_iocb->sys_define = 0; - els_iocb->entry_status = 0; - els_iocb->handle = sp->handle; + els_iocb->entry_type = ELS_IOCB_TYPE; + els_iocb->entry_count = 1; + els_iocb->sys_define = 0; + els_iocb->entry_status = 0; + els_iocb->handle = sp->handle; els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); - els_iocb->tx_dsd_count = cpu_to_le16(bsg_job->request_payload.sg_cnt); + els_iocb->tx_dsd_count = cpu_to_le16(1); els_iocb->vp_index = sp->vha->vp_idx; - els_iocb->sof_type = EST_SOFI3; - els_iocb->rx_dsd_count = cpu_to_le16(bsg_job->reply_payload.sg_cnt); + els_iocb->sof_type = EST_SOFI3; + els_iocb->rx_dsd_count = cpu_to_le16(1); els_iocb->opcode = sp->type == SRB_ELS_CMD_RPT ?
bsg_request->rqst_data.r_els.els_code : @@ -3072,25 +3238,25 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->d_id[0] = sp->fcport->d_id.b.al_pa; els_iocb->d_id[1] = sp->fcport->d_id.b.area; els_iocb->d_id[2] = sp->fcport->d_id.b.domain; - els_iocb->control_flags = 0; - els_iocb->rx_byte_count = - cpu_to_le32(bsg_job->reply_payload.payload_len); - els_iocb->tx_byte_count = - cpu_to_le32(bsg_job->request_payload.payload_len); + els_iocb->control_flags = 0; + els_iocb->rx_byte_count = + cpu_to_le32(bsg_job->reply_payload.payload_len); + els_iocb->tx_byte_count = + cpu_to_le32(bsg_job->request_payload.payload_len); put_unaligned_le64(sg_dma_address(bsg_job->request_payload.sg_list), - &els_iocb->tx_address); - els_iocb->tx_len = cpu_to_le32(sg_dma_len - (bsg_job->request_payload.sg_list)); + &els_iocb->tx_address); + els_iocb->tx_len = cpu_to_le32(sg_dma_len + (bsg_job->request_payload.sg_list)); put_unaligned_le64(sg_dma_address(bsg_job->reply_payload.sg_list), - &els_iocb->rx_address); - els_iocb->rx_len = cpu_to_le32(sg_dma_len - (bsg_job->reply_payload.sg_list)); - + &els_iocb->rx_address); + els_iocb->rx_len = cpu_to_le32(sg_dma_len + (bsg_job->reply_payload.sg_list)); sp->vha->qla_stats.control_requests++; } + static void qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) { @@ -3101,7 +3267,7 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb) uint16_t tot_dsds; scsi_qla_host_t *vha = sp->vha; struct qla_hw_data *ha = vha->hw; - struct bsg_job *bsg_job = sp->u.bsg_job; + bsg_job_t *bsg_job = sp->u.bsg_job; int entry_count = 1; memset(ct_iocb, 0, sizeof(ms_iocb_entry_t)); @@ -3168,7 +3334,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb) uint16_t cmd_dsds, rsp_dsds; scsi_qla_host_t *vha = sp->vha; struct qla_hw_data *ha = vha->hw; - struct bsg_job *bsg_job = sp->u.bsg_job; + bsg_job_t *bsg_job = sp->u.bsg_job; int entry_count = 1; cont_a64_entry_t *cont_pkt = NULL; @@ -3253,7 +3419,7 @@ qla82xx_start_scsi(srb_t *sp) uint16_t tot_dsds; struct device_reg_82xx __iomem *reg; uint32_t dbval; - __be32 *fcp_dl; + uint32_t *fcp_dl; uint8_t additional_cdb_len; struct ct6_dsd *ctx; struct scsi_qla_host *vha = sp->vha; @@ -3277,7 +3443,7 @@ qla82xx_start_scsi(srb_t *sp) if (qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x300c, - "qla2x00_marker failed for cmd=%p.\n", cmd); + "qla2x00_marker failed for cmd=%px.\n", cmd); return QLA_FUNCTION_FAILED; } vha->marker_needed = 0; @@ -3310,7 +3476,7 @@ qla82xx_start_scsi(srb_t *sp) more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds); if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) { ql_dbg(ql_dbg_io, vha, 0x300d, - "Num of DSD list %d is than %d for cmd=%p.\n", + "Num of DSD list %d is more than %d for cmd=%px.\n", more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN, cmd); goto queuing_error; @@ -3326,7 +3492,7 @@ qla82xx_start_scsi(srb_t *sp) if (!dsd_ptr) { ql_log(ql_log_fatal, vha, 0x300e, "Failed to allocate memory for dsd_dma " - "for cmd=%p.\n", cmd); + "for cmd=%px.\n", cmd); goto queuing_error; } @@ -3336,7 +3502,7 @@ qla82xx_start_scsi(srb_t *sp) kfree(dsd_ptr); ql_log(ql_log_fatal, vha, 0x300f, "Failed to allocate memory for dsd_addr " - "for cmd=%p.\n", cmd); + "for cmd=%px.\n", cmd); goto queuing_error; } list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list); @@ -3347,7 +3513,7 @@ qla82xx_start_scsi(srb_t *sp) req_cnt = 1; if (req->cnt < (req_cnt + 2)) { - cnt = (uint16_t)rd_reg_dword_relaxed( + cnt = (uint16_t)RD_REG_DWORD_RELAXED( &reg->req_q_out[0]); if
(req->ring_index < cnt) req->cnt = cnt - req->ring_index; @@ -3358,20 +3524,14 @@ qla82xx_start_scsi(srb_t *sp) goto queuing_error; } - ctx = sp->u.scmd.ct6_ctx = - mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); - if (!ctx) { - ql_log(ql_log_fatal, vha, 0x3010, - "Failed to allocate ctx for cmd=%p.\n", cmd); - goto queuing_error; - } + ctx = &sp->u.scmd.ct6_ctx; memset(ctx, 0, sizeof(struct ct6_dsd)); ctx->fcp_cmnd = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, GFP_ATOMIC, &ctx->fcp_cmnd_dma); if (!ctx->fcp_cmnd) { ql_log(ql_log_fatal, vha, 0x3011, - "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); + "Failed to allocate fcp_cmnd for cmd=%px.\n", cmd); goto queuing_error; } @@ -3387,7 +3547,7 @@ qla82xx_start_scsi(srb_t *sp) */ ql_log(ql_log_warn, vha, 0x3012, "scsi cmd len %d not multiple of 4 " - "for cmd=%p.\n", cmd->cmd_len, cmd); + "for cmd=%px.\n", cmd->cmd_len, cmd); goto queuing_error_fcp_cmnd; } ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4; @@ -3397,7 +3557,7 @@ qla82xx_start_scsi(srb_t *sp) } cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; - cmd_pkt->handle = make_handle(req->id, handle); + cmd_pkt->handle = MAKE_HANDLE(req->id, handle); /* Zero out remaining portion of packet. */ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */ @@ -3435,7 +3595,7 @@ qla82xx_start_scsi(srb_t *sp) memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len); - fcp_dl = (__be32 *)(ctx->fcp_cmnd->cdb + 16 + + fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 + additional_cdb_len); *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd)); @@ -3456,7 +3616,7 @@ qla82xx_start_scsi(srb_t *sp) req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); if (req->cnt < (req_cnt + 2)) { - cnt = (uint16_t)rd_reg_dword_relaxed( + cnt = (uint16_t)RD_REG_DWORD_RELAXED( &reg->req_q_out[0]); if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -3468,7 +3628,7 @@ qla82xx_start_scsi(srb_t *sp) goto queuing_error; cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; - cmd_pkt->handle = make_handle(req->id, handle); + cmd_pkt->handle = MAKE_HANDLE(req->id, handle); /* Zero out remaining portion of packet.
*/ /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ @@ -3532,10 +3692,10 @@ qla82xx_start_scsi(srb_t *sp) if (ql2xdbwr) qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr, dbval); else { - wrt_reg_dword(ha->nxdb_wr_ptr, dbval); + WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); wmb(); - while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) { - wrt_reg_dword(ha->nxdb_wr_ptr, dbval); + while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) { + WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval); wmb(); } } @@ -3569,11 +3729,12 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) struct srb_iocb *aio = &sp->u.iocb_cmd; scsi_qla_host_t *vha = sp->vha; struct req_que *req = sp->qpair->req; + srb_t *orig_sp = sp->cmd_sp; memset(abt_iocb, 0, sizeof(struct abort_entry_24xx)); abt_iocb->entry_type = ABORT_IOCB_TYPE; abt_iocb->entry_count = 1; - abt_iocb->handle = make_handle(req->id, sp->handle); + abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); if (sp->fcport) { abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; @@ -3581,10 +3742,15 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb) abt_iocb->port_id[2] = sp->fcport->d_id.b.domain; } abt_iocb->handle_to_abort = - make_handle(le16_to_cpu(aio->u.abt.req_que_no), - aio->u.abt.cmd_hndl); + cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no, + aio->u.abt.cmd_hndl)); abt_iocb->vp_index = vha->vp_idx; - abt_iocb->req_que_no = aio->u.abt.req_que_no; + abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no); + + /* need to pass original sp */ + if (orig_sp) + qla_nvme_abort_set_option(abt_iocb, orig_sp); + /* Send the command to the firmware */ wmb(); } @@ -3599,7 +3765,7 @@ qla2x00_mb_iocb(srb_t *sp, struct mbx_24xx_entry *mbx) sz = min(ARRAY_SIZE(mbx->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.out_mb)); for (i = 0; i < sz; i++) - mbx->mb[i] = sp->u.iocb_cmd.u.mbx.out_mb[i]; + mbx->mb[i] = cpu_to_le16(sp->u.iocb_cmd.u.mbx.out_mb[i]); } static void @@ -3623,7 +3789,7 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp, nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { nack->u.isp24.flags = ntfy->u.isp24.flags & - cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); + cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); } nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; nack->u.isp24.status = ntfy->u.isp24.status; @@ -3636,34 +3802,48 @@ static void qla2x00_send_notify_ack_iocb(srb_t *sp, nack->u.isp24.srr_reject_code = 0; nack->u.isp24.srr_reject_code_expl = 0; nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; + + /* TODO: qualify this with EDIF enable */ + /* If our target mode policy is SECURITY, respond with an ACK with the FCSP bit set */ + if ((ntfy->u.isp24.status_subcode == ELS_PLOGI) && + (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) { + ql_dbg(ql_dbg_disc, sp->vha, 0x3074, + "%s PLOGI NACK sent with FC SECURITY bit, hdl=%x, loopid=%x, to pid %02x%02x%02x\n", + sp->name, sp->handle, sp->fcport->loop_id, + sp->fcport->d_id.b.domain, sp->fcport->d_id.b.area, sp->fcport->d_id.b.al_pa); + nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP); + } } /* * Build NVME LS request */ -static void +static int qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt) { struct srb_iocb *nvme; + int rval = QLA_SUCCESS; nvme = &sp->u.iocb_cmd; cmd_pkt->entry_type = PT_LS4_REQUEST; cmd_pkt->entry_count = 1; - cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT); + cmd_pkt->control_flags = CF_LS4_ORIGINATOR << CF_LS4_SHIFT;
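+	/*
+	 * Layout note on the PT_LS4 pass-through built below (a sketch, not
+	 * new behavior): dsd[0] carries the NVMe-FC LS command payload on
+	 * the tx side and dsd[1] the LS response buffer on the rx side;
+	 * each side uses a single descriptor, so both dseg counts stay 1.
+	 */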
cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec); cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); cmd_pkt->vp_index = sp->fcport->vha->vp_idx; - cmd_pkt->tx_dseg_count = cpu_to_le16(1); - cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len); - cmd_pkt->dsd[0].length = cpu_to_le32(nvme->u.nvme.cmd_len); + cmd_pkt->tx_dseg_count = 1; + cmd_pkt->tx_byte_count = nvme->u.nvme.cmd_len; + cmd_pkt->dsd[0].length = nvme->u.nvme.cmd_len; put_unaligned_le64(nvme->u.nvme.cmd_dma, &cmd_pkt->dsd[0].address); - cmd_pkt->rx_dseg_count = cpu_to_le16(1); - cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len); - cmd_pkt->dsd[1].length = cpu_to_le32(nvme->u.nvme.rsp_len); + cmd_pkt->rx_dseg_count = 1; + cmd_pkt->rx_byte_count = nvme->u.nvme.rsp_len; + cmd_pkt->dsd[1].length = nvme->u.nvme.rsp_len; put_unaligned_le64(nvme->u.nvme.rsp_dma, &cmd_pkt->dsd[1].address); + + return rval; } static void @@ -3700,6 +3880,80 @@ qla24xx_prlo_iocb(srb_t *sp, struct logio_entry_24xx *logio) logio->vp_index = sp->fcport->vha->vp_idx; } +int qla_get_iocbs_resource(struct srb *sp) +{ + bool get_exch; + bool push_it_through = false; + + if (!ql2xenforce_iocb_limit) { + sp->iores.res_type = RESOURCE_NONE; + return 0; + } + sp->iores.res_type = RESOURCE_NONE; + + switch (sp->type) { + case SRB_TM_CMD: + case SRB_PRLI_CMD: + case SRB_ADISC_CMD: + push_it_through = true; + fallthrough; + case SRB_LOGIN_CMD: + case SRB_ELS_CMD_RPT: + case SRB_ELS_CMD_HST: + case SRB_ELS_CMD_HST_NOLOGIN: + case SRB_CT_CMD: + case SRB_NVME_LS: + case SRB_ELS_DCMD: + case SRB_ELS_RDF: + case SRB_ELS_EDC: + get_exch = true; + break; + + case SRB_FXIOCB_DCMD: + case SRB_FXIOCB_BCMD: + sp->iores.res_type = RESOURCE_NONE; + return 0; + + case SRB_SA_UPDATE: + case SRB_SA_REPLACE: + case SRB_MB_IOCB: + case SRB_ABT_CMD: + case SRB_NACK_PLOGI: + case SRB_NACK_PRLI: + case SRB_NACK_LOGO: + case SRB_LOGOUT_CMD: + case SRB_CTRL_VP: + case SRB_MARKER: + default: + push_it_through = true; + get_exch = false; + } + + sp->iores.res_type |= RESOURCE_IOCB; + sp->iores.iocb_cnt = 1; + if (get_exch) { + sp->iores.res_type |= RESOURCE_EXCH; + sp->iores.exch_cnt = 1; + } + if (push_it_through) + sp->iores.res_type |= RESOURCE_FORCE; + + return qla_get_fw_resources(sp->qpair, &sp->iores); +} + +static void +qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk) +{ + mrk->entry_type = MARKER_TYPE; + mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier; + if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) { + mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id); + int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun); + host_to_fcp_swap(mrk->lun, sizeof(mrk->lun)); + mrk->vp_index = sp->u.iocb_cmd.u.tmf.vp_index; + } +} + int qla2x00_start_sp(srb_t *sp) { @@ -3710,7 +3964,20 @@ qla2x00_start_sp(srb_t *sp) void *pkt; unsigned long flags; + if (vha->hw->flags.eeh_busy) + return -EIO; + + ql_srb_trace_ext(ql_dbg_disc, vha, sp->fcport, + "caller=%ps sp=%px type=%d", + __builtin_return_address(0), sp, sp->type); + spin_lock_irqsave(qp->qp_lock_ptr, flags); + rval = qla_get_iocbs_resource(sp); + if (rval) { + spin_unlock_irqrestore(qp->qp_lock_ptr, flags); + return EAGAIN; + } + pkt = __qla2x00_alloc_iocbs(sp->qpair, sp); if (!pkt) { rval = EAGAIN; @@ -3737,6 +4004,10 @@ qla2x00_start_sp(srb_t *sp) case SRB_ELS_CMD_HST: qla24xx_els_iocb(sp, pkt); break; + case SRB_ELS_CMD_HST_NOLOGIN: + qla_els_pt_iocb(sp->vha, pkt, &sp->u.bsg_cmd.u.els_arg); + ((struct els_entry_24xx *)pkt)->handle = sp->handle; + break; case SRB_CT_CMD: 
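		/* CT (FC-GS) pass-through: built with the FWI-2 CT IOCB on
		 * 24xx-and-later adapters, otherwise with the legacy MS IOCB
		 * builder (qla2x00_ct_iocb). */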
IS_FWI2_CAPABLE(ha) ? qla24xx_ct_iocb(sp, pkt) : @@ -3784,16 +4055,39 @@ qla2x00_start_sp(srb_t *sp) case SRB_PRLO_CMD: qla24xx_prlo_iocb(sp, pkt); break; + case SRB_SA_UPDATE: + qla24xx_sa_update_iocb(sp, pkt); + break; + case SRB_SA_REPLACE: + qla24xx_sa_replace_iocb(sp, pkt); + break; + case SRB_ELS_RDF: + case SRB_ELS_EDC: + qla_els_pt_iocb(sp->vha, pkt, &sp->u.iocb_cmd.u.drv_els.els_pt_arg); + ((struct els_entry_24xx *)pkt)->handle = sp->handle; + break; + case SRB_MARKER: + qla_marker_iocb(sp, pkt); + break; default: break; } - if (sp->start_timer) + if (sp->start_timer) { + /* ref: TMR timer ref + * this code should be just before the start_iocbs call + * so that the caller doesn't have to do a kref_put + * even on failure + */ + kref_get(&sp->cmd_kref); add_timer(&sp->u.iocb_cmd.timer); + } wmb(); qla2x00_start_iocbs(vha, qp->req); done: + if (rval) + qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(qp->qp_lock_ptr, flags); return rval; } @@ -3809,7 +4103,7 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha, struct scatterlist *sg; int index; int entry_count = 1; - struct bsg_job *bsg_job = sp->u.bsg_job; + bsg_job_t *bsg_job = sp->u.bsg_job; /*Update entry type to indicate bidir command */ put_unaligned_le32(COMMAND_BIDIRECTIONAL, &cmd_pkt->entry_type); @@ -3927,8 +4221,14 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) /* Check for room on request queue. */ if (req->cnt < req_cnt + 2) { - cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : - rd_reg_dword_relaxed(req->req_q_out); + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = RD_REG_DWORD_RELAXED(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) + goto queuing_error; + } + if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; else @@ -3941,7 +4241,7 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) } cmd_pkt = (struct cmd_bidir *)req->ring_ptr; - cmd_pkt->handle = make_handle(req->id, handle); + cmd_pkt->handle = MAKE_HANDLE(req->id, handle); /* Zero out remaining portion of packet. */ /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/ @@ -3967,5 +4267,6 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) qla2x00_start_iocbs(vha, req); queuing_error: spin_unlock_irqrestore(&ha->hardware_lock, flags); + return rval; } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index c5c7d60ab2524..3535fee916e59 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1,15 +1,16 @@ -// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details.
*/ #include "qla_def.h" #include "qla_target.h" +#include "qla_gbl.h" #include #include #include -#include #include #include #include @@ -19,33 +20,17 @@ static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t); static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *); static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *); +static void qla27xx_status_cont_type_1(scsi_qla_host_t *, sts_cont_entry_t *); static int qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *, sts_entry_t *); -static void qla27xx_process_purex_fpin(struct scsi_qla_host *vha, - struct purex_item *item); static struct purex_item *qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size); static struct purex_item *qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt); static struct purex_item *qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt, struct rsp_que **rsp); - -static void -qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item) -{ - void *pkt = &item->iocb; - uint16_t pkt_size = item->size; - - ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508d, - "%s: Enter\n", __func__); - - ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x508e, - "-------- ELS REQ -------\n"); - ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x508f, - pkt, pkt_size); - - fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt); -} +static void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, + struct purex_item *pkt); const char *const port_state_str[] = { "Unknown", @@ -55,119 +40,151 @@ const char *const port_state_str[] = { "ONLINE" }; -static void -qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt) + +/** + * __qla_consume_iocb - this routine is used to tell the fw that the driver has + * processed or consumed the head IOCB along with the continuation IOCBs from + * the provided response queue. + * @vha: host adapter pointer + * @pkt: pointer to current packet. On return, this pointer shall move + * to the next packet. + * @rsp: response queue pointer.
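+ *
+ * A minimal usage sketch (illustrative only): a handler that wants to
+ * drop an unwanted purex can call
+ *	__qla_consume_iocb(vha, &pkt, &rsp);
+ * which advances the response ring past the head entry and every
+ * continuation entry, stamping each one RESPONSE_PROCESSED.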
+ * + * it is assumed pkt is the head iocb, not a continuation iocb + */ +void __qla_consume_iocb(struct scsi_qla_host *vha, + void **pkt, struct rsp_que **rsp) { - struct abts_entry_24xx *abts = - (struct abts_entry_24xx *)&pkt->iocb; - struct qla_hw_data *ha = vha->hw; - struct els_entry_24xx *rsp_els; - struct abts_entry_24xx *abts_rsp; - dma_addr_t dma; - uint32_t fctl; - int rval; + struct rsp_que *rsp_q = *rsp; + response_t *new_pkt; + uint16_t entry_count_remaining; + struct purex_entry_24xx *purex = *pkt; - ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__); + entry_count_remaining = purex->entry_count; + while (entry_count_remaining > 0) { + new_pkt = rsp_q->ring_ptr; + *pkt = new_pkt; - ql_log(ql_log_warn, vha, 0x0287, - "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n", - abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id, - abts->seq_id, abts->seq_cnt); - ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, - "-------- ABTS RCV -------\n"); - ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, - (uint8_t *)abts, sizeof(*abts)); + rsp_q->ring_index++; + if (rsp_q->ring_index == rsp_q->length) { + rsp_q->ring_index = 0; + rsp_q->ring_ptr = rsp_q->ring; + } else { + rsp_q->ring_ptr++; + } - rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma, - GFP_KERNEL); - if (!rsp_els) { - ql_log(ql_log_warn, vha, 0x0287, - "Failed allocate dma buffer ABTS/ELS RSP.\n"); - return; + new_pkt->signature = RESPONSE_PROCESSED; + /* flush signature */ + wmb(); + --entry_count_remaining; } +} - /* terminate exchange */ - rsp_els->entry_type = ELS_IOCB_TYPE; - rsp_els->entry_count = 1; - rsp_els->nport_handle = cpu_to_le16(~0); - rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort; - rsp_els->control_flags = cpu_to_le16(EPD_RX_XCHG); - ql_dbg(ql_dbg_init, vha, 0x0283, - "Sending ELS Response to terminate exchange %#x...\n", - abts->rx_xch_addr_to_abort); - ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, - "-------- ELS RSP -------\n"); - ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, - (uint8_t *)rsp_els, sizeof(*rsp_els)); - rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0); - if (rval) { - ql_log(ql_log_warn, vha, 0x0288, - "%s: iocb failed to execute -> %x\n", __func__, rval); - } else if (rsp_els->comp_status) { - ql_log(ql_log_warn, vha, 0x0289, - "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", - __func__, rsp_els->comp_status, - rsp_els->error_subcode_1, rsp_els->error_subcode_2); - } else { - ql_dbg(ql_dbg_init, vha, 0x028a, - "%s: abort exchange done.\n", __func__); - } +/** + * __qla_copy_purex_to_buffer - extract ELS payload from Purex IOCB + * and save to provided buffer + * @vha: host adapter pointer + * @pkt: pointer to the Purex IOCB + * @rsp: response queue + * @buf: buffer the extracted ELS payload is copied into + * @buf_len: buffer length + */ +int __qla_copy_purex_to_buffer(struct scsi_qla_host *vha, + void **pkt, struct rsp_que **rsp, u8 *buf, u32 buf_len) +{ + struct purex_entry_24xx *purex = *pkt; + struct rsp_que *rsp_q = *rsp; + sts_cont_entry_t *new_pkt; + uint16_t no_bytes = 0, total_bytes = 0, pending_bytes = 0; + uint16_t buffer_copy_offset = 0; + uint16_t entry_count_remaining; + u16 tpad; - /* send ABTS response */ - abts_rsp = (void *)rsp_els; - memset(abts_rsp, 0, sizeof(*abts_rsp)); - abts_rsp->entry_type = ABTS_RSP_TYPE; - abts_rsp->entry_count = 1; - abts_rsp->nport_handle = abts->nport_handle; - abts_rsp->vp_idx = abts->vp_idx; - abts_rsp->sof_type = abts->sof_type & 0xf0; - abts_rsp->rx_xch_addr =
abts->rx_xch_addr; - abts_rsp->d_id[0] = abts->s_id[0]; - abts_rsp->d_id[1] = abts->s_id[1]; - abts_rsp->d_id[2] = abts->s_id[2]; - abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC; - abts_rsp->s_id[0] = abts->d_id[0]; - abts_rsp->s_id[1] = abts->d_id[1]; - abts_rsp->s_id[2] = abts->d_id[2]; - abts_rsp->cs_ctl = abts->cs_ctl; - /* include flipping bit23 in fctl */ - fctl = ~(abts->f_ctl[2] | 0x7F) << 16 | - FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT; - abts_rsp->f_ctl[0] = fctl >> 0 & 0xff; - abts_rsp->f_ctl[1] = fctl >> 8 & 0xff; - abts_rsp->f_ctl[2] = fctl >> 16 & 0xff; - abts_rsp->type = FC_TYPE_BLD; - abts_rsp->rx_id = abts->rx_id; - abts_rsp->ox_id = abts->ox_id; - abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id; - abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id; - abts_rsp->payload.ba_acc.high_seq_cnt = cpu_to_le16(~0); - abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort; - ql_dbg(ql_dbg_init, vha, 0x028b, - "Sending BA ACC response to ABTS %#x...\n", - abts->rx_xch_addr_to_abort); - ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, - "-------- ELS RSP -------\n"); - ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, - (uint8_t *)abts_rsp, sizeof(*abts_rsp)); - rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0); - if (rval) { - ql_log(ql_log_warn, vha, 0x028c, - "%s: iocb failed to execute -> %x\n", __func__, rval); - } else if (abts_rsp->comp_status) { - ql_log(ql_log_warn, vha, 0x028d, - "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", - __func__, abts_rsp->comp_status, - abts_rsp->payload.error.subcode1, - abts_rsp->payload.error.subcode2); - } else { - ql_dbg(ql_dbg_init, vha, 0x028ea, - "%s: done.\n", __func__); + entry_count_remaining = purex->entry_count; + total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) + - PURX_ELS_HEADER_SIZE; + + /* + * end of payload may not end on a 4-byte boundary. Need to + * round up / pad for room to swap, before saving data + */ + tpad = roundup(total_bytes, 4); + + if (buf_len < tpad) { + ql_dbg(ql_dbg_async, vha, 0x5084, + "%s buffer is too small %d < %d\n", + __func__, buf_len, tpad); + __qla_consume_iocb(vha, pkt, rsp); + return -EIO; } - dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma); + pending_bytes = total_bytes = tpad; + no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ? + sizeof(purex->els_frame_payload) : pending_bytes; + + memcpy(buf, &purex->els_frame_payload[0], no_bytes); + buffer_copy_offset += no_bytes; + pending_bytes -= no_bytes; + --entry_count_remaining; + + ((response_t *)purex)->signature = RESPONSE_PROCESSED; + /* flush signature */ + wmb(); + + do { + while ((total_bytes > 0) && (entry_count_remaining > 0)) { + new_pkt = (sts_cont_entry_t *)rsp_q->ring_ptr; + *pkt = new_pkt; + + if (new_pkt->entry_type != STATUS_CONT_TYPE) { + ql_log(ql_log_warn, vha, 0x507a, + "Unexpected IOCB type, partial data 0x%x\n", + buffer_copy_offset); + break; + } + + rsp_q->ring_index++; + if (rsp_q->ring_index == rsp_q->length) { + rsp_q->ring_index = 0; + rsp_q->ring_ptr = rsp_q->ring; + } else { + rsp_q->ring_ptr++; + } + no_bytes = (pending_bytes > sizeof(new_pkt->data)) ?
+ sizeof(new_pkt->data) : pending_bytes; + if ((buffer_copy_offset + no_bytes) <= total_bytes) { + memcpy((buf + buffer_copy_offset), new_pkt->data, + no_bytes); + buffer_copy_offset += no_bytes; + pending_bytes -= no_bytes; + --entry_count_remaining; + } else { + ql_log(ql_log_warn, vha, 0x5044, + "Attempt to copy more than we got, optimizing..%x\n", + buffer_copy_offset); + memcpy((buf + buffer_copy_offset), new_pkt->data, + total_bytes - buffer_copy_offset); + } + + ((response_t *)new_pkt)->signature = RESPONSE_PROCESSED; + /* flush signature */ + wmb(); + } + + if (pending_bytes != 0 || entry_count_remaining != 0) { + ql_log(ql_log_fatal, vha, 0x508b, + "Dropping partial data, underrun bytes = 0x%x, entry cnts 0x%x\n", + total_bytes, entry_count_remaining); + return -EIO; + } + } while (entry_count_remaining > 0); + + be32_to_cpu_array((u32 *)buf, (__be32 *)buf, total_bytes >> 2); + + return 0; } + /** * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200. * @irq: interrupt number @@ -204,7 +221,7 @@ qla2100_intr_handler(int irq, void *dev_id) spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { - hccr = rd_reg_word(&reg->hccr); + hccr = RD_REG_WORD(&reg->hccr); if (qla2x00_check_reg16_for_disconnect(vha, hccr)) break; if (hccr & HCCR_RISC_PAUSE) { @@ -216,18 +233,19 @@ qla2100_intr_handler(int irq, void *dev_id) * bit to be cleared. Schedule a big hammer to get * out of the RISC PAUSED state. */ - wrt_reg_word(&reg->hccr, HCCR_RESET_RISC); - rd_reg_word(&reg->hccr); + WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); + RD_REG_WORD(&reg->hccr); + vha->hw_err_cnt++; - ha->isp_ops->fw_dump(vha); + ha->isp_ops->fw_dump(vha, 1); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; - } else if ((rd_reg_word(&reg->istatus) & ISR_RISC_INT) == 0) + } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0) break; - if (rd_reg_word(&reg->semaphore) & BIT_0) { - wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT); - rd_reg_word(&reg->hccr); + if (RD_REG_WORD(&reg->semaphore) & BIT_0) { + WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); + RD_REG_WORD(&reg->hccr); /* Get mailbox data. */ mb[0] = RD_MAILBOX_REG(ha, reg, 0); @@ -246,13 +264,13 @@ qla2100_intr_handler(int irq, void *dev_id) mb[0]); } /* Release mailbox registers. */ - wrt_reg_word(&reg->semaphore, 0); - rd_reg_word(&reg->semaphore); + WRT_REG_WORD(&reg->semaphore, 0); + RD_REG_WORD(&reg->semaphore); } else { qla2x00_process_response_queue(rsp); - wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT); - rd_reg_word(&reg->hccr); + WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); + RD_REG_WORD(&reg->hccr); } } qla2x00_handle_mbx_completion(ha, status); @@ -269,12 +287,7 @@ qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg) if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) && !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) && !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) { - /* - * Schedule this (only once) on the default system - * workqueue so that all the adapter workqueues and the - * DPC thread can be shutdown cleanly.
- */ - schedule_work(&vha->hw->board_disable); + qla_schedule_eeh_work(vha); } return true; } else @@ -324,14 +337,15 @@ qla2300_intr_handler(int irq, void *dev_id) spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { - stat = rd_reg_dword(&reg->u.isp2300.host_status); + stat = RD_REG_DWORD(&reg->u.isp2300.host_status); if (qla2x00_check_reg32_for_disconnect(vha, stat)) break; if (stat & HSR_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) break; - hccr = rd_reg_word(&reg->hccr); + hccr = RD_REG_WORD(&reg->hccr); + vha->hw_err_cnt++; if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8)) ql_log(ql_log_warn, vha, 0x5026, @@ -347,10 +361,10 @@ qla2300_intr_handler(int irq, void *dev_id) * interrupt bit to be cleared. Schedule a big * hammer to get out of the RISC PAUSED state. */ - wrt_reg_word(&reg->hccr, HCCR_RESET_RISC); - rd_reg_word(&reg->hccr); + WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC); + RD_REG_WORD(&reg->hccr); - ha->isp_ops->fw_dump(vha); + ha->isp_ops->fw_dump(vha, 1); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } else if ((stat & HSR_RISC_INT) == 0) @@ -365,7 +379,7 @@ qla2300_intr_handler(int irq, void *dev_id) status |= MBX_INTERRUPT; /* Release mailbox registers. */ - wrt_reg_word(&reg->semaphore, 0); + WRT_REG_WORD(&reg->semaphore, 0); break; case 0x12: mb[0] = MSW(stat); @@ -393,8 +407,8 @@ qla2300_intr_handler(int irq, void *dev_id) "Unrecognized interrupt type (%d).\n", stat & 0xff); break; } - wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT); - rd_reg_word_relaxed(&reg->hccr); + WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT); + RD_REG_WORD_RELAXED(&reg->hccr); } qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -412,7 +426,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) { uint16_t cnt; uint32_t mboxes; - __le16 __iomem *wptr; + uint16_t __iomem *wptr; struct qla_hw_data *ha = vha->hw; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; @@ -428,15 +442,15 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) ha->flags.mbox_int = 1; ha->mailbox_out[0] = mb0; mboxes >>= 1; - wptr = MAILBOX_REG(ha, reg, 1); + wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1); for (cnt = 1; cnt < ha->mbx_count; cnt++) { if (IS_QLA2200(ha) && cnt == 8) - wptr = MAILBOX_REG(ha, reg, 8); + wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8); if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0)) ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr); else if (mboxes & BIT_0) - ha->mailbox_out[cnt] = rd_reg_word(wptr); + ha->mailbox_out[cnt] = RD_REG_WORD(wptr); wptr++; mboxes >>= 1; @@ -451,19 +465,19 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr) int rval; struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24; struct device_reg_82xx __iomem *reg82 = &vha->hw->iobase->isp82; - __le16 __iomem *wptr; + uint16_t __iomem *wptr; uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS]; /* Seed data -- mailbox1 -> mailbox7.
*/ if (IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) - wptr = &reg24->mailbox1; + wptr = (uint16_t __iomem *)&reg24->mailbox1; else if (IS_QLA8044(vha->hw)) - wptr = &reg82->mailbox_out[1]; + wptr = (uint16_t __iomem *)&reg82->mailbox_out[1]; else return; for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++) - mb[cnt] = rd_reg_word(wptr); + mb[cnt] = RD_REG_WORD(wptr); ql_dbg(ql_dbg_async, vha, 0x5021, "Inter-Driver Communication %s -- " @@ -509,7 +523,7 @@ const char * qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed) { static const char *const link_speeds[] = { - "1", "2", "?", "4", "8", "16", "32", "10" + "1", "2", "?", "4", "8", "16", "32", "64", "10" }; #define QLA_LAST_SPEED (ARRAY_SIZE(link_speeds) - 1) @@ -684,131 +698,28 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) } } -int -qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) -{ - struct qla_hw_data *ha = vha->hw; - scsi_qla_host_t *vp; - uint32_t vp_did; - unsigned long flags; - int ret = 0; - - if (!ha->num_vhosts) - return ret; - - spin_lock_irqsave(&ha->vport_slock, flags); - list_for_each_entry(vp, &ha->vp_list, list) { - vp_did = vp->d_id.b24; - if (vp_did == rscn_entry) { - ret = 1; - break; - } - } - spin_unlock_irqrestore(&ha->vport_slock, flags); - - return ret; -} - -fc_port_t * -qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) -{ - fc_port_t *f, *tf; - - f = tf = NULL; - list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) - if (f->loop_id == loop_id) - return f; - return NULL; -} - -fc_port_t * -qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted) -{ - fc_port_t *f, *tf; - - f = tf = NULL; - list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { - if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) { - if (incl_deleted) - return f; - else if (f->deleted == 0) - return f; - } - } - return NULL; -} - -fc_port_t * -qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id, - u8 incl_deleted) -{ - fc_port_t *f, *tf; - - f = tf = NULL; - list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { - if (f->d_id.b24 == id->b24) { - if (incl_deleted) - return f; - else if (f->deleted == 0) - return f; - } - } - return NULL; -} - -/* Shall be called only on supported adapters. */ -static void -qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) -{ - struct qla_hw_data *ha = vha->hw; - bool reset_isp_needed = 0; - - ql_log(ql_log_warn, vha, 0x02f0, - "MPI Heartbeat stop. MPI reset is%s needed. " - "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n", - mb[1] & BIT_8 ? "" : " not", - mb[0], mb[1], mb[2], mb[3]); - - if ((mb[1] & BIT_8) == 0) - return; - - ql_log(ql_log_warn, vha, 0x02f1, - "MPI Heartbeat stop.
FW dump needed\n"); - - if (ql2xfulldump_on_mpifail) { - ha->isp_ops->fw_dump(vha); - reset_isp_needed = 1; - } - - ha->isp_ops->mpi_fw_dump(vha, 1); - - if (reset_isp_needed) { - vha->hw->flags.fw_init_done = 0; - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); - qla2xxx_wake_dpc(vha); - } -} - -static struct purex_item * +struct purex_item * qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size) { struct purex_item *item = NULL; uint8_t item_hdr_size = sizeof(*item); + uint8_t default_usable = 0; if (size > QLA_DEFAULT_PAYLOAD_SIZE) { item = kzalloc(item_hdr_size + (size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC); } else { - if (atomic_inc_return(&vha->default_item.in_use) == 1) { + item = kzalloc(item_hdr_size, GFP_ATOMIC); + default_usable = 1; + } + if (!item) { + if (default_usable && + (atomic_inc_return(&vha->default_item.in_use) == 1)) { item = &vha->default_item; goto initialize_purex_header; - } else { - item = kzalloc(item_hdr_size, GFP_ATOMIC); } - } - if (!item) { ql_log(ql_log_warn, vha, 0x5092, - ">> Failed allocate purex list item.\n"); + ">> Failed to allocate purex list item.\n"); return NULL; } @@ -819,7 +730,7 @@ qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size) return item; } -static void +void qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt, void (*process_item)(struct scsi_qla_host *vha, struct purex_item *pkt)) @@ -857,6 +768,85 @@ static struct purex_item return item; } +static uint +qla25xx_rdp_port_speed_capability(struct qla_hw_data *ha) +{ + if (IS_CNA_CAPABLE(ha)) + return RDP_PORT_SPEED_10GB; + + if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) { + uint speeds = 0; + if (ha->max_supported_speed == 2) { + if (ha->min_supported_speed <= 6) + speeds |= RDP_PORT_SPEED_64GB; + } + if (ha->max_supported_speed == 2 || + ha->max_supported_speed == 1) { + if (ha->min_supported_speed <= 5) + speeds |= RDP_PORT_SPEED_32GB; + } + if (ha->max_supported_speed == 2 || + ha->max_supported_speed == 1 || + ha->max_supported_speed == 0) { + if (ha->min_supported_speed <= 4) + speeds |= RDP_PORT_SPEED_16GB; + } + if (ha->max_supported_speed == 1 || + ha->max_supported_speed == 0) { + if (ha->min_supported_speed <= 3) + speeds |= RDP_PORT_SPEED_8GB; + } + if (ha->max_supported_speed == 0) { + if (ha->min_supported_speed <= 2) + speeds |= RDP_PORT_SPEED_4GB; + } + return speeds; + } + + if (IS_QLA2031(ha)) + return RDP_PORT_SPEED_16GB|RDP_PORT_SPEED_8GB| + RDP_PORT_SPEED_4GB; + + if (IS_QLA25XX(ha)) + return RDP_PORT_SPEED_8GB|RDP_PORT_SPEED_4GB| + RDP_PORT_SPEED_2GB|RDP_PORT_SPEED_1GB; + + if (IS_QLA24XX_TYPE(ha)) + return RDP_PORT_SPEED_4GB|RDP_PORT_SPEED_2GB| + RDP_PORT_SPEED_1GB; + + if (IS_QLA23XX(ha)) + return RDP_PORT_SPEED_2GB|RDP_PORT_SPEED_1GB; + + return RDP_PORT_SPEED_1GB; +} + +static uint +qla25xx_rdp_port_speed_currently(struct qla_hw_data *ha) +{ + switch (ha->link_data_rate) { + case PORT_SPEED_1GB: + return RDP_PORT_SPEED_1GB; + case PORT_SPEED_2GB: + return RDP_PORT_SPEED_2GB; + case PORT_SPEED_4GB: + return RDP_PORT_SPEED_4GB; + case PORT_SPEED_8GB: + return RDP_PORT_SPEED_8GB; + case PORT_SPEED_10GB: + return RDP_PORT_SPEED_10GB; + case PORT_SPEED_16GB: + return RDP_PORT_SPEED_16GB; + case PORT_SPEED_32GB: + return RDP_PORT_SPEED_32GB; + case PORT_SPEED_64GB: + return RDP_PORT_SPEED_64GB; + default: + return RDP_PORT_SPEED_UNKNOWN; + } +} + + /** * qla27xx_copy_fpin_pkt() - Copy over fpin packets that can * span over multiple IOCBs. 
@@ -877,13 +867,13 @@ qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt, struct purex_item *item; void *fpin_pkt = NULL; - total_bytes = (le16_to_cpu(purex->frame_size) & 0x0FFF) + total_bytes = le16_to_cpu(purex->frame_size & 0x0FFF) - PURX_ELS_HEADER_SIZE; pending_bytes = total_bytes; entry_count = entry_count_remaining = purex->entry_count; no_bytes = (pending_bytes > sizeof(purex->els_frame_payload)) ? sizeof(purex->els_frame_payload) : pending_bytes; - ql_log(ql_log_info, vha, 0x509a, + ql_dbg(ql_dbg_async, vha, 0x509a, "FPIN ELS, frame_size 0x%x, entry count %d\n", total_bytes, entry_count); @@ -962,6 +952,112 @@ qla27xx_copy_fpin_pkt(struct scsi_qla_host *vha, void **pkt, return item; } +int +qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry) +{ + struct qla_hw_data *ha = vha->hw; + scsi_qla_host_t *vp; + uint32_t vp_did; + unsigned long flags; + int ret = 0; + + if (!ha->num_vhosts) + return ret; + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) { + vp_did = vp->d_id.b24; + if (vp_did == rscn_entry) { + ret = 1; + break; + } + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + + return ret; +} + +fc_port_t * +qla2x00_find_fcport_by_loopid(scsi_qla_host_t *vha, uint16_t loop_id) +{ + fc_port_t *f, *tf; + + f = tf = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) + if (f->loop_id == loop_id) + return f; + return NULL; +} + +fc_port_t * +qla2x00_find_fcport_by_wwpn(scsi_qla_host_t *vha, u8 *wwpn, u8 incl_deleted) +{ + fc_port_t *f, *tf; + + f = tf = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { + if (memcmp(f->port_name, wwpn, WWN_SIZE) == 0) { + if (incl_deleted) + return f; + else if (f->deleted == 0) + return f; + } + } + return NULL; +} + +fc_port_t * +qla2x00_find_fcport_by_nportid(scsi_qla_host_t *vha, port_id_t *id, + u8 incl_deleted) +{ + fc_port_t *f, *tf; + + f = tf = NULL; + list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) { + if (f->d_id.b24 == id->b24) { + if (incl_deleted) + return f; + else if (f->deleted == 0) + return f; + } + } + return NULL; +} + +/* Shall be called only on supported adapters. */ +static void +qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb) +{ + struct qla_hw_data *ha = vha->hw; + bool reset_isp_needed = 0; + + ql_log(ql_log_warn, vha, 0x02f0, + "MPI Heartbeat stop. MPI reset is%s needed. " + "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n", + mb[1] & BIT_8 ? "" : " not", + mb[0], mb[1], mb[2], mb[3]); + + if ((mb[1] & BIT_8) == 0) + return; + + ql_log(ql_log_warn, vha, 0x02f1, + "MPI Heartbeat stop. FW dump needed\n"); + + if (ql2xfulldump_on_mpifail) { + ha->isp_ops->fw_dump(vha, 1); + reset_isp_needed = 1; + } + + ha->isp_ops->mpi_fw_dump(vha, 1); + + if (reset_isp_needed) { + vha->hw->flags.fw_init_done = 0; + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } +} + + /** * qla2x00_async_event() - Process aynchronous events. 
* @vha: SCSI driver HA context @@ -991,7 +1087,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) goto skip_rio; switch (mb[0]) { case MBA_SCSI_COMPLETION: - handles[0] = make_handle(mb[2], mb[1]); + handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); handle_cnt = 1; break; case MBA_CMPLT_1_16BIT: @@ -1030,9 +1126,10 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) mb[0] = MBA_SCSI_COMPLETION; break; case MBA_CMPLT_2_32BIT: - handles[0] = make_handle(mb[2], mb[1]); - handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7), - RD_MAILBOX_REG(ha, reg, 6)); + handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1])); + handles[1] = le32_to_cpu( + ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) | + RD_MAILBOX_REG(ha, reg, 6)); handle_cnt = 2; mb[0] = MBA_SCSI_COMPLETION; break; @@ -1059,17 +1156,20 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) case MBA_SYSTEM_ERR: /* System Error */ mbx = 0; + + vha->hw_err_cnt++; + if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { u16 m[4]; - - m[0] = rd_reg_word(&reg24->mailbox4); - m[1] = rd_reg_word(&reg24->mailbox5); - m[2] = rd_reg_word(&reg24->mailbox6); - mbx = m[3] = rd_reg_word(&reg24->mailbox7); + m[0] = RD_REG_WORD(&reg24->mailbox4); + m[1] = RD_REG_WORD(&reg24->mailbox5); + m[2] = RD_REG_WORD(&reg24->mailbox6); + mbx = m[3] = RD_REG_WORD(&reg24->mailbox7); ql_log(ql_log_warn, vha, 0x5003, - "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n", + "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh " + "mbx4=%xh mbx5=%xh mbx6=%xh mbx7=%xh.\n", mb[1], mb[2], mb[3], m[0], m[1], m[2], m[3]); } else ql_log(ql_log_warn, vha, 0x5003, @@ -1077,9 +1177,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) mb[1], mb[2], mb[3]); if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) && - rd_reg_word(&reg24->mailbox7) & BIT_8) + RD_REG_WORD(&reg24->mailbox7) & BIT_8) ha->isp_ops->mpi_fw_dump(vha, 1); - ha->isp_ops->fw_dump(vha); + ha->isp_ops->fw_dump(vha, 1); ha->flags.fw_init_done = 0; QLA_FW_STOPPED(ha); @@ -1112,6 +1212,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) ql_log(ql_log_warn, vha, 0x5006, "ISP Request Transfer Error (%x).\n", mb[1]); + vha->hw_err_cnt++; + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; @@ -1119,6 +1221,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) ql_log(ql_log_warn, vha, 0x5007, "ISP Response Transfer Error (%x).\n", mb[1]); + vha->hw_err_cnt++; + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; @@ -1167,6 +1271,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) "LOOP UP detected (%s Gbps).\n", qla2x00_get_link_speed_str(ha, ha->link_data_rate)); + /* Reset Virtual Lane to Normal */ + qla_scm_host_clear_vl_state(vha); + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (mb[2] & BIT_0) ql_log(ql_log_info, vha, 0x11a0, @@ -1176,16 +1283,22 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) vha->flags.management_server_logged_in = 0; qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); + if (vha->link_down_time < vha->hw->port_down_retry_count) { + vha->short_link_down_cnt++; + vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME; + } + break; case MBA_LOOP_DOWN: /* Loop Down Event */ SAVE_TOPO(ha); ha->flags.lip_ae = 0; ha->current_topology = 0; + vha->link_down_time = 0; mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) - ? 
rd_reg_word(&reg24->mailbox4) : 0; - mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4]) + ? RD_REG_WORD(&reg24->mailbox4) : 0; + mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx; ql_log(ql_log_info, vha, 0x500b, "LOOP DOWN detected (%x %x %x %x).\n", @@ -1202,9 +1315,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) if (!vha->vp_idx) { if (ha->flags.fawwpn_enabled && (ha->current_topology == ISP_CFG_F)) { - void *wwpn = ha->init_cb->port_name; - - memcpy(vha->port_name, wwpn, WWN_SIZE); + memcpy(vha->port_name, ha->port_name, WWN_SIZE); fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); ql_dbg(ql_dbg_init + ql_dbg_verbose, @@ -1227,6 +1338,17 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) vha->flags.management_server_logged_in = 0; ha->link_data_rate = PORT_SPEED_UNKNOWN; + ha->flags.conn_fabric_cisco_er_rdy = 0; + ha->flags.conn_fabric_brocade = 0; + ha->scm.scm_fabric_connection_flags = 0; + + /* Clear SCM stats and throttling, if SCM is enabled */ + if (vha->hw->flags.scm_enabled) { + qla2xxx_scmr_clear_congn(&ha->sfc); + qla2xxx_scmr_clear_throttle(&ha->sfc); + qla_scm_clear_previous_event(vha); + } + qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0); break; @@ -1351,6 +1473,19 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) if (mb[1] == 0xffff) goto global_port_update; + if (mb[1] == NPH_F_PORT) { + if (vha->vp_idx) { + atomic_set(&vha->vp_state, VP_FAILED); + fc_vport_set_state(vha->fc_vport, + FC_VPORT_FAILED); + } + /* F-Port LOGO. Logout from all devices. */ + qla2x00_mark_all_devices_lost(vha); + vha->flags.management_server_logged_in = 0; + + break; + } + if (mb[1] == NPH_SNS_LID(ha)) { set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); @@ -1416,6 +1551,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) ql_dbg(ql_dbg_async, vha, 0x5011, "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", mb[1], mb[2], mb[3]); + + qlt_async_event(mb[0], vha, mb); break; } @@ -1432,6 +1569,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(VP_CONFIG_OK, &vha->vp_flags); + + qlt_async_event(mb[0], vha, mb); break; case MBA_RSCN_UPDATE: /* State Change Registration */ @@ -1442,7 +1581,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff)) break; - ql_dbg(ql_dbg_async, vha, 0x5013, + ql_log(ql_log_warn, vha, 0x5013, "RSCN database changed -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); @@ -1475,18 +1614,26 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); } break; - case MBA_CONGN_NOTI_RECV: + case MBA_CONGESTION_NOTIFICATION_RECV: if (!ha->flags.scm_enabled || mb[1] != QLA_CON_PRIMITIVE_RECEIVED) break; - if (mb[2] == QLA_CONGESTION_ARB_WARNING) { - ql_dbg(ql_dbg_async, vha, 0x509b, - "Congestion Warning %04x %04x.\n", mb[1], mb[2]); + ha->scm.sev.cn_warning++; + ha->sig_sev.cn_warning_sig++; + atomic_inc(&ha->sfc.num_sig_warning); + ha->scm.congestion.severity = + SCM_CONGESTION_SEVERITY_WARNING; } else if (mb[2] == QLA_CONGESTION_ARB_ALARM) { - ql_log(ql_log_warn, vha, 0x509b, - "Congestion Alarm %04x %04x.\n", mb[1], mb[2]); + ha->scm.sev.cn_alarm++; + ha->sig_sev.cn_alarm_sig++; + 
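/* An alarm primitive bumps both the per-severity SCM counters and the sfc signal counters; the latter presumably feed the congestion-throttling state. */ +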
atomic_inc(&ha->sfc.num_sig_alarm); + ha->scm.congestion.severity = + SCM_CONGESTION_SEVERITY_ERROR; } + ha->sfc.event_period = 1; + ha->sfc.throttle_period = 1; + ha->sfc.event_period_buffer = 0; break; /* case MBA_RIO_RESPONSE: */ case MBA_ZIO_RESPONSE: @@ -1503,6 +1650,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) ql_dbg(ql_dbg_async, vha, 0x5016, "Discard RND Frame -- %04x %04x %04x.\n", mb[1], mb[2], mb[3]); + vha->interface_err_cnt++; break; case MBA_TRACE_NOTIFICATION: @@ -1565,7 +1713,7 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) break; case MBA_IDC_NOTIFY: if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) { - mb[4] = rd_reg_word(&reg24->mailbox4); + mb[4] = RD_REG_WORD(&reg24->mailbox4); if (((mb[2] & 0x7fff) == MBC_PORT_RESET || (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) && (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) { @@ -1592,12 +1740,13 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) case MBA_IDC_AEN: if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + vha->hw_err_cnt++; qla27xx_handle_8200_aen(vha, mb); } else if (IS_QLA83XX(ha)) { - mb[4] = rd_reg_word(&reg24->mailbox4); - mb[5] = rd_reg_word(&reg24->mailbox5); - mb[6] = rd_reg_word(&reg24->mailbox6); - mb[7] = rd_reg_word(&reg24->mailbox7); + mb[4] = RD_REG_WORD(&reg24->mailbox4); + mb[5] = RD_REG_WORD(&reg24->mailbox5); + mb[6] = RD_REG_WORD(&reg24->mailbox6); + mb[7] = RD_REG_WORD(&reg24->mailbox7); qla83xx_handle_8200_aen(vha, mb); } else { ql_dbg(ql_dbg_async, vha, 0x5052, @@ -1607,6 +1756,9 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) break; case MBA_DPORT_DIAGNOSTICS: + if ((mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_NOERR || + (mb[1] & 0xF) == AEN_DONE_DIAG_TEST_WITH_ERR) + vha->dport_status &= ~DPORT_DIAG_IN_PROGRESS; ql_dbg(ql_dbg_async, vha, 0x5052, "D-Port Diagnostics: %04x %04x %04x %04x\n", mb[0], mb[1], mb[2], mb[3]); @@ -1641,8 +1793,8 @@ qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) case MBA_TEMPERATURE_ALERT: ql_dbg(ql_dbg_async, vha, 0x505e, "TEMPERATURE ALERT: %04x %04x %04x\n", mb[1], mb[2], mb[3]); - if (mb[1] == 0x12) - schedule_work(&ha->board_disable); + //if (mb[1] == 0x12) + //schedule_work(&ha->board_disable); break; case MBA_TRANS_INSERT: @@ -1715,35 +1867,45 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func, { struct qla_hw_data *ha = vha->hw; sts_entry_t *pkt = iocb; - srb_t *sp; + srb_t *sp = NULL; uint16_t index; + if (pkt->handle == QLA_SKIP_HANDLE) + goto done; + index = LSW(pkt->handle); if (index >= req->num_outstanding_cmds) { ql_log(ql_log_warn, vha, 0x5031, - "%s: Invalid command index (%x) type %8ph.\n", - func, index, iocb); + "Invalid command index (%x) type %8ph.\n", + index, iocb); + + if (is_debug(QDBG_FW_DUMP)) + ha->isp_ops->fw_dump(vha, 1); + + BUG_ON(is_debug(QDBG_CRASH_ON_ERR)); + if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); else set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); - return NULL; + goto done; } sp = req->outstanding_cmds[index]; if (!sp) { ql_log(ql_log_warn, vha, 0x5032, - "%s: Invalid completion handle (%x) -- timed-out.\n", - func, index); - return NULL; + "Invalid completion handle (%x) -- timed-out.\n", index); + return sp; } if (sp->handle != index) { ql_log(ql_log_warn, vha, 0x5033, - "%s: SRB handle (%x) mismatch %x.\n", func, - sp->handle, index); + "SRB handle (%x) mismatch %x.\n", sp->handle, index); return NULL; } req->outstanding_cmds[index] = NULL; + + qla_put_fw_resources(sp->qpair, &sp->iores); +done: 
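+ /* Reached with sp == NULL for QLA_SKIP_HANDLE or a bad index; otherwise the srb's firmware resources have already been returned to the qpair. */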
return sp; } @@ -1838,7 +2000,6 @@ qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, struct mbx_24xx_entry *pkt) { const char func[] = "MBX-IOCB2"; - struct qla_hw_data *ha = vha->hw; srb_t *sp; struct srb_iocb *si; u16 sz, i; @@ -1848,23 +2009,11 @@ qla24xx_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, if (!sp) return; - if (sp->type == SRB_SCSI_CMD || - sp->type == SRB_NVME_CMD || - sp->type == SRB_TM_CMD) { - ql_log(ql_log_warn, vha, 0x509d, - "Inconsistent event entry type %d\n", sp->type); - if (IS_P3P_TYPE(ha)) - set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); - else - set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); - return; - } - si = &sp->u.iocb_cmd; sz = min(ARRAY_SIZE(pkt->mb), ARRAY_SIZE(sp->u.iocb_cmd.u.mbx.in_mb)); for (i = 0; i < sz; i++) - si->u.mbx.in_mb[i] = pkt->mb[i]; + si->u.mbx.in_mb[i] = le16_to_cpu(pkt->mb[i]); res = (si->u.mbx.in_mb[0] & MBS_MASK); @@ -1896,7 +2045,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, const char func[] = "CT_IOCB"; const char *type; srb_t *sp; - struct bsg_job *bsg_job; + bsg_job_t *bsg_job; struct fc_bsg_reply *bsg_reply; uint16_t comp_status; int res = 0; @@ -1962,29 +2111,73 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, } static void -qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, +qla24xx_els_ct_entry(scsi_qla_host_t *v, struct req_que *req, struct sts_entry_24xx *pkt, int iocb_type) { - struct els_sts_entry_24xx *ese = (struct els_sts_entry_24xx *)pkt; const char func[] = "ELS_CT_IOCB"; const char *type; srb_t *sp; - struct bsg_job *bsg_job; + bsg_job_t *bsg_job; struct fc_bsg_reply *bsg_reply; uint16_t comp_status; uint32_t fw_status[3]; - int res; + int res, logit=1; struct srb_iocb *els; + uint n; + scsi_qla_host_t *vha; + struct els_sts_entry_24xx *e = (struct els_sts_entry_24xx*)pkt; - sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + sp = qla2x00_get_sp_from_handle(v, func, req, pkt); if (!sp) return; + bsg_job = sp->u.bsg_job; + vha = sp->vha; type = NULL; + + comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); + fw_status[1] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1); + fw_status[2] = le32_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2); + switch (sp->type) { case SRB_ELS_CMD_RPT: case SRB_ELS_CMD_HST: + type = "rpt hst"; + break; + case SRB_ELS_EDC: + case SRB_ELS_RDF: + type = "scm els"; + logit = 0; + break; + case SRB_ELS_CMD_HST_NOLOGIN: type = "els"; + { + struct els_entry_24xx *els = (void *)pkt; + struct qla_bsg_auth_els_request *p = + (struct qla_bsg_auth_els_request *)bsg_job->request; + + ql_dbg(ql_dbg_user, vha, 0x700f, + "%s %s complete portid=%02x%02x%02x status %x xchg %x bsg ptr %px\n", + __func__, sc_to_str(p->e.sub_cmd), + e->d_id[2],e->d_id[1],e->d_id[0], + comp_status, p->e.extra_rx_xchg_address, bsg_job); + + if (! 
(le16_to_cpu(els->control_flags) & ECF_PAYLOAD_DESCR_MASK)) { + if (sp->remap.remapped) { + n = sg_copy_from_buffer( + bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + sp->remap.rsp.buf, sp->remap.rsp.len); + ql_dbg(ql_dbg_user+ql_dbg_verbose, vha, 0x700e, + "%s: SG copied %x of %x\n", + __func__, n, sp->remap.rsp.len); + } else { + ql_dbg(ql_dbg_user, vha, 0x700f, + "%s: NOT REMAPPED (error)...!!!\n", + __func__); + } + } + } break; case SRB_CT_CMD: type = "ct pass-through"; @@ -1993,7 +2186,7 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, type = "Driver ELS logo"; if (iocb_type != ELS_IOCB_TYPE) { ql_dbg(ql_dbg_user, vha, 0x5047, - "Completing %s: (%p) type=%d.\n", + "Completing %s: (%px) type=%d.\n", type, sp, sp->type); sp->done(sp, 0); return; @@ -2010,36 +2203,80 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, return; default: ql_dbg(ql_dbg_user, vha, 0x503e, - "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type); + "Unrecognized SRB: (%px) type=%d.\n", sp, sp->type); return; } - comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status); - fw_status[1] = le32_to_cpu(ese->error_subcode_1); - fw_status[2] = le32_to_cpu(ese->error_subcode_2); - if (iocb_type == ELS_IOCB_TYPE) { els = &sp->u.iocb_cmd; - els->u.els_plogi.fw_status[0] = cpu_to_le32(fw_status[0]); - els->u.els_plogi.fw_status[1] = cpu_to_le32(fw_status[1]); - els->u.els_plogi.fw_status[2] = cpu_to_le32(fw_status[2]); - els->u.els_plogi.comp_status = cpu_to_le16(fw_status[0]); + els->u.els_plogi.fw_status[0] = fw_status[0]; + els->u.els_plogi.fw_status[1] = fw_status[1]; + els->u.els_plogi.fw_status[2] = fw_status[2]; + els->u.els_plogi.comp_status = fw_status[0]; if (comp_status == CS_COMPLETE) { res = DID_OK << 16; } else { if (comp_status == CS_DATA_UNDERRUN) { res = DID_OK << 16; - els->u.els_plogi.len = cpu_to_le16(le32_to_cpu( - ese->total_byte_count)); + els->u.els_plogi.len = + le16_to_cpu(((struct els_sts_entry_24xx *) + pkt)->total_byte_count); + + if (sp->remap.remapped && + ((u8*)sp->remap.rsp.buf)[0] == ELS_LS_ACC) { + ql_dbg(ql_dbg_user, vha, 0x503f, + "%s IOCB Done LS_ACC %02x%02x%02x -> %02x%02x%02x", + __func__, e->s_id[0], e->s_id[2],e->s_id[1], + e->d_id[2], e->d_id[1], e->d_id[0]); + logit = 0; + } + + } else if (comp_status == CS_PORT_LOGGED_OUT) { + ql_dbg(ql_dbg_disc, vha, 0x911e, + "%s %d sche delete\n", __func__, __LINE__); + + els->u.els_plogi.len = 0; + res = DID_IMM_RETRY << 16; + qlt_schedule_sess_for_deletion(sp->fcport); } else { els->u.els_plogi.len = 0; res = DID_ERROR << 16; } + + if (sp->remap.remapped && + ((u8 *)sp->remap.rsp.buf)[0] == ELS_LS_RJT) { + if (logit) { + ql_dbg(ql_dbg_user, vha, 0x503f, + "%s IOCB Done LS_RJT hdl=%x comp_status=0x%x\n", + type, sp->handle, comp_status); + + ql_dbg(ql_dbg_user, vha, 0x503f, + "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n", + fw_status[1], fw_status[2], + le32_to_cpu(((struct els_sts_entry_24xx *) + pkt)->total_byte_count), + e->s_id[0], e->s_id[2], e->s_id[1], + e->d_id[2], e->d_id[1], e->d_id[0]); + } + if (sp->fcport && sp->fcport->flags & FCF_FCSP_DEVICE && + sp->type == SRB_ELS_CMD_HST_NOLOGIN) { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s rcv reject. 
Sched delete\n", __func__); + qlt_schedule_sess_for_deletion(sp->fcport); + } + } else if (logit) { + ql_log(ql_log_info, vha, 0x503f, + "%s IOCB Done hdl=%x comp_status=0x%x\n", + type, sp->handle, comp_status); + ql_log(ql_log_info, vha, 0x503f, + "subcode 1=0x%x subcode 2=0x%x bytes=0x%x %02x%02x%02x -> %02x%02x%02x\n", + fw_status[1], fw_status[2], + le32_to_cpu(((struct els_sts_entry_24xx *) + pkt)->total_byte_count), + e->s_id[0], e->s_id[2], e->s_id[1], + e->d_id[2], e->d_id[1], e->d_id[0]); + } } - ql_dbg(ql_dbg_disc, vha, 0x503f, - "ELS IOCB Done -%s hdl=%x comp_status=0x%x error subcode 1=0x%x error subcode 2=0x%x total_byte=0x%x\n", - type, sp->handle, comp_status, fw_status[1], fw_status[2], - le32_to_cpu(ese->total_byte_count)); goto els_ct_done; } @@ -2055,20 +2292,23 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req, if (comp_status == CS_DATA_UNDERRUN) { res = DID_OK << 16; bsg_reply->reply_payload_rcv_len = - le32_to_cpu(ese->total_byte_count); + le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count); ql_dbg(ql_dbg_user, vha, 0x503f, "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n", type, sp->handle, comp_status, fw_status[1], fw_status[2], - le32_to_cpu(ese->total_byte_count)); + le16_to_cpu(((struct els_sts_entry_24xx *) + pkt)->total_byte_count)); } else { ql_dbg(ql_dbg_user, vha, 0x5040, "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x " "error subcode 1=0x%x error subcode 2=0x%x.\n", type, sp->handle, comp_status, - le32_to_cpu(ese->error_subcode_1), - le32_to_cpu(ese->error_subcode_2)); + le16_to_cpu(((struct els_sts_entry_24xx *) + pkt)->error_subcode_1), + le16_to_cpu(((struct els_sts_entry_24xx *) + pkt)->error_subcode_2)); res = DID_ERROR << 16; bsg_reply->reply_payload_rcv_len = 0; } @@ -2098,6 +2338,7 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, struct srb_iocb *lio; uint16_t *data; uint32_t iop[2]; + int logit = 1; sp = qla2x00_get_sp_from_handle(vha, func, req, logio); if (!sp) @@ -2125,8 +2366,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, } if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { - ql_dbg(ql_dbg_async, sp->vha, 0x5036, - "Async-%s complete: handle=%x pid=%06x wwpn=%8phC iop0=%x\n", + ql_dbg(ql_dbg_async, sp->vha, 0x5036, "Async-%s complete: " + "handle=%x pid=%06x wwpn=%8phC iop0=%x\n", type, sp->handle, fcport->d_id.b24, fcport->port_name, le32_to_cpu(logio->io_parameter[0])); @@ -2144,6 +2385,13 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, if (sp->type != SRB_LOGIN_CMD) goto logio_done; + ql_dump_buffer(ql_dbg_edif + ql_dbg_verbose, sp->vha, 0x5055, + logio, 64); + + lio->u.logio.iop[1] = le32_to_cpu(logio->io_parameter[5]); + if (le32_to_cpu(logio->io_parameter[5]) & LIO_COMM_FEAT_FCSP) + fcport->flags |= FCF_FCSP_DEVICE; + iop[0] = le32_to_cpu(logio->io_parameter[0]); if (iop[0] & BIT_4) { fcport->port_type = FCT_TARGET; @@ -2171,9 +2419,11 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, case LSC_SCODE_PORTID_USED: data[0] = MBS_PORT_ID_USED; data[1] = LSW(iop[1]); + logit = 0; break; case LSC_SCODE_NPORT_USED: data[0] = MBS_LOOP_ID_USED; + logit = 0; break; case LSC_SCODE_CMD_FAILED: if (iop[1] == 0x0606) { @@ -2206,12 +2456,20 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, break; } - ql_dbg(ql_dbg_async, sp->vha, 0x5037, - "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n", - type, sp->handle, 
fcport->d_id.b24, fcport->port_name, - le16_to_cpu(logio->comp_status), - le32_to_cpu(logio->io_parameter[0]), - le32_to_cpu(logio->io_parameter[1])); + if (logit) + ql_log(ql_log_warn, sp->vha, 0x5037, "Async-%s failed: " + "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n", + type, sp->handle, fcport->d_id.b24, fcport->port_name, + le16_to_cpu(logio->comp_status), + le32_to_cpu(logio->io_parameter[0]), + le32_to_cpu(logio->io_parameter[1])); + else + ql_dbg(ql_dbg_disc, sp->vha, 0x5037, "Async-%s failed: " + "handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n", + type, sp->handle, fcport->d_id.b24, fcport->port_name, + le16_to_cpu(logio->comp_status), + le32_to_cpu(logio->io_parameter[0]), + le32_to_cpu(logio->io_parameter[1])); logio_done: sp->done(sp, 0); @@ -2226,11 +2484,13 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) srb_t *sp; struct srb_iocb *iocb; struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk; + u16 comp_status; sp = qla2x00_get_sp_from_handle(vha, func, req, tsk); if (!sp) return; + comp_status = le16_to_cpu(sts->comp_status); iocb = &sp->u.iocb_cmd; type = sp->name; fcport = sp->fcport; @@ -2244,11 +2504,12 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) } else if (sts->comp_status != cpu_to_le16(CS_COMPLETE)) { ql_log(ql_log_warn, fcport->vha, 0x5039, "Async-%s error - hdl=%x completion status(%x).\n", - type, sp->handle, sts->comp_status); + type, sp->handle, comp_status); iocb->u.tmf.data = QLA_FUNCTION_FAILED; } else if ((le16_to_cpu(sts->scsi_status) & SS_RESPONSE_INFO_LEN_VALID)) { host_to_fcp_swap(sts->data, sizeof(sts->data)); + if (le32_to_cpu(sts->rsp_data_len) < 4) { ql_log(ql_log_warn, fcport->vha, 0x503b, "Async-%s error - hdl=%x not enough response(%d).\n", @@ -2261,6 +2522,30 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk) } } + switch (comp_status) { + case CS_PORT_LOGGED_OUT: + case CS_PORT_CONFIG_CHG: + case CS_PORT_BUSY: + case CS_INCOMPLETE: + case CS_PORT_UNAVAILABLE: + case CS_TIMEOUT: + case CS_RESET: + if (atomic_read(&fcport->state) == FCS_ONLINE) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, + "-Port to be marked lost on fcport=%02x%02x%02x, current " + "port state= %s comp_status %x.\n", fcport->d_id.b.domain, + fcport->d_id.b.area, fcport->d_id.b.al_pa, + port_state_str[FCS_ONLINE], + comp_status); + + qlt_schedule_sess_for_deletion(fcport); + } + break; + + default: + break; + } + if (iocb->u.tmf.data != QLA_SUCCESS) ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, sp->vha, 0x5055, sts, sizeof(*sts)); @@ -2277,7 +2562,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, uint16_t state_flags; struct nvmefc_fcp_req *fd; uint16_t ret = QLA_SUCCESS; - __le16 comp_status = sts->comp_status; + uint16_t comp_status = le16_to_cpu(sts->comp_status); int logit = 0; iocb = &sp->u.iocb_cmd; @@ -2286,8 +2571,11 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, state_flags = le16_to_cpu(sts->state_flags); fd = iocb->u.nvme.desc; + if (unlikely(iocb->u.nvme.aen_op)) atomic_dec(&sp->vha->hw->nvme_active_aen_cnt); + else + sp->qpair->cmd_completion_cnt++; if (unlikely(comp_status != CS_COMPLETE)) logit = 1; @@ -2308,7 +2596,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, } else if ((state_flags & (SF_FCP_RSP_DMA | SF_NVME_ERSP)) == (SF_FCP_RSP_DMA | SF_NVME_ERSP)) { /* Response already DMA'd to fd->rspaddr. 
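Only the response payload length needs to be picked out of the status IOCB here.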
*/ - iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; + iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len); } else if ((state_flags & SF_FCP_RSP_DMA)) { /* * Non-zero value in first 12 bytes of NVMe_RSP IU, treat this @@ -2325,20 +2613,23 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, inbuf = (uint32_t *)&sts->nvme_ersp_data; outbuf = (uint32_t *)fd->rspaddr; - iocb->u.nvme.rsp_pyld_len = sts->nvme_rsp_pyld_len; - if (unlikely(le16_to_cpu(iocb->u.nvme.rsp_pyld_len) > - sizeof(struct nvme_fc_ersp_iu))) { + iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len); + if (unlikely(iocb->u.nvme.rsp_pyld_len > + sizeof(struct nvme_fc_ersp_iu))) { if (ql_mask_match(ql_dbg_io)) { - WARN_ONCE(1, "Unexpected response payload length %u.\n", - iocb->u.nvme.rsp_pyld_len); + WARN_ONCE(1, "%8phC: Unexpected response payload length %u.\n", + fcport->port_name, + iocb->u.nvme.rsp_pyld_len); ql_log(ql_log_warn, fcport->vha, 0x5100, - "Unexpected response payload length %u.\n", - iocb->u.nvme.rsp_pyld_len); + "%8phC: Unexpected response payload length %u.\n", + fcport->port_name, + iocb->u.nvme.rsp_pyld_len); + logit = 1; } iocb->u.nvme.rsp_pyld_len = - cpu_to_le16(sizeof(struct nvme_fc_ersp_iu)); + sizeof(struct nvme_fc_ersp_iu); } - iter = le16_to_cpu(iocb->u.nvme.rsp_pyld_len) >> 2; + iter = iocb->u.nvme.rsp_pyld_len >> 2; for (; iter; iter--) *outbuf++ = swab32(*inbuf++); } @@ -2349,11 +2640,11 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len); if (fd->transferred_length != tgt_xfer_len) { - ql_dbg(ql_dbg_io, fcport->vha, 0x3079, + ql_log(ql_log_warn, fcport->vha, 0x3079, "Dropped frame(s) detected (sent/rcvd=%u/%u).\n", tgt_xfer_len, fd->transferred_length); logit = 1; - } else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) { + } else if (comp_status == CS_DATA_UNDERRUN) { /* * Do not log if this is just an underflow and there * is no data loss. @@ -2363,7 +2654,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, } if (unlikely(logit)) - ql_log(ql_log_warn, fcport->vha, 0x5060, + ql_dbg(ql_dbg_io, fcport->vha, 0x5060, "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n", sp->name, sp->handle, comp_status, fd->transferred_length, le32_to_cpu(sts->residual_len), @@ -2373,7 +2664,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, * If transport error then Failure (HBA rejects request) * otherwise transport will handle. 
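* Port-loss statuses below also schedule the session for deletion so the rport state catches up.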
*/ - switch (le16_to_cpu(comp_status)) { + switch (comp_status) { case CS_COMPLETE: break; @@ -2381,6 +2672,15 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, case CS_PORT_UNAVAILABLE: case CS_PORT_LOGGED_OUT: fcport->nvme_flag |= NVME_FLAG_RESETTING; + if (atomic_read(&fcport->state) == FCS_ONLINE) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x3021, + "Port to be marked lost on fcport=%06x, current " + "port state= %s comp_status %x.\n", fcport->d_id.b24, + port_state_str[FCS_ONLINE], + comp_status); + + qlt_schedule_sess_for_deletion(fcport); + } fallthrough; case CS_ABORTED: case CS_PORT_BUSY: @@ -2515,7 +2815,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp) } /* Adjust ring index */ - wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); + WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index); } static inline void @@ -2549,8 +2849,8 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, if (sense_len) { ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c, - "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", - sp->vha->host_no, cp->device->id, cp->device->lun, + "Check condition Sense data, nexus%ld:%d:%llu cmd=%px.\n", + sp->vha->host_no, cp->device->id, lun_cast(cp->device->lun), cp); ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b, cp->sense_buffer, sense_len); @@ -2584,15 +2884,15 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) * swab32 of the "data" field in the beginning of qla2x00_status_entry() * would make guard field appear at offset 2 */ - a_guard = get_unaligned_le16(ap + 2); - a_app_tag = get_unaligned_le16(ap + 0); - a_ref_tag = get_unaligned_le32(ap + 4); - e_guard = get_unaligned_le16(ep + 2); - e_app_tag = get_unaligned_le16(ep + 0); - e_ref_tag = get_unaligned_le32(ep + 4); + a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); + a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); + a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); + e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); + e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); + e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); ql_dbg(ql_dbg_io, vha, 0x3023, - "iocb(s) %p Returned STATUS.\n", sts24); + "iocb(s) %px Returned STATUS.\n", sts24); ql_dbg(ql_dbg_io, vha, 0x3024, "DIF ERROR in cmd 0x%x lba 0x%llx act ref" @@ -2606,9 +2906,9 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) * For type 3: ref & app tag is all 'f's * For type 0,1,2: app tag is all 'f's */ - if (a_app_tag == be16_to_cpu(T10_PI_APP_ESCAPE) && - (scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3 || - a_ref_tag == be32_to_cpu(T10_PI_REF_ESCAPE))) { + if ((a_app_tag == QL_T10_PI_APP_ESCAPE) && + ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || + (a_ref_tag == QL_T10_PI_REF_ESCAPE))) { uint32_t blocks_done, resid; sector_t lba_s = scsi_get_lba(cmd); @@ -2625,7 +2925,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) if (scsi_prot_sg_count(cmd)) { uint32_t i, j = 0, k = 0, num_ent; struct scatterlist *sg; - struct t10_pi_tuple *spt; + QL_T10_PI_TUPLE *spt; /* Patch the corresponding protection tags */ scsi_for_each_prot_sg(cmd, sg, @@ -2650,9 +2950,9 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) spt = page_address(sg_page(sg)) + sg->offset; spt += j; - spt->app_tag = T10_PI_APP_ESCAPE; + spt->app_tag = QL_T10_PI_APP_ESCAPE; if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) - spt->ref_tag = T10_PI_REF_ESCAPE; + spt->ref_tag = QL_T10_PI_REF_ESCAPE; } return 0; @@ -2687,7 +2987,6 @@ qla2x00_handle_dif_error(srb_t 
*sp, struct sts_entry_24xx *sts24) cmd->result |= SAM_STAT_CHECK_CONDITION; return 1; } - return 1; } @@ -2701,7 +3000,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, uint16_t scsi_status; uint16_t thread_id; uint32_t rval = EXT_STATUS_OK; - struct bsg_job *bsg_job = NULL; + bsg_job_t *bsg_job = NULL; struct fc_bsg_request *bsg_request; struct fc_bsg_reply *bsg_reply; sts_entry_t *sts = pkt; @@ -2868,7 +3167,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) int logit = 1; int res = 0; uint16_t state_flags = 0; - uint16_t sts_qual = 0; + uint16_t retry_delay = 0; if (IS_FWI2_CAPABLE(ha)) { comp_status = le16_to_cpu(sts24->comp_status); @@ -2886,8 +3185,14 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) if (req == NULL || que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) { ql_dbg(ql_dbg_io, vha, 0x3059, - "Invalid status handle (0x%x): Bad req pointer. req=%p, " + "Invalid status handle (0x%x): Bad req pointer. req=%px, " "que=%u.\n", sts->handle, req, que); + if (is_debug(QDBG_FW_DUMP)) { + ha->isp_ops->fw_dump(vha, 1); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + } + + BUG_ON(is_debug(QDBG_CRASH_ON_ERR)); return; } @@ -2905,6 +3210,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) "Invalid status handle, out of range (0x%x).\n", sts->handle); + if (is_debug(QDBG_FW_DUMP)) + ha->isp_ops->fw_dump(vha, 1); + + BUG_ON(is_debug(QDBG_CRASH_ON_ERR)); + if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { if (IS_P3P_TYPE(ha)) set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags); @@ -2914,12 +3224,26 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) } return; } - qla_put_iocbs(sp->qpair, &sp->iores); + + ql_srb_trace_ext(ql_dbg_io, vha, sp->fcport, + "sp=%px handle=0x%x type=%d done=%ps", + sp, sp->handle, sp->type, sp->done); + +#ifdef QLA2XXX_LATENCY_MEASURE + if (sp->type == SRB_SCSI_CMD || sp->type == SRB_NVME_CMD) + ktime_get_real_ts64(&sp->cmd_from_rsp_q); +#endif + qla_put_fw_resources(sp->qpair, &sp->iores); + + if (sp->abort) + sp->aborted = 1; + else + sp->completed = 1; if (sp->cmd_type != TYPE_SRB) { req->outstanding_cmds[handle] = NULL; ql_dbg(ql_dbg_io, vha, 0x3015, - "Unknown sp->cmd_type %x %p).\n", + "Unknown sp->cmd_type %x %px).\n", sp->cmd_type, sp); return; } @@ -2943,6 +3267,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) } /* Fast path completion. 
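CS_COMPLETE with a zero scsi_status bypasses the full status parse below.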
*/ + qla_chk_edif_rx_sa_delete_pending(vha, sp, sts24); + sp->qpair->cmd_completion_cnt++; + if (comp_status == CS_COMPLETE && scsi_status == 0) { qla2x00_process_completed_request(vha, req, handle); @@ -2953,7 +3280,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) cp = GET_CMD_SP(sp); if (cp == NULL) { ql_dbg(ql_dbg_io, vha, 0x3018, - "Command already returned (0x%x/%p).\n", + "Command already returned (0x%x/%px).\n", sts->handle, sp); return; @@ -2980,7 +3307,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) host_to_fcp_swap(sts24->data, sizeof(sts24->data)); ox_id = le16_to_cpu(sts24->ox_id); par_sense_len = sizeof(sts24->data); - sts_qual = le16_to_cpu(sts24->status_qualifier); + /* Valid values of the retry delay timer are 0x1-0xffef */ + if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) { + retry_delay = sts24->retry_delay & 0x3fff; + ql_dbg(ql_dbg_io, sp->vha, 0x3033, + "%s: scope=%#x retry_delay=%#x\n", __func__, + sts24->retry_delay >> 14, retry_delay); + } } else { if (scsi_status & SS_SENSE_LEN_VALID) sense_len = le16_to_cpu(sts->req_sense_length); @@ -3018,9 +3351,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) * Check retry_delay_timer value if we receive a busy or * queue full. */ - if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL || - lscsi_status == SAM_STAT_BUSY)) - qla2x00_set_retry_delay_timestamp(fcport, sts_qual); + if (lscsi_status == SAM_STAT_TASK_SET_FULL || + lscsi_status == SAM_STAT_BUSY) + qla2x00_set_retry_delay_timestamp(fcport, retry_delay); /* * Based on Host and scsi status generate status code for Linux @@ -3072,10 +3405,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) scsi_set_resid(cp, resid); if (scsi_status & SS_RESIDUAL_UNDER) { if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) { - ql_dbg(ql_dbg_io, fcport->vha, 0x301d, + ql_log(ql_log_warn, fcport->vha, 0x301d, "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", resid, scsi_bufflen(cp)); + vha->interface_err_cnt++; + res = DID_ERROR << 16 | lscsi_status; goto check_scsi_status; } @@ -3097,10 +3432,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) * task not completed. 
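* Any other status without SS_RESIDUAL_UNDER set means frames were dropped, so the command is failed with DID_ERROR.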
*/ - ql_dbg(ql_dbg_io, fcport->vha, 0x301f, + ql_log(ql_log_warn, fcport->vha, 0x301f, "Dropped frame(s) detected (0x%x of 0x%x bytes).\n", resid, scsi_bufflen(cp)); + vha->interface_err_cnt++; + res = DID_ERROR << 16 | lscsi_status; goto check_scsi_status; } else { @@ -3143,6 +3480,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) case CS_PORT_UNAVAILABLE: case CS_TIMEOUT: case CS_RESET: + case CS_EDIF_INV_REQ: /* * We are going to have the fc class block the rport @@ -3183,6 +3521,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) case CS_TRANSPORT: res = DID_ERROR << 16; + vha->hw_err_cnt++; if (!IS_PI_SPLIT_DET_CAPABLE(ha)) break; @@ -3195,14 +3534,15 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) case CS_DMA: ql_log(ql_log_info, fcport->vha, 0x3022, - "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", + "CS_DMA error: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%06x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%px cp=%px.\n", comp_status, scsi_status, res, vha->host_no, - cp->device->id, cp->device->lun, fcport->d_id.b24, + cp->device->id, lun_cast(cp->device->lun), fcport->d_id.b24, ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len, resid_len, fw_resid_len, sp, cp); ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee, pkt, sizeof(*sts24)); res = DID_ERROR << 16; + vha->hw_err_cnt++; break; default: res = DID_ERROR << 16; @@ -3214,9 +3554,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) ql_dbg(ql_dbg_io, fcport->vha, 0x3022, "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu " "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x " - "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n", + "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%px cp=%px.\n", comp_status, scsi_status, res, vha->host_no, - cp->device->id, cp->device->lun, fcport->d_id.b.domain, + cp->device->id, lun_cast(cp->device->lun), fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id, cp->cmnd, scsi_bufflen(cp), rsp_info_len, resid_len, fw_resid_len, sp, cp); @@ -3243,8 +3583,9 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) uint32_t sense_len; uint8_t *sense_ptr; - if (!sp || !GET_CMD_SENSE_LEN(sp)) + if (!sp || !GET_CMD_SENSE_LEN(sp)) { return; + } sense_len = GET_CMD_SENSE_LEN(sp); sense_ptr = GET_CMD_SENSE_PTR(sp); @@ -3252,7 +3593,7 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) cp = GET_CMD_SP(sp); if (cp == NULL) { ql_log(ql_log_warn, vha, 0x3025, - "cmd is NULL: already returned to OS (sp=%p).\n", sp); + "cmd is NULL: already returned to OS (sp=%px).\n", sp); rsp->status_srb = NULL; return; @@ -3283,6 +3624,161 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) } } +/* Debug code - will be removed */ +static void dump_flogi_acc_payld(scsi_qla_host_t *vha, struct qla_hw_data *ha) +{ + int i; + + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x502a, + "page_code = 0x%x\n", ha->flogi_acc.page_code); + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x502a, + "page_len = 0x%x\n", le16_to_cpu(ha->flogi_acc.page_len)); + + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x502a, + "vendor :%s\n", ha->flogi_acc.vendor_code); + + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x502a, + "er_rdy_desc_len = 0x%x\n", le16_to_cpu(ha->flogi_acc.er_rdy_desc_len)); + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x502a, + 
"er_rdy_desc_tag = 0x%x\n", le16_to_cpu(ha->flogi_acc.er_rdy_desc_tag)); + + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x502a, + "rx_vl_desc_len = 0x%x\n", le16_to_cpu(ha->flogi_acc.rx_vl_desc_len)); + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x502a, + "rx_vl_desc_tag = 0x%x\n", le16_to_cpu(ha->flogi_acc.rx_vl_desc_tag)); + + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x502a, + "num_rx_vl = 0x%x\n", le16_to_cpu(ha->flogi_acc.num_rx_vl)); + + for (i = 0; i < 7; i++ ) { + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x502a, + "Pr_H:0x%x, Pr_L:0x%x, N_C:0x%x \n", ha->flogi_acc.rx_vl[i].prio_hi, + ha->flogi_acc.rx_vl[i].prio_lo, le16_to_cpu(ha->flogi_acc.rx_vl[i].num_credits)); + } + + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x502a, + "tx_vl_desc_len = 0x%x\n", le16_to_cpu(ha->flogi_acc.tx_vl_desc_len)); + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x502a, + "tx_vl_desc_tag = 0x%x\n", le16_to_cpu(ha->flogi_acc.tx_vl_desc_tag)); + + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x502a, + "num_tx_vl = 0x%x\n", le16_to_cpu(ha->flogi_acc.num_tx_vl)); + + for (i = 0; i < 7; i++ ) { + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x502a, + "Pr_H:0x%x, Pr_L:0x%x, N_C:0x%x \n", ha->flogi_acc.tx_vl[i].prio_hi, + ha->flogi_acc.tx_vl[i].prio_lo, le16_to_cpu(ha->flogi_acc.tx_vl[i].num_credits)); + } +} +/** + * qla27xx_status_cont_type_1() - Process a Status Continuation type 1 entry. + * @ha: SCSI driver HA context + * @pkt: Entry pointer + * + */ +static void +qla27xx_status_cont_type_1(scsi_qla_host_t *vha, sts_cont_entry_t *pkt) +{ + uint8_t page_code, vendor_id_data[8]; + uint16_t page_length; + uint16_t er_rdy = 0; + uint16_t num_rxvl = 0; + uint16_t num_txvl = 0; + struct qla_hw_data *ha = vha->hw; + struct flogi_acc_payld *fl_p = &ha->flogi_acc; + + if (ha->flags.flogi_acc_pl_in_cont_iocb) { + if (ha->flogi_acc_curr_offset == 0) { + // FLOGI ACC payload bytes 4-5 + ha->attached_port_bb_credit = *((uint16_t *)pkt + 4); + // FLOGI ACC payload bytes 10-11 + ha->flogi_acc_common_features = *((uint16_t *)pkt + 7); + } else if (ha->flogi_acc_curr_offset == + (sizeof(sts_cont_entry_t) - 4)) { + // FLOGI ACC payload byte offset 60, Class 3 + // service parameters (dword offs 17,bytes off 68) + ha->flogi_acc_cl3_sp_options = *((uint16_t *)pkt + 7); + } else if (ha->flogi_acc_curr_offset == + 2*(sizeof(sts_cont_entry_t) - 4)) { + // FLOGI ACC payload byte offset 120, login extension + // data length (dword off 31,bytes off 124) + ha->flogi_acc_login_ex_length = *((uint32_t *)pkt + 2); + } else if (ha->flogi_acc_curr_offset == + 4*(sizeof(sts_cont_entry_t) - 4)) { + if (ha->flogi_acc_login_ex_length > 0) {// There is login extension data + // FLOGI ACC payload byte offset 240, login extension + // data (dword off 64, bytes off 256) + + /* Copy the remaining 44 bytes to the flogi_acc structure */ + memcpy(fl_p, ((uint16_t *)pkt + 10), 44); + /* Swap the vendor fields */ + be32_to_cpus((uint32_t *)&ha->flogi_acc.vendor_code[0]); + be32_to_cpus((uint32_t *)&ha->flogi_acc.vendor_code[4]); + + dump_flogi_acc_payld(vha, ha); + } + } else if (ha->flogi_acc_curr_offset > + 4*(sizeof(sts_cont_entry_t) - 4)) { // Offset > 300, the last segment. 
+ uint8_t *ptr; + + ptr = (uint8_t *)&ha->flogi_acc + 44; + memcpy(ptr, pkt->data, ha->flogi_acc_pld_remaining); + + dump_flogi_acc_payld(vha, ha); + page_code = ha->flogi_acc.page_code; + page_length = le16_to_cpu(ha->flogi_acc.page_len); + /* Check if VL is enabled */ + if ((page_code == 0xf0) && (page_length > 0)) { + er_rdy = le16_to_cpu(ha->flogi_acc.er_rdy_desc_tag); + num_txvl = le16_to_cpu(ha->flogi_acc.num_tx_vl); + num_rxvl = le16_to_cpu(ha->flogi_acc.num_rx_vl); + memcpy(vendor_id_data, ha->flogi_acc.vendor_code, 8); + if ((memcmp(vendor_id_data, "CISCO", 5) == 0) && + (er_rdy == ER_RDY_DESC_TAG) && + (NUM_VLS_IN_RANGE(num_txvl, num_rxvl))) { + ha->flags.conn_fabric_cisco_er_rdy = 1; + ha->scm.scm_fabric_connection_flags + = SCM_FLAG_CISCO_CONNECTED; + ql_log(ql_log_info, vha, 0x5075, + "Port: %8phC connected to Cisco Fabric\n", vha->port_name); + ql_log(ql_log_info, vha, 0x5076, + "Num. Rx VLs:%d, Num. Tx VLs:%d\n", num_rxvl, num_txvl); + ql_log(ql_log_info, vha, 0x5076, + "ER_RDY/VL supported by Switch and HBA\n"); + } else if (memcmp(vendor_id_data, "BROCADE", 7) == 0) { + ha->flags.conn_fabric_brocade = 1; + ha->scm.scm_fabric_connection_flags + = SCM_FLAG_BROCADE_CONNECTED; + ql_log(ql_log_info, vha, 0x5075, + "Port: %8phC connected to Brocade Fabric\n", vha->port_name); + } + } + } + + if (ha->flogi_acc_pld_remaining > + (sizeof(sts_cont_entry_t) - 4)) { + ha->flogi_acc_pld_remaining -= + (sizeof(sts_cont_entry_t) - 4); + ha->flogi_acc_curr_offset += + (sizeof(sts_cont_entry_t) - 4); + } else { + ha->flogi_acc_curr_offset += + (sizeof(sts_cont_entry_t) - 4); + ha->flogi_acc_pld_remaining -= + (sizeof(sts_cont_entry_t) - 4); + if (ha->flogi_acc_pld_remaining == 0) { + ha->flags.flogi_acc_pl_in_cont_iocb = 0; + } else { + ql_log(ql_log_warn, vha, 0x5075, + "Unaccounted bytes remaining, %d\n", + ha->flogi_acc_pld_remaining); + ha->flags.flogi_acc_pl_in_cont_iocb = 0; + ha->flogi_acc_pld_remaining = 0; + } + } + } +} + /** * qla2x00_error_entry() - Process an error entry. 
* @vha: SCSI driver HA context @@ -3327,12 +3823,12 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) default: sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (sp) { - qla_put_iocbs(sp->qpair, &sp->iores); sp->done(sp, res); return 0; } break; + case SA_UPDATE_IOCB_TYPE: case ABTS_RESP_24XX: case CTIO_TYPE7: case CTIO_CRC2: @@ -3341,6 +3837,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt) fatal: ql_log(ql_log_warn, vha, 0x5030, "Error entry - invalid handle/queue (%04x).\n", que); + + if (is_debug(QDBG_FW_DUMP)) { + ha->isp_ops->fw_dump(vha, 1); + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + } + + BUG_ON(is_debug(QDBG_CRASH_ON_ERR)); + return 0; } @@ -3354,7 +3858,7 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) { uint16_t cnt; uint32_t mboxes; - __le16 __iomem *wptr; + uint16_t __iomem *wptr; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; @@ -3370,11 +3874,11 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) ha->flags.mbox_int = 1; ha->mailbox_out[0] = mb0; mboxes >>= 1; - wptr = &reg->mailbox1; + wptr = (uint16_t __iomem *)&reg->mailbox1; for (cnt = 1; cnt < ha->mbx_count; cnt++) { if (mboxes & BIT_0) - ha->mailbox_out[cnt] = rd_reg_word(wptr); + ha->mailbox_out[cnt] = RD_REG_WORD(wptr); mboxes >>= 1; wptr++; @@ -3387,14 +3891,23 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, { const char func[] = "ABT_IOCB"; srb_t *sp; + srb_t *orig_sp = NULL; struct srb_iocb *abt; + struct qla_hw_data *ha; + ha = vha->hw; sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); if (!sp) return; abt = &sp->u.iocb_cmd; - abt->u.abt.comp_status = pkt->nport_handle; + abt->u.abt.comp_status = le16_to_cpu(pkt->comp_status); + + orig_sp = sp->cmd_sp; + /* Need to pass original sp */ + if (orig_sp) + qla_nvme_abort_process_comp_status(pkt, orig_sp); + sp->done(sp, 0); } @@ -3413,6 +3926,636 @@ void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha, sp->done(sp, comp_status); } +static void +qla24xx_process_abts(struct scsi_qla_host *vha, struct purex_item *pkt) +{ + struct abts_entry_24xx *abts = + (struct abts_entry_24xx *)&pkt->iocb; + struct qla_hw_data *ha = vha->hw; + struct els_entry_24xx *rsp_els; + struct abts_entry_24xx *abts_rsp; + dma_addr_t dma; + uint32_t fctl; + int rval; + + ql_dbg(ql_dbg_init, vha, 0x0286, "%s: entered.\n", __func__); + + ql_log(ql_log_warn, vha, 0x0287, + "Processing ABTS xchg=%#x oxid=%#x rxid=%#x seqid=%#x seqcnt=%#x\n", + abts->rx_xch_addr_to_abort, abts->ox_id, abts->rx_id, + abts->seq_id, abts->seq_cnt); + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, + "-------- ABTS RCV -------\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0287, + (uint8_t *)abts, sizeof(*abts)); + + rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma, + GFP_KERNEL); + if (!rsp_els) { + ql_log(ql_log_warn, vha, 0x0287, + "Failed to allocate dma buffer ABTS/ELS RSP.\n"); + return; + } + + /* terminate exchange */ + memset(rsp_els, 0, sizeof(*rsp_els)); + rsp_els->entry_type = ELS_IOCB_TYPE; + rsp_els->entry_count = 1; + rsp_els->nport_handle = ~0; + rsp_els->rx_xchg_address = abts->rx_xch_addr_to_abort; + rsp_els->control_flags = EPD_RX_XCHG; + ql_dbg(ql_dbg_init, vha, 0x0283, + "Sending ELS Response to terminate exchange %#x...\n", + abts->rx_xch_addr_to_abort); + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, + "-------- ELS RSP -------\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0283, + 
(uint8_t *)rsp_els, sizeof(*rsp_els)); + rval = qla2x00_issue_iocb(vha, rsp_els, dma, 0); + if (rval) { + ql_log(ql_log_warn, vha, 0x0288, + "%s: iocb failed to execute -> %x\n", __func__, rval); + } else if (rsp_els->comp_status) { + ql_log(ql_log_warn, vha, 0x0289, + "%s: iocb failed to complete -> " + "completion=%#x subcode=(%#x,%#x)\n", + __func__, rsp_els->comp_status, + rsp_els->error_subcode_1, rsp_els->error_subcode_2); + } else { + ql_dbg(ql_dbg_init, vha, 0x028a, + "%s: abort exchange done.\n", __func__); + } + + /* send ABTS response */ + abts_rsp = (void *)rsp_els; + memset(abts_rsp, 0, sizeof(*abts_rsp)); + abts_rsp->entry_type = ABTS_RSP_TYPE; + abts_rsp->entry_count = 1; + abts_rsp->nport_handle = abts->nport_handle; + abts_rsp->vp_idx = abts->vp_idx; + abts_rsp->sof_type = abts->sof_type & 0xf0; + abts_rsp->rx_xch_addr = abts->rx_xch_addr; + abts_rsp->d_id[0] = abts->s_id[0]; + abts_rsp->d_id[1] = abts->s_id[1]; + abts_rsp->d_id[2] = abts->s_id[2]; + abts_rsp->r_ctl = FC_ROUTING_BLD | FC_R_CTL_BLD_BA_ACC; + abts_rsp->s_id[0] = abts->d_id[0]; + abts_rsp->s_id[1] = abts->d_id[1]; + abts_rsp->s_id[2] = abts->d_id[2]; + abts_rsp->cs_ctl = abts->cs_ctl; + /* include flipping bit23 in fctl */ + fctl = ~(abts->f_ctl[2] | 0x7F) << 16 | + FC_F_CTL_LAST_SEQ | FC_F_CTL_END_SEQ | FC_F_CTL_SEQ_INIT; + abts_rsp->f_ctl[0] = fctl >> 0 & 0xff; + abts_rsp->f_ctl[1] = fctl >> 8 & 0xff; + abts_rsp->f_ctl[2] = fctl >> 16 & 0xff; + abts_rsp->type = FC_TYPE_BLD; + abts_rsp->rx_id = abts->rx_id; + abts_rsp->ox_id = abts->ox_id; + abts_rsp->payload.ba_acc.aborted_rx_id = abts->rx_id; + abts_rsp->payload.ba_acc.aborted_ox_id = abts->ox_id; + abts_rsp->payload.ba_acc.high_seq_cnt = ~0; + abts_rsp->rx_xch_addr_to_abort = abts->rx_xch_addr_to_abort; + ql_dbg(ql_dbg_init, vha, 0x028b, + "Sending BA ACC response to ABTS %#x...\n", + abts->rx_xch_addr_to_abort); + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, + "-------- ELS RSP -------\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x028b, + (uint8_t *)abts_rsp, sizeof(*abts_rsp)); + rval = qla2x00_issue_iocb(vha, abts_rsp, dma, 0); + if (rval) { + ql_log(ql_log_warn, vha, 0x028c, + "%s: iocb failed to execute -> %x\n", __func__, rval); + } else if (abts_rsp->comp_status) { + ql_log(ql_log_warn, vha, 0x028d, + "%s: iocb failed to complete -> " + "completion=%#x subcode=(%#x,%#x)\n", + __func__, abts_rsp->comp_status, + abts_rsp->payload.error.subcode1, + abts_rsp->payload.error.subcode2); + } else { + ql_dbg(ql_dbg_init, vha, 0x028ea, + "%s: done.\n", __func__); + } + + dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma); +} + +static bool +qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha, + struct purex_entry_24xx *purex) +{ + char fwstr[16]; + u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0]; + struct port_database_24xx *pdb; + + /* Domain Controller is always logged-out. 
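RDP requests from it therefore cannot be validated against a login session, and older firmware must use the reduced 256-byte response.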
*/ /* if RDP request is not from Domain Controller: */ + if (sid != 0xfffc01) + return false; + + ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid); + + pdb = kzalloc(sizeof(*pdb), GFP_KERNEL); + if (!pdb) { + ql_dbg(ql_dbg_init, vha, 0x0181, + "%s: Failed to allocate pdb\n", __func__); + } else if (qla24xx_get_port_database(vha, purex->nport_handle, pdb)) { + ql_dbg(ql_dbg_init, vha, 0x0181, + "%s: Failed to get pdb sid=%x\n", __func__, sid); + } else if (pdb->current_login_state != PDS_PLOGI_COMPLETE && + pdb->current_login_state != PDS_PRLI_COMPLETE) { + ql_dbg(ql_dbg_init, vha, 0x0181, + "%s: Port not logged in sid=%#x\n", __func__, sid); + } else { + /* RDP request is from logged in port */ + kfree(pdb); + return false; + } + kfree(pdb); + + vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr)); + fwstr[strcspn(fwstr, " ")] = 0; + /* if FW version allows RDP response length up to 2048 bytes: */ + if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0) + return false; + + ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr); + + /* RDP response length is to be reduced to maximum 256 bytes */ + return true; +} + +/* + * Function Name: qla24xx_process_purex_rdp + * + * Description: + * Prepare an RDP response and send it to the Fabric switch + * + * PARAMETERS: + * vha: SCSI qla host + * item: purex item carrying the RDP request received by the HBA + */ +static void +qla24xx_process_purex_rdp(struct scsi_qla_host *vha, + struct purex_item *item) +{ + struct qla_hw_data *ha = vha->hw; + struct purex_entry_24xx *purex = + (struct purex_entry_24xx *)&item->iocb; + dma_addr_t rsp_els_dma; + dma_addr_t rsp_payload_dma; + dma_addr_t stat_dma; + dma_addr_t sfp_dma; + struct els_entry_24xx *rsp_els = NULL; + struct rdp_rsp_payload *rsp_payload = NULL; + struct link_statistics *stat = NULL; + uint8_t *sfp = NULL; + uint16_t sfp_flags = 0; + uint rsp_payload_length = sizeof(*rsp_payload); + int rval; + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180, + "%s: Enter\n", __func__); + + ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181, + "-------- ELS REQ -------\n"); + ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182, + purex, sizeof(*purex)); + + if (qla25xx_rdp_rsp_reduce_size(vha, purex)) { + rsp_payload_length = + offsetof(typeof(*rsp_payload), optical_elmt_desc); + ql_dbg(ql_dbg_init, vha, 0x0181, + "Reducing RSP payload length to %u bytes...\n", + rsp_payload_length); + } + + rsp_els = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), + &rsp_els_dma, GFP_KERNEL); + if (!rsp_els) { + ql_log(ql_log_warn, vha, 0x0183, + "Failed to allocate dma buffer ELS RSP.\n"); + goto dealloc; + } + + rsp_payload = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload), + &rsp_payload_dma, GFP_KERNEL); + if (!rsp_payload) { + ql_log(ql_log_warn, vha, 0x0184, + "Failed to allocate dma buffer ELS RSP payload.\n"); + goto dealloc; + } + + sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN, + &sfp_dma, GFP_KERNEL); + + stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat), + &stat_dma, GFP_KERNEL); + + /* Prepare Response IOCB */ + rsp_els->entry_type = ELS_IOCB_TYPE; + rsp_els->entry_count = 1; + rsp_els->sys_define = 0; + rsp_els->entry_status = 0; + rsp_els->handle = 0; + rsp_els->nport_handle = purex->nport_handle; + rsp_els->tx_dsd_count = 1; + rsp_els->vp_index = purex->vp_idx; + rsp_els->sof_type = EST_SOFI3; + rsp_els->rx_xchg_address = purex->rx_xchg_addr; + rsp_els->rx_dsd_count = 0; + rsp_els->opcode = purex->els_frame_payload[0]; + + rsp_els->d_id[0] = purex->s_id[0]; + 
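/* d_id is filled from the requester's s_id: the ACC goes back to the sender of the RDP request. */ +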
rsp_els->d_id[1] = purex->s_id[1]; + rsp_els->d_id[2] = purex->s_id[2]; + + rsp_els->control_flags = EPD_ELS_ACC; + rsp_els->rx_byte_count = 0; + rsp_els->tx_byte_count = rsp_payload_length; + + rsp_els->tx_address = rsp_payload_dma; + rsp_els->tx_len = rsp_els->tx_byte_count; + + rsp_els->rx_address = 0; + rsp_els->rx_len = 0; + + /* Prepare Response Payload */ + rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */ + rsp_payload->hdr.len = cpu_to_be32( + rsp_els->tx_byte_count - sizeof(rsp_payload->hdr)); + + /* Link service Request Info Descriptor */ + rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1); + rsp_payload->ls_req_info_desc.desc_len = + cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc)); + rsp_payload->ls_req_info_desc.req_payload_word_0 = + cpu_to_be32p((uint32_t *)purex->els_frame_payload); + + /* Link service Request Info Descriptor 2 */ + rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1); + rsp_payload->ls_req_info_desc2.desc_len = + cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2)); + rsp_payload->ls_req_info_desc2.req_payload_word_0 = + cpu_to_be32p((uint32_t *)purex->els_frame_payload); + + + rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000); + rsp_payload->sfp_diag_desc.desc_len = + cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc)); + + if (sfp) { + /* SFP Flags */ + memset(sfp, 0, SFP_RTDI_LEN); + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0); + if (!rval) { + /* SFP Flags bits 3-0: Port Tx Laser Type */ + if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5)) + sfp_flags |= BIT_0; /* short wave */ + else if (sfp[0] & BIT_1) + sfp_flags |= BIT_1; /* long wave 1310nm */ + else if (sfp[1] & BIT_4) + sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */ + } + + /* SFP Type */ + memset(sfp, 0, SFP_RTDI_LEN); + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0); + if (!rval) { + sfp_flags |= BIT_4; /* optical */ + if (sfp[0] == 0x3) + sfp_flags |= BIT_6; /* sfp+ */ + } + + rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags); + + /* SFP Diagnostics */ + memset(sfp, 0, SFP_RTDI_LEN); + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0); + if (!rval) { + uint16_t *trx = (void *)sfp; /* already be16 */ + rsp_payload->sfp_diag_desc.temperature = trx[0]; + rsp_payload->sfp_diag_desc.vcc = trx[1]; + rsp_payload->sfp_diag_desc.tx_bias = trx[2]; + rsp_payload->sfp_diag_desc.tx_power = trx[3]; + rsp_payload->sfp_diag_desc.rx_power = trx[4]; + } + } + + /* Port Speed Descriptor */ + rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001); + rsp_payload->port_speed_desc.desc_len = + cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc)); + rsp_payload->port_speed_desc.speed_capab = cpu_to_be16( + qla25xx_rdp_port_speed_capability(ha)); + rsp_payload->port_speed_desc.operating_speed = cpu_to_be16( + qla25xx_rdp_port_speed_currently(ha)); + + /* Link Error Status Descriptor */ + rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002); + rsp_payload->ls_err_desc.desc_len = + cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc)); + + if (stat) { + rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0); + if (!rval) { + rsp_payload->ls_err_desc.link_fail_cnt = + cpu_to_be32(stat->link_fail_cnt); + rsp_payload->ls_err_desc.loss_sync_cnt = + cpu_to_be32(stat->loss_sync_cnt); + rsp_payload->ls_err_desc.loss_sig_cnt = + cpu_to_be32(stat->loss_sig_cnt); + rsp_payload->ls_err_desc.prim_seq_err_cnt = + cpu_to_be32(stat->prim_seq_err_cnt); + rsp_payload->ls_err_desc.inval_xmit_word_cnt = + 
cpu_to_be32(stat->inval_xmit_word_cnt);
+			rsp_payload->ls_err_desc.inval_crc_cnt =
+			    cpu_to_be32(stat->inval_crc_cnt);
+			rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6;
+		}
+	}
+
+	/* Portname Descriptor */
+	rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003);
+	rsp_payload->port_name_diag_desc.desc_len =
+	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc));
+	memcpy(rsp_payload->port_name_diag_desc.WWNN,
+	    vha->node_name,
+	    sizeof(rsp_payload->port_name_diag_desc.WWNN));
+	memcpy(rsp_payload->port_name_diag_desc.WWPN,
+	    vha->port_name,
+	    sizeof(rsp_payload->port_name_diag_desc.WWPN));
+
+	/* F-Port Portname Descriptor */
+	rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003);
+	rsp_payload->port_name_direct_desc.desc_len =
+	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc));
+	memcpy(rsp_payload->port_name_direct_desc.WWNN,
+	    vha->fabric_node_name,
+	    sizeof(rsp_payload->port_name_direct_desc.WWNN));
+	memcpy(rsp_payload->port_name_direct_desc.WWPN,
+	    vha->fabric_port_name,
+	    sizeof(rsp_payload->port_name_direct_desc.WWPN));
+
+	/* Buffer Credit Descriptor */
+	rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006);
+	rsp_payload->buffer_credit_desc.desc_len =
+	    cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc));
+	rsp_payload->buffer_credit_desc.fcport_b2b = 0;
+	rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0);
+	rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0);
+
+	if (ha->flags.plogi_template_valid) {
+		uint32_t tmp =
+		    be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred);
+		rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp);
+	}
+
+	if (rsp_payload_length < sizeof(*rsp_payload))
+		goto send;
+
+	/* Optical Element Descriptor, Temperature */
+	rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007);
+	rsp_payload->optical_elmt_desc[0].desc_len =
+	    cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+	/* Optical Element Descriptor, Voltage */
+	rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007);
+	rsp_payload->optical_elmt_desc[1].desc_len =
+	    cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+	/* Optical Element Descriptor, Tx Bias Current */
+	rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007);
+	rsp_payload->optical_elmt_desc[2].desc_len =
+	    cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+	/* Optical Element Descriptor, Tx Power */
+	rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007);
+	rsp_payload->optical_elmt_desc[3].desc_len =
+	    cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+	/* Optical Element Descriptor, Rx Power */
+	rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007);
+	rsp_payload->optical_elmt_desc[4].desc_len =
+	    cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc));
+
+	if (sfp) {
+		memset(sfp, 0, SFP_RTDI_LEN);
+		rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0);
+		if (!rval) {
+			uint16_t *trx = (void *)sfp;	/* already be16 */
+
+			/* Optical Element Descriptor, Temperature */
+			rsp_payload->optical_elmt_desc[0].high_alarm = trx[0];
+			rsp_payload->optical_elmt_desc[0].low_alarm = trx[1];
+			rsp_payload->optical_elmt_desc[0].high_warn = trx[2];
+			rsp_payload->optical_elmt_desc[0].low_warn = trx[3];
+			rsp_payload->optical_elmt_desc[0].element_flags =
+			    cpu_to_be32(1 << 28);
+
+			/* Optical Element Descriptor, Voltage */
+			rsp_payload->optical_elmt_desc[1].high_alarm = trx[4];
+			rsp_payload->optical_elmt_desc[1].low_alarm = trx[5];
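
Every descriptor assembled here follows the same RDP framing: a 32-bit big-endian tag, a 32-bit big-endian length that counts only the bytes after those two header words, then the payload. The sketch below restates that framing with a hypothetical descriptor; the driver's real RDP_DESC_LEN() macro is defined in a header this patch does not show, so the local equivalent is an assumption inferred from how desc_len is filled in above:

#include <stdint.h>

/* Header shared by all RDP descriptors (big-endian on the wire). */
struct rdp_desc_hdr {
	uint32_t desc_tag;	/* e.g. 0x10006 = Buffer Credit descriptor */
	uint32_t desc_len;	/* bytes of payload following the header */
};

/* Assumed equivalent of the driver's RDP_DESC_LEN(). */
#define SKETCH_RDP_DESC_LEN(desc) \
	((uint32_t)(sizeof(desc) - sizeof(struct rdp_desc_hdr)))

/* Hypothetical descriptor, for illustration only. */
struct sketch_buffer_credit_desc {
	struct rdp_desc_hdr hdr;
	uint32_t fcport_b2b;
	uint32_t attached_fcport_b2b;
	uint32_t fcport_rtt;
};

/* Three 32-bit payload words, so desc_len must come out to 12. */
_Static_assert(SKETCH_RDP_DESC_LEN(struct sketch_buffer_credit_desc) == 12,
	       "RDP length excludes the tag/len header");
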
rsp_payload->optical_elmt_desc[1].high_warn = trx[6]; + rsp_payload->optical_elmt_desc[1].low_warn = trx[7]; + rsp_payload->optical_elmt_desc[1].element_flags = + cpu_to_be32(2 << 28); + + /* Optical Element Descriptor, Tx Bias Current */ + rsp_payload->optical_elmt_desc[2].high_alarm = trx[8]; + rsp_payload->optical_elmt_desc[2].low_alarm = trx[9]; + rsp_payload->optical_elmt_desc[2].high_warn = trx[10]; + rsp_payload->optical_elmt_desc[2].low_warn = trx[11]; + rsp_payload->optical_elmt_desc[2].element_flags = + cpu_to_be32(3 << 28); + + /* Optical Element Descriptor, Tx Power */ + rsp_payload->optical_elmt_desc[3].high_alarm = trx[12]; + rsp_payload->optical_elmt_desc[3].low_alarm = trx[13]; + rsp_payload->optical_elmt_desc[3].high_warn = trx[14]; + rsp_payload->optical_elmt_desc[3].low_warn = trx[15]; + rsp_payload->optical_elmt_desc[3].element_flags = + cpu_to_be32(4 << 28); + + /* Optical Element Descriptor, Rx Power */ + rsp_payload->optical_elmt_desc[4].high_alarm = trx[16]; + rsp_payload->optical_elmt_desc[4].low_alarm = trx[17]; + rsp_payload->optical_elmt_desc[4].high_warn = trx[18]; + rsp_payload->optical_elmt_desc[4].low_warn = trx[19]; + rsp_payload->optical_elmt_desc[4].element_flags = + cpu_to_be32(5 << 28); + } + + memset(sfp, 0, SFP_RTDI_LEN); + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0); + if (!rval) { + /* Temperature high/low alarm/warning */ + rsp_payload->optical_elmt_desc[0].element_flags |= + cpu_to_be32( + (sfp[0] >> 7 & 1) << 3 | (sfp[0] >> 6 & 1) << 2 | + (sfp[4] >> 7 & 1) << 1 | (sfp[4] >> 6 & 1) << 0); + + /* Voltage high/low alarm/warning */ + rsp_payload->optical_elmt_desc[1].element_flags |= + cpu_to_be32( + (sfp[0] >> 5 & 1) << 3 | (sfp[0] >> 4 & 1) << 2 | + (sfp[4] >> 5 & 1) << 1 | (sfp[4] >> 4 & 1) << 0); + + /* Tx Bias Current high/low alarm/warning */ + rsp_payload->optical_elmt_desc[2].element_flags |= + cpu_to_be32( + (sfp[0] >> 3 & 1) << 3 | (sfp[0] >> 2 & 1) << 2 | + (sfp[4] >> 3 & 1) << 1 | (sfp[4] >> 2 & 1) << 0); + + /* Tx Power high/low alarm/warning */ + rsp_payload->optical_elmt_desc[3].element_flags |= + cpu_to_be32( + (sfp[0] >> 1 & 1) << 3 | (sfp[0] >> 0 & 1) << 2 | + (sfp[4] >> 1 & 1) << 1 | (sfp[4] >> 0 & 1) << 0); + + /* Rx Power high/low alarm/warning */ + rsp_payload->optical_elmt_desc[4].element_flags |= + cpu_to_be32( + (sfp[1] >> 7 & 1) << 3 | (sfp[1] >> 6 & 1) << 2 | + (sfp[5] >> 7 & 1) << 1 | (sfp[5] >> 6 & 1) << 0); + } + } + + /* Optical Product Data Descriptor */ + rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008); + rsp_payload->optical_prod_desc.desc_len = + cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc)); + + if (sfp) { + memset(sfp, 0, SFP_RTDI_LEN); + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0); + if (!rval) { + memcpy(rsp_payload->optical_prod_desc.vendor_name, + sfp + 0, + sizeof(rsp_payload->optical_prod_desc.vendor_name)); + memcpy(rsp_payload->optical_prod_desc.part_number, + sfp + 20, + sizeof(rsp_payload->optical_prod_desc.part_number)); + memcpy(rsp_payload->optical_prod_desc.revision, + sfp + 36, + sizeof(rsp_payload->optical_prod_desc.revision)); + memcpy(rsp_payload->optical_prod_desc.serial_number, + sfp + 48, + sizeof(rsp_payload->optical_prod_desc.serial_number)); + } + + memset(sfp, 0, SFP_RTDI_LEN); + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0); + if (!rval) { + memcpy(rsp_payload->optical_prod_desc.date, + sfp + 0, + sizeof(rsp_payload->optical_prod_desc.date)); + } + } + +send: + ql_dbg(ql_dbg_init, vha, 0x0183, + "Sending ELS 
Response to RDP Request...\n");
+	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184,
+	    "-------- ELS RSP -------\n");
+	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185,
+	    rsp_els, sizeof(*rsp_els));
+	ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186,
+	    "-------- ELS RSP PAYLOAD -------\n");
+	ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187,
+	    rsp_payload, rsp_payload_length);
+
+	rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0);
+
+	if (rval) {
+		ql_log(ql_log_warn, vha, 0x0188,
+		    "%s: iocb failed to execute -> %x\n", __func__, rval);
+	} else if (rsp_els->comp_status) {
+		ql_log(ql_log_warn, vha, 0x0189,
+		    "%s: iocb failed to complete -> "
+		    "completion=%#x subcode=(%#x,%#x)\n",
+		    __func__, rsp_els->comp_status,
+		    rsp_els->error_subcode_1, rsp_els->error_subcode_2);
+	} else {
+		ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__);
+	}
+
+dealloc:
+	if (stat)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*stat),
+		    stat, stat_dma);
+	if (sfp)
+		dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN,
+		    sfp, sfp_dma);
+	if (rsp_payload)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload),
+		    rsp_payload, rsp_payload_dma);
+	if (rsp_els)
+		dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els),
+		    rsp_els, rsp_els_dma);
+}
+
+/**
+ * qla_chk_cont_iocb_avail - check that all continuation iocbs are available
+ *   before iocb processing can start.
+ * @vha: host adapter pointer
+ * @rsp: response queue
+ * @pkt: head iocb describing how many continuation iocbs follow
+ * Return: 0 if all iocbs have arrived, -EIO if they have not.
+ */
+static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha,
+	struct rsp_que *rsp, response_t *pkt, u32 rsp_q_in)
+{
+	int start_pkt_ring_index;
+	u32 iocb_cnt = 0;
+	int rc = 0;
+
+	if (pkt->entry_count == 1)
+		return rc;
+
+	/* ring_index was pre-incremented. Set it back to the current pkt. */
+	if (rsp->ring_index == 0)
+		start_pkt_ring_index = rsp->length - 1;
+	else
+		start_pkt_ring_index = rsp->ring_index - 1;
+
+	if (rsp_q_in < start_pkt_ring_index)
+		/* q in ptr is wrapped */
+		iocb_cnt = rsp->length - start_pkt_ring_index + rsp_q_in;
+	else
+		iocb_cnt = rsp_q_in - start_pkt_ring_index;
+
+	if (iocb_cnt < pkt->entry_count)
+		rc = -EIO;
+
+	ql_dbg(ql_dbg_init, vha, 0x5091,
+	    "%s - ring %p pkt %p entry count %d iocb_cnt %d rsp_q_in %d rc %d\n",
+	    __func__, rsp->ring, pkt, pkt->entry_count, iocb_cnt, rsp_q_in, rc);
+
+	return rc;
+}
+
+static void qla_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+	struct mrk_entry_24xx *pkt)
+{
+	const char func[] = "MRK-IOCB";
+	srb_t *sp;
+	int res = QLA_SUCCESS;
+
+	if (!IS_FWI2_CAPABLE(vha->hw))
+		return;
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (!sp)
+		return;
+
+	if (pkt->entry_status) {
+		ql_dbg(ql_dbg_taskm, vha, 0x8025, "marker failure.\n");
+		res = QLA_COMMAND_ERROR;
+	}
+	sp->u.iocb_cmd.u.tmf.data = res;
+	sp->done(sp, res);
+}
+
 /**
  * qla24xx_process_response_queue() - Process response queue entries.
* @vha: SCSI driver HA context @@ -3425,17 +4568,36 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, struct qla_hw_data *ha = vha->hw; struct purex_entry_24xx *purex_entry; struct purex_item *pure_item; + u16 rsp_in = 0, cur_ring_index; + int follow_inptr, is_shadow_hba; if (!ha->flags.fw_started) return; if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) { rsp->qpair->rcv_intr = 1; - qla_cpu_update(rsp->qpair, smp_processor_id()); } - while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { +#define __update_rsp_in(_update, _is_shadow_hba, _rsp, _rsp_in) \ + if (_update) { \ + _rsp_in = _is_shadow_hba ? *(_rsp)->in_ptr : \ + RD_REG_DWORD_RELAXED((_rsp)->rsp_q_in); \ + } + + is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha); + follow_inptr = is_shadow_hba ? ql2xrspq_follow_inptr : + ql2xrspq_follow_inptr_legacy; + + __update_rsp_in(follow_inptr, is_shadow_hba, rsp, rsp_in); + + while ((likely(follow_inptr && + rsp->ring_index != rsp_in && + rsp->ring_ptr->signature != RESPONSE_PROCESSED)) || + (!follow_inptr && + rsp->ring_ptr->signature != RESPONSE_PROCESSED)) { + pkt = (struct sts_entry_24xx *)rsp->ring_ptr; + cur_ring_index = rsp->ring_index; rsp->ring_index++; if (rsp->ring_index == rsp->length) { @@ -3466,6 +4628,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, qla24xx_report_id_acquisition(vha, (struct vp_rpt_id_entry_24xx *)pkt); break; + case STATUS_CONT_TYPE_1: + qla27xx_status_cont_type_1(vha, (sts_cont_entry_t *)pkt); + break; case LOGINOUT_PORT_IOCB_TYPE: qla24xx_logio_entry(vha, rsp->req, (struct logio_entry_24xx *)pkt); @@ -3481,6 +4646,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, pure_item = qla24xx_copy_std_pkt(vha, pkt); if (!pure_item) break; + qla24xx_queue_purex_item(vha, pure_item, qla24xx_process_abts); break; @@ -3513,9 +4679,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, (struct nack_to_isp *)pkt); break; case MARKER_TYPE: - /* Do nothing in this case, this check is to prevent it - * from falling into default case - */ + qla_marker_iocb_entry(vha, rsp->req, (struct mrk_entry_24xx*)pkt); break; case ABORT_IOCB_TYPE: qla24xx_abort_iocb_entry(vha, rsp->req, @@ -3532,14 +4696,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, case PUREX_IOCB_TYPE: purex_entry = (void *)pkt; switch (purex_entry->els_frame_payload[3]) { - case ELS_RDP: + case ELS_COMMAND_RDP: pure_item = qla24xx_copy_std_pkt(vha, pkt); if (!pure_item) break; qla24xx_queue_purex_item(vha, pure_item, qla24xx_process_purex_rdp); break; - case ELS_FPIN: + case ELS_COMMAND_FPIN: if (!vha->hw->flags.scm_enabled) { ql_log(ql_log_warn, vha, 0x5094, "SCM not active for this port\n"); @@ -3547,25 +4711,78 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, } pure_item = qla27xx_copy_fpin_pkt(vha, (void **)&pkt, &rsp); + __update_rsp_in(follow_inptr, is_shadow_hba, + rsp, rsp_in); if (!pure_item) break; qla24xx_queue_purex_item(vha, pure_item, qla27xx_process_purex_fpin); break; - + case ELS_AUTH_ELS: + if (qla_chk_cont_iocb_avail(vha, rsp, (response_t *) pkt, rsp_in)) { + /* + * ring_ptr and ring_index were pre-incremented above. + * Reset them back to current. Wait for next interrupt + * with all IOCBs to arrive and re-process. 
+ */ + rsp->ring_ptr = (response_t *)pkt; + rsp->ring_index = cur_ring_index; + + ql_dbg(ql_dbg_init, vha, 0x5091, + "Defer processing ELS opcode %#x...\n", + purex_entry->els_frame_payload[3]); + return; + } + qla24xx_auth_els(vha, (void**)&pkt, &rsp); + break; + case ELS_COMMAND_RDF: + if (!vha->hw->flags.scm_enabled) { + ql_log(ql_log_warn, vha, 0x5095, + "RDF received when SCM not active for this port\n"); + break; + } + pure_item = qla24xx_copy_std_pkt(vha, pkt); + if (!pure_item) + break; + pure_item->qpair = rsp->qpair; + vha->rdf_retry_cnt = 0; + qla24xx_queue_purex_item(vha, pure_item, + qla2xxx_scm_process_purex_rdf); + break; + case ELS_COMMAND_EDC: + if (!vha->hw->flags.scm_enabled) { + ql_log(ql_log_warn, vha, 0x5096, + "EDC received when SCM not active for this port\n"); + break; + } + pure_item = qla24xx_copy_std_pkt(vha, pkt); + if (!pure_item) + break; + pure_item->qpair = rsp->qpair; + vha->hw->edc_retry_cnt = 0; + qla24xx_queue_purex_item(vha, pure_item, + qla2xx_scm_process_purex_edc); + break; default: ql_log(ql_log_warn, vha, 0x509c, "Discarding ELS Request opcode 0x%x\n", purex_entry->els_frame_payload[3]); } break; + case SA_UPDATE_IOCB_TYPE: + qla28xx_sa_update_iocb_entry(vha, rsp->req, + (struct sa_update_28xx *)pkt); + break; + default: /* Type Not Supported. */ ql_dbg(ql_dbg_async, vha, 0x5042, - "Received unknown response pkt type 0x%x entry status=%x.\n", - pkt->entry_type, pkt->entry_status); + "Received unknown response pkt type %x " + "entry status=%x.\n", + pkt->entry_type, pkt->entry_status); break; } + ((response_t *)pkt)->signature = RESPONSE_PROCESSED; wmb(); } @@ -3574,16 +4791,15 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, if (IS_P3P_TYPE(ha)) { struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; - wrt_reg_dword(®->rsp_q_out[0], rsp->ring_index); + WRT_REG_DWORD(®->rsp_q_out[0], rsp->ring_index); } else { - wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index); + WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); } } static void qla2xxx_check_risc_status(scsi_qla_host_t *vha) { - int rval; uint32_t cnt; struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; @@ -3592,42 +4808,33 @@ qla2xxx_check_risc_status(scsi_qla_host_t *vha) !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) return; - rval = QLA_SUCCESS; - wrt_reg_dword(®->iobase_addr, 0x7C00); - rd_reg_dword(®->iobase_addr); - wrt_reg_dword(®->iobase_window, 0x0001); - for (cnt = 10000; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 && - rval == QLA_SUCCESS; cnt--) { - if (cnt) { - wrt_reg_dword(®->iobase_window, 0x0001); - udelay(10); - } else - rval = QLA_FUNCTION_TIMEOUT; - } - if (rval == QLA_SUCCESS) - goto next_test; - - rval = QLA_SUCCESS; - wrt_reg_dword(®->iobase_window, 0x0003); - for (cnt = 100; (rd_reg_dword(®->iobase_window) & BIT_0) == 0 && - rval == QLA_SUCCESS; cnt--) { - if (cnt) { - wrt_reg_dword(®->iobase_window, 0x0003); - udelay(10); - } else - rval = QLA_FUNCTION_TIMEOUT; - } - if (rval != QLA_SUCCESS) - goto done; - + WRT_REG_DWORD(®->iobase_addr, 0x7C00); + RD_REG_DWORD(®->iobase_addr); + WRT_REG_DWORD(®->iobase_window, 0x0001); + for (cnt = 10000; cnt; cnt--) { + if (RD_REG_DWORD(®->iobase_window) & BIT_0) + goto next_test; + WRT_REG_DWORD(®->iobase_window, 0x0001); + udelay(10); + } + + WRT_REG_DWORD(®->iobase_window, 0x0003); + for (cnt = 100; cnt; cnt--) { + if (RD_REG_DWORD(®->iobase_window) & BIT_0) + goto next_test; + WRT_REG_DWORD(®->iobase_window, 0x0003); + udelay(10); + } + + goto done; next_test: - if 
(rd_reg_dword(®->iobase_c8) & BIT_3) + if (RD_REG_DWORD(®->iobase_c8) & BIT_3) ql_log(ql_log_info, vha, 0x504c, "Additional code -- 0x55AA.\n"); done: - wrt_reg_dword(®->iobase_window, 0x0000); - rd_reg_dword(®->iobase_window); + WRT_REG_DWORD(®->iobase_window, 0x0000); + RD_REG_DWORD(®->iobase_window); } /** @@ -3671,22 +4878,23 @@ qla24xx_intr_handler(int irq, void *dev_id) spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 50; iter--; ) { - stat = rd_reg_dword(®->host_status); + stat = RD_REG_DWORD(®->host_status); if (qla2x00_check_reg32_for_disconnect(vha, stat)) break; if (stat & HSRX_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) break; - hccr = rd_reg_dword(®->hccr); + hccr = RD_REG_DWORD(®->hccr); ql_log(ql_log_warn, vha, 0x504b, "RISC paused -- HCCR=%x, Dumping firmware.\n", hccr); qla2xxx_check_risc_status(vha); + vha->hw_err_cnt++; - ha->isp_ops->fw_dump(vha); + ha->isp_ops->fw_dump(vha, 1); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } else if ((stat & HSRX_RISC_INT) == 0) @@ -3703,9 +4911,9 @@ qla24xx_intr_handler(int irq, void *dev_id) break; case INTR_ASYNC_EVENT: mb[0] = MSW(stat); - mb[1] = rd_reg_word(®->mailbox1); - mb[2] = rd_reg_word(®->mailbox2); - mb[3] = rd_reg_word(®->mailbox3); + mb[1] = RD_REG_WORD(®->mailbox1); + mb[2] = RD_REG_WORD(®->mailbox2); + mb[3] = RD_REG_WORD(®->mailbox3); qla2x00_async_event(vha, rsp, mb); break; case INTR_RSP_QUE_UPDATE: @@ -3725,8 +4933,8 @@ qla24xx_intr_handler(int irq, void *dev_id) "Unrecognized interrupt type (%d).\n", stat * 0xff); break; } - wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); - rd_reg_dword_relaxed(®->hccr); + WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); + RD_REG_DWORD_RELAXED(®->hccr); if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1))) ndelay(3500); } @@ -3765,8 +4973,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id) vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, rsp); if (!ha->flags.disable_msix_handshake) { - wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); - rd_reg_dword_relaxed(®->hccr); + WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); + RD_REG_DWORD_RELAXED(®->hccr); } spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -3800,22 +5008,23 @@ qla24xx_msix_default(int irq, void *dev_id) spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); do { - stat = rd_reg_dword(®->host_status); + stat = RD_REG_DWORD(®->host_status); if (qla2x00_check_reg32_for_disconnect(vha, stat)) break; if (stat & HSRX_RISC_PAUSED) { if (unlikely(pci_channel_offline(ha->pdev))) break; - hccr = rd_reg_dword(®->hccr); + hccr = RD_REG_DWORD(®->hccr); ql_log(ql_log_info, vha, 0x5050, "RISC paused -- HCCR=%x, Dumping firmware.\n", hccr); qla2xxx_check_risc_status(vha); + vha->hw_err_cnt++; - ha->isp_ops->fw_dump(vha); + ha->isp_ops->fw_dump(vha, 1); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } else if ((stat & HSRX_RISC_INT) == 0) @@ -3832,9 +5041,9 @@ qla24xx_msix_default(int irq, void *dev_id) break; case INTR_ASYNC_EVENT: mb[0] = MSW(stat); - mb[1] = rd_reg_word(®->mailbox1); - mb[2] = rd_reg_word(®->mailbox2); - mb[3] = rd_reg_word(®->mailbox3); + mb[1] = RD_REG_WORD(®->mailbox1); + mb[2] = RD_REG_WORD(®->mailbox2); + mb[3] = RD_REG_WORD(®->mailbox3); qla2x00_async_event(vha, rsp, mb); break; case INTR_RSP_QUE_UPDATE: @@ -3854,7 +5063,7 @@ qla24xx_msix_default(int irq, void *dev_id) "Unrecognized interrupt type (%d).\n", stat & 0xff); break; } - wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); + WRT_REG_DWORD(®->hccr, 
HCCRX_CLR_RISC_INT); } while (0); qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -3905,9 +5114,10 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id) reg = &ha->iobase->isp24; spin_lock_irqsave(&ha->hardware_lock, flags); - wrt_reg_dword(®->hccr, HCCRX_CLR_RISC_INT); + WRT_REG_DWORD(®->hccr, HCCRX_CLR_RISC_INT); spin_unlock_irqrestore(&ha->hardware_lock, flags); + queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work); return IRQ_HANDLED; @@ -3953,10 +5163,12 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) if (USER_CTRL_IRQ(ha) || !ha->mqiobase) { /* user wants to control IRQ setting for target mode */ ret = pci_alloc_irq_vectors(ha->pdev, min_vecs, - ha->msix_count, PCI_IRQ_MSIX); + min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)), + PCI_IRQ_MSIX); } else ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs, - ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, + min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)), + PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc); if (ret < 0) { @@ -3999,6 +5211,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) for (i = 0; i < ha->msix_count; i++) { qentry = &ha->msix_entries[i]; qentry->vector = pci_irq_vector(ha->pdev, i); + qentry->vector_base0 = i; qentry->entry = i; qentry->have_irq = 0; qentry->in_use = 0; @@ -4056,22 +5269,19 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) } /* Enable MSI-X vector for response queue update for queue 0 */ - if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { - if (ha->msixbase && ha->mqiobase && - (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || - ql2xmqsupport)) - ha->mqenable = 1; - } else - if (ha->mqiobase && - (ha->max_rsp_queues > 1 || ha->max_req_queues > 1 || - ql2xmqsupport)) - ha->mqenable = 1; + if (IS_MQUE_CAPABLE(ha) && (ha->msixbase && ha->mqiobase && ha->max_qpairs)) + ha->mqenable = 1; + else + ha->mqenable = 0; + ql_dbg(ql_dbg_multiq, vha, 0xc005, - "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", - ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); + "mqiobase=%px, max_rsp_queues=%d, max_req_queues=%d" + "mqenable=%d\n", ha->mqiobase, ha->max_rsp_queues, + ha->max_req_queues, ha->mqenable); ql_dbg(ql_dbg_init, vha, 0x0055, - "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n", - ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues); + "mqiobase=%px, max_rsp_queues=%d, max_req_queues=%d" + "mqenable=%d\n", ha->mqiobase, ha->max_rsp_queues, + ha->max_req_queues, ha->mqenable); msix_out: return ret; @@ -4108,11 +5318,16 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) goto skip_msi; } - if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) { - ql_log(ql_log_warn, vha, 0x0035, - "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", - ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); + if (ql2xenablemsix == 2) goto skip_msix; + + if (IS_QLA2432(ha)) { + if (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX) { + ql_log(ql_log_warn, vha, 0x0035, + "MSI-X; Unsupported ISP2432 (0x%X, 0x%X).\n", + ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX); + goto skip_msix; + } } ret = qla24xx_enable_msix(ha, rsp); @@ -4168,7 +5383,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) goto fail; spin_lock_irq(&ha->hardware_lock); - wrt_reg_word(®->isp.semaphore, 0); + WRT_REG_WORD(®->isp.semaphore, 0); spin_unlock_irq(&ha->hardware_lock); fail: @@ -4230,5 +5445,8 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair, } 
msix->have_irq = 1; msix->handle = qpair; + if (!(IS_SCM_CAPABLE(ha) && (qpair->id == ha->slow_queue_id))) + qla_mapq_init_qp_cpu_map(ha, msix, qpair); + return ret; } diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index bbb57edc1f662..f029f26ec1e77 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -1,7 +1,8 @@ -// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" #include "qla_target.h" @@ -23,6 +24,8 @@ static struct mb_cmd_name { {MBC_GET_ID_LIST, "GIDList"}, {MBC_GET_LINK_PRIV_STATS, "Stats"}, {MBC_GET_RESOURCE_COUNTS, "ResCnt"}, + {MBC_MPI_PASSTHROUGH, "MpiPT"}, + {MBC_GET_RNID_PARAMS, "GetParm"}, }; static const char *mb_to_str(uint16_t cmd) @@ -108,11 +111,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) int rval, i; unsigned long flags = 0; device_reg_t *reg; - uint8_t abort_active; + uint8_t abort_active, eeh_delay; uint8_t io_lock_on; uint16_t command = 0; uint16_t *iptr; - __le16 __iomem *optr; + uint16_t __iomem *optr; uint32_t cnt; uint32_t mboxes; unsigned long wait_time; @@ -123,9 +126,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) ql_dbg(ql_dbg_mbx, vha, 0x1000, "Entered %s.\n", __func__); - if (ha->pdev->error_state == pci_channel_io_perm_failure) { + if (ha->pdev->error_state > pci_channel_io_frozen) { ql_log(ql_log_warn, vha, 0x1001, - "PCI channel failed permanently, exiting.\n"); + "error_state is greater than pci_channel_io_frozen, " + "exiting.\n"); return QLA_FUNCTION_TIMEOUT; } @@ -142,7 +146,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) "PCI error, exiting.\n"); return QLA_FUNCTION_TIMEOUT; } - + eeh_delay = 0; reg = ha->iobase; io_lock_on = base_vha->flags.init_done; @@ -165,10 +169,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) } /* check if ISP abort is active and return cmd with timeout */ - if ((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || + if (((test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) || test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) || test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) && - !is_rom_cmd(mcp->mb[0])) { + !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) { ql_log(ql_log_info, vha, 0x1005, "Cmd 0x%x aborted with timeout since ISP Abort is pending\n", mcp->mb[0]); @@ -186,11 +190,17 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) ql_log(ql_log_warn, vha, 0xd035, "Cmd access timeout, cmd=0x%x, Exiting.\n", mcp->mb[0]); + vha->hw_err_cnt++; atomic_dec(&ha->num_pend_mbx_stage1); return QLA_FUNCTION_TIMEOUT; } atomic_dec(&ha->num_pend_mbx_stage1); - if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) { + if (ha->flags.purge_mbox || chip_reset != ha->chip_reset || + ha->flags.eeh_busy) { + ql_log(ql_log_warn, vha, 0xd035, + "Error detected: purge[%d] eeh[%d] cmd=0x%x, Exiting.\n", + ha->flags.purge_mbox, ha->flags.eeh_busy, + mcp->mb[0]); rval = QLA_ABORTED; goto premature_exit; } @@ -214,11 +224,11 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) /* Load mailbox registers. 
*/ if (IS_P3P_TYPE(ha)) - optr = ®->isp82.mailbox_in[0]; + optr = (uint16_t __iomem *)®->isp82.mailbox_in[0]; else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) - optr = ®->isp24.mailbox0; + optr = (uint16_t __iomem *)®->isp24.mailbox0; else - optr = MAILBOX_REG(ha, ®->isp, 0); + optr = (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 0); iptr = mcp->mb; command = mcp->mb[0]; @@ -228,11 +238,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) "Mailbox registers (OUT):\n"); for (cnt = 0; cnt < ha->mbx_count; cnt++) { if (IS_QLA2200(ha) && cnt == 8) - optr = MAILBOX_REG(ha, ®->isp, 8); + optr = + (uint16_t __iomem *)MAILBOX_REG(ha, ®->isp, 8); if (mboxes & BIT_0) { ql_dbg(ql_dbg_mbx, vha, 0x1112, "mbox[%d]<-0x%04x\n", cnt, *iptr); - wrt_reg_word(optr, *iptr); + WRT_REG_WORD(optr, *iptr); + } else { + WRT_REG_WORD(optr, 0); } mboxes >>= 1; @@ -241,7 +254,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) } ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117, - "I/O Address = %p.\n", optr); + "I/O Address = %px.\n", optr); /* Issue set host interrupt command to send cmd out. */ ha->flags.mbox_int = 0; @@ -258,18 +271,26 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); if (IS_P3P_TYPE(ha)) - wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING); + WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING); else if (IS_FWI2_CAPABLE(ha)) - wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT); + WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT); else - wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT); + WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT); spin_unlock_irqrestore(&ha->hardware_lock, flags); wait_time = jiffies; atomic_inc(&ha->num_pend_mbx_stage3); if (!wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ)) { + ql_dbg(ql_dbg_mbx, vha, 0x117a, + "cmd=%x Timeout.\n", command); + spin_lock_irqsave(&ha->hardware_lock, flags); + clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (chip_reset != ha->chip_reset) { + eeh_delay = ha->flags.eeh_busy ? 1 : 0; + spin_lock_irqsave(&ha->hardware_lock, flags); ha->flags.mbox_busy = 0; spin_unlock_irqrestore(&ha->hardware_lock, @@ -279,14 +300,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) rval = QLA_ABORTED; goto premature_exit; } - ql_dbg(ql_dbg_mbx, vha, 0x117a, - "cmd=%x Timeout.\n", command); - spin_lock_irqsave(&ha->hardware_lock, flags); - clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); - spin_unlock_irqrestore(&ha->hardware_lock, flags); - } else if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) { + eeh_delay = ha->flags.eeh_busy ? 
1 : 0; + spin_lock_irqsave(&ha->hardware_lock, flags); ha->flags.mbox_busy = 0; spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -305,7 +322,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) "Cmd=%x Polling Mode.\n", command); if (IS_P3P_TYPE(ha)) { - if (rd_reg_dword(®->isp82.hint) & + if (RD_REG_DWORD(®->isp82.hint) & HINT_MBX_INT_PENDING) { ha->flags.mbox_busy = 0; spin_unlock_irqrestore(&ha->hardware_lock, @@ -313,20 +330,23 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) atomic_dec(&ha->num_pend_mbx_stage2); ql_dbg(ql_dbg_mbx, vha, 0x1012, "Pending mailbox timeout, exiting.\n"); + vha->hw_err_cnt++; rval = QLA_FUNCTION_TIMEOUT; goto premature_exit; } - wrt_reg_dword(®->isp82.hint, HINT_MBX_INT_PENDING); + WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING); } else if (IS_FWI2_CAPABLE(ha)) - wrt_reg_dword(®->isp24.hccr, HCCRX_SET_HOST_INT); + WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT); else - wrt_reg_word(®->isp.hccr, HCCR_SET_HOST_INT); + WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT); spin_unlock_irqrestore(&ha->hardware_lock, flags); wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */ while (!ha->flags.mbox_int) { if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) { + eeh_delay = ha->flags.eeh_busy ? 1 : 0; + spin_lock_irqsave(&ha->hardware_lock, flags); ha->flags.mbox_busy = 0; spin_unlock_irqrestore(&ha->hardware_lock, @@ -339,6 +359,14 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) if (time_after(jiffies, wait_time)) break; + /* + * Check if it's UNLOADING, cause we cannot poll in + * this case, or else a NULL pointer dereference + * is triggered. + */ + if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags))) + return QLA_FUNCTION_TIMEOUT; + /* Check for pending interrupts. 
*/ qla2x00_poll(ha->rsp_q_map[0]); @@ -410,27 +438,29 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) uint16_t w; if (IS_FWI2_CAPABLE(ha)) { - mb[0] = rd_reg_word(®->isp24.mailbox0); - mb[1] = rd_reg_word(®->isp24.mailbox1); - mb[2] = rd_reg_word(®->isp24.mailbox2); - mb[3] = rd_reg_word(®->isp24.mailbox3); - mb[7] = rd_reg_word(®->isp24.mailbox7); - ictrl = rd_reg_dword(®->isp24.ictrl); - host_status = rd_reg_dword(®->isp24.host_status); - hccr = rd_reg_dword(®->isp24.hccr); + mb[0] = RD_REG_WORD(®->isp24.mailbox0); + mb[1] = RD_REG_WORD(®->isp24.mailbox1); + mb[2] = RD_REG_WORD(®->isp24.mailbox2); + mb[3] = RD_REG_WORD(®->isp24.mailbox3); + mb[7] = RD_REG_WORD(®->isp24.mailbox7); + ictrl = RD_REG_DWORD(®->isp24.ictrl); + host_status = RD_REG_DWORD(®->isp24.host_status); + hccr = RD_REG_DWORD(®->isp24.hccr); ql_log(ql_log_warn, vha, 0xd04c, "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n", command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3], mb[7], host_status, hccr); + vha->hw_err_cnt++; } else { mb[0] = RD_MAILBOX_REG(ha, ®->isp, 0); - ictrl = rd_reg_word(®->isp.ictrl); + ictrl = RD_REG_WORD(®->isp.ictrl); ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx " "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]); + vha->hw_err_cnt++; } ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); @@ -459,7 +489,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) * a dump */ if (mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) - qla2xxx_dump_fw(vha); + ha->isp_ops->fw_dump(vha, 0); rval = QLA_FUNCTION_TIMEOUT; } } @@ -503,6 +533,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP " "abort.\n", command, mcp->mb[0], ha->flags.eeh_busy); + vha->hw_err_cnt++; set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } @@ -527,11 +558,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) "Mailbox cmd timeout occurred, cmd=0x%x, " "mb[0]=0x%x. Scheduling ISP abort ", command, mcp->mb[0]); + vha->hw_err_cnt++; set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); /* Allow next mbx cmd to come in. */ complete(&ha->mbx_cmd_comp); - if (ha->isp_ops->abort_isp(vha)) { + if (ha->isp_ops->abort_isp(vha) && + !ha->flags.eeh_busy) { /* Failed. retry later. */ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); @@ -570,20 +603,28 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) { ql_dbg(ql_dbg_mbx, vha, 0x1198, "host_status=%#x intr_ctrl=%#x intr_status=%#x\n", - rd_reg_dword(®->isp24.host_status), - rd_reg_dword(®->isp24.ictrl), - rd_reg_dword(®->isp24.istatus)); + RD_REG_DWORD(®->isp24.host_status), + RD_REG_DWORD(®->isp24.ictrl), + RD_REG_DWORD(®->isp24.istatus)); } else { ql_dbg(ql_dbg_mbx, vha, 0x1206, "ctrl_status=%#x ictrl=%#x istatus=%#x\n", - rd_reg_word(®->isp.ctrl_status), - rd_reg_word(®->isp.ictrl), - rd_reg_word(®->isp.istatus)); + RD_REG_WORD(®->isp.ctrl_status), + RD_REG_WORD(®->isp.ictrl), + RD_REG_WORD(®->isp.istatus)); } } else { ql_dbg(ql_dbg_mbx, base_vha, 0x1021, "Done %s.\n", __func__); } + i=500; + while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) { + /* The caller of this mailbox encounter pci error. 
+ Hold the thread until PCIE link reset complete to make + sure caller does not unmap dma while recovery is in progress */ + msleep(1); + i--; + } return rval; } @@ -631,6 +672,9 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, ql_dbg(ql_dbg_mbx, vha, 0x1023, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); + vha->hw_err_cnt++; + if (LOCKDOWN_ERROR(vha->hw, mcp->mb[1])) + rval = QLA_FLASH_LOCKDOWN; } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024, "Done %s.\n", __func__); @@ -640,6 +684,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr, } #define NVME_ENABLE_FLAG BIT_3 +#define EDIF_HW_SUPPORT BIT_10 /* * qla2x00_execute_fw @@ -665,7 +710,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) mbx_cmd_t *mcp = &mc; u8 semaphore = 0; #define EXE_FW_FORCE_SEMAPHORE BIT_7 - u8 retry = 3; + u8 retry = 5; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025, "Entered %s.\n", __func__); @@ -707,6 +752,9 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) if (IS_PPCARCH) mcp->mb[11] |= BIT_4; + + if (ql2xnvmeenable) + mcp->mb[4] |= NVME_ENABLE_FLAG; } if (ha->flags.exlogins_enabled) @@ -719,7 +767,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) mcp->mb[11] |= EXE_FW_FORCE_SEMAPHORE; mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11; - mcp->in_mb |= MBX_3 | MBX_2 | MBX_1; + mcp->in_mb |= MBX_5 | MBX_3 | MBX_2 | MBX_1; } else { mcp->mb[1] = LSW(risc_addr); mcp->out_mb |= MBX_1; @@ -743,8 +791,15 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) goto again; } + if (retry) { + retry--; + ql_dbg(ql_dbg_async, vha, 0x509d, + "Exe FW retry: mb[0]=%x retry[%d]\n", mcp->mb[0], retry); + goto again; + } ql_dbg(ql_dbg_mbx, vha, 0x1026, "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + vha->hw_err_cnt++; return rval; } @@ -774,6 +829,12 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) } } + if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) { + ha->flags.edif_hw = 1; + ql_log(ql_log_info + ql_dbg_edif, vha, 0xffff, + "%s: edif HW\n", __func__); + } + done: ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028, "Done %s.\n", __func__); @@ -1086,6 +1147,11 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) "%s: Firmware supports Exchange Offload 0x%x\n", __func__, ha->fw_attributes_h); + if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED) + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191, + "%s: Firmware supports SCM 0x%x\n", + __func__, ha->fw_attributes_ext[0]); + /* * FW supports nvme and driver load parameter requested nvme. * BIT 26 of fw_attributes indicates NVMe support. 
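
The execute-firmware hunks above discover optional features by testing single bits in the mailbox output: EDIF hardware support is BIT_10 of mb[5] (the EDIF_HW_SUPPORT define added earlier), and NVMe2 support is advertised via bit 13 of the first extended-attributes word, per the FW_ATTR_EXT0_NVME2 check in the get-fw-version hunk that follows. A self-contained decode sketch; the SCM bit position below is a placeholder, since FW_ATTR_EXT0_SCM_SUPPORTED's real value lives in a header this patch does not show:

#include <stdint.h>
#include <stdio.h>

#define SKETCH_BIT(n)			(1u << (n))
#define SKETCH_EDIF_HW_SUPPORT		SKETCH_BIT(10)	/* mb[5], per hunk above */
#define SKETCH_FW_ATTR_EXT0_NVME2	SKETCH_BIT(13)	/* per comment below */
#define SKETCH_FW_ATTR_EXT0_SCM		SKETCH_BIT(12)	/* placeholder value */

static void decode_fw_caps(uint16_t mb5, uint16_t attr_ext0)
{
	printf("edif hw: %s\n", (mb5 & SKETCH_EDIF_HW_SUPPORT) ? "yes" : "no");
	printf("nvme2:   %s\n", (attr_ext0 & SKETCH_FW_ATTR_EXT0_NVME2) ? "yes" : "no");
	printf("scm:     %s\n", (attr_ext0 & SKETCH_FW_ATTR_EXT0_SCM) ? "yes" : "no");
}

int main(void)
{
	decode_fw_caps(0x0400, 0x2000);	/* edif + nvme2 set, scm clear */
	return 0;
}
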
@@ -1104,11 +1170,20 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) /* BIT_13 of Extended FW Attributes informs about NVMe2 support */ if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191, + "%s: Firmware supports NVMe2 0x%x\n", + __func__, ha->fw_attributes_ext[0]); ql_log(ql_log_info, vha, 0xd302, - "Firmware supports NVMe2 0x%x\n", - ha->fw_attributes_ext[0]); + "Firmware supports NVMe2 0x%x\n", + ha->fw_attributes_ext[0]); vha->flags.nvme2_enabled = 1; } + if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable && + (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) { + ha->flags.edif_enabled = 1; + ql_log(ql_log_info, vha, 0xffff, + "%s: edif is enabled\n", __func__); + } } if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { @@ -1131,25 +1206,39 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha) ql_log(ql_log_info, vha, 0xffff, "Secure Flash Update in FW: %s\n", - (ha->flags.secure_fw) ? "Supported" : - "Not Supported"); + (ha->flags.secure_fw) ? "Supported" : "Not Supported"); } - if (ha->flags.scm_supported_a && - (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) { - ha->flags.scm_supported_f = 1; - ha->sf_init_cb->flags |= cpu_to_le16(BIT_13); + if(ha->flags.scm_supported_a && + (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) { + ha->flags.scm_supported_f = 1; + if (ha->sf_init_cb) { + ha->sf_init_cb->flags |= cpu_to_le16(SCM_SUPPORT); + /* Virtual Lane support */ + if (ha->flags.scm_supported_vl == 1) { + ha->sf_init_cb->flags |= cpu_to_le16(SCM_PUN_SUPPORT); + ha->sf_init_cb->flags |= cpu_to_le16(SCM_VL_SUPPORT); + ql_log(ql_log_info, vha, 0x11a2, + "Driver supports Virtual Lane handling \n"); + } + if (ql2xcontrol_edc_rdf) { + ha->sf_init_cb->flags |= cpu_to_le16(SCM_DRIVER_CTRL_ELS); + ql_log(ql_log_info, vha, 0xffff, + "Driver supports sending RDF and EDC ELS \n"); + } + } } ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n", (ha->flags.scm_supported_f) ? "Supported" : "Not Supported"); if (vha->flags.nvme2_enabled) { - /* set BIT_15 of special feature control block for SLER */ + /* set BIT_15 of special feature control block for SLER */ ha->sf_init_cb->flags |= cpu_to_le16(BIT_15); - /* set BIT_14 of special feature control block for PI CTRL*/ + /* set BIT_14 of special feature control block for PI CTRL*/ ha->sf_init_cb->flags |= cpu_to_le16(BIT_14); } + } failed: @@ -1322,6 +1411,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha) if (rval != QLA_SUCCESS) { /*EMPTY*/ ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval); + vha->hw_err_cnt++; } else { /*EMPTY*/ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034, @@ -1413,7 +1503,7 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, mbx_cmd_t *mcp = &mc; if (!vha->hw->flags.fw_started) - return QLA_INVALID_COMMAND; + return QLA_INVALID_COMMAND; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038, "Entered %s.\n", __func__); @@ -1440,8 +1530,7 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer, sts_entry->entry_status &= IS_FWI2_CAPABLE(vha->hw) ? 
RF_MASK_24XX : RF_MASK; ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a, - "Done %s (status=%x).\n", __func__, - sts_entry->entry_status); + "Done %s (status=%x).\n", __func__, sts_entry->entry_status); } return rval; @@ -1659,9 +1748,10 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10; if (IS_FWI2_CAPABLE(vha->hw)) mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16; - if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) - mcp->in_mb |= MBX_15|MBX_21|MBX_22|MBX_23; - + if (IS_QLA27XX(vha->hw) || IS_QLA28XX(vha->hw)) { + mcp->in_mb |= MBX_15; + mcp->out_mb |= MBX_7|MBX_21|MBX_22|MBX_23; + } mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -1718,15 +1808,15 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa, vha->bbcr = mcp->mb[15]; if (mcp->mb[7] & SCM_EDC_ACC_RECEIVED) { ql_log(ql_log_info, vha, 0x11a4, - "SCM: EDC ELS completed, flags 0x%x\n", + "SCM: EDC ELS completed from FW, flags 0x%x\n", mcp->mb[21]); } if (mcp->mb[7] & SCM_RDF_ACC_RECEIVED) { vha->hw->flags.scm_enabled = 1; - vha->scm_fabric_connection_flags |= + vha->hw->scm.scm_fabric_connection_flags |= SCM_FLAG_RDF_COMPLETED; ql_log(ql_log_info, vha, 0x11a5, - "SCM: RDF ELS completed, flags 0x%x\n", + "SCM: RDF ELS completed from FW, flags 0x%x\n", mcp->mb[23]); } } @@ -1843,7 +1933,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size) mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10; } - if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) { + if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled){ mcp->mb[1] |= BIT_1; mcp->mb[16] = MSW(ha->sf_init_cb_dma); mcp->mb[17] = LSW(ha->sf_init_cb_dma); @@ -2084,7 +2174,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) int qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle, - struct port_database_24xx *pdb) + struct port_database_24xx *pdb) { mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; @@ -2166,6 +2256,13 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; else mcp->in_mb = MBX_1|MBX_0; + + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + mcp->mb[12] = 0; + mcp->out_mb |= MBX_12; + mcp->in_mb |= MBX_12; + } + mcp->tov = MBX_TOV_SECONDS; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -2178,6 +2275,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states) states[3] = mcp->mb[4]; states[4] = mcp->mb[5]; states[5] = mcp->mb[6]; /* DPORT status */ + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) + states[11] = mcp->mb[12]; /* MPI state. 
*/ } if (rval != QLA_SUCCESS) { @@ -2472,7 +2571,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; lg->entry_count = 1; - lg->handle = make_handle(req->id, lg->handle); + lg->handle = MAKE_HANDLE(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); lg->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI); if (opt & BIT_0) @@ -2742,11 +2841,16 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain, req = vha->req; lg->entry_type = LOGINOUT_PORT_IOCB_TYPE; lg->entry_count = 1; - lg->handle = make_handle(req->id, lg->handle); + lg->handle = MAKE_HANDLE(req->id, lg->handle); lg->nport_handle = cpu_to_le16(loop_id); - lg->control_flags = - cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| - LCF_FREE_NPORT); + if (!ha->flags.edif_enabled) { + lg->control_flags = + cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO| + LCF_FREE_NPORT); + } else { + lg->control_flags = + cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); + } lg->port_id[0] = al_pa; lg->port_id[1] = area; lg->port_id[2] = domain; @@ -3015,7 +3119,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha) * Kernel context. */ int -qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) +qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map, + u8 *num_entries) { int rval; mbx_cmd_t mc; @@ -3055,6 +3160,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map) if (pos_map) memcpy(pos_map, pmap, FCAL_MAP_SIZE); + if (num_entries) + *num_entries = pmap[0]; } dma_pool_free(ha->s_dma_pool, pmap, pmap_dma); @@ -3088,7 +3195,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - uint32_t *iter = (uint32_t *)stats; + uint32_t *iter = (void *)stats; ushort dwords = offsetof(typeof(*stats), link_up_cnt)/sizeof(*iter); struct qla_hw_data *ha = vha->hw; @@ -3147,7 +3254,7 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, int rval; mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - uint32_t *iter = (uint32_t *)stats; + uint32_t *iter = (void *)stats; ushort dwords = sizeof(*stats)/sizeof(*iter); ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, @@ -3160,8 +3267,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, mc.mb[6] = MSW(MSD(stats_dma)); mc.mb[7] = LSW(MSD(stats_dma)); mc.mb[8] = dwords; - mc.mb[9] = vha->vp_idx; - mc.mb[10] = options; + mc.mb[9] = cpu_to_le16(vha->vp_idx); + mc.mb[10] = cpu_to_le16(options); rval = qla24xx_send_mb_cmd(vha, &mc); @@ -3206,7 +3313,7 @@ qla24xx_abort_command(srb_t *sp) if (sp->qpair) req = sp->qpair->req; else - return QLA_FUNCTION_FAILED; + return QLA_ERR_NO_QPAIR; if (ql2xasynctmfenable) return qla24xx_async_abort_command(sp); @@ -3219,7 +3326,7 @@ qla24xx_abort_command(srb_t *sp) spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); if (handle == req->num_outstanding_cmds) { /* Command not found. 
*/ - return QLA_FUNCTION_FAILED; + return QLA_ERR_NOT_FOUND; } abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma); @@ -3231,15 +3338,17 @@ qla24xx_abort_command(srb_t *sp) abt->entry_type = ABORT_IOCB_TYPE; abt->entry_count = 1; - abt->handle = make_handle(req->id, abt->handle); + abt->handle = MAKE_HANDLE(req->id, abt->handle); abt->nport_handle = cpu_to_le16(fcport->loop_id); - abt->handle_to_abort = make_handle(req->id, handle); + abt->handle_to_abort = MAKE_HANDLE(req->id, handle); abt->port_id[0] = fcport->d_id.b.al_pa; abt->port_id[1] = fcport->d_id.b.area; abt->port_id[2] = fcport->d_id.b.domain; abt->vp_index = fcport->vha->vp_idx; abt->req_que_no = cpu_to_le16(req->id); + /* Need to pass original sp */ + qla_nvme_abort_set_option(abt, sp); rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0); if (rval != QLA_SUCCESS) { @@ -3254,7 +3363,7 @@ qla24xx_abort_command(srb_t *sp) ql_dbg(ql_dbg_mbx, vha, 0x1090, "Failed to complete IOCB -- completion status (%x).\n", le16_to_cpu(abt->nport_handle)); - if (abt->nport_handle == cpu_to_le16(CS_IOCB_ERROR)) + if (abt->nport_handle == CS_IOCB_ERROR) rval = QLA_FUNCTION_PARAMETER_ERROR; else rval = QLA_FUNCTION_FAILED; @@ -3262,6 +3371,10 @@ qla24xx_abort_command(srb_t *sp) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091, "Done %s.\n", __func__); } + if (rval == QLA_SUCCESS) + qla_nvme_abort_process_comp_status(abt, sp); + + qla_wait_nvme_release_cmd_kref(sp); dma_pool_free(ha->s_dma_pool, abt, abt_dma); @@ -3310,7 +3423,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport, tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; tsk->p.tsk.entry_count = 1; - tsk->p.tsk.handle = make_handle(req->id, tsk->p.tsk.handle); + tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle); tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); tsk->p.tsk.control_flags = cpu_to_le32(type); @@ -3948,7 +4061,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, rptid_entry->port_id[2], rptid_entry->port_id[1], rptid_entry->port_id[0]); ha->current_topology = ISP_CFG_NL; - qlt_update_host_map(vha, id); + qla_update_host_map(vha, id); } else if (rptid_entry->format == 1) { /* fabric */ @@ -3966,6 +4079,24 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, "Format 1: WWPN %8phC.\n", vha->port_name); + if ((ha->flags.flogi_acc_enabled == 1) && + (rptid_entry->vp_idx == 0) && + (rptid_entry->vp_status == 0 || + rptid_entry->vp_status == 2)) { + if (ha->flags.flogi_acc_pl_in_cont_iocb == 0) { + ha->flags.flogi_acc_pl_in_cont_iocb = 1; + ha->flogi_acc_pld_remaining = + rptid_entry->u.f1.flogi_acc_payload_size; + ha->flogi_acc_curr_offset = 0; + ql_dbg(ql_dbg_async, vha, 0x5075, + "FLOGI ACC payload size %x\n", + ha->flogi_acc_pld_remaining); + } else { + ql_dbg(ql_log_warn, vha, 0x5075, + "Working on previous FLOGI ACC payload\n"); + } + } + switch (rptid_entry->u.f1.flags & TOPO_MASK) { case TOPO_N2N: ha->current_topology = ISP_CFG_N; @@ -3983,7 +4114,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, id.b.al_pa = 2; ql_dbg(ql_dbg_async, vha, 0x5075, - "Format 1: assign local id %x remote id %x\n", + "Format 1: assign local id %x remote id %x \n", vha->d_id.b24, id.b24); } else { ql_dbg(ql_dbg_async, vha, 0x5075, @@ -4004,6 +4135,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, fcport->scan_state = QLA_FCPORT_FOUND; fcport->n2n_flag = 1; fcport->keep_nport_handle = 1; + fcport->login_retry = vha->hw->login_retry_count; + fcport->fc4_type = FS_FC4TYPE_FCP; + if 
(vha->flags.nvme_enabled)
+				fcport->fc4_type |= FS_FC4TYPE_NVME;
+
 			if (wwn_to_u64(vha->port_name) >
 			    wwn_to_u64(fcport->port_name)) {
@@ -4032,6 +4168,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 				/* if our portname is higher then initiate N2N login */
 
 				set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags);
+				ha->flags.n2n_ae = 1;
 				return;
 			break;
 		case TOPO_FL:
@@ -4039,6 +4176,13 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 			break;
 		case TOPO_F:
 			ha->current_topology = ISP_CFG_F;
+			if (QLA_DRV_SEND_ELS(ha)) {
+				vha->hw->edc_retry_cnt = 0;
+				vha->rdf_retry_cnt = 0;
+				set_bit(SCM_SEND_EDC, &vha->dpc_flags);
+				set_bit(SCM_SEND_RDF, &vha->dpc_flags);
+			}
+
 			break;
 		default:
 			break;
@@ -4061,11 +4205,12 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 			    WWN_SIZE);
 		}
 
-		qlt_update_host_map(vha, id);
+		qla_update_host_map(vha, id);
 	}
 
 	set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
 	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
+
 	} else {
 		if (rptid_entry->vp_status != VP_STAT_COMPL &&
 		    rptid_entry->vp_status != VP_STAT_ID_CHG) {
@@ -4088,7 +4233,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 		if (!found)
 			return;
 
-		qlt_update_host_map(vp, id);
+		qla_update_host_map(vp, id);
 
 		/*
 		 * Cannot configure here as we are still sitting on the
@@ -4097,6 +4242,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 		set_bit(VP_IDX_ACQUIRED, &vp->vp_flags);
 		set_bit(REGISTER_FC4_NEEDED, &vp->dpc_flags);
 		set_bit(REGISTER_FDMI_NEEDED, &vp->dpc_flags);
+
 	}
 	set_bit(VP_DPC_NEEDED, &vha->dpc_flags);
 	qla2xxx_wake_dpc(vha);
@@ -4119,7 +4265,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 	ha->flags.n2n_ae = 1;
 	spin_lock_irqsave(&ha->vport_slock, flags);
-	qlt_update_vp_map(vha, SET_AL_PA);
+	qla_update_vp_map(vha, SET_AL_PA);
 	spin_unlock_irqrestore(&ha->vport_slock, flags);
 
 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
@@ -4142,6 +4288,16 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 			rptid_entry->u.f2.remote_nport_id[1];
 		fcport->d_id.b.al_pa =
 			rptid_entry->u.f2.remote_nport_id[0];
+
+		/*
+		 * For the case where the remote port sends PRLO, FW sends up
+		 * RIDA Format 2 as an indication of session loss. In other
+		 * words, FW state changes from PRLI complete back to PLOGI
+		 * complete. Delete the session and let relogin drive the
+		 * reconnect.
+ */ + if (atomic_read(&fcport->state) == FCS_ONLINE) + qlt_schedule_sess_for_deletion(fcport); } } } @@ -4476,9 +4632,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req) spin_lock_irqsave(&ha->hardware_lock, flags); if (!(req->options & BIT_0)) { - wrt_reg_dword(req->req_q_in, 0); + WRT_REG_DWORD(req->req_q_in, 0); if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) - wrt_reg_dword(req->req_q_out, 0); + WRT_REG_DWORD(req->req_q_out, 0); } spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -4547,9 +4703,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) spin_lock_irqsave(&ha->hardware_lock, flags); if (!(rsp->options & BIT_0)) { - wrt_reg_dword(rsp->rsp_q_out, 0); + WRT_REG_DWORD(rsp->rsp_q_out, 0); if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) - wrt_reg_dword(rsp->rsp_q_in, 0); + WRT_REG_DWORD(rsp->rsp_q_in, 0); } spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -4776,7 +4932,7 @@ qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) mbx_cmd_t *mcp = &mc; int i; int len; - __le16 *str; + uint16_t *str; struct qla_hw_data *ha = vha->hw; if (!IS_P3P_TYPE(ha)) @@ -4785,14 +4941,14 @@ qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version) ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b, "Entered %s.\n", __func__); - str = (__force __le16 *)version; + str = (void *)version; len = strlen(version); mcp->mb[0] = MBC_SET_RNID_PARAMS; mcp->mb[1] = RNID_TYPE_SET_VERSION << 8; mcp->out_mb = MBX_1|MBX_0; for (i = 4; i < 16 && len; i++, str++, len -= 2) { - mcp->mb[i] = le16_to_cpup(str); + mcp->mb[i] = cpu_to_le16p(str); mcp->out_mb |= 1<hw; if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) && @@ -4944,11 +5101,29 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha) return QLA_MEMORY_ALLOC_FAILED; } + memset(els_cmd_map, 0, ELS_CMD_MAP_SIZE); + /* List of Purex ELS */ - cmd_opcode[0] = ELS_FPIN; - cmd_opcode[1] = ELS_RDP; + if (ql2xrdpenable) { + cmd_opcode[active_cnt] = ELS_COMMAND_RDP; + active_cnt++; + } + if (ha->flags.scm_supported_f) { + cmd_opcode[active_cnt] = ELS_COMMAND_FPIN; + active_cnt++; + } + if (QLA_DRV_SEND_ELS(ha)) { + cmd_opcode[active_cnt] = ELS_COMMAND_RDF; + active_cnt++; + cmd_opcode[active_cnt] = ELS_COMMAND_EDC; + active_cnt++; + } + if (ha->flags.edif_enabled) { + cmd_opcode[active_cnt] = ELS_AUTH_ELS; + active_cnt++; + } - for (i = 0; i < PUREX_CMD_COUNT; i++) { + for (i = 0; i < active_cnt; i++) { index = cmd_opcode[i] / 8; purex_bit = cmd_opcode[i] % 8; els_cmd_map[index] |= 1 << purex_bit; @@ -5210,7 +5385,7 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data) mcp->mb[8] = MSW(risc_addr); mcp->out_mb = MBX_8|MBX_1|MBX_0; mcp->in_mb = MBX_3|MBX_2|MBX_0; - mcp->tov = MBX_TOV_SECONDS; + mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { @@ -5398,13 +5573,15 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data) mcp->mb[8] = MSW(risc_addr); mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_1|MBX_0; - mcp->tov = MBX_TOV_SECONDS; + mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1101, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); + if (LOCKDOWN_ERROR(vha->hw, mcp->mb[1])) + rval = QLA_FLASH_LOCKDOWN; } else { ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102, "Done %s.\n", __func__); @@ -5430,18 +5607,18 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb) 
 	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 
 	/* Write the MBC data to the registers */
-	wrt_reg_word(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
-	wrt_reg_word(&reg->mailbox1, mb[0]);
-	wrt_reg_word(&reg->mailbox2, mb[1]);
-	wrt_reg_word(&reg->mailbox3, mb[2]);
-	wrt_reg_word(&reg->mailbox4, mb[3]);
+	WRT_REG_WORD(&reg->mailbox0, MBC_WRITE_MPI_REGISTER);
+	WRT_REG_WORD(&reg->mailbox1, mb[0]);
+	WRT_REG_WORD(&reg->mailbox2, mb[1]);
+	WRT_REG_WORD(&reg->mailbox3, mb[2]);
+	WRT_REG_WORD(&reg->mailbox4, mb[3]);
 
-	wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
+	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
 
 	/* Poll for MBC interrupt */
 	for (timer = 6000000; timer; timer--) {
 		/* Check for pending interrupts. */
-		stat = rd_reg_dword(&reg->host_status);
+		stat = RD_REG_DWORD(&reg->host_status);
 		if (stat & HSRX_RISC_INT) {
 			stat &= 0xff;
 
@@ -5449,10 +5626,10 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
 			    stat == 0x10 || stat == 0x11) {
 				set_bit(MBX_INTERRUPT,
 				    &ha->mbx_cmd_flags);
-				mb0 = rd_reg_word(&reg->mailbox0);
-				wrt_reg_dword(&reg->hccr,
+				mb0 = RD_REG_WORD(&reg->mailbox0);
+				WRT_REG_DWORD(&reg->hccr,
 				    HCCRX_CLR_RISC_INT);
-				rd_reg_dword(&reg->hccr);
+				RD_REG_DWORD(&reg->hccr);
 				break;
 			}
 		}
@@ -5564,13 +5741,11 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
 	} else {
 		if (mcp->mb[1] != 0x7)
 			ha->link_data_rate = mcp->mb[1];
-
 		if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
 			if (mcp->mb[4] & BIT_0)
 				ql_log(ql_log_info, vha, 0x11a2,
 				    "FEC=enabled (data rate).\n");
 		}
-
 		ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108, "Done %s.\n", __func__);
 		if (mcp->mb[1] != 0x7)
@@ -5644,6 +5819,122 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
 	return rval;
 }
 
+/* Check if the priority for a VL falls within the range
+ * specified.
+ * Note: This could change based on switch and firmware recommendations
+ */
+
+static bool qla2xxx_chk_prio_range(uint8_t vl, uint8_t prio)
+{
+	bool ret = false;
+
+	switch (vl) {
+	case VL_NORMAL:
+		if ((prio == 0) || (prio == 8))
+			ret = true;
+		break;
+
+	case VL_SLOW:
+		if ((prio == 2) || (prio == 4)
+		    || (prio == 6))
+			ret = true;
+		break;
+	default:
+		ret = false;
+	}
+	return ret;
+}
+
+int
+qla2xxx_set_vl(fc_port_t *fcport, uint8_t vl)
+{
+	int rval;
+	mbx_cmd_t mc;
+	mbx_cmd_t *mcp = &mc;
+	struct scsi_qla_host *vha = fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+
+	ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
+	    "Entered %s.\n", __func__);
+
+	memset(mcp, 0, sizeof(mbx_cmd_t));
+
+	if (!IS_QLA28XX(vha->hw))
+		return QLA_FUNCTION_FAILED;
+
+	if (atomic_read(&fcport->state) != FCS_ONLINE) {
+		ql_log(ql_log_warn, vha, 0x1016,
+		    "fc port is not online, state:%d\n",
+		    atomic_read(&fcport->state));
+		return QLA_FUNCTION_FAILED;
+	}
+
+	mcp->mb[0] = MBC_PORT_PARAMS;
+	mcp->mb[1] = fcport->loop_id;
+	mcp->mb[2] = BIT_0|BIT_8;
+	/* Set the priority (low) in MB 5.
+	 * If the priority value is not available
+	 * in the FPIN, use the default priority.
+	 * This could happen when the congestion is cleared owing
+	 * to time-period expiry.
+	 */
+	if (qla2xxx_chk_prio_range(vl, fcport->vl.prio_lo))
+		mcp->mb[5] = fcport->vl.prio_lo;
+	else {
+		ql_log(ql_log_info, vha, 0x1017,
+		    "Prio: %d not in range for VL: %d, using default\n",
+		    fcport->vl.prio_lo, vl);
+		if (vl == VL_NORMAL)
+			mcp->mb[5] = VL_NORMAL_DEF_PRIO;
+		else if (vl == VL_SLOW)
+			mcp->mb[5] = ha->flogi_acc.rx_vl[VL_SLOW].prio_lo;
+	}
+
+	switch (vl) {
+	case VL_SLOW:
+		mcp->mb[2] |= SCM_MARK_DEVICE_SLOW;
+		/* TODO: We are NOT changing the target speed, as this
+		 * causes a significant drop in throughput.
Could be + * changed in future + * //mcp->mb[3] = 0; + */ + mcp->mb[3] = fcport->fp_speed & 0x3F; + ql_log(ql_log_info, vha, 0x0203, + "USCM: Moving %8phN to slow VL, prio:%d\n", + fcport->port_name, mcp->mb[5]); + break; + case VL_NORMAL: + mcp->mb[2] |= SCM_MARK_DEVICE_NORMAL; + mcp->mb[3] = fcport->fp_speed & 0x3F; + ql_log(ql_log_info, vha, 0x0203, + "USCM: Moving %8phN to normal VL, prio:%d\n", + fcport->port_name, mcp->mb[5]); + break; + case VL_FAST: + mcp->mb[2] |= SCM_MARK_DEVICE_FAST; + mcp->mb[3] = fcport->fp_speed & 0x3F; + ql_log(ql_log_info, vha, 0x0203, + "USCM: Moving %8phN to fast VL\n", fcport->port_name); + break; + } + + mcp->mb[9] = vha->vp_idx; + mcp->in_mb = MBX_2|MBX_1|MBX_0; + mcp->out_mb = MBX_9|MBX_5|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->tov = 30; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_info, vha, 0x1016, + "Mbx Failed=%x mb[0]=%x, mb[1]=%x.\n", rval, + mcp->mb[0], mcp->mb[1]); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc, + "Done %s.\n", __func__); + } + + return rval; +} int qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, @@ -5670,7 +5961,7 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority, mcp->mb[9] = vha->vp_idx; mcp->out_mb = MBX_9|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; mcp->in_mb = MBX_4|MBX_3|MBX_1|MBX_0; - mcp->tov = MBX_TOV_SECONDS; + mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); if (mb != NULL) { @@ -5757,7 +6048,7 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha) mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_0; - mcp->tov = MBX_TOV_SECONDS; + mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -5792,7 +6083,7 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) mcp->out_mb = MBX_1|MBX_0; mcp->in_mb = MBX_0; - mcp->tov = MBX_TOV_SECONDS; + mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -5984,7 +6275,7 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) if (IS_QLA8031(ha)) mcp->out_mb |= MBX_6|MBX_5|MBX_4|MBX_3; mcp->in_mb = MBX_0; - mcp->tov = MBX_TOV_SECONDS; + mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -6020,7 +6311,7 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg) mcp->in_mb = MBX_2|MBX_1|MBX_0; if (IS_QLA8031(ha)) mcp->in_mb |= MBX_6|MBX_5|MBX_4|MBX_3; - mcp->tov = MBX_TOV_SECONDS; + mcp->tov = 30; mcp->flags = 0; rval = qla2x00_mailbox_command(vha, mcp); @@ -6230,7 +6521,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha) ql_dbg(ql_dbg_mbx, vha, 0x1144, "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, mcp->mb[0], mcp->mb[1]); - qla2xxx_dump_fw(vha); + ha->isp_ops->fw_dump(vha, 0); } else { ql_dbg(ql_dbg_mbx, vha, 0x1145, "Done %s.\n", __func__); } @@ -6275,7 +6566,10 @@ qla83xx_access_control(scsi_qla_host_t *vha, uint16_t options, "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[3]=%x mb[4]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], mcp->mb[4]); - qla2xxx_dump_fw(vha); + if (LOCKDOWN_ERROR(vha->hw, mcp->mb[1])) + rval = QLA_FLASH_LOCKDOWN; + else + ha->isp_ops->fw_dump(vha, 0); } else { if (subcode & BIT_5) *sector_size = mcp->mb[1]; @@ -6390,10 +6684,60 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha, return rval; } +int +qla26xx_dport_diagnostics_v2(scsi_qla_host_t *vha, + struct qla_dport_diag_v2 *dd, mbx_cmd_t *mcp) +{ + int rval; + dma_addr_t dd_dma; + uint size = sizeof(dd->buf); + uint16_t options = dd->options; + + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 
0x119f, + "Entered %s.\n", __func__); + + dd_dma = dma_map_single(&vha->hw->pdev->dev, + dd->buf, size, DMA_FROM_DEVICE); + if (dma_mapping_error(&vha->hw->pdev->dev, dd_dma)) { + ql_log(ql_log_warn, vha, 0x1194, "Failed to map dma buffer.\n"); + return QLA_MEMORY_ALLOC_FAILED; + } + + memset(dd->buf, 0, size); + + mcp->mb[0] = MBC_DPORT_DIAGNOSTICS; + mcp->mb[1] = options; + mcp->mb[2] = MSW(LSD(dd_dma)); + mcp->mb[3] = LSW(LSD(dd_dma)); + mcp->mb[6] = MSW(MSD(dd_dma)); + mcp->mb[7] = LSW(MSD(dd_dma)); + mcp->mb[8] = size; + mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->buf_size = size; + mcp->flags = MBX_DMA_IN; + mcp->tov = MBX_TOV_SECONDS * 4; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1195, "Failed=%x.\n", rval); + } else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1196, + "Done %s.\n", __func__); + } + + dma_unmap_single(&vha->hw->pdev->dev, dd_dma, + size, DMA_FROM_DEVICE); + + return rval; +} + static void qla2x00_async_mb_sp_done(srb_t *sp, int res) { sp->u.iocb_cmd.u.mbx.rc = res; + ql_dbg(ql_dbg_mbx, sp->vha, 0x113f, "MB:%s hndl %x completed\n", + sp->name, sp->handle); complete(&sp->u.iocb_cmd.u.mbx.comp); /* don't free sp here. Let the caller do the free */ } @@ -6409,26 +6753,25 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) srb_t *sp; struct srb_iocb *c; + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s %d\n", __func__,__LINE__); if (!vha->hw->flags.fw_started) goto done; + /* ref: INIT */ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); if (!sp) goto done; - sp->type = SRB_MB_IOCB; - sp->name = mb_to_str(mcp->mb[0]); - c = &sp->u.iocb_cmd; - c->timeout = qla2x00_async_iocb_timeout; init_completion(&c->u.mbx.comp); - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + sp->type = SRB_MB_IOCB; + sp->name = mb_to_str(mcp->mb[0]); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_mb_sp_done); memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG); - sp->done = qla2x00_async_mb_sp_done; - rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_mbx, vha, 0x1018, @@ -6460,7 +6803,8 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) } done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } @@ -6489,13 +6833,13 @@ int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) memset(&mc, 0, sizeof(mc)); mc.mb[0] = MBC_GET_PORT_DATABASE; - mc.mb[1] = fcport->loop_id; + mc.mb[1] = cpu_to_le16(fcport->loop_id); mc.mb[2] = MSW(pd_dma); mc.mb[3] = LSW(pd_dma); mc.mb[6] = MSW(MSD(pd_dma)); mc.mb[7] = LSW(MSD(pd_dma)); - mc.mb[9] = vha->vp_idx; - mc.mb[10] = opt; + mc.mb[9] = cpu_to_le16(vha->vp_idx); + mc.mb[10] = cpu_to_le16((uint16_t)opt); rval = qla24xx_send_mb_cmd(vha, &mc); if (rval != QLA_SUCCESS) { @@ -6558,6 +6902,13 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, fcport->d_id.b.al_pa = pd->port_id[2]; fcport->d_id.b.rsvd_1 = 0; + ql_dbg(ql_dbg_disc, vha, 0x2062, + "%8phC SVC Param w3 %02x%02x", + fcport->port_name, + pd->prli_svc_param_word_3[1], + pd->prli_svc_param_word_3[0]); + + if (NVME_TARGET(vha->hw, fcport)) { fcport->port_type = FCT_NVME; if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0) @@ -6606,7 +6957,7 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *vha, mc.mb[6] = MSW(MSD(id_list_dma)); mc.mb[7] = LSW(MSD(id_list_dma)); mc.mb[8] = 0; - mc.mb[9] = vha->vp_idx; + 
mc.mb[9] = cpu_to_le16(vha->vp_idx); rval = qla24xx_send_mb_cmd(vha, &mc); if (rval != QLA_SUCCESS) { @@ -6632,8 +6983,8 @@ int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value) memset(mcp->mb, 0 , sizeof(mcp->mb)); mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; - mcp->mb[1] = 1; - mcp->mb[2] = value; + mcp->mb[1] = cpu_to_le16(1); + mcp->mb[2] = cpu_to_le16(value); mcp->out_mb = MBX_2 | MBX_1 | MBX_0; mcp->in_mb = MBX_2 | MBX_0; mcp->tov = MBX_TOV_SECONDS; @@ -6658,7 +7009,7 @@ int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value) memset(mcp->mb, 0, sizeof(mcp->mb)); mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD; - mcp->mb[1] = 0; + mcp->mb[1] = cpu_to_le16(0); mcp->out_mb = MBX_1 | MBX_0; mcp->in_mb = MBX_2 | MBX_0; mcp->tov = MBX_TOV_SECONDS; @@ -6861,7 +7212,7 @@ ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led) mbx_cmd_t *mcp = &mc; int rval; - if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha) ) return QLA_FUNCTION_FAILED; ql_dbg(ql_dbg_mbx, vha, 0x7070, "Entered %s (options=%x).\n", @@ -6909,3 +7260,127 @@ ql26xx_led_config(scsi_qla_host_t *vha, uint16_t options, uint16_t *led) return rval; } + + +int qla_get_features(scsi_qla_host_t *vha, dma_addr_t dma, u16 buf_len) +{ + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int rval; + + + if (buf_len < FW_FEATURES_SIZE || !dma) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s %d\n", __func__, __LINE__); + return QLA_FUNCTION_FAILED; + } + + memset(&mc, 0, sizeof(mc)); + mcp->mb[0] = MBC_GET_RNID_PARAMS; + mcp->mb[1] = RNID_TYPE_GET_FEATURES << 8; + mcp->mb[2] = MSW(LSD(dma)); + mcp->mb[3] = LSW(LSD(dma)); + mcp->mb[6] = MSW(MSD(dma)); + mcp->mb[7] = LSW(MSD(dma)); + + /* send mb via iocb */ + rval = qla24xx_send_mb_cmd(vha, mcp); + + return rval; +} + +int qla_mpipt_get_status(scsi_qla_host_t *vha, u16 page, u16 *ret_buf, u16 buf_len) +{ + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int rval; + + if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha)) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s %d\n", __func__, __LINE__); + return QLA_FUNCTION_FAILED; + } + + if (buf_len < 8 || !ret_buf) { + ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s %d\n", __func__, __LINE__); + return QLA_FUNCTION_FAILED; + } + + memset(&mc, 0, sizeof(mc)); + mcp->mb[0] = MBC_MPI_PASSTHROUGH; + mcp->mb[1] = MPIPT_SUBCMD_GET_STATUS; + mcp->mb[2] = MPIPT_REQ_V1; + mcp->mb[3] = page; + + /* send mb via iocb */ + rval = qla24xx_send_mb_cmd(vha, &mc); + if (rval) { + ql_dbg(ql_dbg_mbx, vha, 0x7071, "Failed %s %x (mb=%x,%x,%x)\n", + __func__, rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); + } else { + ql_dbg(ql_dbg_mbx, vha, 0x7070, + "%s, page %x mb=%x,%x,%x,%x.\n", + __func__, page, mcp->mb[2], mcp->mb[3], mcp->mb[4], mcp->mb[5]); + memcpy(ret_buf, &mcp->mb[2], 8); + } + + return rval; +} + +/* This MB is used to check if the FW is still alive and able to generate an interrupt.
+ * Otherwise, a timeout will trigger a FW dump + reset. + */ +void qla_no_op_mb(struct scsi_qla_host *vha) +{ + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int rval; + + memset(&mc, 0, sizeof(mc)); + mcp->mb[0] = 0; // noop cmd = 0 + mcp->out_mb = MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = 5; + mcp->flags = 0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval) { + ql_dbg(ql_dbg_async, vha, 0x7071, + "Failed %s %x\n", __func__, rval); + } + + return; +} + + +int qla_mailbox_passthru(scsi_qla_host_t *vha, + uint16_t *mbx_in, uint16_t *mbx_out) +{ + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int rval = -EINVAL; + + memset(&mc, 0, sizeof(mc)); + // Receive the contents of all 32 registers + memcpy(&(mcp->mb), (char *)mbx_in, (32 * sizeof(uint16_t))); + + mcp->out_mb = 0xFFFFFFFF; + mcp->in_mb = 0xFFFFFFFF; + + mcp->tov = MBX_TOV_SECONDS; + mcp->flags = 0; + mcp->bufp = NULL; + + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) + ql_dbg(ql_dbg_mbx, vha, 0xf0a2, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + else { + ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xf0a3, + "Done %s.\n", __func__); + // Pass back the contents of all 32 registers + memcpy(mbx_out, &(mcp->mb), 32 * sizeof(uint16_t)); + } + + return rval; } diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index c7caf322f445b..0dcd48d90bae8 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -1,7 +1,8 @@ -// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" #include "qla_gbl.h" @@ -16,6 +17,8 @@ #include <scsi/scsicam.h> #include <linux/delay.h> +extern void qla24xx_process_purex_list(struct purex_list *); + void qla2x00_vp_stop_timer(scsi_qla_host_t *vha) { @@ -52,7 +55,7 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha) spin_unlock_irqrestore(&ha->vport_slock, flags); spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_update_vp_map(vha, SET_VP_IDX); + qla_update_vp_map(vha, SET_VP_IDX); spin_unlock_irqrestore(&ha->hardware_lock, flags); mutex_unlock(&ha->vport_lock); @@ -65,7 +68,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) uint16_t vp_id; struct qla_hw_data *ha = vha->hw; unsigned long flags = 0; - u8 i; + u32 i, bailout; mutex_lock(&ha->vport_lock); /* @@ -75,21 +78,29 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) * ensures no active vp_list traversal while the vport is removed * from the queue) */ - for (i = 0; i < 10; i++) { - if (wait_event_timeout(vha->vref_waitq, - !atomic_read(&vha->vref_count), HZ) > 0) + bailout = 0; + for (i = 0; i < 500; i++) { + spin_lock_irqsave(&ha->vport_slock, flags); + if (atomic_read(&vha->vref_count) == 0) { + list_del(&vha->list); + qla_update_vp_map(vha, RESET_VP_IDX); + bailout = 1; + } + spin_unlock_irqrestore(&ha->vport_slock, flags); + + if (bailout) break; + else + msleep(20); } - - spin_lock_irqsave(&ha->vport_slock, flags); - if (atomic_read(&vha->vref_count)) { - ql_dbg(ql_dbg_vport, vha, 0xfffa, - "vha->vref_count=%u timeout\n", vha->vref_count.counter); - vha->vref_count = (atomic_t)ATOMIC_INIT(0); + if (!bailout) { + ql_log(ql_log_info, vha, 0xfffa, + "vha->vref_count=%u timeout\n", vha->vref_count.counter); + spin_lock_irqsave(&ha->vport_slock, flags); + list_del(&vha->list); + qla_update_vp_map(vha, RESET_VP_IDX); + spin_unlock_irqrestore(&ha->vport_slock, flags); } - list_del(&vha->list); - qlt_update_vp_map(vha, RESET_VP_IDX); - spin_unlock_irqrestore(&ha->vport_slock, flags);
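/*
 * Illustrative sketch (not part of the patch): the qla_no_op_mb() helper
 * added above issues mailbox command 0 with a 5-second timeout purely as a
 * firmware heartbeat; if the completion interrupt never arrives, the normal
 * mailbox-timeout path performs the dump + reset. Driven periodically from
 * the DPC thread it could look like this; QLA_HB_INTERVAL and the
 * vha->hb_deadline field are assumed names for illustration only.
 */
#define QLA_HB_INTERVAL	(60 * HZ)

static void qla_heartbeat_check(struct scsi_qla_host *vha)
{
	if (!vha->hw->flags.fw_started)
		return;

	if (time_after(jiffies, vha->hb_deadline)) {
		vha->hb_deadline = jiffies + QLA_HB_INTERVAL;
		qla_no_op_mb(vha);	/* a timeout here triggers FW dump + reset */
	}
}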
vp_id = vha->vp_idx; ha->num_vhosts--; @@ -158,6 +169,14 @@ qla24xx_disable_vp(scsi_qla_host_t *vha) int ret = QLA_SUCCESS; fc_port_t *fcport; + if (vha->hw->flags.edif_enabled) { + if (DBELL_ACTIVE(vha)) + qla2x00_post_aen_work(vha, FCH_EVT_VENDOR_UNIQUE, + FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN); + /* delete sessions and flush sa_indexes */ + qla2x00_wait_for_sess_deletion(vha); + } + if (vha->hw->flags.fw_started) ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); @@ -166,11 +185,12 @@ qla24xx_disable_vp(scsi_qla_host_t *vha) list_for_each_entry(fcport, &vha->vp_fcports, list) fcport->logout_on_delete = 0; - qla2x00_mark_all_devices_lost(vha); + if (!vha->hw->flags.edif_enabled) + qla2x00_wait_for_sess_deletion(vha); /* Remove port id from vp target map */ spin_lock_irqsave(&vha->hw->hardware_lock, flags); - qlt_update_vp_map(vha, RESET_AL_PA); + qla_update_vp_map(vha, RESET_AL_PA); spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); qla2x00_mark_vp_devices_dead(vha); @@ -257,13 +277,13 @@ qla24xx_configure_vp(scsi_qla_host_t *vha) void qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) { - scsi_qla_host_t *vha; + scsi_qla_host_t *vha, *tvp; struct qla_hw_data *ha = rsp->hw; int i = 0; unsigned long flags; spin_lock_irqsave(&ha->vport_slock, flags); - list_for_each_entry(vha, &ha->vp_list, list) { + list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) { if (vha->vp_idx) { if (test_bit(VPORT_DELETE, &vha->dpc_flags)) continue; @@ -279,7 +299,7 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) case MBA_POINT_TO_POINT: case MBA_CHG_IN_CONNECTION: ql_dbg(ql_dbg_async, vha, 0x5024, - "Async_event for VP[%d], mb=0x%x vha=%p.\n", + "Async_event for VP[%d], mb=0x%x vha=%px.\n", i, *mb, vha); qla2x00_async_event(vha, rsp, mb); break; @@ -287,7 +307,7 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) case MBA_RSCN_UPDATE: if ((mb[3] & 0xff) == vha->vp_idx) { ql_dbg(ql_dbg_async, vha, 0x5024, - "Async_event for VP[%d], mb=0x%x vha=%p\n", + "Async_event for VP[%d], mb=0x%x vha=%px\n", i, *mb, vha); qla2x00_async_event(vha, rsp, mb); } @@ -367,13 +387,31 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha) } } - if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) { + if (test_and_clear_bit(SCM_SEND_EDC, &vha->dpc_flags)) { + if (QLA_DRV_SEND_ELS(ha)) { + ql_dbg(ql_dbg_dpc, vha, 0x4018, + "SCM ELS EDC scheduled.\n"); + qla2xxx_scm_send_edc_els(vha); + ql_dbg(ql_dbg_dpc, vha, 0x4019, + "SCM ELS EDC Sent\n"); + } + } + if (test_and_clear_bit(SCM_SEND_RDF, &vha->dpc_flags)) { + if (QLA_DRV_SEND_ELS(ha)) { + ql_dbg(ql_dbg_dpc, vha, 0x4018, + "SCM ELS RDF scheduled.\n"); + qla2xxx_scm_send_rdf_els(vha); + ql_dbg(ql_dbg_dpc, vha, 0x4019, + "SCM ELS RDF Sent\n"); + } + } + if (test_bit(SCM_NOTIFY_FW, &vha->dpc_flags)) { ql_dbg(ql_dbg_dpc, vha, 0x4016, - "FCPort update scheduled.\n"); - qla2x00_update_fcports(vha); - clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags); + "SCM update scheduled.\n"); + qla2xxx_update_scm_fcport(vha); + clear_bit(SCM_NOTIFY_FW, &vha->dpc_flags); ql_dbg(ql_dbg_dpc, vha, 0x4017, - "FCPort update end.\n"); + "SCM update end.\n"); } if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) && @@ -416,7 +454,7 @@ void qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; - scsi_qla_host_t *vp; + scsi_qla_host_t *vp, *tvp; unsigned long flags = 0; if (vha->vp_idx) @@ -430,7 +468,7 @@ qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) return; spin_lock_irqsave(&ha->vport_slock, flags); - list_for_each_entry(vp, &ha->vp_list, list) { + 
list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { if (vp->vp_idx) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); @@ -544,7 +582,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) host->transportt = qla2xxx_transport_vport_template; ql_dbg(ql_dbg_vport, vha, 0xa007, - "Detect vport hba %ld at address = %p.\n", + "Detect vport hba %ld at address = %px.\n", vha->host_no, vha); vha->flags.init_done = 1; @@ -769,15 +807,15 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, req->req_q_in = &reg->isp25mq.req_q_in; req->req_q_out = &reg->isp25mq.req_q_out; req->max_q_depth = ha->req_q_map[0]->max_q_depth; - req->out_ptr = (uint16_t *)(req->ring + req->length); + req->out_ptr = (void *)(req->ring + req->length); mutex_unlock(&ha->mq_lock); ql_dbg(ql_dbg_multiq, base_vha, 0xc004, - "ring_ptr=%p ring_index=%d, " + "ring_ptr=%px ring_index=%d, " "cnt=%d id=%d max_q_depth=%d.\n", req->ring_ptr, req->ring_index, req->cnt, req->id, req->max_q_depth); ql_dbg(ql_dbg_init, base_vha, 0x00de, - "ring_ptr=%p ring_index=%d, " + "ring_ptr=%px ring_index=%d, " "cnt=%d id=%d max_q_depth=%d.\n", req->ring_ptr, req->ring_index, req->cnt, req->id, req->max_q_depth); @@ -810,6 +848,7 @@ static void qla_do_work(struct work_struct *work) struct scsi_qla_host *vha = qpair->vha; spin_lock_irqsave(&qpair->qp_lock, flags); +// vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, qpair->rsp); spin_unlock_irqrestore(&qpair->qp_lock, flags); @@ -861,7 +900,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, rsp->vp_idx = vp_idx; rsp->hw = ha; ql_dbg(ql_dbg_init, base_vha, 0x00e4, - "rsp queue_id=%d rid=%d vp_idx=%d hw=%p.\n", + "rsp queue_id=%d rid=%d vp_idx=%d hw=%px.\n", que_id, rsp->rid, rsp->vp_idx, rsp->hw); /* Use alternate PCI bus number */ if (MSB(rsp->rid)) @@ -881,20 +920,21 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, reg = ISP_QUE_REG(ha, que_id); rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; rsp->rsp_q_out = &reg->isp25mq.rsp_q_out; - rsp->in_ptr = (uint16_t *)(rsp->ring + rsp->length); + rsp->in_ptr = (void *)(rsp->ring + rsp->length); mutex_unlock(&ha->mq_lock); ql_dbg(ql_dbg_multiq, base_vha, 0xc00b, - "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n", + "options=%x id=%d rsp_q_in=%px rsp_q_out=%px\n", rsp->options, rsp->id, rsp->rsp_q_in, rsp->rsp_q_out); ql_dbg(ql_dbg_init, base_vha, 0x00e5, - "options=%x id=%d rsp_q_in=%p rsp_q_out=%p\n", + "options=%x id=%d rsp_q_in=%px rsp_q_out=%px\n", rsp->options, rsp->id, rsp->rsp_q_in, rsp->rsp_q_out); ret = qla25xx_request_irq(ha, qpair, qpair->msix, ha->flags.disable_msix_handshake ? 
- QLA_MSIX_QPAIR_MULTIQ_RSP_Q : QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS); + QLA_MSIX_QPAIR_MULTIQ_RSP_Q: + QLA_MSIX_QPAIR_MULTIQ_RSP_Q_HS); if (ret) goto que_failed; @@ -952,6 +992,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) if (vp_index == 0 || vp_index >= ha->max_npiv_vports) return QLA_PARAMETER_ERROR; + /* ref: INIT */ sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL); if (!sp) return rval; @@ -959,9 +1000,8 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) sp->type = SRB_CTRL_VP; sp->name = "ctrl_vp"; sp->comp = &comp; - sp->done = qla_ctrlvp_sp_done; - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla_ctrlvp_sp_done); sp->u.iocb_cmd.u.ctrlvp.cmd = cmd; sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index; @@ -995,6 +1035,198 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) break; } done: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); return rval; } + +struct scsi_qla_host *qla_find_host_by_vp_idx(struct scsi_qla_host *vha, uint16_t vp_idx) +{ + struct qla_hw_data *ha = vha->hw; + + if (vha->vp_idx == vp_idx) + return vha; + + BUG_ON(ha->vp_map == NULL); + if (likely(test_bit(vp_idx, ha->vp_idx_map))) + return ha->vp_map[vp_idx].vha; + + return NULL; +} + +/* vport_slock to be held by the caller */ +void +qla_update_vp_map(struct scsi_qla_host *vha, int cmd) +{ + void *slot; + u32 key; + int rc; + + if (!vha->hw->vp_map) + return; + + key = vha->d_id.b24; + + switch (cmd) { + case SET_VP_IDX: + vha->hw->vp_map[vha->vp_idx].vha = vha; + break; + case SET_AL_PA: + slot = btree_lookup32(&vha->hw->host_map, key); + if (!slot) { + ql_dbg(ql_dbg_disc, vha, 0xf018, + "Save vha in host_map %px %06x\n", vha, key); + rc = btree_insert32(&vha->hw->host_map, + key, vha, GFP_ATOMIC); + if (rc) + ql_log(ql_log_info, vha, 0xd03e, + "Unable to insert s_id into host_map: %06x\n", + key); + return; + } + ql_dbg(ql_dbg_disc, vha, 0xf019, + "replace existing vha in host_map %px %06x\n", vha, key); + btree_update32(&vha->hw->host_map, key, vha); + break; + case RESET_VP_IDX: + vha->hw->vp_map[vha->vp_idx].vha = NULL; + break; + case RESET_AL_PA: + ql_dbg(ql_dbg_disc, vha, 0xf01a, + "clear vha in host_map %px %06x\n", vha, key); + slot = btree_lookup32(&vha->hw->host_map, key); + if (slot) + btree_remove32(&vha->hw->host_map, key); + vha->d_id.b24 = 0; + break; + } +} + +void qla_update_host_map(struct scsi_qla_host *vha, port_id_t id) +{ + + if (!vha->d_id.b24) { + vha->d_id = id; + qla_update_vp_map(vha, SET_AL_PA); + } else if (vha->d_id.b24 != id.b24) { + qla_update_vp_map(vha, RESET_AL_PA); + vha->d_id = id; + qla_update_vp_map(vha, SET_AL_PA); + } +} + +int qla_create_buf_pool(struct scsi_qla_host *vha, struct qla_qpair *qp) +{ + int sz; + qp->buf_pool.num_bufs = qp->req->length; + + sz = BITS_TO_LONGS(qp->req->length); + qp->buf_pool.buf_map = kcalloc(sz, sizeof(long), GFP_KERNEL); + if (!qp->buf_pool.buf_map) { + ql_log(ql_log_warn, vha, 0x0186, + "Failed to allocate buf_map(%ld).\n", sz * sizeof(unsigned long)); + return -ENOMEM; + } + sz = qp->req->length * sizeof(void *); + qp->buf_pool.buf_array = kcalloc(qp->req->length, sizeof(void *), GFP_KERNEL); + if (!qp->buf_pool.buf_array) { + ql_log(ql_log_warn, vha, 0x0186, + "Failed to allocate buf_array(%d).\n", sz); + kfree(qp->buf_pool.buf_map); + return -ENOMEM; + } + sz = qp->req->length * sizeof(dma_addr_t); + qp->buf_pool.dma_array = kcalloc(qp->req->length, 
sizeof(dma_addr_t), GFP_KERNEL); + if (!qp->buf_pool.dma_array) { + ql_log(ql_log_warn, vha, 0x0186, + "Failed to allocate dma_array(%d).\n", sz); + kfree(qp->buf_pool.buf_map); + kfree(qp->buf_pool.buf_array); + return -ENOMEM; + } + set_bit(0, qp->buf_pool.buf_map); + return 0; +} + +void qla_free_buf_pool(struct qla_qpair *qp) +{ + int i; + struct qla_hw_data *ha = qp->vha->hw; + + for (i = 0; i < qp->buf_pool.num_bufs; i++) { + if (qp->buf_pool.buf_array[i] && qp->buf_pool.dma_array[i]) + dma_pool_free(ha->fcp_cmnd_dma_pool, qp->buf_pool.buf_array[i], + qp->buf_pool.dma_array[i]); + qp->buf_pool.buf_array[i] = NULL; + qp->buf_pool.dma_array[i] = 0; + } + + kfree(qp->buf_pool.dma_array); + kfree(qp->buf_pool.buf_array); + kfree(qp->buf_pool.buf_map); +} + + +/* it is assumed that qp->qp_lock is held at this point */ +int qla_get_buf(struct scsi_qla_host *vha, struct qla_qpair *qp, struct qla_buf_dsc *dsc) +{ + u16 tag, i = 0; + void *buf; + dma_addr_t buf_dma; + struct qla_hw_data *ha = vha->hw; + + dsc->tag = TAG_FREED; +again: + tag = find_first_zero_bit(qp->buf_pool.buf_map, qp->buf_pool.num_bufs); + if (tag >= qp->buf_pool.num_bufs) { + ql_dbg(ql_dbg_io, vha, 0x00e2, + "qp(%d) ran out of buf resource.\n", qp->id); + return -EIO; + } + if (tag == 0) { + set_bit(0, qp->buf_pool.buf_map); + i++; + if (i == 5) { + ql_dbg(ql_dbg_io, vha, 0x00e3, + "qp(%d) unable to get tag.\n", qp->id); + return -EIO; + } + goto again; + } + + if (!qp->buf_pool.buf_array[tag]) { + buf = dma_pool_zalloc(ha->fcp_cmnd_dma_pool, GFP_ATOMIC, &buf_dma); + if (!buf) { + ql_log(ql_log_fatal, vha, 0x13b1, + "Failed to allocate buf.\n"); + return -ENOMEM; + } + + dsc->buf = qp->buf_pool.buf_array[tag] = buf; + dsc->buf_dma = qp->buf_pool.dma_array[tag] = buf_dma; + } else { + dsc->buf = qp->buf_pool.buf_array[tag]; + dsc->buf_dma = qp->buf_pool.dma_array[tag]; + memset(dsc->buf, 0, FCP_CMND_DMA_POOL_SIZE); + } + + qp->buf_pool.num_active++; + if (qp->buf_pool.num_active > qp->buf_pool.max_used) + qp->buf_pool.max_used = qp->buf_pool.num_active; + + dsc->tag = tag; + set_bit(tag, qp->buf_pool.buf_map); + return 0; +} + + +/* it is assumed that qp->qp_lock is held at this point */ +void qla_put_buf(struct qla_qpair *qp, struct qla_buf_dsc *dsc) +{ + if (dsc->tag == TAG_FREED) + return; + + clear_bit(dsc->tag, qp->buf_pool.buf_map); + qp->buf_pool.num_active--; + dsc->tag = TAG_FREED; +}
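The qla_create_buf_pool()/qla_get_buf()/qla_put_buf() helpers above give each queue pair a cache of FCP_CMND-sized DMA buffers, handed out by tag from a find_first_zero_bit() scan (tag 0 is kept set as a sentinel) and lazily allocated from fcp_cmnd_dma_pool on first use. A hypothetical caller, holding the qpair lock as the comments require:

    static int example_use_pool_buf(struct scsi_qla_host *vha, struct qla_qpair *qp)
    {
        struct qla_buf_dsc dsc;
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&qp->qp_lock, flags);
        rc = qla_get_buf(vha, qp, &dsc);   /* sets dsc.tag, dsc.buf, dsc.buf_dma */
        spin_unlock_irqrestore(&qp->qp_lock, flags);
        if (rc)
            return rc;

        /* ... build the FCP_CMND in dsc.buf, point the IOCB at dsc.buf_dma ... */

        spin_lock_irqsave(&qp->qp_lock, flags);
        qla_put_buf(qp, &dsc);             /* clears the tag for reuse */
        spin_unlock_irqrestore(&qp->qp_lock, flags);
        return 0;
    }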
*/ #include "qla_def.h" #include @@ -45,16 +46,17 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp) uint8_t io_lock_on; uint16_t command = 0; uint32_t *iptr; - __le32 __iomem *optr; + uint32_t __iomem *optr; uint32_t cnt; uint32_t mboxes; unsigned long wait_time; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - if (ha->pdev->error_state == pci_channel_io_perm_failure) { + if (ha->pdev->error_state > pci_channel_io_frozen) { ql_log(ql_log_warn, vha, 0x115c, - "PCI channel failed permanently, exiting.\n"); + "error_state is greater than pci_channel_io_frozen, " + "exiting.\n"); return QLA_FUNCTION_TIMEOUT; } @@ -108,7 +110,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp) spin_lock_irqsave(&ha->hardware_lock, flags); /* Load mailbox registers. */ - optr = ®->ispfx00.mailbox0; + optr = (uint32_t __iomem *)®->ispfx00.mailbox0; iptr = mcp->mb; command = mcp->mb[0]; @@ -116,7 +118,7 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp) for (cnt = 0; cnt < ha->mbx_count; cnt++) { if (mboxes & BIT_0) - wrt_reg_dword(optr, *iptr); + WRT_REG_DWORD(optr, *iptr); mboxes >>= 1; optr++; @@ -675,14 +677,14 @@ qlafx00_config_rings(struct scsi_qla_host *vha) struct qla_hw_data *ha = vha->hw; struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; - wrt_reg_dword(®->req_q_in, 0); - wrt_reg_dword(®->req_q_out, 0); + WRT_REG_DWORD(®->req_q_in, 0); + WRT_REG_DWORD(®->req_q_out, 0); - wrt_reg_dword(®->rsp_q_in, 0); - wrt_reg_dword(®->rsp_q_out, 0); + WRT_REG_DWORD(®->rsp_q_in, 0); + WRT_REG_DWORD(®->rsp_q_out, 0); /* PCI posting */ - rd_reg_dword(®->rsp_q_out); + RD_REG_DWORD(®->rsp_q_out); } char * @@ -764,7 +766,7 @@ qlafx00_iospace_config(struct qla_hw_data *ha) } ha->cregbase = - ioremap(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00); + ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00); if (!ha->cregbase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0128, "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); @@ -785,7 +787,7 @@ qlafx00_iospace_config(struct qla_hw_data *ha) } ha->iobase = - ioremap(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00); + ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00); if (!ha->iobase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x012b, "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); @@ -888,9 +890,9 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha) /* 30 seconds wait - Adjust if required */ wait_time = 30; - pseudo_aen = rd_reg_dword(®->pseudoaen); + pseudo_aen = RD_REG_DWORD(®->pseudoaen); if (pseudo_aen == 1) { - aenmbx7 = rd_reg_dword(®->initval7); + aenmbx7 = RD_REG_DWORD(®->initval7); ha->mbx_intr_code = MSW(aenmbx7); ha->rqstq_intr_code = LSW(aenmbx7); rval = qlafx00_driver_shutdown(vha, 10); @@ -901,7 +903,7 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha) /* wait time before firmware ready */ wtime = jiffies + (wait_time * HZ); do { - aenmbx = rd_reg_dword(®->aenmailbox0); + aenmbx = RD_REG_DWORD(®->aenmailbox0); barrier(); ql_dbg(ql_dbg_mbx, vha, 0x0133, "aenmbx: 0x%x\n", aenmbx); @@ -920,15 +922,15 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha) case MBA_FW_RESTART_CMPLT: /* Set the mbx and rqstq intr code */ - aenmbx7 = rd_reg_dword(®->aenmailbox7); + aenmbx7 = RD_REG_DWORD(®->aenmailbox7); ha->mbx_intr_code = MSW(aenmbx7); ha->rqstq_intr_code = LSW(aenmbx7); - ha->req_que_off = rd_reg_dword(®->aenmailbox1); - ha->rsp_que_off = rd_reg_dword(®->aenmailbox3); - ha->req_que_len = rd_reg_dword(®->aenmailbox5); - ha->rsp_que_len = 
rd_reg_dword(&reg->aenmailbox6); - wrt_reg_dword(&reg->aenmailbox0, 0); - rd_reg_dword_relaxed(&reg->aenmailbox0); + ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1); + ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3); + ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5); + ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6); + WRT_REG_DWORD(&reg->aenmailbox0, 0); + RD_REG_DWORD_RELAXED(&reg->aenmailbox0); ql_dbg(ql_dbg_init, vha, 0x0134, "f/w returned mbx_intr_code: 0x%x, " "rqstq_intr_code: 0x%x\n", @@ -958,13 +960,13 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha) * 3. issue Get FW State Mbox cmd to determine fw state * Set the mbx and rqstq intr code from Shadow Regs */ - aenmbx7 = rd_reg_dword(&reg->initval7); + aenmbx7 = RD_REG_DWORD(&reg->initval7); ha->mbx_intr_code = MSW(aenmbx7); ha->rqstq_intr_code = LSW(aenmbx7); - ha->req_que_off = rd_reg_dword(&reg->initval1); - ha->rsp_que_off = rd_reg_dword(&reg->initval3); - ha->req_que_len = rd_reg_dword(&reg->initval5); - ha->rsp_que_len = rd_reg_dword(&reg->initval6); + ha->req_que_off = RD_REG_DWORD(&reg->initval1); + ha->rsp_que_off = RD_REG_DWORD(&reg->initval3); + ha->req_que_len = RD_REG_DWORD(&reg->initval5); + ha->rsp_que_len = RD_REG_DWORD(&reg->initval6); ql_dbg(ql_dbg_init, vha, 0x0135, "f/w returned mbx_intr_code: 0x%x, " "rqstq_intr_code: 0x%x\n", @@ -1010,7 +1012,7 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha) if (time_after_eq(jiffies, wtime)) { ql_dbg(ql_dbg_init, vha, 0x0137, "Init f/w failed: aen[7]: 0x%x\n", - rd_reg_dword(&reg->aenmailbox7)); + RD_REG_DWORD(&reg->aenmailbox7)); rval = QLA_FUNCTION_FAILED; done = true; break; @@ -1404,7 +1406,7 @@ qlafx00_init_response_q_entries(struct rsp_que *rsp) pkt = rsp->ring_ptr; for (cnt = 0; cnt < rsp->length; cnt++) { pkt->signature = RESPONSE_PROCESSED; - wrt_reg_dword((void __force __iomem *)&pkt->signature, + WRT_REG_DWORD((void __force __iomem *)&pkt->signature, RESPONSE_PROCESSED); pkt++; } @@ -1420,13 +1422,13 @@ qlafx00_rescan_isp(scsi_qla_host_t *vha) qla2x00_request_irqs(ha, ha->rsp_q_map[0]); - aenmbx7 = rd_reg_dword(&reg->aenmailbox7); + aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7); ha->mbx_intr_code = MSW(aenmbx7); ha->rqstq_intr_code = LSW(aenmbx7); - ha->req_que_off = rd_reg_dword(&reg->aenmailbox1); - ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3); - ha->req_que_len = rd_reg_dword(&reg->aenmailbox5); - ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6); + ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1); + ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3); + ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5); + ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6); ql_dbg(ql_dbg_disc, vha, 0x2094, "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x " @@ -1471,7 +1473,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha) (!test_bit(UNLOADING, &vha->dpc_flags)) && (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && (ha->mr.fw_hbt_en)) { - fw_heart_beat = rd_reg_dword(&reg->fwheartbeat); + fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat); if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) { ha->mr.old_fw_hbt_cnt = fw_heart_beat; ha->mr.fw_hbt_miss_cnt = 0; @@ -1491,7 +1493,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha) if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) { /* Reset recovery to be performed in timer routine */ - aenmbx0 = rd_reg_dword(&reg->aenmailbox0); + aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0); if (ha->mr.fw_reset_timer_exp) { set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); @@ -1686,9 +1688,10 @@ qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id) return; } -void +int qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt 
*evt) { + int rval = 0; uint32_t aen_code, aen_data; aen_code = FCH_EVT_VENDOR_UNIQUE; @@ -1739,6 +1742,8 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt) fc_host_post_event(vha->host, fc_get_event_number(), aen_code, aen_data); + + return rval; } static void @@ -1787,17 +1792,18 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) struct register_host_info *preg_hsi; struct new_utsname *p_sysid = NULL; + /* ref: INIT */ sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; sp->type = SRB_FXIOCB_DCMD; sp->name = "fxdisc"; + qla2x00_init_async_sp(sp, FXDISC_TIMEOUT, + qla2x00_fxdisc_sp_done); + sp->u.iocb_cmd.timeout = qla2x00_fxdisc_iocb_timeout; fdisc = &sp->u.iocb_cmd; - fdisc->timeout = qla2x00_fxdisc_iocb_timeout; - qla2x00_init_timer(sp, FXDISC_TIMEOUT); - switch (fx_type) { case FXDISC_GET_CONFIG_INFO: fdisc->u.fxiocb.flags = @@ -1865,7 +1871,7 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) sizeof(phost_info->domainname)); strlcpy(phost_info->hostdriver, QLA2XXX_VERSION, sizeof(phost_info->hostdriver)); - preg_hsi->utc = (uint64_t)ktime_get_real_seconds(); + preg_hsi->utc = qla_get_real_seconds(); ql_dbg(ql_dbg_init, vha, 0x0149, "ISP%04X: Host registration with firmware\n", ha->pdev->device); @@ -1898,7 +1904,6 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) } fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type); - sp->done = qla2x00_fxdisc_sp_done; rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) @@ -1974,7 +1979,8 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len, fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle); done_free_sp: - sp->free(sp); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: return rval; } @@ -2137,7 +2143,7 @@ qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len, if (sense_len) { ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039, "Check condition Sense data, nexus%ld:%d:%llu cmd=%p.\n", - sp->vha->host_no, cp->device->id, cp->device->lun, + sp->vha->host_no, cp->device->id, lun_cast(cp->device->lun), cp); ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049, cp->sense_buffer, sense_len); @@ -2182,7 +2188,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req, { const char func[] = "IOSB_IOCB"; srb_t *sp; - struct bsg_job *bsg_job; + bsg_job_t *bsg_job; struct fc_bsg_reply *bsg_reply; struct srb_iocb *iocb_job; int res = 0; @@ -2219,7 +2225,7 @@ qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req, memcpy(fstatus.reserved_3, pkt->reserved_2, 20 * sizeof(uint8_t)); - fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply); + fw_sts_ptr = qla_fwsts_ptr(bsg_job); memcpy(fw_sts_ptr, &fstatus, sizeof(fstatus)); bsg_job->reply_len = sizeof(struct fc_bsg_reply) + @@ -2504,7 +2510,7 @@ qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) "rsp_info=%p resid=0x%x fw_resid=0x%x sense_len=0x%x, " "par_sense_len=0x%x, rsp_info_len=0x%x\n", comp_status, scsi_status, res, vha->host_no, - cp->device->id, cp->device->lun, fcport->tgt_id, + cp->device->id, lun_cast(cp->device->lun), fcport->tgt_id, lscsi_status, cp->cmnd, scsi_bufflen(cp), rsp_info, resid_len, fw_resid_len, sense_len, par_sense_len, rsp_info_len); @@ -2694,7 +2700,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha, uint16_t lreq_q_in = 0; uint16_t lreq_q_out = 0; - lreq_q_in = 
rd_reg_dword(rsp->rsp_q_in); + lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in); lreq_q_out = rsp->ring_index; while (lreq_q_in != lreq_q_out) { @@ -2756,7 +2762,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha, } /* Adjust ring index */ - wrt_reg_dword(rsp->rsp_q_out, rsp->ring_index); + WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index); } /** @@ -2787,9 +2793,9 @@ qlafx00_async_event(scsi_qla_host_t *vha) break; case QLAFX00_MBA_PORT_UPDATE: /* Port database update */ - ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1); - ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2); - ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3); + ha->aenmb[1] = RD_REG_DWORD(&reg->aenmailbox1); + ha->aenmb[2] = RD_REG_DWORD(&reg->aenmailbox2); + ha->aenmb[3] = RD_REG_DWORD(&reg->aenmailbox3); ql_dbg(ql_dbg_async, vha, 0x5077, "Asynchronous port Update received " "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n", @@ -2819,13 +2825,13 @@ qlafx00_async_event(scsi_qla_host_t *vha) break; default: - ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1); - ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2); - ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3); - ha->aenmb[4] = rd_reg_dword(&reg->aenmailbox4); - ha->aenmb[5] = rd_reg_dword(&reg->aenmailbox5); - ha->aenmb[6] = rd_reg_dword(&reg->aenmailbox6); - ha->aenmb[7] = rd_reg_dword(&reg->aenmailbox7); + ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1); + ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2); + ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3); + ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4); + ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5); + ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6); + ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7); ql_dbg(ql_dbg_async, vha, 0x5078, "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n", ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3], @@ -2845,7 +2851,7 @@ static void qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0) { uint16_t cnt; - __le32 __iomem *wptr; + uint32_t __iomem *wptr; struct qla_hw_data *ha = vha->hw; struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00; @@ -2855,10 +2861,10 @@ qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0) /* Load return mailbox registers. 
*/ ha->flags.mbox_int = 1; ha->mailbox_out32[0] = mb0; - wptr = &reg->mailbox17; + wptr = (uint32_t __iomem *)&reg->mailbox17; for (cnt = 1; cnt < ha->mbx_count; cnt++) { - ha->mailbox_out32[cnt] = rd_reg_dword(wptr); + ha->mailbox_out32[cnt] = RD_REG_DWORD(wptr); wptr++; } } @@ -2912,13 +2918,13 @@ qlafx00_intr_handler(int irq, void *dev_id) break; if (stat & QLAFX00_INTR_MB_CMPLT) { - mb[0] = rd_reg_dword(&reg->mailbox16); + mb[0] = RD_REG_WORD(&reg->mailbox16); qlafx00_mbx_completion(vha, mb[0]); status |= MBX_INTERRUPT; clr_intr |= QLAFX00_INTR_MB_CMPLT; } if (intr_stat & QLAFX00_INTR_ASYNC_CMPLT) { - ha->aenmb[0] = rd_reg_dword(&reg->aenmailbox0); + ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0); qlafx00_async_event(vha); clr_intr |= QLAFX00_INTR_ASYNC_CMPLT; } @@ -3086,7 +3092,7 @@ qlafx00_start_scsi(srb_t *sp) tot_dsds = nseg; req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); if (req->cnt < (req_cnt + 2)) { - cnt = rd_reg_dword_relaxed(req->req_q_out); + cnt = RD_REG_DWORD_RELAXED(req->req_q_out); if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; @@ -3108,7 +3114,7 @@ qlafx00_start_scsi(srb_t *sp) memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE); - lcmd_pkt.handle = make_handle(req->id, sp->handle); + lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle); lcmd_pkt.reserved_0 = 0; lcmd_pkt.port_path_ctrl = 0; lcmd_pkt.reserved_1 = 0; @@ -3151,7 +3157,7 @@ qlafx00_start_scsi(srb_t *sp) sp->flags |= SRB_DMA_VALID; /* Set chip new ring index. */ - wrt_reg_dword(req->req_q_in, req->ring_index); + WRT_REG_DWORD(req->req_q_in, req->ring_index); QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code); spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -3178,7 +3184,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb) memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00)); tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00; tm_iocb.entry_count = 1; - tm_iocb.handle = make_handle(req->id, sp->handle); + tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); tm_iocb.reserved_0 = 0; tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id); tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags); @@ -3188,7 +3194,7 @@ qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb) sizeof(struct scsi_lun)); } - memcpy(ptm_iocb, &tm_iocb, + memcpy((void *)ptm_iocb, &tm_iocb, sizeof(struct tsk_mgmt_entry_fx00)); wmb(); } @@ -3204,12 +3210,13 @@ qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb) memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00)); abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00; abt_iocb.entry_count = 1; - abt_iocb.handle = make_handle(req->id, sp->handle); - abt_iocb.abort_handle = make_handle(req->id, fxio->u.abt.cmd_hndl); + abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); + abt_iocb.abort_handle = + cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl)); abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id); abt_iocb.req_que_no = cpu_to_le16(req->id); - memcpy(pabt_iocb, &abt_iocb, + memcpy((void *)pabt_iocb, &abt_iocb, sizeof(struct abort_iocb_entry_fx00)); wmb(); } @@ -3219,14 +3226,14 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb) { struct srb_iocb *fxio = &sp->u.iocb_cmd; struct qla_mt_iocb_rqst_fx00 *piocb_rqst; - struct bsg_job *bsg_job; + bsg_job_t *bsg_job; struct fc_bsg_request *bsg_request; struct fxdisc_entry_fx00 fx_iocb; uint8_t entry_cnt = 1; memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00)); fx_iocb.entry_type = FX00_IOCB_TYPE; - fx_iocb.handle = sp->handle; + fx_iocb.handle = cpu_to_le32(sp->handle); 
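/*
 * Sketch of the handle packing assumed by MAKE_HANDLE() in the hunks above
 * (the definition here is for illustration; the real one lives in the
 * driver headers): the request-queue id rides in the upper 16 bits so the
 * completion path can locate the queue, and the per-queue command index
 * sits in the lower 16 bits.
 */
#define MAKE_HANDLE(q, h)  ((uint32_t)((((uint32_t)(q)) << 16) | (uint32_t)(h)))

static inline uint16_t handle_to_que_id(uint32_t handle) { return handle >> 16; }
static inline uint16_t handle_to_index(uint32_t handle)  { return handle & 0xffff; }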
fx_iocb.entry_count = entry_cnt; if (sp->type == SRB_FXIOCB_DCMD) { diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h index 73be8348402af..4567f0c42486d 100644 --- a/drivers/scsi/qla2xxx/qla_mr.h +++ b/drivers/scsi/qla2xxx/qla_mr.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. */ #ifndef __QLA_MR_H #define __QLA_MR_H @@ -95,7 +96,7 @@ struct tsk_mgmt_entry_fx00 { uint8_t sys_define; uint8_t entry_status; /* Entry Status. */ - uint32_t handle; /* System handle. */ + __le32 handle; /* System handle. */ uint32_t reserved_0; @@ -120,13 +121,13 @@ struct abort_iocb_entry_fx00 { uint8_t sys_define; /* System defined. */ uint8_t entry_status; /* Entry Status. */ - uint32_t handle; /* System handle. */ + __le32 handle; /* System handle. */ __le32 reserved_0; __le16 tgt_id_sts; /* Completion status. */ __le16 options; - uint32_t abort_handle; /* System handle. */ + __le32 abort_handle; /* System handle. */ __le32 reserved_2; __le16 req_que_no; @@ -165,7 +166,7 @@ struct fxdisc_entry_fx00 { uint8_t sys_define; /* System Defined. */ uint8_t entry_status; /* Entry Status. */ - uint32_t handle; /* System handle. */ + __le32 handle; /* System handle. */ __le32 reserved_0; /* System handle. */ __le16 func_num; @@ -358,47 +359,47 @@ struct config_info_data { #define CONTINUE_A64_TYPE_FX00 0x03 /* Continuation entry. */ #define QLAFX00_SET_HST_INTR(ha, value) \ - wrt_reg_dword((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \ + WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_TO_HBA_REG, \ value) #define QLAFX00_CLR_HST_INTR(ha, value) \ - wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \ + WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \ ~value) #define QLAFX00_RD_INTR_REG(ha) \ - rd_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG) + RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG) #define QLAFX00_CLR_INTR_REG(ha, value) \ - wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \ + WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_TO_HOST_REG, \ ~value) #define QLAFX00_SET_HBA_SOC_REG(ha, off, val)\ - wrt_reg_dword((ha)->cregbase + off, val) + WRT_REG_DWORD((ha)->cregbase + off, val) #define QLAFX00_GET_HBA_SOC_REG(ha, off)\ - rd_reg_dword((ha)->cregbase + off) + RD_REG_DWORD((ha)->cregbase + off) #define QLAFX00_HBA_RST_REG(ha, val)\ - wrt_reg_dword((ha)->cregbase + QLAFX00_HST_RST_REG, val) + WRT_REG_DWORD((ha)->cregbase + QLAFX00_HST_RST_REG, val) #define QLAFX00_RD_ICNTRL_REG(ha) \ - rd_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG) + RD_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG) #define QLAFX00_ENABLE_ICNTRL_REG(ha) \ - wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \ + WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \ (QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) | \ QLAFX00_ICR_ENB_MASK)) #define QLAFX00_DISABLE_ICNTRL_REG(ha) \ - wrt_reg_dword((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \ + WRT_REG_DWORD((ha)->cregbase + QLAFX00_HBA_ICNTRL_REG, \ (QLAFX00_GET_HBA_SOC_REG(ha, QLAFX00_HBA_ICNTRL_REG) & \ QLAFX00_ICR_DIS_MASK)) #define QLAFX00_RD_REG(ha, off) \ - rd_reg_dword((ha)->cregbase + off) + RD_REG_DWORD((ha)->cregbase + off) #define QLAFX00_WR_REG(ha, off, val) \ - wrt_reg_dword((ha)->cregbase + off, val) + WRT_REG_DWORD((ha)->cregbase + off, val) struct qla_mt_iocb_rqst_fx00 { __le32 reserved_0; diff --git a/drivers/scsi/qla2xxx/qla_nvme.c 
b/drivers/scsi/qla2xxx/qla_nvme.c index ba1b1c7549d35..5873dfcce7a34 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -1,7 +1,8 @@ -// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2017 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_nvme.h" #include <linux/scatterlist.h> @@ -35,18 +36,13 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) (fcport->nvme_flag & NVME_FLAG_REGISTERED)) return 0; - if (atomic_read(&fcport->state) == FCS_ONLINE) - return 0; - - qla2x00_set_fcport_state(fcport, FCS_ONLINE); - fcport->nvme_flag &= ~NVME_FLAG_RESETTING; memset(&req, 0, sizeof(struct nvme_fc_port_info)); req.port_name = wwn_to_u64(fcport->port_name); req.node_name = wwn_to_u64(fcport->node_name); req.port_role = 0; - req.dev_loss_tmo = 0; + req.dev_loss_tmo = fcport->dev_loss_tmo; if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR) req.port_role = FC_PORT_ROLE_NVME_INITIATOR; @@ -73,13 +69,16 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport) return ret; } + nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, + fcport->dev_loss_tmo); + if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER) ql_log(ql_log_info, vha, 0x212a, - "PortID:%06x Supports SLER\n", req.port_id); + "PortID:%06x Supports SLER\n", req.port_id); if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL) ql_log(ql_log_info, vha, 0x212b, - "PortID:%06x Supports PI control\n", req.port_id); + "PortID:%06x Supports PI control\n", req.port_id); rport = fcport->nvme_remote_port->private; rport->fcport = fcport; @@ -104,7 +103,7 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport, ha = vha->hw; ql_log(ql_log_info, vha, 0x2104, - "%s: handle %p, idx =%d, qsize %d\n", + "%s: handle %px, idx =%d, qsize %d\n", __func__, handle, qidx, qsize); if (qidx > qla_nvme_fc_transport.max_hw_queues) { @@ -121,15 +120,15 @@ static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport, if (ha->queue_pair_map[qidx]) { *handle = ha->queue_pair_map[qidx]; ql_log(ql_log_info, vha, 0x2121, - "Returning existing qpair of %p for idx=%x\n", - *handle, qidx); + "Returning existing qpair of %px for idx=%x\n", + *handle, qidx); return 0; } qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true); - if (!qpair) { + if (qpair == NULL) { ql_log(ql_log_warn, vha, 0x2122, - "Failed to allocate qpair\n"); + "Failed to allocate qpair\n"); return -EINVAL; } } @@ -151,12 +150,24 @@ static void qla_nvme_release_fcp_cmd_kref(struct kref *kref) nvme = &sp->u.iocb_cmd; fd = nvme->u.nvme.desc; - +#ifdef QLA2XXX_LATENCY_MEASURE + if (sp->type == SRB_NVME_LS) { + ktime_get_real_ts64(&sp->cmd_to_ml); + qla_get_nvme_cmd_latency(sp); + } +#endif + +#ifdef QLA2XXX_LATENCY_MEASURE + if (sp->type == SRB_NVME_CMD) { + ktime_get_real_ts64(&sp->cmd_to_ml); + qla_get_nvme_cmd_latency(sp); + } +#endif spin_lock_irqsave(&priv->cmd_lock, flags); priv->sp = NULL; sp->priv = NULL; if (priv->comp_status == QLA_SUCCESS) { - fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len); + fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len; fd->status = NVME_SC_SUCCESS; } else { fd->rcv_rsplen = 0; @@ -170,18 +181,6 @@ static void qla_nvme_release_fcp_cmd_kref(struct kref *kref) qla2xxx_rel_qpair_sp(sp->qpair, sp); } -static void qla_nvme_ls_unmap(struct srb *sp, struct nvmefc_ls_req *fd) -{ - if (sp->flags & SRB_DMA_VALID) { - struct srb_iocb *nvme = &sp->u.iocb_cmd; - struct qla_hw_data *ha = 
sp->fcport->vha->hw; - - dma_unmap_single(&ha->pdev->dev, nvme->u.nvme.cmd_dma, - fd->rqstlen, DMA_TO_DEVICE); - sp->flags &= ~SRB_DMA_VALID; - } -} - static void qla_nvme_release_ls_cmd_kref(struct kref *kref) { struct srb *sp = container_of(kref, struct srb, cmd_kref); @@ -199,7 +198,6 @@ static void qla_nvme_release_ls_cmd_kref(struct kref *kref) fd = priv->fd; - qla_nvme_ls_unmap(sp, fd); fd->done(fd, priv->comp_status); out: qla2x00_rel_sp(sp); @@ -233,6 +231,7 @@ static void qla_nvme_sp_done(srb_t *sp, int res) { struct nvme_private *priv = sp->priv; + qla2xxx_scmr_manage_qdepth(sp, sp->fcport, false); priv->comp_status = res; kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref); @@ -246,30 +245,54 @@ static void qla_nvme_abort_work(struct work_struct *work) srb_t *sp = priv->sp; fc_port_t *fcport = sp->fcport; struct qla_hw_data *ha = fcport->vha->hw; - int rval; + int rval, abts_done_called = 1; + bool io_wait_for_abort_done; + uint32_t handle; ql_dbg(ql_dbg_io, fcport->vha, 0xffff, - "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n", - __func__, sp, sp->handle, fcport, fcport->deleted); + "%s called for sp=%px, hndl=%x on fcport=%px desc=%px deleted=%d\n", + __func__, sp, sp->handle, fcport, sp->u.iocb_cmd.u.nvme.desc, fcport->deleted); - if (!ha->flags.fw_started && fcport->deleted) + if (!ha->flags.fw_started || fcport->deleted == QLA_SESS_DELETED) goto out; if (ha->flags.host_shutting_down) { ql_log(ql_log_info, sp->fcport->vha, 0xffff, - "%s Calling done on sp: %p, type: 0x%x\n", + "%s Calling done on sp: %px, type: 0x%x\n", __func__, sp, sp->type); sp->done(sp, 0); goto out; } + /* + * sp may not be valid after abort_command() if the return code is + * either SUCCESS or ERR_FROM_FW, so cache the value here. + */ + io_wait_for_abort_done = ql2xabts_wait_nvme && + QLA_ABTS_WAIT_ENABLED(sp); + handle = sp->handle; + rval = ha->isp_ops->abort_command(sp); ql_dbg(ql_dbg_io, fcport->vha, 0x212b, - "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n", + "%s: %s command for sp=%px, handle=%x on fcport=%px rval=%x\n", __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted", - sp, sp->handle, fcport, rval); + sp, handle, fcport, rval); + + /* + * If async tmf is enabled, the abort callback is called only on + * return codes QLA_SUCCESS and QLA_ERR_FROM_FW. + */ + if (ql2xasynctmfenable && + rval != QLA_SUCCESS && rval != QLA_ERR_FROM_FW) + abts_done_called = 0; + /* Return before decreasing the kref so that I/O requests + * are waited on until the ABTS completes. The kref is decreased + * in qla24xx_abort_sp_done(). + */ + if (abts_done_called && io_wait_for_abort_done) + return; out: /* kref_get was done before the work was scheduled.
*/ kref_put(&sp->cmd_kref, sp->put_fn); @@ -309,7 +332,6 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, struct qla_hw_data *ha; srb_t *sp; - if (!fcport || (fcport && fcport->deleted)) return rval; @@ -328,7 +350,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, sp->name = "nvme_ls"; sp->done = qla_nvme_sp_ls_done; sp->put_fn = qla_nvme_release_ls_cmd_kref; - sp->priv = priv; + sp->priv = (void *)priv; priv->sp = sp; kref_init(&sp->cmd_kref); spin_lock_init(&priv->cmd_lock); @@ -341,13 +363,10 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, nvme->u.nvme.rsp_len = fd->rsplen; nvme->u.nvme.rsp_dma = fd->rspdma; nvme->u.nvme.timeout_sec = fd->timeout; - nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr, - fd->rqstlen, DMA_TO_DEVICE); + nvme->u.nvme.cmd_dma = fd->rqstdma; dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma, fd->rqstlen, DMA_TO_DEVICE); - sp->flags |= SRB_DMA_VALID; - rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x700e, @@ -355,7 +374,6 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, wake_up(&sp->nvme_ls_waitq); sp->priv = NULL; priv->sp = NULL; - qla_nvme_ls_unmap(sp, fd); qla2x00_rel_sp(sp); return rval; } @@ -397,6 +415,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp) uint16_t avail_dsds; struct dsd64 *cur_dsd; struct req_que *req = NULL; + struct rsp_que *rsp = NULL; struct scsi_qla_host *vha = sp->fcport->vha; struct qla_hw_data *ha = vha->hw; struct qla_qpair *qpair = sp->qpair; @@ -408,6 +427,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp) /* Setup qpair pointers */ req = qpair->req; + rsp = qpair->rsp; tot_dsds = fd->sg_cnt; /* Acquire qpair specific lock */ @@ -419,9 +439,25 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp) goto queuing_error; } req_cnt = qla24xx_calc_iocbs(vha, tot_dsds); + + sp->iores.res_type = RESOURCE_IOCB|RESOURCE_EXCH; + sp->iores.exch_cnt = 1; + sp->iores.iocb_cnt = req_cnt; + if (qla_get_fw_resources(sp->qpair, &sp->iores)) { + rval = -EBUSY; + goto queuing_error; + } + if (req->cnt < (req_cnt + 2)) { - cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : - rd_reg_dword_relaxed(req->req_q_out); + if (IS_SHADOW_REG_CAPABLE(ha)) { + cnt = *req->out_ptr; + } else { + cnt = RD_REG_DWORD_RELAXED(req->req_q_out); + if (qla2x00_check_reg16_for_disconnect(vha, cnt)) { + rval = -EBUSY; + goto queuing_error; + } + } if (req->ring_index < cnt) req->cnt = cnt - req->ring_index; @@ -448,7 +484,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp) req->cnt -= req_cnt; cmd_pkt = (struct cmd_nvme *)req->ring_ptr; - cmd_pkt->handle = make_handle(req->id, handle); + cmd_pkt->handle = MAKE_HANDLE(req->id, handle); /* Zero out remaining portion of packet. */ clr_ptr = (uint32_t *)cmd_pkt + 2; @@ -461,11 +497,11 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp) /* No data transfer how do we check buffer len == 0?? 
*/ if (fd->io_dir == NVMEFC_FCP_READ) { - cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA); + cmd_pkt->control_flags = CF_READ_DATA; qpair->counters.input_bytes += fd->payload_length; qpair->counters.input_requests++; } else if (fd->io_dir == NVMEFC_FCP_WRITE) { - cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA); + cmd_pkt->control_flags = CF_WRITE_DATA; if ((vha->flags.nvme_first_burst) && (sp->fcport->nvme_prli_service_param & NVME_PRLI_SP_FIRST_BURST)) { @@ -473,19 +509,24 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp) sp->fcport->nvme_first_burst_size) || (sp->fcport->nvme_first_burst_size == 0)) cmd_pkt->control_flags |= - cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE); + CF_NVME_FIRST_BURST_ENABLE; } qpair->counters.output_bytes += fd->payload_length; qpair->counters.output_requests++; } else if (fd->io_dir == 0) { cmd_pkt->control_flags = 0; } + if (sp->fcport->edif.enable && fd->io_dir != 0) { + cmd_pkt->control_flags |= cpu_to_le16(CF_EN_EDIF); + cmd_pkt->control_flags &= ~(cpu_to_le16(CF_NEW_SA)); + } /* Set BIT_13 of control flags for Async event */ if (vha->flags.nvme2_enabled && - cmd->sqe.common.opcode == nvme_admin_async_event) { + (cmd->sqe.common.opcode == nvme_admin_async_event)) { cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT); } + /* Set NPORT-ID */ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id); cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; @@ -553,11 +594,26 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp) req->ring_ptr++; } + // ignore nvme async cmd due to long timeout + if (!nvme->u.nvme.aen_op) + sp->qpair->cmd_cnt++; + /* Set chip new ring index. */ - wrt_reg_dword(req->req_q_in, req->ring_index); + WRT_REG_DWORD(req->req_q_in, req->ring_index); + +#ifdef QLA2XXX_LATENCY_MEASURE + ktime_get_real_ts64(&sp->cmd_to_req_q); +#endif + + if (vha->flags.process_response_queue && + rsp->ring_ptr->signature != RESPONSE_PROCESSED) + qla24xx_process_response_queue(vha, rsp); queuing_error: + if (rval) + qla_put_fw_resources(sp->qpair, &sp->iores); spin_unlock_irqrestore(&qpair->qp_lock, flags); + return rval; } @@ -569,32 +625,31 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, fc_port_t *fcport; struct srb_iocb *nvme; struct scsi_qla_host *vha; - int rval; + int rval = -ENODEV; srb_t *sp; + struct qla_hw_data *ha; + bool throttle_down; struct qla_qpair *qpair = hw_queue_handle; struct nvme_private *priv = fd->private; struct qla_nvme_rport *qla_rport = rport->private; if (!priv) { /* nvme association has been torn down */ - return -ENODEV; + return rval; } fcport = qla_rport->fcport; - if (!qpair || !fcport) - return -ENODEV; - - if (!qpair->fw_started || fcport->deleted) + if (unlikely(!qpair || !fcport || fcport->deleted)) return -EBUSY; - vha = fcport->vha; - if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED)) - return -ENODEV; + return rval; - if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) || - (qpair && !qpair->fw_started) || fcport->deleted) + vha = fcport->vha; + ha = vha->hw; + + if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) return -EBUSY; /* @@ -607,6 +662,15 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, if (fcport->nvme_flag & NVME_FLAG_RESETTING) return -EBUSY; + qpair = qla_mapq_nvme_select_qpair(ha, qpair); + + /* If there is peer congestion, select the slow queue */ + if (IS_SCM_CAPABLE(ha)) { + if (ql2x_scmr_use_slow_queue && + qla_scmr_is_congested(&fcport->sfc)) + qpair = ha->queue_pair_map[ha->slow_queue_id]; + } + /* Alloc SRB structure */ sp = qla2xxx_get_qpair_sp(vha, qpair, 
fcport, GFP_ATOMIC); if (!sp) @@ -615,22 +679,46 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport, init_waitqueue_head(&sp->nvme_ls_waitq); kref_init(&sp->cmd_kref); spin_lock_init(&priv->cmd_lock); - sp->priv = priv; + sp->priv = (void *)priv; priv->sp = sp; + sp->dir = fd->io_dir; sp->type = SRB_NVME_CMD; sp->name = "nvme_cmd"; sp->done = qla_nvme_sp_done; sp->put_fn = qla_nvme_release_fcp_cmd_kref; sp->qpair = qpair; sp->vha = vha; + sp->cmd_sp = sp; nvme = &sp->u.iocb_cmd; nvme->u.nvme.desc = fd; + if (IS_SCM_CAPABLE(ha)) { + /* Throttle I/O commands only */ + if (fd->sqid) { + throttle_down = qla2xxx_throttle_req(sp, ha, fcport, fd->io_dir); + if (throttle_down == true) { + sp->priv = NULL; + priv->sp = NULL; + qla2xxx_rel_qpair_sp(sp->qpair, sp); + return -EBUSY; + } + } + qla2xxx_update_sfc_ios(sp, ha, fcport, fd->payload_length); + } + +#ifdef QLA2XXX_LATENCY_MEASURE + ktime_get_real_ts64(&sp->q_cmd); +#endif rval = qla2x00_start_nvme_mq(sp); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x212d, "qla2x00_start_nvme_mq failed = %d\n", rval); wake_up(&sp->nvme_ls_waitq); + qla2xxx_atomic64_sub(&ha->sfc.perf.scmr_bytes_per_period, + fd->payload_length); + qla2xxx_atomic64_sub(&fcport->sfc.perf.scmr_bytes_per_period, + fd->payload_length); + qla2xxx_scmr_manage_qdepth(sp, fcport, false); sp->priv = NULL; priv->sp = NULL; qla2xxx_rel_qpair_sp(sp->qpair, sp); @@ -644,7 +732,7 @@ static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport) struct scsi_qla_host *vha = lport->private; ql_log(ql_log_info, vha, 0x210f, - "localport delete of %p completed.\n", vha->nvme_local_port); + "localport delete of %px completed.\n", vha->nvme_local_port); vha->nvme_local_port = NULL; complete(&vha->nvme_del_done); } @@ -659,12 +747,13 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) fcport->nvme_flag &= ~NVME_FLAG_REGISTERED; fcport->nvme_flag &= ~NVME_FLAG_DELETING; ql_log(ql_log_info, fcport->vha, 0x2110, - "remoteport_delete of %p %8phN completed.\n", + "remoteport_delete of %px %8phN completed.\n", fcport, fcport->port_name); complete(&fcport->nvme_del_done); } static struct nvme_fc_port_template qla_nvme_fc_transport = { + NVME_FC_PORT_TEMPLATE_MODULE .localport_delete = qla_nvme_localport_delete, .remoteport_delete = qla_nvme_remoteport_delete, .create_queue = qla_nvme_alloc_queue, @@ -673,7 +762,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = { .ls_abort = qla_nvme_ls_abort, .fcp_io = qla_nvme_post_cmd, .fcp_abort = qla_nvme_fcp_abort, - .max_hw_queues = 8, + .max_hw_queues = DEF_NVME_HW_QUEUES, .max_sgl_segments = 1024, .max_dif_sgl_segments = 64, .dma_boundary = 0xFFFFFFFF, @@ -691,7 +780,7 @@ void qla_nvme_unregister_remote_port(struct fc_port *fcport) return; ql_log(ql_log_warn, NULL, 0x2112, - "%s: unregister remoteport on %p %8phN\n", + "%s: unregister remoteport on %px %8phN\n", __func__, fcport, fcport->port_name); if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags)) @@ -716,7 +805,7 @@ void qla_nvme_delete(struct scsi_qla_host *vha) if (vha->nvme_local_port) { init_completion(&vha->nvme_del_done); ql_log(ql_log_info, vha, 0x2116, - "unregister localport=%p\n", + "unregister localport=%px\n", vha->nvme_local_port); nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port); if (nv_ret) @@ -732,7 +821,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha) struct nvme_fc_port_template *tmpl; struct qla_hw_data *ha; struct nvme_fc_port_info pinfo; - int ret = -EINVAL; + int ret = EINVAL; if 
(!IS_ENABLED(CONFIG_NVME_FC))
 		return ret;
@@ -740,24 +829,53 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
 	ha = vha->hw;
 	tmpl = &qla_nvme_fc_transport;
-	WARN_ON(vha->nvme_local_port);
+	if (ql2xnvme_queues < MIN_NVME_HW_QUEUES) {
+		ql_log(ql_log_warn, vha, 0xfffd,
+		    "ql2xnvme_queues=%d is lower than minimum queues: %d. "
+		    "Resetting ql2xnvme_queues to: %d\n", ql2xnvme_queues,
+		    MIN_NVME_HW_QUEUES, DEF_NVME_HW_QUEUES);
+
+		ql2xnvme_queues = DEF_NVME_HW_QUEUES;
+	} else if (ql2xnvme_queues > (ha->max_qpairs - 1)) {
+		ql_log(ql_log_warn, vha, 0xfffd,
+		    "ql2xnvme_queues=%d is greater than available IRQs: %d. "
+		    "Resetting ql2xnvme_queues to: %d\n", ql2xnvme_queues,
+		    (ha->max_qpairs - 1), (ha->max_qpairs - 1));
+		ql2xnvme_queues = (ha->max_qpairs - 1);
+	}
 	qla_nvme_fc_transport.max_hw_queues =
-	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
-	    (uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));
+	    min((uint8_t)(ql2xnvme_queues),
+	    (uint8_t)((ha->max_qpairs - 1) ? (ha->max_qpairs - 1) : 1));
+
+	ql_log(ql_log_info, vha, 0xfffb,
+	    "Number of NVME queues used for this port: %d\n",
+	    qla_nvme_fc_transport.max_hw_queues);
 	pinfo.node_name = wwn_to_u64(vha->node_name);
 	pinfo.port_name = wwn_to_u64(vha->port_name);
 	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
 	pinfo.port_id = vha->d_id.b24;
-	ql_log(ql_log_info, vha, 0xffff,
-	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
-	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
-	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
-
-	ret = nvme_fc_register_localport(&pinfo, tmpl,
-	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
+	mutex_lock(&ha->vport_lock);
+	/*
+	 * Check again for nvme_local_port to see if any other thread raced
+	 * with this one and finished registration.
+	 */
+	if (!vha->nvme_local_port) {
+		ql_log(ql_log_info, vha, 0xffff,
+		    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
+		    pinfo.node_name, pinfo.port_name, pinfo.port_id);
+		qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
+
+		ret = nvme_fc_register_localport(&pinfo, tmpl,
+		    get_device(&ha->pdev->dev), &vha->nvme_local_port);
+		mutex_unlock(&ha->vport_lock);
+	} else {
+		mutex_unlock(&ha->vport_lock);
+		return 0;
+	}
 	if (ret) {
 		ql_log(ql_log_warn, vha, 0xffff,
 		    "register_localport failed: ret=%x\n", ret);
@@ -767,3 +885,91 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
 	return ret;
 }
+
+void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
+{
+	struct qla_hw_data *ha;
+
+	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
+		return;
+
+	ha = orig_sp->fcport->vha->hw;
+
+	WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
+	/* Use Driver Specified Retry Count */
+	abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
+	abt->drv.abts_rty_cnt = cpu_to_le16(2);
+	/* Use specified response timeout */
+	abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
+	/* set it to 2 * r_a_tov in secs */
+	abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
+}
+
+void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
+{
+	uint16_t comp_status;
+	struct scsi_qla_host *vha;
+
+	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
+		return;
+
+	vha = orig_sp->fcport->vha;
+
+	comp_status = le16_to_cpu(abt->comp_status);
+	switch (comp_status) {
+	case CS_RESET:		/* reset event aborted */
+	case CS_ABORTED:	/* IOCB was cleaned */
+	case CS_TIMEOUT:
+	/* N_Port handle is not currently logged in */
+	case CS_PORT_UNAVAILABLE:
+	/* N_Port handle was logged out while waiting for ABTS to complete */
+	case CS_PORT_LOGGED_OUT:
+	/* Firmware found that the port name changed */
+	case CS_PORT_CONFIG_CHG:
+		ql_dbg(ql_dbg_async, vha, 0xf09d,
+		    "Abort I/O IOCB completed with error, comp_status=%x\n",
+		    comp_status);
+		break;
+
+	/* BA_RJT was received for the ABTS */
+	case CS_REJECT_RECEIVED:
+		ql_dbg(ql_dbg_async, vha, 0xf09e,
+		    "BA_RJT was received for the ABTS: rjt_vendorUnique = %u, "
+		    "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
+		    abt->fw.ba_rjt_vendorUnique,
+		    abt->fw.ba_rjt_reasonCodeExpl,
+		    abt->fw.ba_rjt_reasonCode);
+		break;
+
+	case CS_COMPLETE:
+		ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
+		    "IOCB request completed successfully, comp_status=%x\n",
+		    comp_status);
+		break;
+
+	case CS_IOCB_ERROR:
+		ql_dbg(ql_dbg_async, vha, 0xf0a0,
+		    "IOCB request failed, comp_status=%x\n", comp_status);
+		break;
+
+	default:
+		ql_dbg(ql_dbg_async, vha, 0xf0a1,
+		    "Invalid Abort IO IOCB Completion Status %x\n",
+		    comp_status);
+		break;
+	}
+}
+
+inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
+{
+	if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
+		return;
+
+	kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
+}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index f81f219c7c7d7..60ec905a54bf2 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -1,17 +1,22 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * QLogic Fibre Channel HBA Driver
  * Copyright (c) 2003-2017 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
*/ #ifndef __QLA_NVME_H #define __QLA_NVME_H +#include +#include #include #include #include #include "qla_def.h" -#include "qla_dsd.h" + +#define MIN_NVME_HW_QUEUES 1 +#define DEF_NVME_HW_QUEUES 8 #define NVME_ATIO_CMD_OFF 32 #define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF) @@ -44,15 +49,15 @@ struct cmd_nvme { uint8_t entry_status; /* Entry Status. */ uint32_t handle; /* System handle. */ - __le16 nport_handle; /* N_PORT handle. */ - __le16 timeout; /* Command timeout. */ + uint16_t nport_handle; /* N_PORT handle. */ + uint16_t timeout; /* Command timeout. */ - __le16 dseg_count; /* Data segment count. */ - __le16 nvme_rsp_dsd_len; /* NVMe RSP DSD length */ + uint16_t dseg_count; /* Data segment count. */ + uint16_t nvme_rsp_dsd_len; /* NVMe RSP DSD length */ uint64_t rsvd; - __le16 control_flags; /* Control Flags */ + uint16_t control_flags; /* Control Flags */ #define CF_ADMIN_ASYNC_EVENT BIT_13 #define CF_NVME_FIRST_BURST_ENABLE BIT_11 #define CF_DIF_SEG_DESCR_ENABLE BIT_3 @@ -60,11 +65,11 @@ struct cmd_nvme { #define CF_READ_DATA BIT_1 #define CF_WRITE_DATA BIT_0 - __le16 nvme_cmnd_dseg_len; /* Data segment length. */ + uint16_t nvme_cmnd_dseg_len; /* Data segment length. */ __le64 nvme_cmnd_dseg_address __packed;/* Data segment address. */ __le64 nvme_rsp_dseg_address __packed; /* Data segment address. */ - __le32 byte_count; /* Total byte count. */ + uint32_t byte_count; /* Total byte count. */ uint8_t port_id[3]; /* PortID of destination port. */ uint8_t vp_index; @@ -79,24 +84,24 @@ struct pt_ls4_request { uint8_t sys_define; uint8_t entry_status; uint32_t handle; - __le16 status; - __le16 nport_handle; - __le16 tx_dseg_count; + uint16_t status; + uint16_t nport_handle; + uint16_t tx_dseg_count; uint8_t vp_index; uint8_t rsvd; - __le16 timeout; - __le16 control_flags; + uint16_t timeout; + uint16_t control_flags; #define CF_LS4_SHIFT 13 #define CF_LS4_ORIGINATOR 0 #define CF_LS4_RESPONDER 1 #define CF_LS4_RESPONDER_TERM 2 - __le16 rx_dseg_count; - __le16 rsvd2; - __le32 exchange_address; - __le32 rsvd3; - __le32 rx_byte_count; - __le32 tx_byte_count; + uint16_t rx_dseg_count; + uint16_t rsvd2; + uint32_t exchange_address; + uint32_t rsvd3; + uint32_t rx_byte_count; + uint32_t tx_byte_count; struct dsd64 dsd[2]; }; @@ -104,32 +109,32 @@ struct pt_ls4_request { struct pt_ls4_rx_unsol { uint8_t entry_type; uint8_t entry_count; - __le16 rsvd0; - __le16 rsvd1; + uint16_t rsvd0; + uint16_t rsvd1; uint8_t vp_index; uint8_t rsvd2; - __le16 rsvd3; - __le16 nport_handle; - __le16 frame_size; - __le16 rsvd4; - __le32 exchange_address; + uint16_t rsvd3; + uint16_t nport_handle; + uint16_t frame_size; + uint16_t rsvd4; + uint32_t exchange_address; uint8_t d_id[3]; uint8_t r_ctl; be_id_t s_id; uint8_t cs_ctl; uint8_t f_ctl[3]; uint8_t type; - __le16 seq_cnt; + uint16_t seq_cnt; uint8_t df_ctl; uint8_t seq_id; - __le16 rx_id; - __le16 ox_id; - __le32 param; - __le32 desc0; + uint16_t rx_id; + uint16_t ox_id; + uint32_t param; + uint32_t desc0; #define PT_LS4_PAYLOAD_OFFSET 0x2c #define PT_LS4_FIRST_PACKET_LEN 20 - __le32 desc_len; - __le32 payload[3]; + uint32_t desc_len; + uint32_t payload[3]; }; /* diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 0563c9530dcad..b539ba18a2217 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -1,11 +1,11 @@ -// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing 
details.
  */
 #include "qla_def.h"
 #include 
-#include 
 #include 
 #include 
@@ -369,7 +369,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in,
 	/* Read back value to make sure write has gone through before trying
 	 * to use it.
 	 */
-	win_read = rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase);
+	win_read = RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
 	if (win_read != ha->crb_win) {
 		ql_dbg(ql_dbg_p3p, vha, 0xb000,
 		    "%s: Written crbwin (0x%x) "
@@ -379,6 +379,47 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong off_in,
 	*off_out = (off_in & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
 }
 
+static inline unsigned long
+qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
+{
+	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+	/* See if we are currently pointing to the region we want to use next */
+	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) {
+		/* No need to change window. PCIX and PCIE regs are in both
+		 * windows.
+		 */
+		return off;
+	}
+
+	if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) {
+		/* We are in first CRB window */
+		if (ha->curr_window != 0)
+			WARN_ON(1);
+		return off;
+	}
+
+	if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) {
+		/* We are in second CRB window */
+		off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST;
+
+		if (ha->curr_window != 1)
+			return off;
+
+		/* We are in the QM or direct access
+		 * register region - do nothing
+		 */
+		if ((off >= QLA82XX_PCI_DIRECT_CRB) &&
+		    (off < QLA82XX_PCI_CAMQM_MAX))
+			return off;
+	}
+	/* strange address given */
+	ql_dbg(ql_dbg_p3p, vha, 0xb001,
+	    "%s: Warning: unm_nic_pci_set_crbwindow "
+	    "called with an unknown address (%llx).\n",
+	    QLA2XXX_DRIVER_NAME, off);
+	return off;
+}
+
 static int
 qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong off_in,
 	void __iomem **off_out)
@@ -478,7 +519,7 @@ qla82xx_rd_32(struct qla_hw_data *ha, ulong off_in)
 		qla82xx_crb_win_lock(ha);
 		qla82xx_pci_set_crbwindow_2M(ha, off_in, &off);
 	}
-	data = rd_reg_dword(off);
+	data = RD_REG_DWORD(off);
 	if (rv == 1) {
 		qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
@@ -895,17 +936,17 @@ qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
 {
 	uint32_t off_value, rval = 0;
-	wrt_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000);
+	WRT_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase, off & 0xFFFF0000);
 	/* Read back value to make sure write has gone through */
-	rd_reg_dword(CRB_WINDOW_2M + ha->nx_pcibase);
+	RD_REG_DWORD(CRB_WINDOW_2M + ha->nx_pcibase);
 	off_value = (off & 0x0000FFFF);
 	if (flag)
-		wrt_reg_dword(off_value + CRB_INDIRECT_2M + ha->nx_pcibase,
+		WRT_REG_DWORD(off_value + CRB_INDIRECT_2M + ha->nx_pcibase,
 		    data);
 	else
-		rval = rd_reg_dword(off_value + CRB_INDIRECT_2M +
+		rval = RD_REG_DWORD(off_value + CRB_INDIRECT_2M +
 		    ha->nx_pcibase);
 	return rval;
@@ -965,21 +1006,26 @@ qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
 static int
 qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
 {
-	uint32_t val;
-	int i, ret;
+	long timeout = 0;
+	uint32_t done = 1;
+	uint32_t val = 0;
+	int ret = 0;
 	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
-	for (i = 0; i < 50000; i++) {
+	while ((done != 0) && (ret == 0)) {
 		ret = qla82xx_read_status_reg(ha, &val);
-		if (ret < 0 || (val & 1) == 0)
-			return ret;
+		done = val & 1;
+		timeout++;
 		udelay(10);
 		cond_resched();
+		if (timeout >= 50000) {
+			ql_log(ql_log_warn, vha, 0xb00d,
+			    "Timeout reached waiting 
for write finish.\n"); + return -1; + } } - ql_log(ql_log_warn, vha, 0xb00d, - "Timeout reached waiting for write finish.\n"); - return -1; + return ret; } static int @@ -1066,8 +1112,7 @@ qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr, return ret; } - ret = qla82xx_flash_set_write_enable(ha); - if (ret < 0) + if (qla82xx_flash_set_write_enable(ha)) goto done_write; qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data); @@ -1167,7 +1212,6 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha) * Offset 4: Offset and number of addr/value pairs * that present in CRB initialize sequence */ - n = 0; if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || qla82xx_rom_fast_read(ha, 4, &n) != 0) { ql_log(ql_log_fatal, vha, 0x006e, @@ -1516,14 +1560,14 @@ qla82xx_get_table_desc(const u8 *unirom, int section) uint32_t i; struct qla82xx_uri_table_desc *directory = (struct qla82xx_uri_table_desc *)&unirom[0]; - uint32_t offset; - uint32_t tab_type; - uint32_t entries = le32_to_cpu(directory->num_entries); + __le32 offset; + __le32 tab_type; + __le32 entries = cpu_to_le32(directory->num_entries); for (i = 0; i < entries; i++) { - offset = le32_to_cpu(directory->findex) + - (i * le32_to_cpu(directory->entry_size)); - tab_type = get_unaligned_le32((u32 *)&unirom[offset] + 8); + offset = cpu_to_le32(directory->findex) + + (i * cpu_to_le32(directory->entry_size)); + tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8)); if (tab_type == section) return (struct qla82xx_uri_table_desc *)&unirom[offset]; @@ -1537,17 +1581,16 @@ qla82xx_get_data_desc(struct qla_hw_data *ha, u32 section, u32 idx_offset) { const u8 *unirom = ha->hablob->fw->data; - int idx = get_unaligned_le32((u32 *)&unirom[ha->file_prd_off] + - idx_offset); + int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset)); struct qla82xx_uri_table_desc *tab_desc = NULL; - uint32_t offset; + __le32 offset; tab_desc = qla82xx_get_table_desc(unirom, section); if (!tab_desc) return NULL; - offset = le32_to_cpu(tab_desc->findex) + - (le32_to_cpu(tab_desc->entry_size) * idx); + offset = cpu_to_le32(tab_desc->findex) + + (cpu_to_le32(tab_desc->entry_size) * idx); return (struct qla82xx_uri_data_desc *)&unirom[offset]; } @@ -1562,13 +1605,14 @@ qla82xx_get_bootld_offset(struct qla_hw_data *ha) uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF); if (uri_desc) - offset = le32_to_cpu(uri_desc->findex); + offset = cpu_to_le32(uri_desc->findex); } return (u8 *)&ha->hablob->fw->data[offset]; } -static u32 qla82xx_get_fw_size(struct qla_hw_data *ha) +static __le32 +qla82xx_get_fw_size(struct qla_hw_data *ha) { struct qla82xx_uri_data_desc *uri_desc = NULL; @@ -1576,10 +1620,10 @@ static u32 qla82xx_get_fw_size(struct qla_hw_data *ha) uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW, QLA82XX_URI_FIRMWARE_IDX_OFF); if (uri_desc) - return le32_to_cpu(uri_desc->size); + return cpu_to_le32(uri_desc->size); } - return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]); + return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]); } static u8 * @@ -1592,7 +1636,7 @@ qla82xx_get_fw_offs(struct qla_hw_data *ha) uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW, QLA82XX_URI_FIRMWARE_IDX_OFF); if (uri_desc) - offset = le32_to_cpu(uri_desc->findex); + offset = cpu_to_le32(uri_desc->findex); } return (u8 *)&ha->hablob->fw->data[offset]; @@ -1746,9 +1790,9 @@ void qla82xx_config_rings(struct scsi_qla_host *vha) put_unaligned_le64(req->dma, &icb->request_q_address); 
put_unaligned_le64(rsp->dma, &icb->response_q_address);
-	wrt_reg_dword(&reg->req_q_out[0], 0);
-	wrt_reg_dword(&reg->rsp_q_in[0], 0);
-	wrt_reg_dword(&reg->rsp_q_out[0], 0);
+	WRT_REG_DWORD(&reg->req_q_out[0], 0);
+	WRT_REG_DWORD(&reg->rsp_q_in[0], 0);
+	WRT_REG_DWORD(&reg->rsp_q_out[0], 0);
 }
 
 static int
@@ -1771,7 +1815,7 @@ qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
 	}
 
 	flashaddr = FLASH_ADDR_START;
-	size = qla82xx_get_fw_size(ha) / 8;
+	size = (__force u32)qla82xx_get_fw_size(ha) / 8;
 	ptr64 = (u64 *)qla82xx_get_fw_offs(ha);
 
 	for (i = 0; i < size; i++) {
@@ -1803,8 +1847,8 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
 	struct qla82xx_uri_table_desc *ptab_desc = NULL;
 	const uint8_t *unirom = ha->hablob->fw->data;
 	uint32_t i;
-	uint32_t entries;
-	uint32_t flags, file_chiprev, offset;
+	__le32 entries;
+	__le32 flags, file_chiprev, offset;
 	uint8_t chiprev = ha->chip_revision;
 	/* Hardcoding mn_present flag for P3P */
 	int mn_present = 0;
@@ -1815,14 +1859,14 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
 	if (!ptab_desc)
 		return -1;
 
-	entries = le32_to_cpu(ptab_desc->num_entries);
+	entries = cpu_to_le32(ptab_desc->num_entries);
 
 	for (i = 0; i < entries; i++) {
-		offset = le32_to_cpu(ptab_desc->findex) +
-		    (i * le32_to_cpu(ptab_desc->entry_size));
-		flags = le32_to_cpu(*((__le32 *)&unirom[offset] +
+		offset = cpu_to_le32(ptab_desc->findex) +
+		    (i * cpu_to_le32(ptab_desc->entry_size));
+		flags = cpu_to_le32(*((int *)&unirom[offset] +
 		    QLA82XX_URI_FLAGS_OFF));
-		file_chiprev = le32_to_cpu(*((__le32 *)&unirom[offset] +
+		file_chiprev = cpu_to_le32(*((int *)&unirom[offset] +
 		    QLA82XX_URI_CHIP_REV_OFF));
 
 		flagbit = mn_present ? 1 : 2;
@@ -1838,7 +1882,7 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
 static int
 qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
 {
-	uint32_t val;
+	__le32 val;
 	uint32_t min_size;
 	struct qla_hw_data *ha = vha->hw;
 	const struct firmware *fw = ha->hablob->fw;
@@ -1851,8 +1895,8 @@ qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
 
 		min_size = QLA82XX_URI_FW_MIN_SIZE;
 	} else {
-		val = get_unaligned_le32(&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
-		if (val != QLA82XX_BDINFO_MAGIC)
+		val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]);
+		if ((__force u32)val != QLA82XX_BDINFO_MAGIC)
 			return -EINVAL;
 
 		min_size = QLA82XX_FW_MIN_SIZE;
@@ -1952,18 +1996,18 @@ void
 qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
 {
 	uint16_t cnt;
-	__le16 __iomem *wptr;
+	uint16_t __iomem *wptr;
 	struct qla_hw_data *ha = vha->hw;
 	struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
 
-	wptr = &reg->mailbox_out[1];
+	wptr = (uint16_t __iomem *)&reg->mailbox_out[1];
 
 	/* Load return mailbox registers. */
 	ha->flags.mbox_int = 1;
 	ha->mailbox_out[0] = mb0;
 	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
-		ha->mailbox_out[cnt] = rd_reg_word(wptr);
+		ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
 		wptr++;
 	}
@@ -2025,8 +2069,8 @@ qla82xx_intr_handler(int irq, void *dev_id)
 	vha = pci_get_drvdata(ha->pdev);
 	for (iter = 1; iter--; ) {
-		if (rd_reg_dword(&reg->host_int)) {
-			stat = rd_reg_dword(&reg->host_status);
+		if (RD_REG_DWORD(&reg->host_int)) {
+			stat = RD_REG_DWORD(&reg->host_status);
 
 			switch (stat & 0xff) {
 			case 0x1:
@@ -2038,9 +2082,9 @@
 				break;
 			case 0x12:
 				mb[0] = MSW(stat);
-				mb[1] = rd_reg_word(&reg->mailbox_out[1]);
-				mb[2] = rd_reg_word(&reg->mailbox_out[2]);
-				mb[3] = rd_reg_word(&reg->mailbox_out[3]);
+				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
 				qla2x00_async_event(vha, rsp, mb);
 				break;
 			case 0x13:
@@ -2053,7 +2097,7 @@
 				break;
 			}
 		}
-		wrt_reg_dword(&reg->host_int, 0);
+		WRT_REG_DWORD(&reg->host_int, 0);
 	}
 
 	qla2x00_handle_mbx_completion(ha, status);
@@ -2091,11 +2135,11 @@ qla82xx_msix_default(int irq, void *dev_id)
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	vha = pci_get_drvdata(ha->pdev);
 	do {
-		host_int = rd_reg_dword(&reg->host_int);
+		host_int = RD_REG_DWORD(&reg->host_int);
 		if (qla2x00_check_reg32_for_disconnect(vha, host_int))
 			break;
 		if (host_int) {
-			stat = rd_reg_dword(&reg->host_status);
+			stat = RD_REG_DWORD(&reg->host_status);
 
 			switch (stat & 0xff) {
 			case 0x1:
@@ -2107,9 +2151,9 @@
 				break;
 			case 0x12:
 				mb[0] = MSW(stat);
-				mb[1] = rd_reg_word(&reg->mailbox_out[1]);
-				mb[2] = rd_reg_word(&reg->mailbox_out[2]);
-				mb[3] = rd_reg_word(&reg->mailbox_out[3]);
+				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
 				qla2x00_async_event(vha, rsp, mb);
 				break;
 			case 0x13:
@@ -2122,7 +2166,7 @@
 				break;
 			}
 		}
-		wrt_reg_dword(&reg->host_int, 0);
+		WRT_REG_DWORD(&reg->host_int, 0);
 	} while (0);
 
 	qla2x00_handle_mbx_completion(ha, status);
@@ -2152,11 +2196,11 @@ qla82xx_msix_rsp_q(int irq, void *dev_id)
 	reg = &ha->iobase->isp82;
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	vha = pci_get_drvdata(ha->pdev);
-	host_int = rd_reg_dword(&reg->host_int);
+	host_int = RD_REG_DWORD(&reg->host_int);
 	if (qla2x00_check_reg32_for_disconnect(vha, host_int))
 		goto out;
 	qla24xx_process_response_queue(vha, rsp);
-	wrt_reg_dword(&reg->host_int, 0);
+	WRT_REG_DWORD(&reg->host_int, 0);
 out:
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	return IRQ_HANDLED;
@@ -2187,11 +2231,11 @@ qla82xx_poll(int irq, void *dev_id)
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	vha = pci_get_drvdata(ha->pdev);
-	host_int = rd_reg_dword(&reg->host_int);
+	host_int = RD_REG_DWORD(&reg->host_int);
 	if (qla2x00_check_reg32_for_disconnect(vha, host_int))
 		goto out;
 	if (host_int) {
-		stat = rd_reg_dword(&reg->host_status);
+		stat = RD_REG_DWORD(&reg->host_status);
 
 		switch (stat & 0xff) {
 		case 0x1:
 		case 0x2:
@@ -2202,9 +2246,9 @@
 			break;
 		case 0x12:
 			mb[0] = MSW(stat);
-			mb[1] = rd_reg_word(&reg->mailbox_out[1]);
-			mb[2] = rd_reg_word(&reg->mailbox_out[2]);
-			mb[3] = rd_reg_word(&reg->mailbox_out[3]);
+			mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+			mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+			mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
 			qla2x00_async_event(vha, rsp, mb);
 			break;
 		case 0x13:
@@ -2216,7 +2260,7 @@
 			    stat * 0xff);
 			break;
 		}
-		wrt_reg_dword(&reg->host_int, 0);
+		WRT_REG_DWORD(&reg->host_int, 0);
 	}
 out:
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2505,8 +2549,8 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
 	return qla82xx_check_rcvpeg_state(ha);
 }
 
-static __le32 *
-qla82xx_read_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr,
+static uint32_t *
+qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
 	uint32_t length)
 {
 	uint32_t i;
@@ -2631,13 +2675,13 @@ qla82xx_read_optrom_data(struct scsi_qla_host *vha, void *buf,
 	uint32_t offset, uint32_t length)
 {
 	scsi_block_requests(vha->host);
-	qla82xx_read_flash_data(vha, buf, offset, length);
+	qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
 	scsi_unblock_requests(vha->host);
 	return buf;
 }
 
 static int
-qla82xx_write_flash_data(struct scsi_qla_host *vha, __le32 *dwptr,
+qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
 	uint32_t faddr, uint32_t dwords)
 {
 	int ret;
@@ -2714,7 +2758,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, __le32 *dwptr,
 		}
 
 		ret = qla82xx_write_flash_dword(ha, faddr,
-		    le32_to_cpu(*dwptr));
+		    cpu_to_le32(*dwptr));
 		if (ret) {
 			ql_dbg(ql_dbg_p3p, vha, 0xb020,
 			    "Unable to program flash address=%x data=%x.\n",
@@ -2774,10 +2818,10 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
 	if (ql2xdbwr)
 		qla82xx_wr_32(ha, (unsigned long)ha->nxdb_wr_ptr, dbval);
 	else {
-		wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
+		WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
 		wmb();
-		while (rd_reg_dword(ha->nxdb_rd_ptr) != dbval) {
-			wrt_reg_dword(ha->nxdb_wr_ptr, dbval);
+		while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+			WRT_REG_DWORD(ha->nxdb_wr_ptr, dbval);
 			wmb();
 		}
 	}
@@ -3680,7 +3724,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
 /* Minidump related functions */
 static int
 qla82xx_minidump_process_control(scsi_qla_host_t *vha,
-	qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
+	qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
 {
 	struct qla_hw_data *ha = vha->hw;
 	struct qla82xx_md_entry_crb *crb_entry;
@@ -3797,12 +3841,12 @@ qla82xx_minidump_process_control(scsi_qla_host_t *vha,
 
 static void
 qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
-	qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
+	qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
 {
 	struct qla_hw_data *ha = vha->hw;
 	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
 	struct qla82xx_md_entry_rdocm *ocm_hdr;
-	__le32 *data_ptr = *d_ptr;
+	uint32_t *data_ptr = *d_ptr;
 
 	ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr;
 	r_addr = ocm_hdr->read_addr;
@@ -3810,7 +3854,7 @@ qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
 	loop_cnt = ocm_hdr->op_count;
 
 	for (i = 0; i < loop_cnt; i++) {
-		r_value = rd_reg_dword(r_addr + ha->nx_pcibase);
+		r_value = RD_REG_DWORD(r_addr + ha->nx_pcibase);
 		*data_ptr++ = cpu_to_le32(r_value);
 		r_addr += r_stride;
 	}
@@ -3819,12 +3863,12 @@
 
 static void
 qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha,
-	qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr)
+	qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr)
 {
 	struct qla_hw_data *ha = vha->hw;
 	uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
 	struct qla82xx_md_entry_mux *mux_hdr;
-	__le32 *data_ptr = *d_ptr;
+	uint32_t *data_ptr = *d_ptr;
 
 	mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr;
 	r_addr = mux_hdr->read_addr;
@@ -3845,12 +3889,12 @@
 
 static void
 qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha,
-	qla82xx_md_entry_hdr_t *entry_hdr, 
__le32 **d_ptr) + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_stride, loop_cnt, i, r_value; struct qla82xx_md_entry_crb *crb_hdr; - __le32 *data_ptr = *d_ptr; + uint32_t *data_ptr = *d_ptr; crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr; r_addr = crb_hdr->addr; @@ -3868,7 +3912,7 @@ qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha, static int qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha, - qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t addr, r_addr, c_addr, t_r_addr; @@ -3877,7 +3921,7 @@ qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha, uint32_t c_value_w, c_value_r; struct qla82xx_md_entry_cache *cache_hdr; int rval = QLA_FUNCTION_FAILED; - __le32 *data_ptr = *d_ptr; + uint32_t *data_ptr = *d_ptr; cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; @@ -3927,14 +3971,14 @@ qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha, static void qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha, - qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t addr, r_addr, c_addr, t_r_addr; uint32_t i, k, loop_count, t_value, r_cnt, r_value; uint32_t c_value_w; struct qla82xx_md_entry_cache *cache_hdr; - __le32 *data_ptr = *d_ptr; + uint32_t *data_ptr = *d_ptr; cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; @@ -3962,14 +4006,14 @@ qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha, static void qla82xx_minidump_process_queue(scsi_qla_host_t *vha, - qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t s_addr, r_addr; uint32_t r_stride, r_value, r_cnt, qid = 0; uint32_t i, k, loop_cnt; struct qla82xx_md_entry_queue *q_hdr; - __le32 *data_ptr = *d_ptr; + uint32_t *data_ptr = *d_ptr; q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr; s_addr = q_hdr->select_addr; @@ -3992,13 +4036,13 @@ qla82xx_minidump_process_queue(scsi_qla_host_t *vha, static void qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha, - qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_value; uint32_t i, loop_cnt; struct qla82xx_md_entry_rdrom *rom_hdr; - __le32 *data_ptr = *d_ptr; + uint32_t *data_ptr = *d_ptr; rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr; r_addr = rom_hdr->read_addr; @@ -4018,7 +4062,7 @@ qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha, static int qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha, - qla82xx_md_entry_hdr_t *entry_hdr, __le32 **d_ptr) + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_value, r_data; @@ -4026,7 +4070,7 @@ qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha, struct qla82xx_md_entry_rdmem *m_hdr; unsigned long flags; int rval = QLA_FUNCTION_FAILED; - __le32 *data_ptr = *d_ptr; + uint32_t *data_ptr = *d_ptr; m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr; r_addr = m_hdr->read_addr; @@ -4119,12 +4163,12 @@ qla82xx_md_collect(scsi_qla_host_t *vha) int no_entry_hdr = 0; qla82xx_md_entry_hdr_t *entry_hdr; struct qla82xx_md_template_hdr *tmplt_hdr; - __le32 *data_ptr; + uint32_t *data_ptr; uint32_t total_data_size = 0, f_capture_mask, data_collected = 0; int 
i = 0, rval = QLA_FUNCTION_FAILED; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; - data_ptr = ha->md_dump; + data_ptr = (uint32_t *)ha->md_dump; if (ha->fw_dumped) { ql_log(ql_log_warn, vha, 0xb037, @@ -4133,7 +4177,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha) goto md_failed; } - ha->fw_dumped = false; + ha->fw_dumped = 0; if (!ha->md_tmplt_hdr || !ha->md_dump) { ql_log(ql_log_warn, vha, 0xb038, @@ -4313,7 +4357,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha) ql_log(ql_log_info, vha, 0xb044, "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); - ha->fw_dumped = true; + ha->fw_dumped = 1; qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); md_failed: @@ -4470,7 +4514,7 @@ qla82xx_beacon_off(struct scsi_qla_host *vha) } void -qla82xx_fw_dump(scsi_qla_host_t *vha) +qla82xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked) { struct qla_hw_data *ha = vha->hw; diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 8567eaf1bddd1..230abee10598b 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. */ #ifndef __QLA_NX_H #define __QLA_NX_H @@ -799,16 +800,16 @@ struct qla82xx_legacy_intr_set { #define QLA82XX_URI_FIRMWARE_IDX_OFF 29 struct qla82xx_uri_table_desc{ - __le32 findex; - __le32 num_entries; - __le32 entry_size; - __le32 reserved[5]; + uint32_t findex; + uint32_t num_entries; + uint32_t entry_size; + uint32_t reserved[5]; }; struct qla82xx_uri_data_desc{ - __le32 findex; - __le32 size; - __le32 reserved[5]; + uint32_t findex; + uint32_t size; + uint32_t reserved[5]; }; /* UNIFIED ROMIMAGE END */ @@ -828,22 +829,22 @@ struct qla82xx_uri_data_desc{ * ISP 8021 I/O Register Set structure definitions. */ struct device_reg_82xx { - __le32 req_q_out[64]; /* Request Queue out-Pointer (64 * 4) */ - __le32 rsp_q_in[64]; /* Response Queue In-Pointer. */ - __le32 rsp_q_out[64]; /* Response Queue Out-Pointer. */ + uint32_t req_q_out[64]; /* Request Queue out-Pointer (64 * 4) */ + uint32_t rsp_q_in[64]; /* Response Queue In-Pointer. */ + uint32_t rsp_q_out[64]; /* Response Queue Out-Pointer. */ - __le16 mailbox_in[32]; /* Mailbox In registers */ - __le16 unused_1[32]; - __le32 hint; /* Host interrupt register */ + uint16_t mailbox_in[32]; /* Mail box In registers */ + uint16_t unused_1[32]; + uint32_t hint; /* Host interrupt register */ #define HINT_MBX_INT_PENDING BIT_0 - __le16 unused_2[62]; - __le16 mailbox_out[32]; /* Mailbox Out registers */ - __le32 unused_3[48]; + uint16_t unused_2[62]; + uint16_t mailbox_out[32]; /* Mail box Out registers */ + uint32_t unused_3[48]; - __le32 host_status; /* host status */ + uint32_t host_status; /* host status */ #define HSRX_RISC_INT BIT_15 /* RISC to Host interrupt. */ #define HSRX_RISC_PAUSED BIT_8 /* RISC Paused. */ - __le32 host_int; /* Interrupt status. */ + uint32_t host_int; /* Interrupt status. */ #define ISRX_NX_RISC_INT BIT_0 /* RISC interrupt. 
*/
 };
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 01ccd45267074..7975154a4edc0 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -1,7 +1,8 @@
-// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic Fibre Channel HBA Driver
  * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
  */
 
 #include 
@@ -139,7 +140,7 @@ qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1,
 	uint32_t mask)
 {
 	unsigned long timeout;
-	uint32_t temp;
+	uint32_t temp = 0;
 
 	/* jiffies after 100ms */
 	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
@@ -659,7 +660,7 @@ static int
 qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr,
 	int duration, uint32_t test_mask, uint32_t test_result)
 {
-	uint32_t value = 0;
+	uint32_t value;
 	int timeout_error;
 	uint8_t retries;
 	int ret_val = QLA_SUCCESS;
@@ -1440,7 +1441,7 @@ qla8044_device_bootstrap(struct scsi_qla_host *vha)
 	if (idc_ctrl & GRACEFUL_RESET_BIT1) {
 		qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
 		    (idc_ctrl & ~GRACEFUL_RESET_BIT1));
-		ha->fw_dumped = false;
+		ha->fw_dumped = 0;
 	}
 
 dev_ready:
@@ -2594,7 +2595,7 @@ qla8044_minidump_process_rdmux(struct scsi_qla_host *vha,
 	struct qla8044_minidump_entry_hdr *entry_hdr,
 	uint32_t **d_ptr)
 {
-	uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+	uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value = 0;
 	struct qla8044_minidump_entry_mux *mux_hdr;
 	uint32_t *data_ptr = *d_ptr;
 
@@ -2964,7 +2965,7 @@ qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
 
 		/* Prepare: Write pex-dma descriptor to MS memory. */
 		rval = qla8044_ms_mem_write_128b(vha,
-		    m_hdr->desc_card_addr, (uint32_t *)&dma_desc,
+		    m_hdr->desc_card_addr, (void *)&dma_desc,
 		    (sizeof(struct qla8044_pex_dma_descriptor)/16));
 		if (rval) {
 			ql_log(ql_log_warn, vha, 0xb14a,
@@ -2986,7 +2987,7 @@
 		read_size += chunk_size;
 	}
 
-	*d_ptr = (uint32_t *)data_ptr;
+	*d_ptr = (void *)data_ptr;
 
 error_exit:
 	if (rdmem_buffer)
@@ -3248,7 +3249,7 @@ qla8044_collect_md_data(struct scsi_qla_host *vha)
 		goto md_failed;
 	}
 
-	ha->fw_dumped = false;
+	ha->fw_dumped = 0;
 
 	if (!ha->md_tmplt_hdr || !ha->md_dump) {
 		ql_log(ql_log_warn, vha, 0xb10e,
@@ -3469,7 +3470,7 @@ qla8044_collect_md_data(struct scsi_qla_host *vha)
 	ql_log(ql_log_info, vha, 0xb110,
 	    "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
 	    vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
-	ha->fw_dumped = true;
+	ha->fw_dumped = 1;
 	qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
 
@@ -3486,7 +3487,7 @@ qla8044_get_minidump(struct scsi_qla_host *vha)
 	struct qla_hw_data *ha = vha->hw;
 
 	if (!qla8044_collect_md_data(vha)) {
-		ha->fw_dumped = true;
+		ha->fw_dumped = 1;
 		ha->prev_minidump_failed = 0;
 	} else {
 		ql_log(ql_log_fatal, vha, 0xb0db,
@@ -3945,8 +3946,8 @@ qla8044_intr_handler(int irq, void *dev_id)
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	for (iter = 1; iter--; ) {
-		if (rd_reg_dword(&reg->host_int)) {
-			stat = rd_reg_dword(&reg->host_status);
+		if (RD_REG_DWORD(&reg->host_int)) {
+			stat = RD_REG_DWORD(&reg->host_status);
 			if ((stat & HSRX_RISC_INT) == 0)
 				break;
 
@@ -3960,9 +3961,9 @@
 				break;
 			case 0x12:
 				mb[0] = MSW(stat);
-				mb[1] = rd_reg_word(&reg->mailbox_out[1]);
-				mb[2] = rd_reg_word(&reg->mailbox_out[2]);
-				mb[3] = rd_reg_word(&reg->mailbox_out[3]);
+				mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+				mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+				mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
 				qla2x00_async_event(vha, rsp, mb);
 				break;
 			case 0x13:
@@ -3975,7 +3976,7 @@ qla8044_intr_handler(int irq, void *dev_id)
 				break;
 			}
 		}
-		wrt_reg_dword(&reg->host_int, 0);
+		WRT_REG_DWORD(&reg->host_int, 0);
 	}
 
 	qla2x00_handle_mbx_completion(ha, status);
@@ -4069,7 +4070,7 @@ qla8044_abort_isp(scsi_qla_host_t *vha)
 }
 
 void
-qla8044_fw_dump(scsi_qla_host_t *vha)
+qla8044_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 {
 	struct qla_hw_data *ha = vha->hw;
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
index 2fc902a9fadec..8ba7c1db07c35 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.h
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -1,7 +1,8 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * QLogic Fibre Channel HBA Driver
  * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
  */
 
 #ifndef __QLA_NX2_H
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 419156121cb59..e4dd8cc65d324 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,7 +1,8 @@
-// SPDX-License-Identifier: GPL-2.0-only
 /*
  * QLogic Fibre Channel HBA Driver
  * Copyright (c) 2003-2014 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #include "qla_def.h"
@@ -35,16 +36,6 @@ static int apidev_major;
  */
 struct kmem_cache *srb_cachep;
 
-int ql2xfulldump_on_mpifail;
-module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
-	"Set this to take full dump on MPI hang.");
-
-int ql2xenforce_iocb_limit = 1;
-module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(ql2xenforce_iocb_limit,
-	"Enforce IOCB throttling, to avoid FW congestion. (default: 1)");
-
 /*
  * CT6 CTX allocation cache
  */
@@ -54,6 +45,13 @@ static struct kmem_cache *ctx_cachep;
  */
 uint ql_errlev = 0x8001;
 
+int ql2xsecenable = 0;
+module_param(ql2xsecenable, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xsecenable,
+	"Enable/disable security: "
+	"0 (Default) - Security disabled. "
+	"1 - Security enabled.");
+
 static int ql2xenableclass2;
 module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
 MODULE_PARM_DESC(ql2xenableclass2,
@@ -112,6 +110,10 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
 		"ql2xextended_error_logging=1).\n"
 		"\t\tDo LOGICAL OR of the value to enable more than one level");
 
+QLA_MESSAGE_TRACE_DEFINES;
+
+QLA_SRB_TRACE_DEFINES;
+
 int ql2xshiftctondsd = 6;
 module_param(ql2xshiftctondsd, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xshiftctondsd,
@@ -139,7 +141,6 @@ MODULE_PARM_DESC(ql2xenabledif,
 	" Enable T10-CRC-DIF:\n"
 	" Default is 2.\n"
 	"  0 -- No DIF Support\n"
-	"  1 -- Enable DIF for all types\n"
 	"  2 -- Enable DIF for all types, except Type 0.\n");
 
 #if (IS_ENABLED(CONFIG_NVME_FC))
@@ -197,6 +198,12 @@ MODULE_PARM_DESC(ql2xdbwr,
 	" 0 -- Regular doorbell.\n"
 	" 1 -- CAMRAM doorbell (faster).\n");
 
+int ql2xtargetreset = 1;
+module_param(ql2xtargetreset, int, S_IRUGO);
+MODULE_PARM_DESC(ql2xtargetreset,
+	"Enable target reset. "
+	"Default is 1 - use hw defaults.");
+
 int ql2xgffidenable;
 module_param(ql2xgffidenable, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xgffidenable,
@@ -307,26 +314,157 @@ MODULE_PARM_DESC(ql2xdifbundlinginternalbuffers,
 	"0 (Default). Based on check.\n"
Based on check.\n" "1 Force using internal buffers\n"); -int ql2xsmartsan; -module_param(ql2xsmartsan, int, 0444); -module_param_named(smartsan, ql2xsmartsan, int, 0444); +int ql2xfulldump_on_mpifail; +module_param(ql2xfulldump_on_mpifail, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xfulldump_on_mpifail, + "Set this to take full dump on MPI hang."); +/* + * ql2x_scmr_drop_pct + * ql2x_scmr_up_pct + * when congested, if a SIGNAL appears within 1 sec, bring + * down congested peak rate by ql2x_scmr_drop_pct. + * when congested, if no SIGNALS for 1 sec, bump congested peak + * rate by ql2x_scmr_drop_pct. + * ql2x_scmr_drop_pct_low_wm + * when congested, I/Os will not be throttled further when the congested + * peak rate hits this percentage. + */ + +int ql2x_scmr_drop_pct = 10; +module_param(ql2x_scmr_drop_pct, int, 0600); +MODULE_PARM_DESC(ql2x_scmr_drop_pct, + " I/O peak rate percentage drop value to use on SCM\n" + "\t\t'WARNING' signal."); + +int ql2x_scmr_drop_pct_low_wm = 50; +module_param(ql2x_scmr_drop_pct_low_wm, int, 0600); +MODULE_PARM_DESC(ql2x_scmr_drop_pct_low_wm, + " I/O peak rate will not drop below this percentage."); + +int ql2x_scmr_up_pct = 1; +module_param(ql2x_scmr_up_pct, int, 0600); +MODULE_PARM_DESC(ql2x_scmr_up_pct, + " I/O peak rate percentage bump to use when there is\n" + "\t\tno congestion signal."); + +int ql2x_scmr_use_slow_queue = 1; +module_param(ql2x_scmr_use_slow_queue, int, 0600); +MODULE_PARM_DESC(ql2x_scmr_use_slow_queue, + " Queue I/O requests on slow queue, when in multi queue mode,\n" + "\t\tfor congested targets"); + +int ql2x_scmr_cg_io_status = DID_REQUEUE; +module_param(ql2x_scmr_cg_io_status, int, 0600); +MODULE_PARM_DESC(ql2x_scmr_cg_io_status, + " IO return status to use to throttle (default DID_REQUEUE (0xd)).\n" + "\t\trequests during fabric congestion."); + +int ql2x_scmr_flow_ctl_tgt = 1; +module_param(ql2x_scmr_flow_ctl_tgt, int, 0600); +MODULE_PARM_DESC(ql2x_scmr_flow_ctl_tgt, + " When I/O throttling is in place during fabric congestion, flow\n" + "\t\tcontrol tgt devices. (default: on)"); + +int ql2x_scmr_flow_ctl_host = 1; +module_param(ql2x_scmr_flow_ctl_host, int, 0600); +MODULE_PARM_DESC(ql2x_scmr_flow_ctl_host, + " When I/O throttling is in place during fabric congestion, flow\n" + "\t\tcontrol host. (default: on)"); + +int ql2x_scmr_throttle_mode = 2; +module_param(ql2x_scmr_throttle_mode, int, 0600); +MODULE_PARM_DESC(ql2x_scmr_throttle_mode, + " Defines the throttling mechanism. 2-Queue depth throttle(default),\n" + "\t\t1 - IOPS/MBPS throttle \n"); + +int ql2x_scmr_profile = 0; +module_param(ql2x_scmr_profile, int, 0600); +MODULE_PARM_DESC(ql2x_scmr_profile, + " Defines SCM profile." + "0 - Monitor only (default)" + "\t\t1 - Conservative" + "\t\t2 - Moderate " + "\t\t3 - Aggressive \n"); + +int ql2xvirtuallane = 0; +module_param(ql2xvirtuallane, int, 0600); +MODULE_PARM_DESC(ql2xvirtuallane, + " Defines support for Virtual Lanes: " + "0 - Disabled (default)" + "\t\t1 - Enabled \n"); + +int ql2xsmartsan = 0; +module_param(ql2xsmartsan, int, S_IRUGO|S_IWUSR); +module_param_named(smartsan, ql2xsmartsan, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xsmartsan, - "Send SmartSAN Management Attributes for FDMI Registration." - " Default is 0 - No SmartSAN registration," - " 1 - Register SmartSAN Management Attributes."); + "Send SmartSAN Management Attributes for FDMI Registration." 
+ " Default is 0 - No SmartSAN registration," + " 1 - Register SmartSAN Management Attributes."); -int ql2xrdpenable; -module_param(ql2xrdpenable, int, 0444); -module_param_named(rdpenable, ql2xrdpenable, int, 0444); +int ql2xrdpenable = 0; +module_param(ql2xrdpenable, int, S_IRUGO|S_IWUSR); +module_param_named(rdpenable, ql2xrdpenable, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xrdpenable, "Enables RDP responses. " "0 - no RDP responses (default). " "1 - provide RDP responses."); +int ql2xenforce_iocb_limit = 1; +module_param(ql2xenforce_iocb_limit, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(ql2xenforce_iocb_limit, + "Enforce IOCB throttling, to avoid FW congestion. (default: 0)"); + +int ql2xabts_wait_nvme = 1; +module_param(ql2xabts_wait_nvme, int, 0444); +MODULE_PARM_DESC(ql2xabts_wait_nvme, + "To wait for ABTS response on I/O timeouts for NVMe. (default: 1)"); + +int ql2xrspq_follow_inptr = 1; +module_param(ql2xrspq_follow_inptr, int, 0644); +MODULE_PARM_DESC(ql2xrspq_follow_inptr, + "Follow RSP IN pointer for RSP updates for HBAs 27xx and newer (default: 1)."); + +int ql2xrspq_follow_inptr_legacy = 0; +module_param(ql2xrspq_follow_inptr_legacy, int, 0644); +MODULE_PARM_DESC(ql2xrspq_follow_inptr_legacy, + "Follow RSP IN pointer for RSP updates for HBAs older than 27XX. (default: 1)."); + +int ql2xcontrol_edc_rdf = 1; +module_param(ql2xcontrol_edc_rdf, int, 0644); +MODULE_PARM_DESC(ql2xcontrol_edc_rdf, + "Enable driver control of EDC and RDF for SCM" + "0 - Firmware implements EDC and RDF" + "1 - Driver controls EDC and RDF - default"); + +u64 ql2xdebug; +module_param(ql2xdebug, ullong, 0644); +MODULE_PARM_DESC(ql2xdebug, + "Driver debugging control. (default: 0)\n" + " bit-0: Firmware dump on bad response packets.\n" + " bit-1: Crash machine on critical errors.\n" + ); + +u32 ql2xnvme_queues = DEF_NVME_HW_QUEUES; +module_param(ql2xnvme_queues, uint, S_IRUGO); +MODULE_PARM_DESC(ql2xnvme_queues, + "Number of NVMe Queues that can be configured.\n" + "Final value will be min(ql2xnvme_queues, num_cpus,num_chip_queues)\n" + "1 - Minimum number of queues supported\n" + "8 - Default value"); + +u32 ql2xdelay_before_pci_error_handling = 5; +module_param(ql2xdelay_before_pci_error_handling, uint, 0644); +MODULE_PARM_DESC(ql2xdelay_before_pci_error_handling, + "Number of seconds delayed before qla begin PCI error self-handling (default: 5).\n"); + static void qla2x00_clear_drv_active(struct qla_hw_data *); static void qla2x00_free_device(scsi_qla_host_t *); -static int qla2xxx_map_queues(struct Scsi_Host *shost); static void qla2x00_destroy_deferred_work(struct qla_hw_data *); +void qla2xxx_scmr_flow_control(scsi_qla_host_t *vha); +void qla2xxx_scmr_manage_qdepth(srb_t *sp, fc_port_t *fcport, bool inc); +bool qla2xxx_throttle_req(srb_t *, struct qla_hw_data *ha, + fc_port_t *fcport, uint8_t dir); +void qla2xxx_scmr_cleanup(srb_t *sp, scsi_qla_host_t *vha, struct scsi_cmnd *cmd); static struct scsi_transport_template *qla2xxx_transport_template = NULL; @@ -340,7 +478,7 @@ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL; __inline__ void qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval) { - timer_setup(&vha->timer, qla2x00_timer, 0); + qla_timer_setup(&vha->timer, qla2x00_timer, 0, vha); vha->timer.expires = jiffies + interval * HZ; add_timer(&vha->timer); vha->timer_active = 1; @@ -364,6 +502,30 @@ qla2x00_stop_timer(scsi_qla_host_t *vha) { del_timer_sync(&vha->timer); vha->timer_active = 0; + if (IS_SCM_CAPABLE(vha->hw) && + vha->hw->flags.scm_supported_f) { 
+		del_timer_sync(&vha->perf_timer);
+		vha->perf_timer_active = 0;
+	}
+}
+
+static inline void
+qla2x00_start_perf_timer(scsi_qla_host_t *vha)
+{
+	if (IS_SCM_CAPABLE(vha->hw) &&
+	    vha->hw->flags.scm_supported_f) {
+		qla_timer_setup(&vha->perf_timer, qla2xxx_perf_timer, 0, vha);
+		vha->perf_timer.expires = jiffies + HZ/10;
+		add_timer(&vha->perf_timer);
+		vha->perf_timer_active = 1;
+	}
+}
+
+static inline void
+qla2x00_stop_perf_timer(scsi_qla_host_t *vha)
+{
+	del_timer_sync(&vha->perf_timer);
+	vha->perf_timer_active = 0;
+}
 
 static int qla2x00_do_dpc(void *data);
@@ -441,6 +603,11 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
 		    "Unable to allocate memory for queue pair ptrs.\n");
 		goto fail_qpair_map;
 	}
+	if (qla_mapq_alloc_qp_cpu_map(ha) != 0) {
+		kfree(ha->queue_pair_map);
+		ha->queue_pair_map = NULL;
+		goto fail_qpair_map;
+	}
 	}
 
 	/*
@@ -515,6 +682,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
 		ha->base_qpair = NULL;
 	}
 
+	qla_mapq_free_qp_cpu_map(ha);
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
 		if (!test_bit(cnt, ha->req_qid_map))
@@ -702,15 +870,17 @@ void qla2x00_sp_free_dma(srb_t *sp)
 	}
 
 	if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
-		struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx;
+		struct ct6_dsd *ctx1 = &sp->u.scmd.ct6_ctx;
 
 		dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd,
 		    ctx1->fcp_cmnd_dma);
 		list_splice(&ctx1->dsd_list, &ha->gbl_dsd_list);
 		ha->gbl_dsd_inuse -= ctx1->dsd_use_cnt;
 		ha->gbl_dsd_avail += ctx1->dsd_use_cnt;
-		mempool_free(ctx1, ha->ctx_mempool);
 	}
+
+	if (sp->flags & SRB_GOT_BUF)
+		qla_put_buf(sp->qpair, &sp->u.scmd.buf_dsc);
 }
 
 void qla2x00_sp_compl(srb_t *sp, int res)
@@ -718,9 +888,24 @@
 	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
 	struct completion *comp = sp->comp;
 
-	sp->free(sp);
+	ql_srb_trace_ext(ql_dbg_io, sp->vha, sp->fcport,
+	    "sp=%px handle=0x%x cmd=%px res=%x",
+	    sp, sp->handle, cmd, res);
+
+	qla2xxx_scmr_manage_qdepth(sp, sp->fcport, false);
+	/* kref: INIT */
+	kref_put(&sp->cmd_kref, qla2x00_sp_release);
 	cmd->result = res;
+
sp->done_jiffies = jiffies; CMD_SP(cmd) = NULL; + +#ifdef QLA2XXX_LATENCY_MEASURE + if (sp->type == SRB_SCSI_CMD) { + ktime_get_real_ts64(&sp->cmd_to_ml); + qla_get_scsi_cmd_latency(sp); + } +#endif + cmd->scsi_done(cmd); if (comp) complete(comp); @@ -825,6 +1027,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + bool throttle_down; srb_t *sp; int rval; @@ -834,6 +1037,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) goto qc24_fail_command; } + ql_srb_trace_ext(ql_dbg_io, fcport->vha, fcport, + "cmd=%px tag=0x%x mq=%d remchk=0x%x", + cmd, cmd->request->tag, ha->mqenable, + fc_remote_port_chkready(rport)); + if (ha->mqenable) { uint32_t tag; uint16_t hwq; @@ -843,19 +1051,32 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) hwq = blk_mq_unique_tag_to_hwq(tag); qpair = ha->queue_pair_map[hwq]; + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + if (ql2x_scmr_use_slow_queue && + unlikely(qla_scmr_is_congested(&fcport->sfc))) + qpair = ha->queue_pair_map[ha->slow_queue_id]; + } + if (qpair) return qla2xxx_mqueuecommand(host, cmd, qpair); } + sp = ql_scsi_cmd_priv(cmd); + /* ref: INIT */ + qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport); +#ifdef QLA2XXX_LATENCY_MEASURE + ktime_get_real_ts64(&sp->q_cmd); +#endif + if (ha->flags.eeh_busy) { if (ha->flags.pci_channel_io_perm_failure) { ql_dbg(ql_dbg_aer, vha, 0x9010, "PCI Channel IO permanent failure, exiting " - "cmd=%p.\n", cmd); + "cmd=%px.\n", cmd); cmd->result = DID_NO_CONNECT << 16; } else { ql_dbg(ql_dbg_aer, vha, 0x9011, - "EEH_Busy, Requeuing the cmd=%p.\n", cmd); + "EEH_Busy, Requeuing the cmd=%px.\n", cmd); cmd->result = DID_REQUEUE << 16; } goto qc24_fail_command; @@ -865,7 +1086,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) if (rval) { cmd->result = rval; ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003, - "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", + "fc_remote_port_chkready failed for cmd=%px, rval=0x%x.\n", cmd, rval); goto qc24_fail_command; } @@ -873,18 +1094,18 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) if (!vha->flags.difdix_supported && scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) { ql_dbg(ql_dbg_io, vha, 0x3004, - "DIF Cap not reg, fail DIF capable cmd's:%p.\n", + "DIF Cap not reg, fail DIF capable cmd's:%px.\n", cmd); cmd->result = DID_NO_CONNECT << 16; goto qc24_fail_command; } - if (!fcport) { - cmd->result = DID_NO_CONNECT << 16; + if (!fcport || fcport->deleted) { + cmd->result = DID_IMM_RETRY << 16; goto qc24_fail_command; } - if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) { + if (atomic_read(&fcport->state) != FCS_ONLINE) { if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || atomic_read(&base_vha->loop_state) == LOOP_DEAD) { ql_dbg(ql_dbg_io, vha, 0x3005, @@ -908,27 +1129,37 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) else goto qc24_target_busy; - sp = scsi_cmd_priv(cmd); - qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport); sp->u.scmd.cmd = cmd; + sp->dir = cmd->sc_data_direction; sp->type = SRB_SCSI_CMD; CMD_SP(cmd) = (void *)sp; sp->free = qla2x00_sp_free_dma; sp->done = qla2x00_sp_compl; + if (IS_SCM_CAPABLE(ha)) { + throttle_down = qla2xxx_throttle_req(sp, ha, fcport, cmd->sc_data_direction); + if (unlikely(throttle_down == true)) { + cmd->result = ql2x_scmr_cg_io_status 
<< 16; + goto qc24_target_busy; + } + qla2xxx_update_sfc_ios(sp, ha, fcport, scsi_bufflen(cmd)); + } + rval = ha->isp_ops->start_scsi(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013, - "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); + "Start scsi failed rval=%d for cmd=%px.\n", rval, cmd); goto qc24_host_busy_free_sp; } return 0; qc24_host_busy_free_sp: - sp->free(sp); + qla2xxx_scmr_cleanup(sp, vha, cmd); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); qc24_target_busy: return SCSI_MLQUEUE_TARGET_BUSY; @@ -949,24 +1180,39 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, struct fc_rport *rport = starget_to_rport(scsi_target(cmd->device)); struct qla_hw_data *ha = vha->hw; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + bool throttle_down; srb_t *sp; int rval; + sp = ql_scsi_cmd_priv(cmd); + /* ref: INIT */ + qla2xxx_init_sp(sp, vha, qpair, fcport); +#ifdef QLA2XXX_LATENCY_MEASURE + ktime_get_real_ts64(&sp->q_cmd); +#endif + rval = rport ? fc_remote_port_chkready(rport) : FC_PORTSTATE_OFFLINE; if (rval) { cmd->result = rval; ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076, - "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", + "fc_remote_port_chkready failed for cmd=%px, rval=0x%x.\n", cmd, rval); goto qc24_fail_command; } - if (!fcport) { + if (!qpair->online) { + ql_dbg(ql_dbg_io, vha, 0x3077, + "qpair not online. eeh_busy=%d.\n", ha->flags.eeh_busy); cmd->result = DID_NO_CONNECT << 16; goto qc24_fail_command; } - if (atomic_read(&fcport->state) != FCS_ONLINE || fcport->deleted) { + if (!fcport || fcport->deleted) { + cmd->result = DID_IMM_RETRY << 16; + goto qc24_fail_command; + } + + if (atomic_read(&fcport->state) != FCS_ONLINE) { if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || atomic_read(&base_vha->loop_state) == LOOP_DEAD) { ql_dbg(ql_dbg_io, vha, 0x3077, @@ -990,26 +1236,35 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, else goto qc24_target_busy; - sp = scsi_cmd_priv(cmd); - qla2xxx_init_sp(sp, vha, qpair, fcport); + sp->dir = cmd->sc_data_direction; sp->u.scmd.cmd = cmd; sp->type = SRB_SCSI_CMD; CMD_SP(cmd) = (void *)sp; sp->free = qla2xxx_qpair_sp_free_dma; sp->done = qla2xxx_qpair_sp_compl; + if (IS_SCM_CAPABLE(ha)) { + throttle_down = qla2xxx_throttle_req(sp, ha, fcport, cmd->sc_data_direction); + if (throttle_down == true) + goto qc24_target_busy; + + qla2xxx_update_sfc_ios(sp, ha, fcport, scsi_bufflen(cmd)); + } + rval = ha->isp_ops->start_scsi_mq(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078, - "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); + "Start scsi failed rval=%d for cmd=%px.\n", rval, cmd); goto qc24_host_busy_free_sp; } return 0; qc24_host_busy_free_sp: - sp->free(sp); + qla2xxx_scmr_cleanup(sp, vha, cmd); + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); qc24_target_busy: return SCSI_MLQUEUE_TARGET_BUSY; @@ -1103,12 +1358,27 @@ static inline int test_fcport_count(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; unsigned long flags; int res; + /* Return 0 = sleep, x=wake */ spin_lock_irqsave(&ha->tgt.sess_lock, flags); ql_dbg(ql_dbg_init, vha, 0x00ec, - "tgt %p, fcport_count=%d\n", + "tgt %px, fcport_count=%d\n", vha, vha->fcport_count); res = (vha->fcport_count == 0); + if (res) { + struct fc_port *fcport; + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->deleted != QLA_SESS_DELETED) { + /* session(s) may not be fully logged in + (ie fcport_count=0), but 
session + deletion thread(s) may be inflight. + */ + + res = 0; + break; + } + } + } spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); return res; @@ -1190,34 +1460,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha) return return_status; } -#define ISP_REG_DISCONNECT 0xffffffffU -/************************************************************************** -* qla2x00_isp_reg_stat -* -* Description: -* Read the host status register of ISP before aborting the command. -* -* Input: -* ha = pointer to host adapter structure. -* -* -* Returns: -* Either true or false. -* -* Note: Return true if there is register disconnect. -**************************************************************************/ -static inline -uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) -{ - struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; - - if (IS_P3P_TYPE(ha)) - return ((rd_reg_dword(&reg82->host_int)) == ISP_REG_DISCONNECT); - else - return ((rd_reg_dword(&reg->host_status)) == - ISP_REG_DISCONNECT); -} /************************************************************************** * qla2xxx_eh_abort @@ -1253,6 +1495,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x8042, "PCI/Register disconnect, exiting.\n"); + qla_pci_set_eeh_busy(vha); return FAILED; } @@ -1261,13 +1504,31 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) if (ret != 0) fast_fail_status = ret; - sp = scsi_cmd_priv(cmd); + sp = ql_scsi_cmd_priv(cmd); qpair = sp->qpair; - if ((sp->fcport && sp->fcport->deleted) || !qpair) + vha->cmd_timeout_cnt++; + + ql_srb_trace_ext(ql_dbg_io, sp->vha, sp->fcport, + "sp=%px cmd=%px fast_fail_sts=0x%x comp/abt/abted=%d/%d/%d", + sp, cmd, fast_fail_status, sp->completed, sp->abort, + sp->aborted); + + if ((sp->fcport && sp->fcport->deleted) || !qpair) return fast_fail_status != SUCCESS ? fast_fail_status : FAILED; spin_lock_irqsave(qpair->qp_lock_ptr, flags); + if (sp->completed) { + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + return fast_fail_status; + } + + if (sp->abort || sp->aborted) { + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + return fast_fail_status != SUCCESS ? fast_fail_status : FAILED; + } + + sp->abort = 1; sp->comp = &comp; spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); @@ -1276,8 +1537,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) lun = cmd->device->lun; ql_dbg(ql_dbg_taskm, vha, 0x8002, - "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n", - vha->host_no, id, lun, sp, cmd, sp->handle); + "Aborting from RISC nexus=%ld:%d:%llu sp=%px cmd=%px handle=%x\n", + vha->host_no, id, lun_cast(lun), sp, cmd, sp->handle); /* * Abort will release the original Command/sp from FW. Let the @@ -1287,7 +1548,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) rval = ha->isp_ops->abort_command(sp); ql_dbg(ql_dbg_taskm, vha, 0x8003, - "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval); + "Abort command mbx cmd=%px, rval=%x.\n", cmd, rval); /* Wait for the command completion. */ ratov_j = ha->r_a_tov/10 * 4 * 1000; @@ -1320,21 +1581,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) /* * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED.
*/ -int -qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, +static int +__qla2x00_eh_wait_for_pending_commands(struct qla_qpair *qpair, unsigned int t, uint64_t l, enum nexus_wait_type type) { int cnt, match, status; unsigned long flags; - struct qla_hw_data *ha = vha->hw; - struct req_que *req; + scsi_qla_host_t *vha = qpair->vha; + struct req_que *req = qpair->req; srb_t *sp; struct scsi_cmnd *cmd; status = QLA_SUCCESS; - spin_lock_irqsave(&ha->hardware_lock, flags); - req = vha->req; + spin_lock_irqsave(qpair->qp_lock_ptr, flags); for (cnt = 1; status == QLA_SUCCESS && cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; @@ -1361,12 +1621,32 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, if (!match) continue; - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); status = qla2x00_eh_wait_on_command(cmd); - spin_lock_irqsave(&ha->hardware_lock, flags); + spin_lock_irqsave(qpair->qp_lock_ptr, flags); } - spin_unlock_irqrestore(&ha->hardware_lock, flags); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + return status; +} + +int +qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, + uint64_t l, enum nexus_wait_type type) +{ + struct qla_qpair *qpair; + struct qla_hw_data *ha = vha->hw; + int i, status = QLA_SUCCESS; + status = __qla2x00_eh_wait_for_pending_commands( + ha->base_qpair, t, l, type); + for (i = 0; status == QLA_SUCCESS && i < ha->max_qpairs; i++) { + qpair = ha->queue_pair_map[i]; + if (!qpair) + continue; + status = __qla2x00_eh_wait_for_pending_commands( + qpair, t, l, type); + } return status; } @@ -1394,44 +1674,45 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, return err; if (fcport->deleted) - return SUCCESS; + return FAILED; ql_log(ql_log_info, vha, 0x8009, - "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no, - cmd->device->id, cmd->device->lun, cmd); + "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%px.\n", name, vha->host_no, + cmd->device->id, lun_cast(cmd->device->lun), cmd); err = 0; if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x800a, - "Wait for hba online failed for cmd=%p.\n", cmd); + "Wait for hba online failed for cmd=%px.\n", cmd); goto eh_reset_failed; } err = 2; if (do_reset(fcport, cmd->device->lun, 1) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x800c, - "do_reset failed for cmd=%p.\n", cmd); + "do_reset failed for cmd=%px.\n", cmd); goto eh_reset_failed; } err = 3; if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id, cmd->device->lun, type) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x800d, - "wait for pending cmds failed for cmd=%p.\n", cmd); + "wait for pending cmds failed for cmd=%px.\n", cmd); goto eh_reset_failed; } ql_log(ql_log_info, vha, 0x800e, - "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%p.\n", name, - vha->host_no, cmd->device->id, cmd->device->lun, cmd); + "%s RESET SUCCEEDED nexus:%ld:%d:%llu cmd=%px.\n", name, + vha->host_no, cmd->device->id, lun_cast(cmd->device->lun), cmd); return SUCCESS; eh_reset_failed: ql_log(ql_log_info, vha, 0x800f, - "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name, - reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun, - cmd); + "%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%px.\n", name, + reset_errors[err], vha->host_no, cmd->device->id, + lun_cast(cmd->device->lun), cmd); + vha->reset_cmd_err_cnt++; return FAILED; } @@ -1444,6 +1725,7 @@ 
qla2xxx_eh_device_reset(struct scsi_cmnd *cmd) if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x803e, "PCI/Register disconnect, exiting.\n"); + qla_pci_set_eeh_busy(vha); return FAILED; } @@ -1460,6 +1742,7 @@ qla2xxx_eh_target_reset(struct scsi_cmnd *cmd) if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x803f, "PCI/Register disconnect, exiting.\n"); + qla_pci_set_eeh_busy(vha); return FAILED; } @@ -1495,6 +1778,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x8040, "PCI/Register disconnect, exiting.\n"); + qla_pci_set_eeh_busy(vha); return FAILED; } @@ -1572,7 +1856,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd) if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x8041, "PCI/Register disconnect, exiting.\n"); - schedule_work(&ha->board_disable); + qla_pci_set_eeh_busy(vha); return SUCCESS; } @@ -1648,6 +1932,11 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) int ret; struct qla_hw_data *ha = vha->hw; + ql_log(ql_log_info, vha, 0x8043, + "Issuing loop reset. (tgt_reset=%d lip_login=%d lip_reset=%d)\n", + ha->flags.enable_target_reset, ha->flags.enable_lip_full_login, + ha->flags.enable_lip_reset); + if (IS_QLAFX00(ha)) return QLA_SUCCESS; @@ -1675,10 +1964,6 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) return QLA_SUCCESS; } -/* - * The caller must ensure that no completion interrupts will happen - * while this function is in progress. - */ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res, unsigned long *flags) __releases(qp->qp_lock_ptr) @@ -1687,13 +1972,10 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res, DECLARE_COMPLETION_ONSTACK(comp); scsi_qla_host_t *vha = qp->vha; struct qla_hw_data *ha = vha->hw; - struct scsi_cmnd *cmd = GET_CMD_SP(sp); int rval; bool ret_cmd; uint32_t ratov_j; - lockdep_assert_held(qp->qp_lock_ptr); - if (qla2x00_chip_is_down(vha)) { sp->done(sp, res); return; @@ -1709,6 +1991,7 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res, } sp->comp = &comp; + sp->abort = 1; spin_unlock_irqrestore(qp->qp_lock_ptr, *flags); rval = ha->isp_ops->abort_command(sp); @@ -1732,17 +2015,13 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res, } spin_lock_irqsave(qp->qp_lock_ptr, *flags); - if (ret_cmd && blk_mq_request_started(cmd->request)) + if (ret_cmd && (!sp->completed || !sp->aborted)) sp->done(sp, res); } else { sp->done(sp, res); } } -/* - * The caller must ensure that no completion interrupts will happen - * while this function is in progress.
- */ void qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) { @@ -1831,12 +2106,15 @@ static int qla2xxx_slave_configure(struct scsi_device *sdev) { scsi_qla_host_t *vha = shost_priv(sdev->host); - struct req_que *req = vha->req; if (IS_T10_PI_CAPABLE(vha->hw)) blk_queue_update_dma_alignment(sdev->request_queue, 0x7); - scsi_change_queue_depth(sdev, req->max_q_depth); +#ifdef SCSI_CHANGE_Q_DEPTH + scsi_change_queue_depth(sdev, vha->req->max_q_depth); +#endif + qla_scsi_tcq_handler(sdev); + return 0; } @@ -1862,7 +2140,7 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha) if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { /* Any upper-dword bits set? */ if (MSD(dma_get_required_mask(&ha->pdev->dev)) && - !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) { + !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) { /* Ok, a 64bit DMA mask is applicable. */ ha->flags.enable_64bit_addressing = 1; ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64; @@ -1872,7 +2150,7 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha) } dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32)); - dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32)); + pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32)); } static void @@ -1884,8 +2162,8 @@ qla2x00_enable_intrs(struct qla_hw_data *ha) spin_lock_irqsave(&ha->hardware_lock, flags); ha->interrupts_on = 1; /* enable risc and host interrupts */ - wrt_reg_word(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC); - rd_reg_word(&reg->ictrl); + WRT_REG_WORD(&reg->ictrl, ICR_EN_INT | ICR_EN_RISC); + RD_REG_WORD(&reg->ictrl); spin_unlock_irqrestore(&ha->hardware_lock, flags); } @@ -1899,8 +2177,8 @@ qla2x00_disable_intrs(struct qla_hw_data *ha) spin_lock_irqsave(&ha->hardware_lock, flags); ha->interrupts_on = 0; /* disable risc and host interrupts */ - wrt_reg_word(&reg->ictrl, 0); - rd_reg_word(&reg->ictrl); + WRT_REG_WORD(&reg->ictrl, 0); + RD_REG_WORD(&reg->ictrl); spin_unlock_irqrestore(&ha->hardware_lock, flags); } @@ -1912,8 +2190,8 @@ qla24xx_enable_intrs(struct qla_hw_data *ha) spin_lock_irqsave(&ha->hardware_lock, flags); ha->interrupts_on = 1; - wrt_reg_dword(&reg->ictrl, ICRX_EN_RISC_INT); - rd_reg_dword(&reg->ictrl); + WRT_REG_DWORD(&reg->ictrl, ICRX_EN_RISC_INT); + RD_REG_DWORD(&reg->ictrl); spin_unlock_irqrestore(&ha->hardware_lock, flags); } @@ -1927,8 +2205,8 @@ qla24xx_disable_intrs(struct qla_hw_data *ha) return; spin_lock_irqsave(&ha->hardware_lock, flags); ha->interrupts_on = 0; - wrt_reg_dword(&reg->ictrl, 0); - rd_reg_dword(&reg->ictrl); + WRT_REG_DWORD(&reg->ictrl, 0); + RD_REG_DWORD(&reg->ictrl); spin_unlock_irqrestore(&ha->hardware_lock, flags); } @@ -1994,11 +2272,6 @@ qla2x00_iospace_config(struct qla_hw_data *ha) /* Determine queue resources */ ha->max_req_queues = ha->max_rsp_queues = 1; ha->msix_count = QLA_BASE_VECTORS; - - /* Check if FW supports MQ or not */ - if (!(ha->fw_attributes & BIT_6)) - goto mqiobase_exit; - if (!ql2xmqsupport || !ql2xnvmeenable || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))) goto mqiobase_exit; @@ -2007,7 +2280,7 @@ qla2x00_iospace_config(struct qla_hw_data *ha) pci_resource_len(ha->pdev, 3)); if (ha->mqiobase) { ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018, - "MQIO Base=%p.\n", ha->mqiobase); + "MQIO Base=%px.\n", ha->mqiobase); /* Read MSIX vector size of the board */ pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix); ha->msix_count = msix + 1; @@ -2291,7 +2564,7 @@ static struct isp_operations qla81xx_isp_ops = { .config_rings = qla24xx_config_rings, .reset_adapter = qla24xx_reset_adapter, .nvram_config = qla81xx_nvram_config, -
.update_fw_options = qla24xx_update_fw_options, + .update_fw_options = qla83xx_update_fw_options, .load_risc = qla81xx_load_risc, .pci_info_str = qla24xx_pci_info_str, .fw_version_str = qla24xx_fw_version_str, @@ -2505,7 +2778,7 @@ static struct isp_operations qla27xx_isp_ops = { .read_nvram = NULL, .write_nvram = NULL, .fw_dump = qla27xx_fwdump, - .mpi_fw_dump = qla27xx_mpi_fwdump, + .mpi_fw_dump = qla27xx_mpi_fwdump, .beacon_on = qla24xx_beacon_on, .beacon_off = qla24xx_beacon_off, .beacon_blink = qla83xx_beacon_blink, @@ -2640,6 +2913,7 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha) case PCI_DEVICE_ID_QLOGIC_ISPF001: ha->isp_type |= DT_ISPFX00; break; + case PCI_DEVICE_ID_QLOGIC_ISP2061: case PCI_DEVICE_ID_QLOGIC_ISP2071: ha->isp_type |= DT_ISP2071; ha->device_type |= DT_ZIO_SUPPORTED; @@ -2730,6 +3004,16 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) return atomic_read(&vha->loop_state) == LOOP_READY; } +static void qla_hb_work_fn(struct work_struct *work) +{ + struct qla_hw_data *ha = container_of(work, + struct qla_hw_data, hb_work); + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + + if (!ha->flags.mbox_busy && base_vha->flags.init_done) + qla_no_op_mb(base_vha); +} + static void qla2x00_iocb_work_fn(struct work_struct *work) { struct scsi_qla_host *vha = container_of(work, @@ -2791,7 +3075,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2081 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2281 || pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2089 || - pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289) { + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2289 || + pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2971 ) { bars = pci_select_bars(pdev, IORESOURCE_MEM); mem_only = 1; ql_dbg_pci(ql_dbg_init, pdev, 0x0007, @@ -2823,13 +3108,24 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) goto disable_device; } ql_dbg_pci(ql_dbg_init, pdev, 0x000a, - "Memory allocated for ha=%p.\n", ha); + "Memory allocated for ha=%px.\n", ha); ha->pdev = pdev; INIT_LIST_HEAD(&ha->tgt.q_full_list); spin_lock_init(&ha->tgt.q_full_lock); spin_lock_init(&ha->tgt.sess_lock); spin_lock_init(&ha->tgt.atio_lock); + spin_lock_init(&ha->sadb_lock); + INIT_LIST_HEAD(&ha->sadb_tx_index_list); + INIT_LIST_HEAD(&ha->sadb_rx_index_list); + + spin_lock_init(&ha->sadb_fp_lock); + + if (qla_edif_sadb_build_free_pool(ha)) { + kfree(ha); + goto disable_device; + } + atomic_set(&ha->nvme_active_aen_cnt, 0); /* Clear our data area */ @@ -3028,8 +3324,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->portnum = PCI_FUNC(ha->pdev->devfn); ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400; ha->mbx_count = MAILBOX_REGISTER_COUNT; - req_length = REQUEST_ENTRY_CNT_24XX; - rsp_length = RESPONSE_ENTRY_CNT_2300; + req_length = REQUEST_ENTRY_CNT_83XX; + rsp_length = RESPONSE_ENTRY_CNT_83XX; ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX; ha->max_loop_id = SNS_LAST_LOOP_ID_2300; ha->init_cb_size = sizeof(struct mid_init_cb_81xx); @@ -3052,7 +3348,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->init_cb_size, ha->gid_list_info_size, ha->optrom_size, ha->nvram_npiv_size, ha->max_fibre_devices); ql_dbg_pci(ql_dbg_init, pdev, 0x001f, - "isp_ops=%p, flash_conf_off=%d, " + "isp_ops=%px, flash_conf_off=%d, " "flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n", ha->isp_ops, ha->flash_conf_off, ha->flash_data_off, ha->nvram_conf_off, ha->nvram_data_off); @@ -3063,7 +3359,7 @@ 
qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) goto iospace_config_failed; ql_log_pci(ql_log_info, pdev, 0x001d, - "Found an ISP%04X irq %d iobase 0x%p.\n", + "Found an ISP%04X irq %d iobase 0x%px.\n", pdev->device, pdev->irq, ha->iobase); mutex_init(&ha->vport_lock); mutex_init(&ha->mq_lock); @@ -3102,6 +3398,20 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, base_vha); set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags); + ha->sfc.vha = base_vha; + ha->sfc.rstats = &ha->scm.rstats; + ha->sfc.mode = ql2x_scmr_throttle_mode; + ha->scm.last_event_timestamp = qla_get_real_seconds(); + + /* Will be updated to pull from NVRAM */ + if (ql2x_scmr_use_slow_queue) + ha->sfc.profile.scmr_control_flags |= + QLA_USE_FW_SLOW_QUEUE; + if (ql2x_scmr_flow_ctl_host) + ha->sfc.profile.scmr_control_flags |= + QLA_APPLY_SCMR_THROTTLING; + ha->sfc.profile.scmr_profile = ql2x_scmr_profile; + host = base_vha->host; base_vha->req = req; if (IS_QLA2XXX_MIDTYPE(ha)) @@ -3119,6 +3429,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED; ha->mr.fcport.scan_state = 1; + qla2xxx_reset_stats(host, + QLA2XX_HW_ERROR|QLA2XX_SHT_LNK_DWN|QLA2XX_INT_ERR| + QLA2XX_CMD_TIMEOUT|QLA2XX_RESET_CMD_ERR|QLA2XX_TGT_SHT_LNK_DOWN); + /* Set the SG table size based on ISP type */ if (!IS_FWI2_CAPABLE(ha)) { if (IS_QLA2100(ha)) @@ -3147,12 +3461,13 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ql_dbg(ql_dbg_init, base_vha, 0x0033, "max_id=%d this_id=%d " "cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d " - "max_lun=%llu transportt=%p, vendor_id=%llu.\n", host->max_id, + "max_lun=%llu transportt=%px, vendor_id=%llu.\n", host->max_id, host->this_id, host->cmd_per_lun, host->unique_id, - host->max_cmd_len, host->max_channel, host->max_lun, + host->max_cmd_len, host->max_channel, lun_cast(host->max_lun), host->transportt, sht->vendor_id); INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); + INIT_WORK(&ha->hb_work, qla_hb_work_fn); /* Set up the irqs */ ret = qla2x00_request_irqs(ha, rsp); @@ -3171,7 +3486,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) if (ha->mqenable) { /* number of hardware queues supported by blk/scsi-mq*/ - host->nr_hw_queues = ha->max_qpairs; + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + /* The last queue pair is reserved for slow queue */ + host->nr_hw_queues = ha->max_qpairs - 1; + } else { + host->nr_hw_queues = ha->max_qpairs; + } ql_dbg(ql_dbg_init, base_vha, 0x0192, "blk/scsi-mq enabled, HW queues = %d.\n", host->nr_hw_queues); @@ -3229,21 +3549,21 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) } ql_dbg(ql_dbg_multiq, base_vha, 0xc009, - "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", + "rsp_q_map=%px req_q_map=%px rsp->req=%px req->rsp=%px.\n", ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); ql_dbg(ql_dbg_multiq, base_vha, 0xc00a, - "req->req_q_in=%p req->req_q_out=%p " - "rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", + "req->req_q_in=%px req->req_q_out=%px " + "rsp->rsp_q_in=%px rsp->rsp_q_out=%px.\n", req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); ql_dbg(ql_dbg_init, base_vha, 0x003e, - "rsp_q_map=%p req_q_map=%p rsp->req=%p req->rsp=%p.\n", + "rsp_q_map=%px req_q_map=%px rsp->req=%px req->rsp=%px.\n", ha->rsp_q_map, ha->req_q_map, rsp->req, req->rsp); ql_dbg(ql_dbg_init, base_vha, 0x003f, - "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p 
rsp->rsp_q_out=%p.\n", + "req->req_q_in=%px req->req_q_out=%px rsp->rsp_q_in=%px rsp->rsp_q_out=%px.\n", req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); - ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0); + ha->wq = alloc_workqueue("qla2xxx_wq", WQ_MEM_RECLAIM, 0); if (unlikely(!ha->wq)) { ret = -ENOMEM; goto probe_failed; @@ -3281,10 +3601,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) host->can_queue = req->num_outstanding_cmds - 10; ql_dbg(ql_dbg_init, base_vha, 0x0032, - "can_queue=%d, req=%p, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", + "can_queue=%d, req=%px, mgmt_svr_loop_id=%d, sg_tablesize=%d.\n", host->can_queue, base_vha->req, base_vha->mgmt_svr_loop_id, host->sg_tablesize); + /* Check if FW supports MQ or not for ISP25xx*/ + if (IS_QLA25XX(ha) && !(ha->fw_attributes & BIT_6)) { + ha->mqenable = 0; + } + if (ha->mqenable) { bool startit = false; @@ -3295,8 +3620,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) startit = true; /* Create start of day qpairs for Block MQ */ - for (i = 0; i < ha->max_qpairs; i++) - qla2xxx_create_qpair(base_vha, 5, 0, startit); + if (IS_SCM_CAPABLE(ha) && ha->flags.scm_supported_f) { + for (i = 0; i < (ha->max_qpairs - 1); i++) + qla2xxx_create_qpair(base_vha, 5, 0, startit); + /* Create a Slow queue */ + ql_log(ql_log_info, base_vha, 0x00ed, + "SCMR: Creating Slow queue\n"); + qla2xxx_create_qpair(base_vha, 1, 0, startit); + } else { + for (i = 0; i < ha->max_qpairs; i++) + qla2xxx_create_qpair(base_vha, 5, 0, startit); + } } qla_init_iocb_limit(base_vha); @@ -3348,11 +3682,12 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) /* Initialized the timer */ qla2x00_start_timer(base_vha, WATCH_INTERVAL); + qla2x00_start_perf_timer(base_vha); ql_dbg(ql_dbg_init, base_vha, 0x00ef, "Started qla2x00_timer with " "interval=%d.\n", WATCH_INTERVAL); ql_dbg(ql_dbg_init, base_vha, 0x00f0, - "Detected hba at address=%p.\n", + "Detected hba at address=%px.\n", ha); if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) { @@ -3377,8 +3712,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) guard = SHOST_DIX_GUARD_CRC; - if (IS_PI_IPGUARD_CAPABLE(ha) && - (ql2xenabledif > 1 || IS_PI_DIFB_DIX0_CAPABLE(ha))) + if (IS_PI_IPGUARD_CAPABLE(ha) && ql2xenabledif) guard |= SHOST_DIX_GUARD_IP; if (ql2xprotguard) @@ -3409,6 +3743,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ql_dbg(ql_dbg_init, base_vha, 0x00f2, "Init done and hba is online.\n"); + qla_trace_init(&ha->srb_trace, "srb_trace", ql2xnum_srb_trace); + if (qla_ini_mode_enabled(base_vha) || qla_dual_mode_enabled(base_vha)) scsi_scan_host(host); @@ -3418,6 +3754,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) qla2x00_alloc_sysfs_attr(base_vha); + if (IS_SCM_CAPABLE(ha)) + qla2xxx_scm_alloc_rdf_payload(base_vha); + if (IS_QLAFX00(ha)) { ret = qlafx00_fx_disc(base_vha, &base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO); @@ -3451,6 +3790,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) return 0; probe_failed: + qla_enode_stop(base_vha); + qla_edb_stop(base_vha); if (base_vha->gnl.l) { dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); @@ -3459,6 +3800,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) if (base_vha->timer_active) qla2x00_stop_timer(base_vha); + if (base_vha->perf_timer_active) + qla2x00_stop_perf_timer(base_vha); + base_vha->flags.online = 0; if 
(ha->dpc_thread) { struct task_struct *t = ha->dpc_thread; @@ -3479,6 +3823,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) rsp = NULL; probe_hw_failed: + qla_trace_uninit(&ha->srb_trace); + qla2x00_mem_free(ha); qla2x00_free_req_que(ha, req); qla2x00_free_rsp_que(ha, rsp); @@ -3575,6 +3921,8 @@ qla2x00_shutdown(struct pci_dev *pdev) /* Disable timer */ if (vha->timer_active) qla2x00_stop_timer(vha); + if (vha->perf_timer_active) + qla2x00_stop_perf_timer(vha); /* Turn adapter off line */ vha->flags.online = 0; @@ -3721,13 +4069,6 @@ qla2x00_remove_one(struct pci_dev *pdev) } qla2x00_wait_for_hba_ready(base_vha); - /* - * if UNLOADING flag is already set, then continue unload, - * where it was set first. - */ - if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) - return; - if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (ha->flags.fw_started) @@ -3746,20 +4087,36 @@ qla2x00_remove_one(struct pci_dev *pdev) qla2x00_wait_for_sess_deletion(base_vha); + /* + * if UNLOAD flag is already set, then continue unload, + * where it was set first. + */ + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + return; + + set_bit(UNLOADING, &base_vha->dpc_flags); + qla_nvme_delete(base_vha); dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); base_vha->gnl.l = NULL; + qla_enode_stop(base_vha); + qla_edb_stop(base_vha); vfree(base_vha->scan.l); if (IS_QLAFX00(ha)) qlafx00_driver_shutdown(base_vha, 20); + if (IS_SCM_CAPABLE(ha)) + qla2xxx_scm_free_rdf_payload(base_vha); + qla2x00_delete_all_vps(ha, base_vha); + qla_trace_uninit(&ha->srb_trace); + qla2x00_dfs_remove(base_vha); qla84xx_put_chip(base_vha); @@ -3767,6 +4124,8 @@ qla2x00_remove_one(struct pci_dev *pdev) /* Disable timer */ if (base_vha->timer_active) qla2x00_stop_timer(base_vha); + if (base_vha->perf_timer_active) + qla2x00_stop_perf_timer(base_vha); base_vha->flags.online = 0; @@ -3785,7 +4144,6 @@ qla2x00_remove_one(struct pci_dev *pdev) qla2x00_free_sysfs_attr(base_vha, true); fc_remove_host(base_vha->host); - qlt_remove_target_resources(ha); scsi_remove_host(base_vha->host); @@ -3808,13 +4166,15 @@ qla2x00_remove_one(struct pci_dev *pdev) static inline void qla24xx_free_purex_list(struct purex_list *list) { - struct list_head *item, *next; + struct purex_item *item, *next; ulong flags; spin_lock_irqsave(&list->lock, flags); - list_for_each_safe(item, next, &list->head) { - list_del(item); - kfree(list_entry(item, struct purex_item, list)); + list_for_each_entry_safe(item, next, &list->head, list) { + list_del(&item->list); + if (item == &item->vha->default_item) + continue; + kfree(item); } spin_unlock_irqrestore(&list->lock, flags); } @@ -3829,6 +4189,8 @@ qla2x00_free_device(scsi_qla_host_t *vha) /* Disable timer */ if (vha->timer_active) qla2x00_stop_timer(vha); + if (vha->perf_timer_active) + qla2x00_stop_perf_timer(vha); qla25xx_delete_queues(vha); vha->flags.online = 0; @@ -3857,6 +4219,9 @@ qla2x00_free_device(scsi_qla_host_t *vha) qla82xx_md_free(vha); + qla_edif_sadb_release_free_pool(ha); + qla_edif_sadb_release(ha); + qla2x00_free_queues(ha); } @@ -3878,7 +4243,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport) if (fcport->rport) { ql_dbg(ql_dbg_disc, fcport->vha, 0x2109, - "%s %8phN. rport %p roles %x\n", + "%s %8phN. 
rport %px roles %x\n", __func__, fcport->port_name, fcport->rport, fcport->rport->roles); fc_remote_port_delete(fcport->rport); @@ -3908,7 +4273,9 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, vha->vp_idx == fcport->vha->vp_idx) { qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); qla2x00_schedule_rport_del(vha, fcport); + } + /* * We may need to retry the login, so don't change the state of the * port but do the retries. @@ -3922,6 +4289,19 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, set_bit(RELOGIN_NEEDED, &vha->dpc_flags); } +/* + * qla2x00_mark_all_devices_lost + * Updates fcport state when device goes offline. + * + * Input: + * ha = adapter block pointer. + * fcport = port structure pointer. + * + * Return: + * None. + * + * Context: + */ void qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha) { @@ -3936,9 +4316,10 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha) fcport->port_type == FCT_TARGET && !qla2x00_reset_active(vha)) { ql_dbg(ql_dbg_disc, vha, 0x211a, - "Delaying session delete for FCP2 flags 0x%x port_type = 0x%x port_id=%06x %phC", - fcport->flags, fcport->port_type, - fcport->d_id.b24, fcport->port_name); + "Delaying session delete for FCP2 flags 0x%x " + "port_type = 0x%x port_id=%06x %phC", fcport->flags, + fcport->port_type, fcport->d_id.b24, + fcport->port_name); continue; } fcport->scan_state = 0; @@ -3972,14 +4353,26 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, struct req_que **req, struct rsp_que **rsp) { char name[16]; + int rc; + + if (QLA_TGT_MODE_ENABLED() || EDIF_CAP(ha)) { + ha->vp_map = kcalloc(MAX_MULTI_ID_FABRIC, sizeof(struct qla_vp_map), GFP_KERNEL); + if (!ha->vp_map) + goto fail; + } ha->init_cb = dma_alloc_coherent(&ha->pdev->dev, ha->init_cb_size, &ha->init_cb_dma, GFP_KERNEL); if (!ha->init_cb) - goto fail; + goto fail_free_vp_map; - if (qlt_mem_alloc(ha) < 0) + rc = btree_init32(&ha->host_map); + if (rc) { goto fail_free_init_cb; + } + + if (qlt_mem_alloc(ha) < 0) + goto fail_free_btree; ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL); @@ -3990,7 +4383,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, if (!ha->srb_mempool) goto fail_free_gid_list; - if (IS_P3P_TYPE(ha)) { + if (IS_P3P_TYPE(ha) || IS_QLA27XX(ha) || (ql2xsecenable & IS_QLA28XX(ha))) { /* Allocate cache for CT6 Ctx. 
*/ if (!ctx_cachep) { ctx_cachep = kmem_cache_create("qla2xxx_ctx", @@ -4004,7 +4397,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, if (!ha->ctx_mempool) goto fail_free_srb_mempool; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0021, - "ctx_cachep=%p ctx_mempool=%p.\n", + "ctx_cachep=%px ctx_mempool=%px.\n", ctx_cachep, ha->ctx_mempool); } @@ -4021,10 +4414,10 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, goto fail_free_nvram; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0022, - "init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n", + "init_cb=%px gid_list=%px, srb_mempool=%px s_dma_pool=%px.\n", ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool); - if (IS_P3P_TYPE(ha) || ql2xenabledif) { + if (IS_P3P_TYPE(ha) || ql2xenabledif || (IS_QLA28XX(ha) & ql2xsecenable)) { ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev, DSD_LIST_DMA_POOL_SIZE, 8, 0); if (!ha->dl_dma_pool) { @@ -4065,7 +4458,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, ql_dbg_pci(ql_dbg_init, ha->pdev, 0xe0ee, "%s: failed alloc dsd\n", __func__); - return -ENOMEM; + return 1; } ha->dif_bundle_kallocs++; @@ -4117,7 +4510,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, } ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0025, - "dl_dma_pool=%p fcp_cmnd_dma_pool=%p dif_bundl_pool=%p.\n", + "dl_dma_pool=%px fcp_cmnd_dma_pool=%px dif_bundl_pool=%px.\n", ha->dl_dma_pool, ha->fcp_cmnd_dma_pool, ha->dif_bundl_pool); } @@ -4130,7 +4523,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, if (!ha->sns_cmd) goto fail_dma_pool; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026, - "sns_cmd: %p.\n", ha->sns_cmd); + "sns_cmd: %px.\n", ha->sns_cmd); } else { /* Get consistent memory allocated for MS IOCB */ ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, @@ -4143,7 +4536,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, if (!ha->ct_sns) goto fail_free_ms_iocb; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0027, - "ms_iocb=%p ct_sns=%p.\n", + "ms_iocb=%px ct_sns=%px.\n", ha->ms_iocb, ha->ct_sns); } @@ -4183,8 +4576,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, (*req)->rsp = *rsp; (*rsp)->req = *req; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002c, - "req=%p req->length=%d req->ring=%p rsp=%p " - "rsp->length=%d rsp->ring=%p.\n", + "req=%px req->length=%d req->ring=%px rsp=%px " + "rsp->length=%d rsp->ring=%px.\n", *req, (*req)->length, (*req)->ring, *rsp, (*rsp)->length, (*rsp)->ring); /* Allocate memory for NVRAM data for vports */ @@ -4208,18 +4601,18 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, if (!ha->ex_init_cb) goto fail_ex_init_cb; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002e, - "ex_init_cb=%p.\n", ha->ex_init_cb); + "ex_init_cb=%px.\n", ha->ex_init_cb); } /* Get consistent memory allocated for Special Features-CB. 
*/ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { ha->sf_init_cb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, - &ha->sf_init_cb_dma); + &ha->sf_init_cb_dma); if (!ha->sf_init_cb) goto fail_sf_init_cb; memset(ha->sf_init_cb, 0, sizeof(struct init_sf_cb)); ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199, - "sf_init_cb=%p.\n", ha->sf_init_cb); + "sf_init_cb=%px.\n", ha->sf_init_cb); } INIT_LIST_HEAD(&ha->gbl_dsd_list); @@ -4231,7 +4624,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, if (!ha->async_pd) goto fail_async_pd; ql_dbg_pci(ql_dbg_init, ha->pdev, 0x002f, - "async_pd=%p.\n", ha->async_pd); + "async_pd=%px.\n", ha->async_pd); } INIT_LIST_HEAD(&ha->vp_list); @@ -4245,7 +4638,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, else { qla2x00_set_reserved_loop_ids(ha); ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, - "loop_id_map=%p.\n", ha->loop_id_map); + "loop_id_map=%px.\n", ha->loop_id_map); } ha->sfp_data = dma_alloc_coherent(&ha->pdev->dev, @@ -4265,8 +4658,50 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, goto fail_flt_buffer; } + /* allocate the purex dma pool */ + ha->purex_dma_pool = dma_pool_create(name, &ha->pdev->dev, + ELS_MAX_PAYLOAD, 8, 0); + + if (!ha->purex_dma_pool) { + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011b, + "Unable to allocate purex_dma_pool.\n"); + goto fail_flt; + } + + + ha->elsrej.size = sizeof(struct fc_els_ls_rjt) + 16; + ha->elsrej.c = dma_alloc_coherent(&ha->pdev->dev, + ha->elsrej.size, &ha->elsrej.cdma, GFP_KERNEL); + + if (!ha->elsrej.c) { + ql_dbg_pci(ql_dbg_init, ha->pdev, 0xffff, + "Alloc failed for els reject cmd.\n"); + goto fail_elsrej; + } + ha->elsrej.c->er_cmd = ELS_LS_RJT; + ha->elsrej.c->er_reason = ELS_RJT_LOGIC; + ha->elsrej.c->er_explan = ELS_EXPL_UNAB_DATA; + + /* Get consistent memory allocated for USCM ELS commands */ + if (IS_SCM_CAPABLE(ha)) { + ha->edc_rsp_payload = dma_zalloc_coherent(&ha->pdev->dev, + sizeof(struct edc_els_resp_payload), &ha->edc_rsp_payload_dma, GFP_KERNEL); + if (!ha->edc_rsp_payload) + goto fail_scm_els; + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0030, + "edc_rsp_payload: %px.\n", ha->edc_rsp_payload); + } return 0; +fail_scm_els: + dma_free_coherent(&ha->pdev->dev, ha->elsrej.size, + ha->elsrej.c, ha->elsrej.cdma); +fail_elsrej: + dma_pool_destroy(ha->purex_dma_pool); +fail_flt: + dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, + ha->flt, ha->flt_dma); + fail_flt_buffer: dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, ha->sfp_data, ha->sfp_data_dma); @@ -4357,11 +4792,15 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, ha->gid_list_dma = 0; fail_free_tgt_mem: qlt_mem_free(ha); +fail_free_btree: + btree_destroy32(&ha->host_map); fail_free_init_cb: dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, ha->init_cb_dma); ha->init_cb = NULL; ha->init_cb_dma = 0; +fail_free_vp_map: + kfree(ha->vp_map); fail: ql_log(ql_log_fatal, NULL, 0x0030, "Memory allocation failure.\n"); @@ -4627,7 +5066,7 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha) ha->flags.fce_enabled = 0; ha->eft = NULL; ha->eft_dma = 0; - ha->fw_dumped = false; + ha->fw_dumped = 0; ha->fw_dump_cap_flags = 0; ha->fw_dump_reading = 0; ha->fw_dump = NULL; @@ -4702,7 +5141,9 @@ qla2x00_mem_free(struct qla_hw_data *ha) if (ha->sf_init_cb) dma_pool_free(ha->s_dma_pool, - ha->sf_init_cb, ha->sf_init_cb_dma); + ha->sf_init_cb, ha->sf_init_cb_dma); + ha->sf_init_cb = NULL; + ha->sf_init_cb_dma = 0; if (ha->ex_init_cb) 
dma_pool_free(ha->s_dma_pool, @@ -4775,10 +5216,30 @@ qla2x00_mem_free(struct qla_hw_data *ha) ha->dif_bundl_pool = NULL; qlt_mem_free(ha); + qla_remove_hostmap(ha); if (ha->init_cb) dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, ha->init_cb_dma); + + if (ha->purex_dma_pool) { + dma_pool_destroy(ha->purex_dma_pool); + ha->purex_dma_pool = NULL; + } + + if (ha->elsrej.c){ + dma_free_coherent(&ha->pdev->dev, ha->elsrej.size, + ha->elsrej.c, ha->elsrej.cdma); + ha->elsrej.c = NULL; + } + + if (ha->edc_rsp_payload) { + dma_free_coherent(&ha->pdev->dev, sizeof(struct edc_els_resp_payload), + ha->edc_rsp_payload, ha->edc_rsp_payload_dma); + ha->edc_rsp_payload = NULL; + ha->edc_rsp_payload_dma = 0; + } + ha->init_cb = NULL; ha->init_cb_dma = 0; @@ -4791,9 +5252,10 @@ qla2x00_mem_free(struct qla_hw_data *ha) kfree(ha->swl); ha->swl = NULL; kfree(ha->loop_id_map); - ha->sf_init_cb = NULL; - ha->sf_init_cb_dma = 0; ha->loop_id_map = NULL; + + kfree(ha->vp_map); + ha->vp_map = NULL; } struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, @@ -4815,6 +5277,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, vha->host = host; vha->host_no = host->host_no; + vha->hw = ha; vha->qlini_mode = ql2x_ini_mode; @@ -4830,7 +5293,6 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, INIT_LIST_HEAD(&vha->plogi_ack_list); INIT_LIST_HEAD(&vha->qp_list); INIT_LIST_HEAD(&vha->gnl.fcports); - INIT_LIST_HEAD(&vha->gpnid_list); INIT_WORK(&vha->iocb_work, qla2x00_iocb_work_fn); INIT_LIST_HEAD(&vha->purex_list.head); @@ -4840,6 +5302,9 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, spin_lock_init(&vha->cmd_list_lock); init_waitqueue_head(&vha->fcport_waitQ); init_waitqueue_head(&vha->vref_waitq); + qla_enode_init(vha); + qla_edb_init(vha); + vha->gnl.size = sizeof(struct get_name_list_extended) * (ha->max_loop_id + 1); @@ -4866,9 +5331,10 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, } INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn); - sprintf(vha->host_str, "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no); + snprintf(vha->host_str, sizeof(vha->host_str), "%s_%ld", \ + QLA2XXX_DRIVER_NAME, vha->host_no); ql_dbg(ql_dbg_init, vha, 0x0041, - "Allocated the host=%p hw=%p vha=%p dev_name=%s", + "Allocated the host=%px hw=%px vha=%px dev_name=%s", vha->host, vha->hw, vha, dev_name(&(ha->pdev->dev))); @@ -4881,9 +5347,6 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type) struct qla_work_evt *e; uint8_t bail; - if (test_bit(UNLOADING, &vha->dpc_flags)) - return NULL; - QLA_VHA_MARK_BUSY(vha, bail); if (bail) return NULL; @@ -4995,7 +5458,7 @@ qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code) switch (code) { case QLA_UEVENT_CODE_FW_DUMP: - snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu", + snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld", vha->host_no); break; default: @@ -5077,11 +5540,17 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) fcport->d_id = e->u.new_sess.id; fcport->flags |= FCF_FABRIC_DEVICE; fcport->fw_login_state = DSC_LS_PLOGI_PEND; + fcport->tgt_short_link_down_cnt = 0; memcpy(fcport->port_name, e->u.new_sess.port_name, WWN_SIZE); fcport->fc4_type = e->u.new_sess.fc4_type; + if (NVME_PRIORITY(vha->hw, fcport)) + fcport->do_prli_nvme = 1; + else + fcport->do_prli_nvme = 0; + if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) { fcport->dm_login_expire = jiffies + QLA_N2N_WAIT_TIME * HZ; 
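The error paths added to qla2x00_mem_alloc() above (fail_scm_els, fail_elsrej, fail_flt, down through fail_free_vp_map) follow the kernel's usual goto-unwind convention: each allocation that can fail jumps to a label that releases only what was allocated before it, in reverse order, so a failure at any step returns with nothing leaked. A minimal standalone sketch of the same pattern follows; plain malloc/free stand in for the driver's DMA pool and coherent allocations, and the names res_a/res_b/res_c are illustrative only, not the driver's actual structures.

/*
 * Illustrative sketch of the reverse-order goto-unwind pattern used by
 * qla2x00_mem_alloc(). All resource names here are hypothetical.
 */
#include <stdlib.h>

struct res_set {
	void *res_a;
	void *res_b;
	void *res_c;
};

static int res_set_alloc(struct res_set *rs)
{
	rs->res_a = malloc(64);
	if (!rs->res_a)
		goto fail;

	rs->res_b = malloc(128);
	if (!rs->res_b)
		goto fail_free_a;	/* undo step 1 only */

	rs->res_c = malloc(256);
	if (!rs->res_c)
		goto fail_free_b;	/* undo step 2, then fall into step 1 */

	return 0;

fail_free_b:
	free(rs->res_b);
fail_free_a:
	free(rs->res_a);
fail:
	return -1;		/* nothing allocated remains on failure */
}

Keeping the labels in strict reverse allocation order is what lets a new allocation, like the edc_rsp_payload buffer above, be added with one new label and one new free, without touching the existing error paths.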
@@ -5268,9 +5737,6 @@ qla2x00_do_work(struct scsi_qla_host *vha) case QLA_EVT_AENFX: qlafx00_process_aen(vha, e); break; - case QLA_EVT_GPNID: - qla24xx_async_gpnid(vha, &e->u.gpnid.id); - break; case QLA_EVT_UNMAP: qla24xx_sp_unmap(vha, e->u.iosb.sp); break; @@ -5313,9 +5779,6 @@ qla2x00_do_work(struct scsi_qla_host *vha) case QLA_EVT_GNNFT_DONE: qla24xx_async_gnnft_done(vha, e->u.iosb.sp); break; - case QLA_EVT_GNNID: - qla24xx_async_gnnid(vha, e->u.fcport.fcport); - break; case QLA_EVT_GFPNID: qla24xx_async_gfpnid(vha, e->u.fcport.fcport); break; @@ -5329,6 +5792,9 @@ qla2x00_do_work(struct scsi_qla_host *vha) qla24xx_els_dcmd2_iocb(vha, ELS_DCMD_PLOGI, e->u.fcport.fcport, false); break; + case QLA_EVT_SA_REPLACE: + rc = qla24xx_issue_sa_replace_iocb(vha, e); + break; } if (rc == EAGAIN) { @@ -5378,6 +5844,7 @@ void qla2x00_relogin(struct scsi_qla_host *vha) if (atomic_read(&fcport->state) != FCS_ONLINE && fcport->login_retry) { if (fcport->scan_state != QLA_FCPORT_FOUND || + fcport->disc_state == DSC_LOGIN_AUTH_PEND || fcport->disc_state == DSC_LOGIN_COMPLETE) continue; @@ -5765,466 +6232,6 @@ qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id) return; } -static bool -qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha, - struct purex_entry_24xx *purex) -{ - char fwstr[16]; - u32 sid = purex->s_id[2] << 16 | purex->s_id[1] << 8 | purex->s_id[0]; - struct port_database_24xx *pdb; - - /* Domain Controller is always logged-out. */ - /* if RDP request is not from Domain Controller: */ - if (sid != 0xfffc01) - return false; - - ql_dbg(ql_dbg_init, vha, 0x0181, "%s: s_id=%#x\n", __func__, sid); - - pdb = kzalloc(sizeof(*pdb), GFP_KERNEL); - if (!pdb) { - ql_dbg(ql_dbg_init, vha, 0x0181, - "%s: Failed allocate pdb\n", __func__); - } else if (qla24xx_get_port_database(vha, - le16_to_cpu(purex->nport_handle), pdb)) { - ql_dbg(ql_dbg_init, vha, 0x0181, - "%s: Failed get pdb sid=%x\n", __func__, sid); - } else if (pdb->current_login_state != PDS_PLOGI_COMPLETE && - pdb->current_login_state != PDS_PRLI_COMPLETE) { - ql_dbg(ql_dbg_init, vha, 0x0181, - "%s: Port not logged in sid=%#x\n", __func__, sid); - } else { - /* RDP request is from logged in port */ - kfree(pdb); - return false; - } - kfree(pdb); - - vha->hw->isp_ops->fw_version_str(vha, fwstr, sizeof(fwstr)); - fwstr[strcspn(fwstr, " ")] = 0; - /* if FW version allows RDP response length upto 2048 bytes: */ - if (strcmp(fwstr, "8.09.00") > 0 || strcmp(fwstr, "8.05.65") == 0) - return false; - - ql_dbg(ql_dbg_init, vha, 0x0181, "%s: fw=%s\n", __func__, fwstr); - - /* RDP response length is to be reduced to maximum 256 bytes */ - return true; -} - -/* - * Function Name: qla24xx_process_purex_iocb - * - * Description: - * Prepare a RDP response and send to Fabric switch - * - * PARAMETERS: - * vha: SCSI qla host - * purex: RDP request received by HBA - */ -void qla24xx_process_purex_rdp(struct scsi_qla_host *vha, - struct purex_item *item) -{ - struct qla_hw_data *ha = vha->hw; - struct purex_entry_24xx *purex = - (struct purex_entry_24xx *)&item->iocb; - dma_addr_t rsp_els_dma; - dma_addr_t rsp_payload_dma; - dma_addr_t stat_dma; - dma_addr_t sfp_dma; - struct els_entry_24xx *rsp_els = NULL; - struct rdp_rsp_payload *rsp_payload = NULL; - struct link_statistics *stat = NULL; - uint8_t *sfp = NULL; - uint16_t sfp_flags = 0; - uint rsp_payload_length = sizeof(*rsp_payload); - int rval; - - ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0180, - "%s: Enter\n", __func__); - - ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0181, - 
"-------- ELS REQ -------\n"); - ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0182, - purex, sizeof(*purex)); - - if (qla25xx_rdp_rsp_reduce_size(vha, purex)) { - rsp_payload_length = - offsetof(typeof(*rsp_payload), optical_elmt_desc); - ql_dbg(ql_dbg_init, vha, 0x0181, - "Reducing RSP payload length to %u bytes...\n", - rsp_payload_length); - } - - rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), - &rsp_els_dma, GFP_KERNEL); - if (!rsp_els) { - ql_log(ql_log_warn, vha, 0x0183, - "Failed allocate dma buffer ELS RSP.\n"); - goto dealloc; - } - - rsp_payload = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_payload), - &rsp_payload_dma, GFP_KERNEL); - if (!rsp_payload) { - ql_log(ql_log_warn, vha, 0x0184, - "Failed allocate dma buffer ELS RSP payload.\n"); - goto dealloc; - } - - sfp = dma_alloc_coherent(&ha->pdev->dev, SFP_RTDI_LEN, - &sfp_dma, GFP_KERNEL); - - stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat), - &stat_dma, GFP_KERNEL); - - /* Prepare Response IOCB */ - rsp_els->entry_type = ELS_IOCB_TYPE; - rsp_els->entry_count = 1; - rsp_els->sys_define = 0; - rsp_els->entry_status = 0; - rsp_els->handle = 0; - rsp_els->nport_handle = purex->nport_handle; - rsp_els->tx_dsd_count = cpu_to_le16(1); - rsp_els->vp_index = purex->vp_idx; - rsp_els->sof_type = EST_SOFI3; - rsp_els->rx_xchg_address = purex->rx_xchg_addr; - rsp_els->rx_dsd_count = 0; - rsp_els->opcode = purex->els_frame_payload[0]; - - rsp_els->d_id[0] = purex->s_id[0]; - rsp_els->d_id[1] = purex->s_id[1]; - rsp_els->d_id[2] = purex->s_id[2]; - - rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC); - rsp_els->rx_byte_count = 0; - rsp_els->tx_byte_count = cpu_to_le32(rsp_payload_length); - - put_unaligned_le64(rsp_payload_dma, &rsp_els->tx_address); - rsp_els->tx_len = rsp_els->tx_byte_count; - - rsp_els->rx_address = 0; - rsp_els->rx_len = 0; - - /* Prepare Response Payload */ - rsp_payload->hdr.cmd = cpu_to_be32(0x2 << 24); /* LS_ACC */ - rsp_payload->hdr.len = cpu_to_be32(le32_to_cpu(rsp_els->tx_byte_count) - - sizeof(rsp_payload->hdr)); - - /* Link service Request Info Descriptor */ - rsp_payload->ls_req_info_desc.desc_tag = cpu_to_be32(0x1); - rsp_payload->ls_req_info_desc.desc_len = - cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc)); - rsp_payload->ls_req_info_desc.req_payload_word_0 = - cpu_to_be32p((uint32_t *)purex->els_frame_payload); - - /* Link service Request Info Descriptor 2 */ - rsp_payload->ls_req_info_desc2.desc_tag = cpu_to_be32(0x1); - rsp_payload->ls_req_info_desc2.desc_len = - cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_req_info_desc2)); - rsp_payload->ls_req_info_desc2.req_payload_word_0 = - cpu_to_be32p((uint32_t *)purex->els_frame_payload); - - - rsp_payload->sfp_diag_desc.desc_tag = cpu_to_be32(0x10000); - rsp_payload->sfp_diag_desc.desc_len = - cpu_to_be32(RDP_DESC_LEN(rsp_payload->sfp_diag_desc)); - - if (sfp) { - /* SFP Flags */ - memset(sfp, 0, SFP_RTDI_LEN); - rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x7, 2, 0); - if (!rval) { - /* SFP Flags bits 3-0: Port Tx Laser Type */ - if (sfp[0] & BIT_2 || sfp[1] & (BIT_6|BIT_5)) - sfp_flags |= BIT_0; /* short wave */ - else if (sfp[0] & BIT_1) - sfp_flags |= BIT_1; /* long wave 1310nm */ - else if (sfp[1] & BIT_4) - sfp_flags |= BIT_1|BIT_0; /* long wave 1550nm */ - } - - /* SFP Type */ - memset(sfp, 0, SFP_RTDI_LEN); - rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 0x0, 1, 0); - if (!rval) { - sfp_flags |= BIT_4; /* optical */ - if (sfp[0] == 0x3) - sfp_flags |= BIT_6; /* sfp+ */ - } - - 
rsp_payload->sfp_diag_desc.sfp_flags = cpu_to_be16(sfp_flags); - - /* SFP Diagnostics */ - memset(sfp, 0, SFP_RTDI_LEN); - rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0x60, 10, 0); - if (!rval) { - __be16 *trx = (__force __be16 *)sfp; /* already be16 */ - rsp_payload->sfp_diag_desc.temperature = trx[0]; - rsp_payload->sfp_diag_desc.vcc = trx[1]; - rsp_payload->sfp_diag_desc.tx_bias = trx[2]; - rsp_payload->sfp_diag_desc.tx_power = trx[3]; - rsp_payload->sfp_diag_desc.rx_power = trx[4]; - } - } - - /* Port Speed Descriptor */ - rsp_payload->port_speed_desc.desc_tag = cpu_to_be32(0x10001); - rsp_payload->port_speed_desc.desc_len = - cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc)); - rsp_payload->port_speed_desc.speed_capab = cpu_to_be16( - qla25xx_fdmi_port_speed_capability(ha)); - rsp_payload->port_speed_desc.operating_speed = cpu_to_be16( - qla25xx_fdmi_port_speed_currently(ha)); - - /* Link Error Status Descriptor */ - rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002); - rsp_payload->ls_err_desc.desc_len = - cpu_to_be32(RDP_DESC_LEN(rsp_payload->ls_err_desc)); - - if (stat) { - rval = qla24xx_get_isp_stats(vha, stat, stat_dma, 0); - if (!rval) { - rsp_payload->ls_err_desc.link_fail_cnt = - cpu_to_be32(le32_to_cpu(stat->link_fail_cnt)); - rsp_payload->ls_err_desc.loss_sync_cnt = - cpu_to_be32(le32_to_cpu(stat->loss_sync_cnt)); - rsp_payload->ls_err_desc.loss_sig_cnt = - cpu_to_be32(le32_to_cpu(stat->loss_sig_cnt)); - rsp_payload->ls_err_desc.prim_seq_err_cnt = - cpu_to_be32(le32_to_cpu(stat->prim_seq_err_cnt)); - rsp_payload->ls_err_desc.inval_xmit_word_cnt = - cpu_to_be32(le32_to_cpu(stat->inval_xmit_word_cnt)); - rsp_payload->ls_err_desc.inval_crc_cnt = - cpu_to_be32(le32_to_cpu(stat->inval_crc_cnt)); - rsp_payload->ls_err_desc.pn_port_phy_type |= BIT_6; - } - } - - /* Portname Descriptor */ - rsp_payload->port_name_diag_desc.desc_tag = cpu_to_be32(0x10003); - rsp_payload->port_name_diag_desc.desc_len = - cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_diag_desc)); - memcpy(rsp_payload->port_name_diag_desc.WWNN, - vha->node_name, - sizeof(rsp_payload->port_name_diag_desc.WWNN)); - memcpy(rsp_payload->port_name_diag_desc.WWPN, - vha->port_name, - sizeof(rsp_payload->port_name_diag_desc.WWPN)); - - /* F-Port Portname Descriptor */ - rsp_payload->port_name_direct_desc.desc_tag = cpu_to_be32(0x10003); - rsp_payload->port_name_direct_desc.desc_len = - cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_name_direct_desc)); - memcpy(rsp_payload->port_name_direct_desc.WWNN, - vha->fabric_node_name, - sizeof(rsp_payload->port_name_direct_desc.WWNN)); - memcpy(rsp_payload->port_name_direct_desc.WWPN, - vha->fabric_port_name, - sizeof(rsp_payload->port_name_direct_desc.WWPN)); - - /* Bufer Credit Descriptor */ - rsp_payload->buffer_credit_desc.desc_tag = cpu_to_be32(0x10006); - rsp_payload->buffer_credit_desc.desc_len = - cpu_to_be32(RDP_DESC_LEN(rsp_payload->buffer_credit_desc)); - rsp_payload->buffer_credit_desc.fcport_b2b = 0; - rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0); - rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0); - - if (ha->flags.plogi_template_valid) { - uint32_t tmp = - be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred); - rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp); - } - - if (rsp_payload_length < sizeof(*rsp_payload)) - goto send; - - /* Optical Element Descriptor, Temperature */ - rsp_payload->optical_elmt_desc[0].desc_tag = cpu_to_be32(0x10007); - rsp_payload->optical_elmt_desc[0].desc_len = - 
cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); - /* Optical Element Descriptor, Voltage */ - rsp_payload->optical_elmt_desc[1].desc_tag = cpu_to_be32(0x10007); - rsp_payload->optical_elmt_desc[1].desc_len = - cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); - /* Optical Element Descriptor, Tx Bias Current */ - rsp_payload->optical_elmt_desc[2].desc_tag = cpu_to_be32(0x10007); - rsp_payload->optical_elmt_desc[2].desc_len = - cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); - /* Optical Element Descriptor, Tx Power */ - rsp_payload->optical_elmt_desc[3].desc_tag = cpu_to_be32(0x10007); - rsp_payload->optical_elmt_desc[3].desc_len = - cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); - /* Optical Element Descriptor, Rx Power */ - rsp_payload->optical_elmt_desc[4].desc_tag = cpu_to_be32(0x10007); - rsp_payload->optical_elmt_desc[4].desc_len = - cpu_to_be32(RDP_DESC_LEN(*rsp_payload->optical_elmt_desc)); - - if (sfp) { - memset(sfp, 0, SFP_RTDI_LEN); - rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 0, 64, 0); - if (!rval) { - __be16 *trx = (__force __be16 *)sfp; /* already be16 */ - - /* Optical Element Descriptor, Temperature */ - rsp_payload->optical_elmt_desc[0].high_alarm = trx[0]; - rsp_payload->optical_elmt_desc[0].low_alarm = trx[1]; - rsp_payload->optical_elmt_desc[0].high_warn = trx[2]; - rsp_payload->optical_elmt_desc[0].low_warn = trx[3]; - rsp_payload->optical_elmt_desc[0].element_flags = - cpu_to_be32(1 << 28); - - /* Optical Element Descriptor, Voltage */ - rsp_payload->optical_elmt_desc[1].high_alarm = trx[4]; - rsp_payload->optical_elmt_desc[1].low_alarm = trx[5]; - rsp_payload->optical_elmt_desc[1].high_warn = trx[6]; - rsp_payload->optical_elmt_desc[1].low_warn = trx[7]; - rsp_payload->optical_elmt_desc[1].element_flags = - cpu_to_be32(2 << 28); - - /* Optical Element Descriptor, Tx Bias Current */ - rsp_payload->optical_elmt_desc[2].high_alarm = trx[8]; - rsp_payload->optical_elmt_desc[2].low_alarm = trx[9]; - rsp_payload->optical_elmt_desc[2].high_warn = trx[10]; - rsp_payload->optical_elmt_desc[2].low_warn = trx[11]; - rsp_payload->optical_elmt_desc[2].element_flags = - cpu_to_be32(3 << 28); - - /* Optical Element Descriptor, Tx Power */ - rsp_payload->optical_elmt_desc[3].high_alarm = trx[12]; - rsp_payload->optical_elmt_desc[3].low_alarm = trx[13]; - rsp_payload->optical_elmt_desc[3].high_warn = trx[14]; - rsp_payload->optical_elmt_desc[3].low_warn = trx[15]; - rsp_payload->optical_elmt_desc[3].element_flags = - cpu_to_be32(4 << 28); - - /* Optical Element Descriptor, Rx Power */ - rsp_payload->optical_elmt_desc[4].high_alarm = trx[16]; - rsp_payload->optical_elmt_desc[4].low_alarm = trx[17]; - rsp_payload->optical_elmt_desc[4].high_warn = trx[18]; - rsp_payload->optical_elmt_desc[4].low_warn = trx[19]; - rsp_payload->optical_elmt_desc[4].element_flags = - cpu_to_be32(5 << 28); - } - - memset(sfp, 0, SFP_RTDI_LEN); - rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa2, 112, 64, 0); - if (!rval) { - /* Temperature high/low alarm/warning */ - rsp_payload->optical_elmt_desc[0].element_flags |= - cpu_to_be32( - (sfp[0] >> 7 & 1) << 3 | - (sfp[0] >> 6 & 1) << 2 | - (sfp[4] >> 7 & 1) << 1 | - (sfp[4] >> 6 & 1) << 0); - - /* Voltage high/low alarm/warning */ - rsp_payload->optical_elmt_desc[1].element_flags |= - cpu_to_be32( - (sfp[0] >> 5 & 1) << 3 | - (sfp[0] >> 4 & 1) << 2 | - (sfp[4] >> 5 & 1) << 1 | - (sfp[4] >> 4 & 1) << 0); - - /* Tx Bias Current high/low alarm/warning */ - rsp_payload->optical_elmt_desc[2].element_flags |= - 
cpu_to_be32( - (sfp[0] >> 3 & 1) << 3 | - (sfp[0] >> 2 & 1) << 2 | - (sfp[4] >> 3 & 1) << 1 | - (sfp[4] >> 2 & 1) << 0); - - /* Tx Power high/low alarm/warning */ - rsp_payload->optical_elmt_desc[3].element_flags |= - cpu_to_be32( - (sfp[0] >> 1 & 1) << 3 | - (sfp[0] >> 0 & 1) << 2 | - (sfp[4] >> 1 & 1) << 1 | - (sfp[4] >> 0 & 1) << 0); - - /* Rx Power high/low alarm/warning */ - rsp_payload->optical_elmt_desc[4].element_flags |= - cpu_to_be32( - (sfp[1] >> 7 & 1) << 3 | - (sfp[1] >> 6 & 1) << 2 | - (sfp[5] >> 7 & 1) << 1 | - (sfp[5] >> 6 & 1) << 0); - } - } - - /* Optical Product Data Descriptor */ - rsp_payload->optical_prod_desc.desc_tag = cpu_to_be32(0x10008); - rsp_payload->optical_prod_desc.desc_len = - cpu_to_be32(RDP_DESC_LEN(rsp_payload->optical_prod_desc)); - - if (sfp) { - memset(sfp, 0, SFP_RTDI_LEN); - rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 20, 64, 0); - if (!rval) { - memcpy(rsp_payload->optical_prod_desc.vendor_name, - sfp + 0, - sizeof(rsp_payload->optical_prod_desc.vendor_name)); - memcpy(rsp_payload->optical_prod_desc.part_number, - sfp + 20, - sizeof(rsp_payload->optical_prod_desc.part_number)); - memcpy(rsp_payload->optical_prod_desc.revision, - sfp + 36, - sizeof(rsp_payload->optical_prod_desc.revision)); - memcpy(rsp_payload->optical_prod_desc.serial_number, - sfp + 48, - sizeof(rsp_payload->optical_prod_desc.serial_number)); - } - - memset(sfp, 0, SFP_RTDI_LEN); - rval = qla2x00_read_sfp(vha, sfp_dma, sfp, 0xa0, 84, 8, 0); - if (!rval) { - memcpy(rsp_payload->optical_prod_desc.date, - sfp + 0, - sizeof(rsp_payload->optical_prod_desc.date)); - } - } - -send: - ql_dbg(ql_dbg_init, vha, 0x0183, - "Sending ELS Response to RDP Request...\n"); - ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0184, - "-------- ELS RSP -------\n"); - ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0185, - rsp_els, sizeof(*rsp_els)); - ql_dbg(ql_dbg_init + ql_dbg_verbose, vha, 0x0186, - "-------- ELS RSP PAYLOAD -------\n"); - ql_dump_buffer(ql_dbg_init + ql_dbg_verbose, vha, 0x0187, - rsp_payload, rsp_payload_length); - - rval = qla2x00_issue_iocb(vha, rsp_els, rsp_els_dma, 0); - - if (rval) { - ql_log(ql_log_warn, vha, 0x0188, - "%s: iocb failed to execute -> %x\n", __func__, rval); - } else if (rsp_els->comp_status) { - ql_log(ql_log_warn, vha, 0x0189, - "%s: iocb failed to complete -> completion=%#x subcode=(%#x,%#x)\n", - __func__, rsp_els->comp_status, - rsp_els->error_subcode_1, rsp_els->error_subcode_2); - } else { - ql_dbg(ql_dbg_init, vha, 0x018a, "%s: done.\n", __func__); - } - -dealloc: - if (stat) - dma_free_coherent(&ha->pdev->dev, sizeof(*stat), - stat, stat_dma); - if (sfp) - dma_free_coherent(&ha->pdev->dev, SFP_RTDI_LEN, - sfp, sfp_dma); - if (rsp_payload) - dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_payload), - rsp_payload, rsp_payload_dma); - if (rsp_els) - dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), - rsp_els, rsp_els_dma); -} - void qla24xx_free_purex_item(struct purex_item *item) { @@ -6564,6 +6571,13 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) struct pci_dev *pdev = ha->pdev; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); + /* + * if UNLOAD flag is already set, then continue unload, + * where it was set first. 
+ */ + if (test_bit(UNLOADING, &base_vha->dpc_flags)) + return; + ql_log(ql_log_warn, base_vha, 0x015b, "Disabling adapter.\n"); @@ -6574,15 +6588,10 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) return; } - /* - * if UNLOADING flag is already set, then continue unload, - * where it was set first. - */ - if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags)) - return; - qla2x00_wait_for_sess_deletion(base_vha); + set_bit(UNLOADING, &base_vha->dpc_flags); + qla2x00_delete_all_vps(ha, base_vha); qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16); @@ -6593,6 +6602,8 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work) if (base_vha->timer_active) qla2x00_stop_timer(base_vha); + if (base_vha->perf_timer_active) + qla2x00_stop_perf_timer(base_vha); base_vha->flags.online = 0; @@ -6655,11 +6666,14 @@ qla2x00_do_dpc(void *data) set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { - ql_dbg(ql_dbg_dpc, base_vha, 0x4000, + ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4000, "DPC handler sleeping.\n"); schedule(); + if (test_and_clear_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags)) + qla_pci_set_eeh_busy(base_vha); + if (!base_vha->flags.init_done || ha->flags.mbox_busy) goto end_loop; @@ -6678,6 +6692,7 @@ qla2x00_do_dpc(void *data) if (test_bit(UNLOADING, &base_vha->dpc_flags)) break; + if (IS_P3P_TYPE(ha)) { if (IS_QLA8044(ha)) { if (test_and_clear_bit(ISP_UNRECOVERABLE, @@ -6806,7 +6821,6 @@ qla2x00_do_dpc(void *data) if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags))) { - base_vha->flags.online = 1; ql_dbg(ql_dbg_dpc, base_vha, 0x4007, "ISP abort scheduled.\n"); if (ha->isp_ops->abort_isp(base_vha)) { @@ -6830,11 +6844,6 @@ qla2x00_do_dpc(void *data) } } - if (test_and_clear_bit(FCPORT_UPDATE_NEEDED, - &base_vha->dpc_flags)) { - qla2x00_update_fcports(base_vha); - } - if (IS_QLAFX00(ha)) goto loop_resync_check; @@ -6953,31 +6962,54 @@ qla2x00_do_dpc(void *data) mutex_unlock(&ha->mq_lock); } - if (test_and_clear_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, - &base_vha->dpc_flags)) { - ql_log(ql_log_info, base_vha, 0xffffff, - "nvme: SET ZIO Activity exchange threshold to %d.\n", - ha->nvme_last_rptd_aen); - if (qla27xx_set_zio_threshold(base_vha, - ha->nvme_last_rptd_aen)) { - ql_log(ql_log_info, base_vha, 0xffffff, - "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n", - ha->nvme_last_rptd_aen); - } - } - if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, &base_vha->dpc_flags)) { + u16 threshold = ha->nvme_last_rptd_aen + ha->last_zio_threshold; + if (threshold > ha->orig_fw_xcb_count) + threshold = ha->orig_fw_xcb_count; + ql_log(ql_log_info, base_vha, 0xffffff, "SET ZIO Activity exchange threshold to %d.\n", - ha->last_zio_threshold); - qla27xx_set_zio_threshold(base_vha, - ha->last_zio_threshold); + threshold); + if (qla27xx_set_zio_threshold(base_vha, threshold)) { + ql_log(ql_log_info, base_vha, 0xffffff, + "Unable to SET ZIO Activity exchange threshold to %d.\n", + threshold); + } } if (!IS_QLAFX00(ha)) qla2x00_do_dpc_all_vps(base_vha); + if (test_and_clear_bit(SCM_SEND_EDC, &base_vha->dpc_flags)) { + if (QLA_DRV_SEND_ELS(ha)) { + ql_dbg(ql_dbg_dpc, base_vha, 0x4018, + "SCM ELS EDC scheduled. retry:%d\n",base_vha->hw->edc_retry_cnt); + qla2xxx_scm_send_edc_els(base_vha); + ql_dbg(ql_dbg_dpc, base_vha, 0x4019, + "SCM ELS EDC Sent\n"); + } + } + + if (test_and_clear_bit(SCM_SEND_RDF, &base_vha->dpc_flags)) { + if (QLA_DRV_SEND_ELS(ha)) { + ql_dbg(ql_dbg_dpc, base_vha, 0x4018, + "SCM ELS RDF scheduled. 
retry:%d\n",base_vha->rdf_retry_cnt); + qla2xxx_scm_send_rdf_els(base_vha); + ql_dbg(ql_dbg_dpc, base_vha, 0x4019, + "SCM ELS RDF Sent\n"); + } + } + + if (test_bit(SCM_NOTIFY_FW, &base_vha->dpc_flags)) { + ql_dbg(ql_dbg_dpc, base_vha, 0x4016, + "SCM update scheduled.\n"); + qla2xxx_update_scm_fcport(base_vha); + clear_bit(SCM_NOTIFY_FW, &base_vha->dpc_flags); + ql_dbg(ql_dbg_dpc, base_vha, 0x4017, + "SCM update end.\n"); + } + if (test_and_clear_bit(N2N_LINK_RESET, &base_vha->dpc_flags)) { qla2x00_lip_reset(base_vha); @@ -7039,6 +7071,105 @@ qla2x00_rst_aen(scsi_qla_host_t *vha) } } +static bool qla_do_hb(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + u32 cmpl_cnt; + u16 i; + bool do_hb = false; + + /* + * Allow do_hb only if we don’t have any active interrupts, but there + * are still IOs outstanding with firmware. + */ + cmpl_cnt = ha->base_qpair->cmd_completion_cnt; + if (cmpl_cnt == ha->base_qpair->prev_completion_cnt && + cmpl_cnt != ha->base_qpair->cmd_cnt) { + do_hb = true; + goto skip; + } + ha->base_qpair->prev_completion_cnt = cmpl_cnt; + + for (i = 0; i < ha->max_qpairs; i++) { + if (ha->queue_pair_map[i]) { + cmpl_cnt = ha->queue_pair_map[i]->cmd_completion_cnt; + if (cmpl_cnt == ha->queue_pair_map[i]->prev_completion_cnt && + cmpl_cnt != ha->queue_pair_map[i]->cmd_cnt) { + do_hb = true; + break; + } + ha->queue_pair_map[i]->prev_completion_cnt = cmpl_cnt; + } + } + +skip: + return do_hb; +} + +static void qla_heart_beat(struct scsi_qla_host *vha, u16 dpc_started) +{ + struct qla_hw_data *ha = vha->hw; + + if (vha->vp_idx || (ql2xextended_error_logging & BIT_1)) + return; + + if (vha->hw->flags.eeh_busy || qla2x00_chip_is_down(vha)) + return; + + /* + * dpc thread can not run if HB is running at the same time. + * We also do not want to starve hb task. Therefore, do hb + * task at least once every 5 seconds. + */ + if (dpc_started && time_before(jiffies, ha->last_hb_run_jiffies + 5*HZ)) + return; + + if (qla_do_hb(vha)) { + ha->last_hb_run_jiffies = jiffies; + queue_work(ha->wq, &ha->hb_work); + } + + return; +} + +static void qla_wind_down_chip(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + if (!ha->flags.eeh_busy) + return; + if (ha->pci_error_state) + /* system is trying to recover */ + return; + + /* + * Current system is not handling PCIE error. At this point, this is + * best effort to wind down the adapter. 
+ */ + if (time_after_eq(jiffies, ha->eeh_jif + ql2xdelay_before_pci_error_handling * HZ) && + !ha->flags.eeh_flush) { + ql_log(ql_log_info, vha, 0x9009, + "PCI Error detected, attempting to reset hardware.\n"); + + ha->isp_ops->reset_chip(vha); + ha->isp_ops->disable_intrs(ha); + + ha->flags.eeh_flush = EEH_FLUSH_RDY; + ha->eeh_jif = jiffies; + + } else if (ha->flags.eeh_flush == EEH_FLUSH_RDY && + time_after_eq(jiffies, ha->eeh_jif + 5 * HZ)) { + pci_clear_master(ha->pdev); + + /* flush all command */ + qla2x00_abort_isp_cleanup(vha); + ha->flags.eeh_flush = EEH_FLUSH_DONE; + + ql_log(ql_log_info, vha, 0x900a, + "PCI Error handling complete, all IOs aborted.\n"); + } +} + /************************************************************************** * qla2x00_timer * @@ -7048,9 +7179,9 @@ qla2x00_rst_aen(scsi_qla_host_t *vha) * Context: Interrupt ***************************************************************************/ void -qla2x00_timer(struct timer_list *t) +qla2x00_timer(qla_timer_arg_t t) { - scsi_qla_host_t *vha = from_timer(vha, t, timer); + scsi_qla_host_t *vha = qla_from_timer(vha, t, timer); unsigned long cpu_flags = 0; int start_dpc = 0; int index; @@ -7058,8 +7189,12 @@ qla2x00_timer(struct timer_list *t) uint16_t w; struct qla_hw_data *ha = vha->hw; struct req_que *req; + unsigned long flags; + fc_port_t *fcport = NULL; if (ha->flags.eeh_busy) { + qla_wind_down_chip(vha); + ql_dbg(ql_dbg_timer, vha, 0x6000, "EEH = %d, restarting timer.\n", ha->flags.eeh_busy); @@ -7089,6 +7224,20 @@ qla2x00_timer(struct timer_list *t) if (!vha->vp_idx && IS_QLAFX00(ha)) qlafx00_timer_routine(vha); + if (!vha->vp_idx && IS_SCM_CAPABLE(ha)) + qla2xxx_scmr_flow_control(vha); + + if (vha->link_down_time < QLA2XX_MAX_LINK_DOWN_TIME) { + vha->link_down_time++; + } + + spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->tgt_link_down_time < QLA2XX_MAX_LINK_DOWN_TIME) + fcport->tgt_link_down_time++; + } + spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + /* Loop down handler. */ if (atomic_read(&vha->loop_down_timer) > 0 && !(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && @@ -7145,7 +7294,7 @@ qla2x00_timer(struct timer_list *t) /* if the loop has been down for 4 minutes, reinit adapter */ if (atomic_dec_and_test(&vha->loop_down_timer) != 0) { - if (!(vha->device_flags & DFLG_NO_CABLE)) { + if (!(vha->device_flags & DFLG_NO_CABLE) && !vha->vp_idx) { ql_log(ql_log_warn, vha, 0x6009, "Loop down - aborting ISP.\n"); @@ -7170,6 +7319,10 @@ qla2x00_timer(struct timer_list *t) } } + /* check if edif running */ + if (vha->hw->flags.edif_enabled) + qla_edif_timer(vha); + /* Process any deferred work. 
*/ if (!list_empty(&vha->work_list)) { unsigned long flags; @@ -7190,20 +7343,20 @@ qla2x00_timer(struct timer_list *t) index = atomic_read(&ha->nvme_active_aen_cnt); if (!vha->vp_idx && (index != ha->nvme_last_rptd_aen) && - (index >= DEFAULT_ZIO_THRESHOLD) && ha->zio_mode == QLA_ZIO_MODE_6 && !ha->flags.host_shutting_down) { + ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt); ql_log(ql_log_info, vha, 0x3002, "nvme: Sched: Set ZIO exchange threshold to %d.\n", ha->nvme_last_rptd_aen); - ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt); - set_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); + set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags); start_dpc++; } if (!vha->vp_idx && atomic_read(&ha->zio_threshold) != ha->last_zio_threshold && - IS_ZIO_THRESHOLD_CAPABLE(ha)) { + ha->zio_mode == QLA_ZIO_MODE_6 && + IS_ZIO_THRESHOLD_CAPABLE(ha) ) { ql_log(ql_log_info, vha, 0x3002, "Sched: Set ZIO exchange threshold to %d.\n", ha->last_zio_threshold); @@ -7212,40 +7365,49 @@ qla2x00_timer(struct timer_list *t) start_dpc++; } + /* borrowing w to signify dpc will run */ + w = 0; /* Schedule the DPC routine if needed */ if ((test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) || test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) || - test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags) || start_dpc || test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) || test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags) || test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) || test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || test_bit(VP_DPC_NEEDED, &vha->dpc_flags) || + test_bit(SCM_NOTIFY_FW, &vha->dpc_flags) || test_bit(RELOGIN_NEEDED, &vha->dpc_flags) || + test_bit(SCM_SEND_RDF, &vha->dpc_flags) || + test_bit(SCM_SEND_EDC, &vha->dpc_flags) || test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags))) { ql_dbg(ql_dbg_timer, vha, 0x600b, "isp_abort_needed=%d loop_resync_needed=%d " - "fcport_update_needed=%d start_dpc=%d " - "reset_marker_needed=%d", + "start_dpc=%d reset_marker_needed=%d", test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags), test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags), - test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags), - start_dpc, - test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)); + start_dpc, test_bit(RESET_MARKER_NEEDED, &vha->dpc_flags)); ql_dbg(ql_dbg_timer, vha, 0x600c, "beacon_blink_needed=%d isp_unrecoverable=%d " "fcoe_ctx_reset_needed=%d vp_dpc_needed=%d " - "relogin_needed=%d, Process_purex_iocb=%d.\n", + "relogin_needed=%d, process_purex_iocb=%d " + "scm_notify_fw=%d, scm_send_edc=:%d " + "scm_send_rdf = %d\n", test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags), test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags), test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags), test_bit(VP_DPC_NEEDED, &vha->dpc_flags), test_bit(RELOGIN_NEEDED, &vha->dpc_flags), - test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)); + test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags), + test_bit(SCM_NOTIFY_FW, &vha->dpc_flags), + test_bit(SCM_SEND_EDC, &vha->dpc_flags), + test_bit(SCM_SEND_RDF, &vha->dpc_flags)); qla2xxx_wake_dpc(vha); + w = 1; } + qla_heart_beat(vha, w); + qla2x00_restart_timer(vha, WATCH_INTERVAL); } @@ -7365,11 +7527,13 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); struct qla_qpair *qpair = NULL; - struct scsi_qla_host *vp; + struct scsi_qla_host *vp, *tvp; fc_port_t *fcport; int i; unsigned long flags; + ql_dbg(ql_dbg_aer, vha, 0x9000, + "%s\n", __func__); ha->chip_reset++; ha->base_qpair->chip_reset = ha->chip_reset; @@ -7379,34 
+7543,20 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha) ha->base_qpair->chip_reset; } - /* purge MBox commands */ - if (atomic_read(&ha->num_pend_mbx_stage3)) { - clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); - complete(&ha->mbx_intr_comp); - } - - i = 0; - - while (atomic_read(&ha->num_pend_mbx_stage3) || - atomic_read(&ha->num_pend_mbx_stage2) || - atomic_read(&ha->num_pend_mbx_stage1)) { - msleep(20); - i++; - if (i > 50) - break; - } - - ha->flags.purge_mbox = 0; + /* purge mailbox might take a while. Slot Reset/chip reset + will take care of the purge */ mutex_lock(&ha->mq_lock); + ha->base_qpair->online = 0; list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) qpair->online = 0; + wmb(); mutex_unlock(&ha->mq_lock); qla2x00_mark_all_devices_lost(vha); spin_lock_irqsave(&ha->vport_slock, flags); - list_for_each_entry(vp, &ha->vp_list, list) { + list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); qla2x00_mark_all_devices_lost(vp); @@ -7420,7 +7570,7 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha) fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); spin_lock_irqsave(&ha->vport_slock, flags); - list_for_each_entry(vp, &ha->vp_list, list) { + list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); list_for_each_entry(fcport, &vp->vp_fcports, list) @@ -7437,28 +7587,32 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { scsi_qla_host_t *vha = pci_get_drvdata(pdev); struct qla_hw_data *ha = vha->hw; + int ret = PCI_ERS_RESULT_NEED_RESET; - ql_dbg(ql_dbg_aer, vha, 0x9000, + ql_log(ql_log_warn, vha, 0x9000, "PCI error detected, state %x.\n", state); + ha->pci_error_state = QLA_PCI_ERR_DETECTED; if (!atomic_read(&pdev->enable_cnt)) { ql_log(ql_log_info, vha, 0xffff, "PCI device is disabled,state %x\n", state); - return PCI_ERS_RESULT_NEED_RESET; + ret = PCI_ERS_RESULT_NEED_RESET; + goto out; } switch (state) { case pci_channel_io_normal: - ha->flags.eeh_busy = 0; + qla_pci_set_eeh_busy(vha); if (ql2xmqsupport || ql2xnvmeenable) { set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } - return PCI_ERS_RESULT_CAN_RECOVER; + ret = PCI_ERS_RESULT_CAN_RECOVER; + break; case pci_channel_io_frozen: - ha->flags.eeh_busy = 1; - qla_pci_error_cleanup(vha); - return PCI_ERS_RESULT_NEED_RESET; + qla_pci_set_eeh_busy(vha); + ret = PCI_ERS_RESULT_NEED_RESET; + break; case pci_channel_io_perm_failure: ha->flags.pci_channel_io_perm_failure = 1; qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); @@ -7466,9 +7620,12 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) set_bit(QPAIR_ONLINE_CHECK_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); } - return PCI_ERS_RESULT_DISCONNECT; + ret = PCI_ERS_RESULT_DISCONNECT; } - return PCI_ERS_RESULT_NEED_RESET; +out: + ql_dbg(ql_dbg_aer, vha, 0x600d, + "PCI error detected returning [%x].\n", ret); + return ret; } static pci_ers_result_t @@ -7482,20 +7639,31 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24; + ql_log(ql_log_warn, base_vha, 0x9000, + "mmio enabled\n"); + + ha->pci_error_state = QLA_PCI_MMIO_ENABLED; + if (IS_QLA82XX(ha)) return PCI_ERS_RESULT_RECOVERED; + if (qla2x00_isp_reg_stat(ha)) { + ql_log(ql_log_info, base_vha, 0x803f, + "During mmio enabled, PCI/Register disconnect 
still detected.\n"); + goto out; + } + spin_lock_irqsave(&ha->hardware_lock, flags); if (IS_QLA2100(ha) || IS_QLA2200(ha)){ - stat = rd_reg_word(®->hccr); + stat = RD_REG_DWORD(®->hccr); if (stat & HCCR_RISC_PAUSE) risc_paused = 1; } else if (IS_QLA23XX(ha)) { - stat = rd_reg_dword(®->u.isp2300.host_status); + stat = RD_REG_DWORD(®->u.isp2300.host_status); if (stat & HSR_RISC_PAUSED) risc_paused = 1; } else if (IS_FWI2_CAPABLE(ha)) { - stat = rd_reg_dword(®24->host_status); + stat = RD_REG_DWORD(®24->host_status); if (stat & HSRX_RISC_PAUSED) risc_paused = 1; } @@ -7504,11 +7672,13 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev) if (risc_paused) { ql_log(ql_log_info, base_vha, 0x9003, "RISC paused -- mmio_enabled, Dumping firmware.\n"); - qla2xxx_dump_fw(base_vha); - - return PCI_ERS_RESULT_NEED_RESET; - } else - return PCI_ERS_RESULT_RECOVERED; + ha->isp_ops->fw_dump(base_vha, 0); + } +out: + /* set PCI_ERS_RESULT_NEED_RESET to trigger call to qla2xxx_pci_slot_reset */ + ql_dbg(ql_dbg_aer, base_vha, 0x600d, + "mmio enabled returning.\n"); + return PCI_ERS_RESULT_NEED_RESET; } static pci_ers_result_t @@ -7520,9 +7690,10 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev) int rc; struct qla_qpair *qpair = NULL; - ql_dbg(ql_dbg_aer, base_vha, 0x9004, + ql_log(ql_log_warn, base_vha, 0x9004, "Slot Reset.\n"); + ha->pci_error_state = QLA_PCI_SLOT_RESET; /* Workaround: qla2xxx driver which access hardware earlier * needs error state to be pci_channel_io_online. * Otherwise mailbox command timesout. @@ -7556,16 +7727,24 @@ qla2xxx_pci_slot_reset(struct pci_dev *pdev) qpair->online = 1; mutex_unlock(&ha->mq_lock); + ha->flags.eeh_busy = 0; base_vha->flags.online = 1; set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); - if (ha->isp_ops->abort_isp(base_vha) == QLA_SUCCESS) - ret = PCI_ERS_RESULT_RECOVERED; + ha->isp_ops->abort_isp(base_vha); clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + if (qla2x00_isp_reg_stat(ha)) { + ha->flags.eeh_busy = 1; + qla_pci_error_cleanup(base_vha); + ql_log(ql_log_warn, base_vha, 0x9005, + "Device unable to recover from PCI error.\n"); + } else + ret = PCI_ERS_RESULT_RECOVERED; + exit_slot_reset: ql_dbg(ql_dbg_aer, base_vha, 0x900e, - "slot_reset return %x.\n", ret); + "Slot Reset returning %x.\n", ret); return ret; } @@ -7577,80 +7756,57 @@ qla2xxx_pci_resume(struct pci_dev *pdev) struct qla_hw_data *ha = base_vha->hw; int ret; - ql_dbg(ql_dbg_aer, base_vha, 0x900f, - "pci_resume.\n"); + ql_log(ql_log_warn, base_vha, 0x900f, + "Pci Resume.\n"); - ha->flags.eeh_busy = 0; ret = qla2x00_wait_for_hba_online(base_vha); if (ret != QLA_SUCCESS) { ql_log(ql_log_fatal, base_vha, 0x9002, "The device failed to resume I/O from slot/link_reset.\n"); } + ha->pci_error_state = QLA_PCI_RESUME; + ql_dbg(ql_dbg_aer, base_vha, 0x600d, + "Pci Resume returning.\n"); } -static void -qla_pci_reset_prepare(struct pci_dev *pdev) +void qla_pci_set_eeh_busy(struct scsi_qla_host *vha) { - scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); - struct qla_hw_data *ha = base_vha->hw; - struct qla_qpair *qpair; - - ql_log(ql_log_warn, base_vha, 0xffff, - "%s.\n", __func__); - - /* - * PCI FLR/function reset is about to reset the - * slot. Stop the chip to stop all DMA access. - * It is assumed that pci_reset_done will be called - * after FLR to resume Chip operation. 
- */ - ha->flags.eeh_busy = 1; - mutex_lock(&ha->mq_lock); - list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) - qpair->online = 0; - mutex_unlock(&ha->mq_lock); - - set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); - qla2x00_abort_isp_cleanup(base_vha); - qla2x00_abort_all_cmds(base_vha, DID_RESET << 16); -} + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); + bool do_cleanup = false; + unsigned long flags; -static void -qla_pci_reset_done(struct pci_dev *pdev) -{ - scsi_qla_host_t *base_vha = pci_get_drvdata(pdev); - struct qla_hw_data *ha = base_vha->hw; - struct qla_qpair *qpair; + if (ha->flags.eeh_busy) + return; - ql_log(ql_log_warn, base_vha, 0xffff, - "%s.\n", __func__); + spin_lock_irqsave(&base_vha->work_lock, flags); + if (!ha->flags.eeh_busy) { + ha->eeh_jif = jiffies; + ha->flags.eeh_flush = 0; - /* - * FLR just completed by PCI layer. Resume adapter - */ - ha->flags.eeh_busy = 0; - mutex_lock(&ha->mq_lock); - list_for_each_entry(qpair, &base_vha->qp_list, qp_list_elem) - qpair->online = 1; - mutex_unlock(&ha->mq_lock); + ha->flags.eeh_busy = 1; + do_cleanup = true; + } + spin_unlock_irqrestore(&base_vha->work_lock, flags); - base_vha->flags.online = 1; - ha->isp_ops->abort_isp(base_vha); - clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags); + if (do_cleanup) + qla_pci_error_cleanup(base_vha); } -static int qla2xxx_map_queues(struct Scsi_Host *shost) +/* this routine will schedule a task to pause IO from interrupt context + if caller sees a PCIE error event (register read = 0xf's) + */ +void qla_schedule_eeh_work(struct scsi_qla_host *vha) { - int rc; - scsi_qla_host_t *vha = (scsi_qla_host_t *)shost->hostdata; - struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT]; + struct qla_hw_data *ha = vha->hw; + struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); - if (USER_CTRL_IRQ(vha->hw) || !vha->hw->mqiobase) - rc = blk_mq_map_queues(qmap); - else - rc = blk_mq_pci_map_queues(qmap, vha->hw->pdev, vha->irq_offset); - return rc; + if (ha->flags.eeh_busy) + return; + + set_bit(DO_EEH_RECOVERY, &base_vha->dpc_flags); + qla2xxx_wake_dpc(base_vha); } struct scsi_host_template qla2xxx_driver_template = { @@ -7658,7 +7814,6 @@ struct scsi_host_template qla2xxx_driver_template = { .name = QLA2XXX_DRIVER_NAME, .queuecommand = qla2xxx_queuecommand, - .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = qla2xxx_eh_abort, .eh_device_reset_handler = qla2xxx_eh_device_reset, .eh_target_reset_handler = qla2xxx_eh_target_reset, @@ -7671,8 +7826,6 @@ struct scsi_host_template qla2xxx_driver_template = { .slave_destroy = qla2xxx_slave_destroy, .scan_finished = qla2xxx_scan_finished, .scan_start = qla2xxx_scan_start, - .change_queue_depth = scsi_change_queue_depth, - .map_queues = qla2xxx_map_queues, .this_id = -1, .cmd_per_lun = 3, .sg_tablesize = SG_ALL, @@ -7681,8 +7834,9 @@ struct scsi_host_template qla2xxx_driver_template = { .shost_attrs = qla2x00_host_attrs, .supported_mode = MODE_INITIATOR, - .track_queue_depth = 1, .cmd_size = sizeof(srb_t), + + qla_scsi_templ_compat_entries }; static const struct pci_error_handlers qla2xxx_err_handler = { @@ -7690,8 +7844,7 @@ static const struct pci_error_handlers qla2xxx_err_handler = { .mmio_enabled = qla2xxx_pci_mmio_enabled, .slot_reset = qla2xxx_pci_slot_reset, .resume = qla2xxx_pci_resume, - .reset_prepare = qla_pci_reset_prepare, - .reset_done = qla_pci_reset_done, + qla_pci_err_handler_compat_entries }; static struct pci_device_id qla2xxx_pci_tbl[] = { @@ -7722,6 
+7875,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2281) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2089) }, { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2289) }, + { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2971) }, { 0 }, }; MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl); @@ -7751,19 +7905,13 @@ qla2x00_module_init(void) { int ret = 0; - BUILD_BUG_ON(sizeof(cmd_a64_entry_t) != 64); BUILD_BUG_ON(sizeof(cmd_entry_t) != 64); BUILD_BUG_ON(sizeof(cont_a64_entry_t) != 64); BUILD_BUG_ON(sizeof(cont_entry_t) != 64); BUILD_BUG_ON(sizeof(init_cb_t) != 96); - BUILD_BUG_ON(sizeof(mrk_entry_t) != 64); BUILD_BUG_ON(sizeof(ms_iocb_entry_t) != 64); BUILD_BUG_ON(sizeof(request_t) != 64); - BUILD_BUG_ON(sizeof(struct abort_entry_24xx) != 64); - BUILD_BUG_ON(sizeof(struct abort_iocb_entry_fx00) != 64); - BUILD_BUG_ON(sizeof(struct abts_entry_24xx) != 64); BUILD_BUG_ON(sizeof(struct access_chip_84xx) != 64); - BUILD_BUG_ON(sizeof(struct access_chip_rsp_84xx) != 64); BUILD_BUG_ON(sizeof(struct cmd_bidir) != 64); BUILD_BUG_ON(sizeof(struct cmd_nvme) != 64); BUILD_BUG_ON(sizeof(struct cmd_type_6) != 64); @@ -7771,70 +7919,17 @@ qla2x00_module_init(void) BUILD_BUG_ON(sizeof(struct cmd_type_7_fx00) != 64); BUILD_BUG_ON(sizeof(struct cmd_type_crc_2) != 64); BUILD_BUG_ON(sizeof(struct ct_entry_24xx) != 64); - BUILD_BUG_ON(sizeof(struct ct_fdmi1_hba_attributes) != 2344); - BUILD_BUG_ON(sizeof(struct ct_fdmi2_hba_attributes) != 4424); - BUILD_BUG_ON(sizeof(struct ct_fdmi2_port_attributes) != 4164); - BUILD_BUG_ON(sizeof(struct ct_fdmi_hba_attr) != 260); - BUILD_BUG_ON(sizeof(struct ct_fdmi_port_attr) != 260); - BUILD_BUG_ON(sizeof(struct ct_rsp_hdr) != 16); BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64); - BUILD_BUG_ON(sizeof(struct device_reg_24xx) != 256); - BUILD_BUG_ON(sizeof(struct device_reg_25xxmq) != 24); - BUILD_BUG_ON(sizeof(struct device_reg_2xxx) != 256); - BUILD_BUG_ON(sizeof(struct device_reg_82xx) != 1288); - BUILD_BUG_ON(sizeof(struct device_reg_fx00) != 216); BUILD_BUG_ON(sizeof(struct els_entry_24xx) != 64); - BUILD_BUG_ON(sizeof(struct els_sts_entry_24xx) != 64); BUILD_BUG_ON(sizeof(struct fxdisc_entry_fx00) != 64); - BUILD_BUG_ON(sizeof(struct imm_ntfy_from_isp) != 64); BUILD_BUG_ON(sizeof(struct init_cb_24xx) != 128); BUILD_BUG_ON(sizeof(struct init_cb_81xx) != 128); - BUILD_BUG_ON(sizeof(struct logio_entry_24xx) != 64); - BUILD_BUG_ON(sizeof(struct mbx_entry) != 64); - BUILD_BUG_ON(sizeof(struct mid_init_cb_24xx) != 5252); - BUILD_BUG_ON(sizeof(struct mrk_entry_24xx) != 64); - BUILD_BUG_ON(sizeof(struct nvram_24xx) != 512); - BUILD_BUG_ON(sizeof(struct nvram_81xx) != 512); BUILD_BUG_ON(sizeof(struct pt_ls4_request) != 64); - BUILD_BUG_ON(sizeof(struct pt_ls4_rx_unsol) != 64); - BUILD_BUG_ON(sizeof(struct purex_entry_24xx) != 64); - BUILD_BUG_ON(sizeof(struct qla2100_fw_dump) != 123634); - BUILD_BUG_ON(sizeof(struct qla2300_fw_dump) != 136100); - BUILD_BUG_ON(sizeof(struct qla24xx_fw_dump) != 37976); - BUILD_BUG_ON(sizeof(struct qla25xx_fw_dump) != 39228); - BUILD_BUG_ON(sizeof(struct qla2xxx_fce_chain) != 52); - BUILD_BUG_ON(sizeof(struct qla2xxx_fw_dump) != 136172); - BUILD_BUG_ON(sizeof(struct qla2xxx_mq_chain) != 524); - BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_chain) != 8); - BUILD_BUG_ON(sizeof(struct qla2xxx_mqueue_header) != 12); - BUILD_BUG_ON(sizeof(struct qla2xxx_offld_chain) != 24); - BUILD_BUG_ON(sizeof(struct qla81xx_fw_dump) != 39420); - BUILD_BUG_ON(sizeof(struct 
qla82xx_uri_data_desc) != 28); - BUILD_BUG_ON(sizeof(struct qla82xx_uri_table_desc) != 32); - BUILD_BUG_ON(sizeof(struct qla83xx_fw_dump) != 51196); - BUILD_BUG_ON(sizeof(struct qla_fcp_prio_cfg) != FCP_PRIO_CFG_SIZE); - BUILD_BUG_ON(sizeof(struct qla_fdt_layout) != 128); - BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8); - BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16); - BUILD_BUG_ON(sizeof(struct qla_npiv_entry) != 24); - BUILD_BUG_ON(sizeof(struct qla_npiv_header) != 16); - BUILD_BUG_ON(sizeof(struct rdp_rsp_payload) != 336); BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064); - BUILD_BUG_ON(sizeof(struct sts_entry_24xx) != 64); - BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry) != 64); - BUILD_BUG_ON(sizeof(struct tsk_mgmt_entry_fx00) != 64); BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64); - BUILD_BUG_ON(sizeof(struct verify_chip_rsp_84xx) != 52); BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56); - BUILD_BUG_ON(sizeof(struct vp_config_entry_24xx) != 64); - BUILD_BUG_ON(sizeof(struct vp_ctrl_entry_24xx) != 64); - BUILD_BUG_ON(sizeof(struct vp_rpt_id_entry_24xx) != 64); - BUILD_BUG_ON(sizeof(sts21_entry_t) != 64); - BUILD_BUG_ON(sizeof(sts22_entry_t) != 64); - BUILD_BUG_ON(sizeof(sts_cont_entry_t) != 64); - BUILD_BUG_ON(sizeof(sts_entry_t) != 64); - BUILD_BUG_ON(sizeof(sw_info_t) != 32); - BUILD_BUG_ON(sizeof(target_id_t) != 2); + + qla_tracing_init(); /* Allocate cache for SRBs. */ srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, @@ -7917,6 +8012,7 @@ qla2x00_module_init(void) destroy_cache: kmem_cache_destroy(srb_cachep); + qla_tracing_exit(); return ret; } @@ -7935,6 +8031,8 @@ qla2x00_module_exit(void) fc_release_transport(qla2xxx_transport_template); qlt_exit(); kmem_cache_destroy(srb_cachep); + + qla_tracing_exit(); } module_init(qla2x00_module_init); @@ -7943,6 +8041,7 @@ module_exit(qla2x00_module_exit); MODULE_AUTHOR("QLogic Corporation"); MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver"); MODULE_LICENSE("GPL"); +MODULE_VERSION(QLA2XXX_VERSION); MODULE_FIRMWARE(FW_FILE_ISP21XX); MODULE_FIRMWARE(FW_FILE_ISP22XX); MODULE_FIRMWARE(FW_FILE_ISP2300); diff --git a/drivers/scsi/qla2xxx/qla_scm.c b/drivers/scsi/qla2xxx/qla_scm.c new file mode 100644 index 0000000000000..7b04d32bb6be4 --- /dev/null +++ b/drivers/scsi/qla2xxx/qla_scm.c @@ -0,0 +1,2178 @@ +/* + * QLogic Fibre Channel HBA Driver + * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. + */ +#include "qla_def.h" +#include "qla_gbl.h" + +/* SCM Private Functions */ +static bool +qla2xxx_scmr_check_low_thruput(struct qla_scmr_flow_control *sfc) +{ + bool ret = false; + + if ((atomic64_read(&sfc->perf.bytes_last_sec)) < + (atomic64_read(&sfc->base_bytes) * ql2x_scmr_drop_pct_low_wm)/100) { + ret = true; + sfc->rstats->throttle_hit_low_wm++; + ql_dbg(ql_dbg_scm, sfc->vha, 0x0203, + "USCM: Throughput drop from: %llu to: %llu (MB)\n", + (long long unsigned int) + atomic64_read(&sfc->base_bytes) >> 20, + (long long unsigned int) + atomic64_read(&sfc->perf.bytes_last_sec) >> 20); + } + + return ret; +} + +static bool +qla2xxx_scmr_check_low_wm(struct qla_scmr_flow_control *sfc, + int curr, int base) +{ + bool ret = false; + uint8_t *port_name = qla_scmr_is_tgt(sfc) ? 
sfc->fcport->port_name: sfc->vha->port_name; + + if (sfc->mode == QLA_MODE_Q_DEPTH) { + if (qla_scmr_is_tgt(sfc)) { + if (curr == QLA_MIN_TGT_Q_DEPTH) + ret = true; + } else { + if (curr == QLA_MIN_HBA_Q_DEPTH) + ret = true; + } + } else if (sfc->mode == QLA_MODE_FLOWS) { + if (curr < base * + ql2x_scmr_drop_pct_low_wm/100) { + ret = true; + } + } + + if (ret == true) { + ql_dbg(ql_dbg_scm, sfc->vha, 0x0203, + "USCM: Reached low watermark, permitted: %d baseline: %d for:%8phN\n", + curr, base, port_name); + sfc->rstats->throttle_hit_low_wm++; + } else { + ret = qla2xxx_scmr_check_low_thruput(sfc); + } + return ret; +} + +static void +qla2xxx_change_queue_depth(struct qla_scmr_flow_control *sfc, int new) +{ + if (qla_scmr_is_tgt(sfc)) { + if (new >= QLA_MIN_TGT_Q_DEPTH) + atomic_set(&sfc->scmr_permitted, new); + else + atomic_set(&sfc->scmr_permitted, QLA_MIN_TGT_Q_DEPTH); + } else { + if (new >= QLA_MIN_HBA_Q_DEPTH) + atomic_set(&sfc->scmr_permitted, new); + else + atomic_set(&sfc->scmr_permitted, QLA_MIN_HBA_Q_DEPTH); + } +} + +static int +qla2xxx_calculate_delta(struct qla_scmr_flow_control *sfc, int base) +{ + int delta; + + if (sfc->dir == QLA_DIR_UP) { + if (sfc->level) + delta = (base * ql2x_scmr_up_pct * + (sfc->level)) / 100; + else + delta = (base * ql2x_scmr_up_pct) / 100; + } else { + if (sfc->level) + delta = (base * ql2x_scmr_drop_pct * + (sfc->level)) / 100; + else + delta = (base * ql2x_scmr_drop_pct) / 100; + } + + return (delta ? delta : 1); +} + +bool +qla2xxx_switch_vl(struct qla_scmr_flow_control *sfc, uint8_t vl) +{ + if (sfc->vha->hw->flags.conn_fabric_cisco_er_rdy) { + if (sfc->fcport->vl.v_lane != vl) { + sfc->fcport->vl.v_lane = vl; + qla_scmr_set_notify_fw(sfc); + set_bit(SCM_NOTIFY_FW, + &sfc->vha->dpc_flags); + return true; + } + } + return false; + +} + +DECLARE_ENUM2STR_LOOKUP(qla_get_profile_type, ql_scm_profile_type, + QL_SCM_PROFILE_TYPES_INIT); + +/* Reduce throttle based on IOs/period or bytes/period */ +static void +qla2xxx_scmr_reduce_throttle(struct qla_scmr_flow_control *sfc) +{ + int delta, current_val, new; + bool low_wm = false; + int qla_scmr_profile = sfc->profile.scmr_profile; + uint8_t *port_name = qla_scmr_is_tgt(sfc) ? 
sfc->fcport->port_name: sfc->vha->port_name; + + current_val = new = 0; + + current_val = atomic_read(&sfc->scmr_permitted); + if (current_val) { + low_wm = qla2xxx_scmr_check_low_wm(sfc, current_val, + atomic_read(&sfc->scmr_base)); + } else { + current_val = atomic_read(&sfc->scmr_base); + } + + if (low_wm == true) { + ql_dbg(ql_dbg_scm, sfc->vha, 0x0203, + "USCM: Hit low wm, no throttling \n"); + return; + } + + if (sfc->mode == QLA_MODE_Q_DEPTH) { + if (sfc->dir == QLA_DIR_UP) { + new = current_val - 1; + } else { + /* Profile defines the rate of throttling */ + new = current_val >> sfc->scmr_down_delta[qla_scmr_profile]; + sfc->down_delta = current_val - new; + } + qla2xxx_change_queue_depth(sfc, new); + } else if (sfc->mode == QLA_MODE_FLOWS) { + delta = qla2xxx_calculate_delta(sfc, + atomic_read(&sfc->scmr_base)); + new = current_val - delta; + atomic_set(&sfc->scmr_permitted, new); + } + + sfc->rstats->throttle_down_count++; + ql_dbg(ql_dbg_scm, sfc->vha, 0x0203, + "USCM: Congested, throttling down:%8phN, permitted: %d baseline: %d, profile %s\n", + port_name, atomic_read(&sfc->scmr_permitted), atomic_read(&sfc->scmr_base), + qla_get_profile_type(qla_scmr_profile)); + + return; +} +/* Clear Baseline Throughput */ +static void +qla2xxx_clear_baseline_tp(struct qla_scmr_flow_control *sfc) +{ + atomic64_set(&sfc->base_bytes, 0); +} + +/* Set Baseline Throughput */ +static void +qla2xxx_set_baseline_tp(struct qla_scmr_flow_control *sfc) +{ + atomic64_set(&sfc->base_bytes, + atomic64_read(&sfc->perf.bytes_last_sec)); + ql_dbg(ql_dbg_scm, sfc->vha, 0x0203, + "USCM: Base Bytes %llu (MB)", + (long long unsigned int) + atomic64_read(&sfc->perf.bytes_last_sec) >> 20); +} + + +/* Increase by @ql2x_scmr_up_pct percent, every QLA_SCMR_THROTTLE_PERIOD + * secs. + */ +static void +qla2xxx_scmr_increase_flows(struct qla_scmr_flow_control *sfc) +{ + int delta, current_val, base_val, new; + int qla_scmr_profile = sfc->profile.scmr_profile; + uint8_t *port_name = qla_scmr_is_tgt(sfc) ? sfc->fcport->port_name: sfc->vha->port_name; + + new = 0; + + if (sfc->throttle_period--) + return; + + sfc->throttle_period = sfc->event_period + sfc->event_period_buffer; + current_val = atomic_read(&sfc->scmr_permitted); + base_val = atomic_read(&sfc->scmr_base); + + /* Unlikely */ + if (!current_val) + return; + + if (sfc->mode == QLA_MODE_Q_DEPTH) { + if (qla_scmr_profile) { + delta = sfc->scmr_up_delta[qla_scmr_profile]; + new = current_val + (sfc->down_delta > delta ? + delta : sfc->down_delta - 1); + + } + } else if (sfc->mode == QLA_MODE_FLOWS) { + delta = (base_val * ql2x_scmr_up_pct) / 100; + new = current_val + (delta ? 
delta: 1); + } + + if (new > base_val) { + qla2xxx_scmr_clear_throttle(sfc); + ql_log(ql_log_info, sfc->vha, 0x0203, + "USCM: Clearing throttle for:%8phN \n", port_name); + /* Switch back to Normal */ + if (qla_scmr_is_tgt(sfc)) { + qla2xxx_switch_vl(sfc, VL_NORMAL); + qla2xxx_clear_baseline_tp(sfc); + } + return; + } else { + if (sfc->mode == QLA_MODE_Q_DEPTH) { + qla2xxx_change_queue_depth(sfc, new); + } else if (sfc->mode == QLA_MODE_FLOWS) { + atomic_set(&sfc->scmr_permitted, new); + } + sfc->dir = QLA_DIR_UP; + sfc->rstats->throttle_up_count++; + ql_dbg(ql_dbg_scm, sfc->vha, 0x0203, + "USCM: throttling up, permitted: %d baseline: %d for:%8phN\n", + atomic_read(&sfc->scmr_permitted), base_val, port_name); + } +} + +static void +qla2xxx_check_congestion_timeout(struct qla_scmr_flow_control *sfc) +{ + if (sfc->expiration_jiffies && + (time_after(jiffies, sfc->expiration_jiffies))) { + ql_log(ql_log_info, sfc->vha, 0x0203, + "USCM: Clearing Congestion, event period expired\n"); + qla2xxx_scmr_clear_congn(sfc); + /* If there is no throttling, move to Normal lane */ + if ((sfc->dir == QLA_DIR_NONE) && qla_scmr_is_tgt(sfc)) { + qla2xxx_switch_vl(sfc, VL_NORMAL); + qla2xxx_clear_baseline_tp(sfc); + } + } + +} + +static bool +qla2xxx_check_fpin_event(struct qla_scmr_flow_control *sfc) +{ + if (qla_scmr_get_sig(sfc) == QLA_SIG_CLEAR) { + qla_scmr_clear_sig(sfc, scmr_congn_signal); + qla2xxx_scmr_clear_congn(sfc); + ql_log(ql_log_info, sfc->vha, 0x0203, + "USCM:(H) Clear Congestion for WWN %8phN\n", + sfc->vha->port_name); + if ((sfc->dir == QLA_DIR_NONE) /* There is no throttling */ + && qla_scmr_is_tgt(sfc)) { + qla2xxx_switch_vl(sfc, VL_NORMAL); + qla2xxx_clear_baseline_tp(sfc); + } + } + + if (qla_scmr_get_sig(sfc) == QLA_SIG_CREDIT_STALL) { + qla_scmr_clear_sig(sfc, scmr_congn_signal); + return true; + } else if (qla_scmr_get_sig(sfc) == QLA_SIG_OVERSUBSCRIPTION) { + /* Check if profile policy asks for Global/Targeted Throttling (where relevant) + * Check if Targeted throttling is possible + */ + qla_scmr_clear_sig(sfc, scmr_congn_signal); + return true; + } + return false; +} + +static bool +qla2xxx_check_cn_event(struct qla_scmr_flow_control *sfc) +{ + bool congested = false; + + if (IS_ARB_CAPABLE(sfc->vha->hw)) { + /* Handle ARB Signals */ + if (atomic_read(&sfc->num_sig_warning) >= + QLA_SCMR_WARN_THRESHOLD) { + sfc->level = QLA_CONG_LOW; + sfc->expiration_jiffies = + jiffies + (2 * HZ); + congested = true; + atomic_set(&sfc->num_sig_warning, 0); + } else if (atomic_read(&sfc->num_sig_warning)) { + ql_dbg(ql_dbg_scm, sfc->vha, 0xffff, + "USCM: Low congestion signals (warning): %d\n", + atomic_read(&sfc->num_sig_warning)); + atomic_set(&sfc->num_sig_warning, 0); + } + + if (atomic_read(&sfc->num_sig_alarm) >= + QLA_SCMR_ALARM_THRESHOLD) { + sfc->level = QLA_CONG_HIGH; + sfc->expiration_jiffies = + jiffies + (2 * HZ); + congested = true; + atomic_set(&sfc->num_sig_alarm, 0); + } else if (atomic_read(&sfc->num_sig_alarm)) { + ql_dbg(ql_dbg_scm, sfc->vha, 0xffff, + "USCM: Low congestion signals (alarm) %d\n", + atomic_read(&sfc->num_sig_alarm)); + atomic_set(&sfc->num_sig_alarm, 0); + } + } + + if (congested == false) + congested = qla2xxx_check_fpin_event(sfc); + + return congested; + +} + +#define SCMR_PERIODS_PER_SEC 10 + +static bool +qla2xxx_scmr_set_baseline(struct qla_scmr_flow_control *sfc) +{ + bool ret = false; + + if (sfc->mode == QLA_MODE_Q_DEPTH) { + if (atomic_read(&sfc->perf.max_q_depth) > + QLA_MIN_BASELINE_QDEPTH) + atomic_set(&sfc->scmr_base, + 
atomic_read(&sfc->perf.max_q_depth)); + else + atomic_set(&sfc->scmr_base, + QLA_MIN_BASELINE_QDEPTH); + qla_scmr_set_throttle_qdepth(sfc); + sfc->dir = QLA_DIR_DOWN; + ret = true; + } else if (sfc->mode == QLA_MODE_FLOWS) { + if (atomic_read(&sfc->perf.reqs_last_sec) > + QLA_MIN_BASELINE_IOS) { + atomic_set(&sfc->scmr_base, + atomic_read(&sfc->perf.reqs_last_sec) / + QLA_SCMR_PERIODS_PER_SEC); + qla_scmr_set_reduce_throttle_ios(sfc); + sfc->dir = QLA_DIR_DOWN; + ret = true; + } else if (atomic64_read(&sfc->perf.bytes_last_sec) > + QLA_MIN_BASELINE_BPS) { + atomic_set(&sfc->scmr_base, + atomic64_read(&sfc->perf.bytes_last_sec) / + QLA_SCMR_PERIODS_PER_SEC); + qla_scmr_set_reduce_throttle_bps(sfc); + sfc->dir = QLA_DIR_DOWN; + ret = true; + } + } + if ((ret == true) && (atomic64_read(&sfc->base_bytes) == 0)) + qla2xxx_set_baseline_tp(sfc); + + return ret; +} + +static void +qla2xxx_reduce_flows(struct qla_scmr_flow_control *sfc) +{ + bool throttle = false; + uint8_t *port_name = qla_scmr_is_tgt(sfc) ? sfc->fcport->port_name: sfc->vha->port_name; + + if (sfc->profile.scmr_profile == 0) {/* Monitor profile */ + ql_dbg(ql_dbg_scm, sfc->vha, 0x0203, + "USCM: Congested, No throttling (Monitor profile)\n"); + return; + } + + /* Congestion Signal/FPIN received */ + if (!qla_scmr_reduced_throttle(sfc)) { + throttle = qla2xxx_scmr_set_baseline(sfc); + } else { + throttle = true; + } + + if (throttle == true) + qla2xxx_scmr_reduce_throttle(sfc); + else + ql_log(ql_log_info, sfc->vha, 0x0203, + "USCM: IOs too low, not throttling for WWN %8phN\n", + port_name); + + if (!qla_scmr_is_congested(sfc)) { + ql_log(ql_log_info, sfc->vha, 0x0203, + "USCM: Set Congestion for WWN %8phN\n", port_name); + qla_scmr_set_congested(sfc); + if (qla_scmr_is_tgt(sfc) && + IS_NPVC_CAPABLE(sfc->vha->hw)) { + qla_scmr_set_notify_fw(sfc); + set_bit(SCM_NOTIFY_FW, & + sfc->vha->dpc_flags); + } + } +} + +static void +qla2xxx_handle_tgt_congestion(struct fc_port *fcport) +{ + bool congested, throttle; + struct qla_scmr_flow_control *sfc = &fcport->sfc; + + congested = throttle = false; + + congested = qla2xxx_check_fpin_event(sfc); + + if (congested == true) { + qla_scmr_set_congested(sfc); + /* If a lane change is needed */ + if (qla2xxx_switch_vl(sfc, VL_SLOW)) { + qla2xxx_set_baseline_tp(sfc); + return; + } + if (ql2x_scmr_flow_ctl_tgt) + qla2xxx_reduce_flows(sfc); + } else { + qla2xxx_check_congestion_timeout(sfc); + if (ql2x_scmr_flow_ctl_tgt) { + if (!qla_scmr_reduced_throttle(sfc)) + return; + + qla2xxx_scmr_increase_flows(sfc); + } + } +} + +static void +qla2xxx_tune_host_throttle(struct qla_scmr_flow_control *sfc) +{ + bool congested = false; + + congested = qla2xxx_check_cn_event(sfc); + + if (congested == true) { + if (ql2x_scmr_flow_ctl_host) + qla2xxx_reduce_flows(sfc); + qla_scmr_set_congested(sfc); + } else { + qla2xxx_check_congestion_timeout(sfc); + if (ql2x_scmr_flow_ctl_host) { + if (!qla_scmr_reduced_throttle(sfc)) + return; + + qla2xxx_scmr_increase_flows(sfc); + } + } +} + +/* + * qla2xxx_throttle_curr_req() - Check if this request should be sent + * back for a retry because of congestion on this host. + * + * @sfc: Pointer to the flow control struct for the given request queue. + * @cmd: SCSI Command. + * + * Returns true for retry, false otherwise. 
+ */ +static bool +qla2xxx_throttle_curr_req(struct qla_scmr_flow_control *sfc) +{ + /* Throttle down reqs if the host has oversubscribed */ + + if (sfc->mode == QLA_MODE_Q_DEPTH) { + if (qla_scmr_throttle_qdepth(sfc)) { + if (atomic_read(&sfc->scmr_permitted) < + atomic_read(&sfc->perf.dir_q_depth)) { + sfc->rstats->busy_status_count++; + return true; + } + } + } else if (sfc->mode == QLA_MODE_FLOWS) { + if (qla_scmr_throttle_bps(sfc)) { + if (atomic_read(&sfc->scmr_permitted) < + atomic64_read(&sfc->perf.scmr_bytes_per_period)) { + sfc->rstats->busy_status_count++; + return true; + } + } else if (qla_scmr_throttle_ios(sfc)) { + if (atomic_read(&sfc->scmr_permitted) < + atomic_read(&sfc->perf.scmr_reqs_per_period)) { + sfc->rstats->busy_status_count++; + return true; + } + } + } + + return false; +} + +static inline void +qla2x00_restart_perf_timer(scsi_qla_host_t *vha) +{ + /* Currently used for 82XX only. */ + if (vha->device_flags & DFLG_DEV_FAILED) { + ql_dbg(ql_dbg_scm, vha, 0x600d, + "Device in a failed state, returning.\n"); + return; + } + + mod_timer(&vha->perf_timer, jiffies + HZ/10); +} + +static void +qla2xxx_minute_stats(struct qla_scmr_flow_control *sfc) +{ + sfc->ticks++; + + if (!(sfc->ticks % 60)) { + atomic_set(&sfc->perf.max_q_depth, 0); + } +} + +/* Externally Used APIs */ + +/************************************************************************** +* qla2xxx_perf_timer +* +* Description: +* 100 ms timer. Should be maintained as a lightweight thread because +* of its frequency. +* +* Context: Interrupt +***************************************************************************/ +void +qla2xxx_perf_timer(qla_timer_arg_t t) +{ + scsi_qla_host_t *vha = qla_from_timer(vha, t, perf_timer); + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport; + uint64_t index = 0; + uint64_t count = 0; + + if (ha->flags.eeh_busy) { + ql_dbg(ql_dbg_scm, vha, 0x6000, + "EEH = %d, restarting timer.\n", + ha->flags.eeh_busy); + qla2x00_restart_perf_timer(vha); + return; + } + + index = ha->sfc.perf.index % 10; + count = atomic64_read(&ha->sfc.perf.scmr_bytes_per_period); + qla2xxx_atomic64_add(&ha->sfc.perf.bytes_last_sec, count); + qla2xxx_atomic64_sub(&ha->sfc.perf.bytes_last_sec, + ha->sfc.perf.bytes_arr[index]); + ha->sfc.perf.bytes_arr[index] = count; + + atomic64_set(&ha->sfc.perf.scmr_bytes_per_period, 0); + + count = atomic_read(&ha->sfc.perf.scmr_reqs_per_period); + qla2xxx_atomic_add(&ha->sfc.perf.reqs_last_sec, count); + qla2xxx_atomic_sub(&ha->sfc.perf.reqs_last_sec, + ha->sfc.perf.reqs_arr[index]); + ha->sfc.perf.reqs_arr[index] = count; + atomic_set(&ha->sfc.perf.scmr_reqs_per_period, 0); + ha->sfc.perf.index++; + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (!(fcport->port_type & FCT_TARGET) && + !(fcport->port_type & FCT_NVME_TARGET)) + continue; + + index = fcport->sfc.perf.index % 10; + count = atomic64_read(&fcport->sfc.perf.scmr_bytes_per_period); + qla2xxx_atomic64_add(&fcport->sfc.perf.bytes_last_sec, count); + qla2xxx_atomic64_sub(&fcport->sfc.perf.bytes_last_sec, + fcport->sfc.perf.bytes_arr[index]); + fcport->sfc.perf.bytes_arr[index] = count; + + atomic64_set(&fcport->sfc.perf.scmr_bytes_per_period, 0); + + count = atomic_read(&fcport->sfc.perf.scmr_reqs_per_period); + qla2xxx_atomic_add(&fcport->sfc.perf.reqs_last_sec, count); + qla2xxx_atomic_sub(&fcport->sfc.perf.reqs_last_sec, + fcport->sfc.perf.reqs_arr[index]); + fcport->sfc.perf.reqs_arr[index] = count; + atomic_set(&fcport->sfc.perf.scmr_reqs_per_period, 0); + fcport->sfc.perf.index++; + } 
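+ + /* One-second rolling byte/request windows are now current for the HBA + * and every target port; qla2x00_restart_perf_timer() rearms the timer + * at a 100 ms cadence. */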
+ + qla2x00_restart_perf_timer(vha); +} + +/* + * qla2xxx_throttle_req - To rate limit I/O on congestion. + * + * Returns true to throttle down, false otherwise. + */ +bool +qla2xxx_throttle_req(srb_t *sp, struct qla_hw_data *ha, fc_port_t *fcport, uint8_t dir) +{ + bool ret = false; + + /* NVMe enums map to the same values */ + if (ql2x_scmr_flow_ctl_host && + dir == DMA_FROM_DEVICE && + qla_scm_chk_throttle_cmd_opcode(sp)) { + ret = qla2xxx_throttle_curr_req(&ha->sfc); + if (ret == true) { + atomic_inc(&ha->throttle_read); + return ret; + } + } + + if (ql2x_scmr_flow_ctl_tgt && + dir == DMA_TO_DEVICE && + qla_scm_chk_throttle_cmd_opcode(sp) ) { + ret = qla2xxx_throttle_curr_req(&fcport->sfc); + if (ret == true && ql2x_scmr_flow_ctl_host) { + atomic_inc(&ha->throttle_write); + return ret; + } + } + + return ret; +} + +void +qla2xxx_scmr_manage_qdepth(srb_t *sp, struct fc_port *fcport, bool inc) +{ + int curr; + struct scsi_qla_host *vha = fcport->vha; + + if (!IS_SCM_CAPABLE(vha->hw)) + return; + + if (inc == true) { + if (qla_scm_chk_throttle_cmd_opcode(sp)) { + if (sp->dir == DMA_TO_DEVICE) { + atomic_inc(&fcport->sfc.perf.dir_q_depth); + curr = atomic_read(&fcport->sfc.perf.dir_q_depth); + if (atomic_read(&fcport->sfc.perf.max_q_depth) < + curr) + atomic_set(&fcport->sfc.perf.max_q_depth, curr); + } else { + atomic_inc(&vha->hw->sfc.perf.dir_q_depth); + curr = atomic_read(&vha->hw->sfc.perf.dir_q_depth); + if (atomic_read(&vha->hw->sfc.perf.max_q_depth) < + curr) + atomic_set(&vha->hw->sfc.perf.max_q_depth, curr); + } + } + atomic_inc(&vha->hw->sfc.perf.q_depth); + atomic_inc(&fcport->sfc.perf.q_depth); + } else { + atomic_dec(&vha->hw->sfc.perf.q_depth); + atomic_dec(&fcport->sfc.perf.q_depth); + if (qla_scm_chk_throttle_cmd_opcode(sp)) { + if (sp->dir == DMA_TO_DEVICE) + atomic_dec(&fcport->sfc.perf.dir_q_depth); + else + atomic_dec(&vha->hw->sfc.perf.dir_q_depth); + } + } +} + +void +qla2xxx_scmr_cleanup(srb_t *sp, scsi_qla_host_t *vha, struct scsi_cmnd *cmd) +{ + fc_port_t *fcport = (struct fc_port *)cmd->device->hostdata; + + if (!IS_SCM_CAPABLE(vha->hw)) + return; + + atomic_dec(&fcport->sfc.perf.scmr_reqs_per_period); + qla2xxx_atomic64_sub(&fcport->sfc.perf.scmr_bytes_per_period, + scsi_bufflen(cmd)); + atomic_dec(&vha->hw->sfc.perf.scmr_reqs_per_period); + qla2xxx_atomic64_sub(&vha->hw->sfc.perf.scmr_bytes_per_period, + scsi_bufflen(cmd)); + qla2xxx_scmr_manage_qdepth(sp, fcport, false); +} + +/* + * qla2xxx_scmr_flow_control - To rate limit I/O on congestion. 
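+ * + * Invoked from the driver's periodic timer on SCM-capable adapters; it + * refreshes per-minute stats and re-evaluates the throttle for the host + * and for each FCP/NVMe target port.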
+ * + */ +void +qla2xxx_scmr_flow_control(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + struct fc_port *fcport; + + qla2xxx_minute_stats(&ha->sfc); + /* Controlled at the port level */ + qla2xxx_tune_host_throttle(&ha->sfc); + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (!(fcport->port_type & FCT_TARGET) && + !(fcport->port_type & FCT_NVME_TARGET)) + continue; + + qla2xxx_minute_stats(&fcport->sfc); + qla2xxx_handle_tgt_congestion(fcport); + } +} + +void +qla2xxx_scmr_clear_congn(struct qla_scmr_flow_control *sfc) +{ + struct qla_hw_data *ha = sfc->vha->hw; + + qla_scmr_clear_congested(sfc); + sfc->level = QLA_CONG_NONE; + sfc->expiration_jiffies = 0; + + /* Clear severity status for the application as well */ + ha->scm.congestion.severity = 0; + ha->scm.last_event_timestamp = qla_get_real_seconds(); +} + +void +qla2xxx_scmr_clear_throttle(struct qla_scmr_flow_control *sfc) +{ + if (sfc->mode == QLA_MODE_Q_DEPTH) { + qla_scmr_clear_throttle_qdepth(sfc); + } else if (sfc->mode == QLA_MODE_FLOWS) { + if (qla_scmr_throttle_bps(sfc)) + qla_scmr_clear_throttle_bps(sfc); + if (qla_scmr_throttle_ios(sfc)) + qla_scmr_clear_throttle_ios(sfc); + } + atomic_set(&sfc->scmr_base, 0); + atomic_set(&sfc->scmr_permitted, 0); + qla2xxx_clear_baseline_tp(sfc); + sfc->rstats->throttle_cleared++; + sfc->dir = QLA_DIR_NONE; + sfc->throttle_period = + sfc->event_period + sfc->event_period_buffer; + + ql_dbg(ql_dbg_scm, sfc->vha, 0x0203, + "USCM: Clearing Throttling for:%8phN\n", qla_scmr_is_tgt(sfc) ? + sfc->fcport->port_name: sfc->vha->port_name); +} + +void +qla2xxx_update_scm_fcport(scsi_qla_host_t *vha) +{ + fc_port_t *fcport; + struct qla_scmr_flow_control *sfc; + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (!(fcport->port_type & FCT_TARGET) && + !(fcport->port_type & FCT_NVME_TARGET)) + continue; + + sfc = &fcport->sfc; + if (qla_scmr_test_notify_fw(sfc)) { + qla2xxx_set_vl(sfc->fcport, fcport->vl.v_lane); + qla_scmr_clr_notify_fw(sfc); + } + } +} + +/* + * Ensure that the caller checks for IS_SCM_CAPABLE() + */ +void +qla2xxx_update_sfc_ios(srb_t *sp, struct qla_hw_data *ha, + fc_port_t *fcport, int new) +{ + atomic_inc(&ha->sfc.perf.scmr_reqs_per_period); + atomic_inc(&fcport->sfc.perf.scmr_reqs_per_period); + qla2xxx_atomic64_add(&ha->sfc.perf.scmr_bytes_per_period, new); + qla2xxx_atomic64_add(&fcport->sfc.perf.scmr_bytes_per_period, new); + qla2xxx_scmr_manage_qdepth(sp, fcport, true); + return; +} + +/* + * Clear all stats maintained by SCM + */ + +void +qla2xxx_clear_scm_stats(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport = NULL; + + memset(&ha->scm.stats, 0, sizeof(struct qla_scm_stats)); + memset(&ha->scm.sev, 0, sizeof(struct qla_fpin_severity)); + memset(&ha->sig_sev, 0, sizeof(struct qla_sig_severity)); + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (!(fcport->port_type & FCT_TARGET) && + !(fcport->port_type & FCT_NVME_TARGET)) + continue; + + memset(&fcport->scm.stats, 0, + sizeof(struct qla_scm_stats)); + } + +} + +void +qla2xxx_clear_scmr_stats(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + fc_port_t *fcport = NULL; + + memset(&ha->scm.rstats, 0, sizeof(struct qla_scmr_stats)); + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (!(fcport->port_type & FCT_TARGET) && + !(fcport->port_type & FCT_NVME_TARGET)) + continue; + + memset(&fcport->scm.rstats, 0, + sizeof(struct qla_scmr_stats)); + } +} + + +/* + * Function Name: qla2xxx_scmr_init_deltas + * + * Description: + 
* Initialize deltas used to throttle down/up based + * on the profiles. + * + * PARAMETERS: + * sfc: USCM flow control + */ +void qla2xxx_scmr_init_deltas(struct qla_scmr_flow_control *sfc) +{ + sfc->scmr_down_delta[QL_SCM_MONITOR] = 0; + sfc->scmr_down_delta[QL_SCM_CONSERVATIVE] = 1; + sfc->scmr_down_delta[QL_SCM_MODERATE] = 2; + sfc->scmr_down_delta[QL_SCM_AGGRESSIVE] = 3; + sfc->scmr_up_delta[QL_SCM_MONITOR] = 1; + sfc->scmr_up_delta[QL_SCM_CONSERVATIVE] = 8; + sfc->scmr_up_delta[QL_SCM_MODERATE] = 4; + sfc->scmr_up_delta[QL_SCM_AGGRESSIVE] = 2; +} + +/* Helper routine to prepare the RDF ELS payload. + * Refer to FC LS 5.01 for a detailed explanation. + */ +static void qla_prepare_rdf_payload(scsi_qla_host_t *vha) +{ + vha->rdf_els_payload.els_code = RDF_OPCODE; + vha->rdf_els_payload.desc_len = cpu_to_be32(sizeof(struct rdf_els_descriptor)); + vha->rdf_els_payload.rdf_desc.desc_tag = + cpu_to_be32(QLA_ELS_DTAG_FPIN_REGISTER); + + vha->rdf_els_payload.rdf_desc.desc_cnt = + cpu_to_be32(ELS_RDF_REG_TAG_CNT); + + vha->rdf_els_payload.rdf_desc.desc_len = + cpu_to_be32(sizeof(struct rdf_els_descriptor) - 8); + vha->rdf_els_payload.rdf_desc.desc_tags[0] = + cpu_to_be32(QLA_ELS_DTAG_LNK_INTEGRITY); + vha->rdf_els_payload.rdf_desc.desc_tags[1] = + cpu_to_be32(QLA_ELS_DTAG_DELIVERY); + vha->rdf_els_payload.rdf_desc.desc_tags[2] = + cpu_to_be32(QLA_ELS_DTAG_PEER_CONGEST); + vha->rdf_els_payload.rdf_desc.desc_tags[3] = + cpu_to_be32(QLA_ELS_DTAG_CONGESTION); + vha->rdf_els_payload.rdf_desc.desc_tags[4] = + cpu_to_be32(QLA_ELS_DTAG_PUN); +} + +/* Helper routine to prepare the EDC ELS payload. + * Refer to FC LS 5.01 for a detailed explanation. + */ +static void qla_prepare_edc_payload(scsi_qla_host_t *vha) +{ + struct edc_els_payload *edc = &vha->hw->edc_els_payload; + + edc->els_code = EDC_OPCODE; + edc->desc_len = cpu_to_be32(sizeof(struct edc_els_descriptor)); + + edc->edc_desc.link_fault_cap_descriptor_tag = cpu_to_be32(ELS_EDC_LFC_INFO); + edc->edc_desc.lfc_descriptor_length = cpu_to_be32(12); + + edc->edc_desc.cong_sig_cap_descriptor_tag = cpu_to_be32(ELS_EDC_CONG_SIG_INFO); + edc->edc_desc.csc_descriptor_length = cpu_to_be32(16); +} + +/* + * Update various fields of SP to send the ELS via the ELS PT + * IOCB.
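+ * + * The request and response buffers are DMA-coherent allocations made + * here and later released by qla2x00_scm_els_sp_free(). RDF is + * addressed to the fabric controller (D_ID FFFFFDh) and EDC to the + * fabric F_Port (D_ID FFFFFEh).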
+ */ + +static void qla_update_sp(srb_t *sp, scsi_qla_host_t *vha, u8 cmd) +{ + struct qla_els_pt_arg *a = &sp->u.iocb_cmd.u.drv_els.els_pt_arg; + struct srb_iocb *iocb_cmd = &sp->u.iocb_cmd; + void *buf; + dma_addr_t dma_addr; + int len; + u8 al_pa; + + if (cmd == RDF_OPCODE) + al_pa = 0xFD; + else + al_pa = 0xFE; + + a->els_opcode = cmd; + a->nport_handle = cpu_to_le16(sp->fcport->loop_id); + a->vp_idx = sp->vha->vp_idx; + a->control_flags = 0; + a->rx_xchg_address = 0; //No resp DMA from fabric + + a->did.b.al_pa = al_pa; + a->did.b.area = 0xFF; + a->did.b.domain = 0xFF; + + if (cmd == RDF_OPCODE) + a->tx_len = a->tx_byte_count = + cpu_to_le32(sizeof(iocb_cmd->u.drv_els.els_req.rdf_cmd)); + else + a->tx_len = a->tx_byte_count = + cpu_to_le32(sizeof(iocb_cmd->u.drv_els.els_req.edc_cmd)); + + a->rx_len = a->rx_byte_count = cpu_to_le32(sizeof(iocb_cmd->u.drv_els.els_rsp)); + + len = iocb_cmd->u.drv_els.dma_addr.cmd_len = sizeof(iocb_cmd->u.drv_els.els_req); + + buf = dma_alloc_coherent(&vha->hw->pdev->dev, len, + &dma_addr, GFP_KERNEL); + + iocb_cmd->u.drv_els.dma_addr.cmd_buf = buf; + iocb_cmd->u.drv_els.dma_addr.cmd_dma = dma_addr; + + if (cmd == RDF_OPCODE) + memcpy(iocb_cmd->u.drv_els.dma_addr.cmd_buf, &vha->rdf_els_payload, + sizeof(vha->rdf_els_payload)); + else + memcpy(iocb_cmd->u.drv_els.dma_addr.cmd_buf, &vha->hw->edc_els_payload, + sizeof(vha->hw->edc_els_payload)); + + a->tx_addr = iocb_cmd->u.drv_els.dma_addr.cmd_dma; + + len = iocb_cmd->u.drv_els.dma_addr.rsp_len = sizeof(iocb_cmd->u.drv_els.els_rsp); + + buf = dma_alloc_coherent(&vha->hw->pdev->dev, len, + &dma_addr, GFP_KERNEL); + + iocb_cmd->u.drv_els.dma_addr.rsp_buf = buf; + iocb_cmd->u.drv_els.dma_addr.rsp_dma = dma_addr; + + a->rx_addr = iocb_cmd->u.drv_els.dma_addr.rsp_dma; +} + +/* + * qla2xxx_scm_get_features - + * Get the firmware/Chip related supported features w.r.t SCM + * Issue mbox 5A. + * Parse through the response and get relevant values + */ +int +qla2xxx_scm_get_features(scsi_qla_host_t *vha) +{ + dma_addr_t fdma; + u16 sz = FW_FEATURES_SIZE; + int rval = 0; + u8 *f; + int i; + u8 scm_feature; + struct edc_els_descriptor *edc = &vha->hw->edc_els_payload.edc_desc; + + f = dma_alloc_coherent(&vha->hw->pdev->dev, sz, + &fdma, GFP_KERNEL); + if (!f) { + ql_log(ql_log_warn, vha, 0x7035, + "DMA alloc failed for feature buf.\n"); + return -ENOMEM; + } + + rval = qla_get_features(vha, fdma, FW_FEATURES_SIZE); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x7035, + "Get features failed :0x%x.\n", rval); + } else { + scm_feature = f[0]; + /* If both bits 3 and 4 are zero, firmware sends the ELS + * 27xx has bit 4 set and bit 3 cleared + */ + if ((!(scm_feature & BIT_3) && !(scm_feature & BIT_4))) { + rval = 1; + goto free; + } + i = 4; + if (scm_feature & BIT_3) { + /* The next 3 words contain Link fault Capability */ + edc->degrade_activate_threshold = get_unaligned_be32(&f[i]); + i += 4; + edc->degrade_deactivate_threshold = get_unaligned_be32(&f[i]); + i += 4; + edc->fec_degrade_interval = get_unaligned_be32(&f[i]); + i += 4; + } + if (scm_feature & BIT_4) { + /* The next 4 words contain Cong. Sig. 
Capability */ + i = 16; + edc->tx_signal_cap = (get_unaligned_be32(&f[i])); + i += 4; + edc->tx_signal_freq = get_unaligned_be32(&f[i]); + i += 4; + edc->rx_signal_cap = get_unaligned_be32(&f[i]); + i += 4; + edc->rx_signal_freq = get_unaligned_be32(&f[i]); + i += 4; + } + } +free: + dma_free_coherent(&vha->hw->pdev->dev, sz, f, fdma); + return rval; +} + +void qla2x00_scm_els_sp_done(srb_t *sp, int res) +{ + struct scsi_qla_host *vha = sp->vha; + struct els_resp *rsp = + (struct els_resp *)sp->u.iocb_cmd.u.drv_els.dma_addr.rsp_buf; + u8 err_code; + + if (res == QLA_SUCCESS) { + ql_log(ql_log_info, vha, 0x700f, + "%s ELS completed for port:%8phC\n", + (sp->type == SRB_ELS_EDC)?"EDC":"RDF", vha->port_name); + + if (rsp->resp_code == ELS_LS_RJT) { + struct fc_els_ls_rjt *rjt = + (struct fc_els_ls_rjt *)sp->u.iocb_cmd.u.drv_els.dma_addr.rsp_buf; + err_code = rjt->er_reason; + ql_log(ql_log_info, vha, 0x503f, + "%s rejected with code:0x%x\n",(sp->type == SRB_ELS_EDC)?"EDC":"RDF", + err_code); + if (sp->type == SRB_ELS_EDC) { + if (err_code == ELS_RJT_BUSY && ++vha->hw->edc_retry_cnt < MAX_USCM_ELS_RETRIES) + set_bit(SCM_SEND_EDC, &vha->dpc_flags); + } else { + if (err_code == ELS_RJT_BUSY && ++vha->rdf_retry_cnt < MAX_USCM_ELS_RETRIES) + set_bit(SCM_SEND_RDF, &vha->dpc_flags); + } + } else if ((rsp->resp_code == ELS_LS_ACC) && (sp->type == SRB_ELS_RDF)) { + /* RDF completion indicates that SCM can be supported */ + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x503f, + "RDF completed \n"); + vha->hw->flags.scm_enabled = 1; + vha->hw->scm.scm_fabric_connection_flags |= SCM_FLAG_RDF_COMPLETED; + /* If VL is negotiated successfully */ + if (vha->hw->flags.conn_fabric_cisco_er_rdy) + vha->hw->scm.scm_fabric_connection_flags |= SCM_FLAG_CISCO_CONNECTED; + } + } else { + ql_log(ql_log_warn, vha, 0x701a, + "%s ELS failed for port:%8phC, res:0x%x\n", + (sp->type == SRB_ELS_EDC)?"EDC":"RDF", vha->port_name, res); + if (sp->type == SRB_ELS_EDC) { + if (++vha->hw->edc_retry_cnt < MAX_USCM_ELS_RETRIES) { + ql_log(ql_log_info, vha, 0x701b, + "Retrying EDC:retry:%d\n",vha->hw->edc_retry_cnt); + set_bit(SCM_SEND_EDC, &vha->dpc_flags); + } + } else if (sp->type == SRB_ELS_RDF) { + if (++vha->rdf_retry_cnt < MAX_USCM_ELS_RETRIES) { + ql_log(ql_log_info, vha, 0x701c, + "Retrying RDF:retry:%d\n",vha->rdf_retry_cnt); + set_bit(SCM_SEND_RDF, &vha->dpc_flags); + } + } + } + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +} + +void qla2x00_scm_els_sp_free(srb_t *sp) +{ + void *cmd_buf, *rsp_buf; + dma_addr_t cmd_dma, rsp_dma; + int cmd_len, rsp_len; + struct qla_work_evt *e; + + cmd_buf = sp->u.iocb_cmd.u.drv_els.dma_addr.cmd_buf; + cmd_dma = sp->u.iocb_cmd.u.drv_els.dma_addr.cmd_dma; + cmd_len = sp->u.iocb_cmd.u.drv_els.dma_addr.cmd_len; + + rsp_buf = sp->u.iocb_cmd.u.drv_els.dma_addr.rsp_buf; + rsp_dma = sp->u.iocb_cmd.u.drv_els.dma_addr.rsp_dma; + rsp_len = sp->u.iocb_cmd.u.drv_els.dma_addr.rsp_len; + + ql_dbg(ql_dbg_scm + ql_dbg_verbose, sp->vha, 0x700a, + "cmd_buf:%p, cmd_dma:%llx, len:%d\n", + cmd_buf, cmd_dma, cmd_len); + + e = qla2x00_alloc_work(sp->vha, QLA_EVT_UNMAP); + if (!e) { + dma_free_coherent(&sp->vha->hw->pdev->dev, + cmd_len, + cmd_buf, + cmd_dma); + cmd_buf = NULL; + dma_free_coherent(&sp->vha->hw->pdev->dev, + rsp_len, + rsp_buf, + rsp_dma); + rsp_buf = NULL; + qla2x00_free_fcport(sp->fcport); + qla2x00_rel_sp(sp); + } else { + e->u.iosb.sp = sp; + qla2x00_post_work(sp->vha, e); + } +} + +/* + * qla2xxx_scm_send_rdf_els - Send RDF ELS to the switch + * Called by both base port and 
vports + */ + +int +qla2xxx_scm_send_rdf_els(scsi_qla_host_t *vha) +{ + srb_t *sp; + fc_port_t *fcport = NULL; + int rval = 0; + + /* Allocate a dummy fcport structure, since functions + * preparing the IOCB and mailbox command retrieves port + * specific information from fcport structure. + */ + + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); + if (!fcport) { + rval = -ENOMEM; + return rval; + } + + qla_prepare_rdf_payload(vha); + + /* Initialize all required fields of fcport */ + fcport->vha = vha; + fcport->d_id = vha->d_id; + fcport->loop_id = NPH_FABRIC_CONTROLLER; // RDF, EDC -> F_PORT + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x700a, + "loop-id=%x portid=%-2x%02x%02x.\n", + fcport->loop_id, + fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); + + /* Alloc SRB structure */ + sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + if (!sp) { + qla2x00_free_fcport(fcport); + rval = -ENOMEM; + return rval; + } + + sp->type = SRB_ELS_RDF; + sp->name = "rdf_els"; + sp->u.iocb_cmd.u.drv_els.els_req.rdf_cmd = vha->rdf_els_payload; + + qla_update_sp(sp, vha, RDF_OPCODE); + + sp->free = qla2x00_scm_els_sp_free; + sp->done = qla2x00_scm_els_sp_done; + + /* Reset scm_enabled to indicate SCM is not yet enabled */ + vha->hw->flags.scm_enabled = 0; + vha->hw->scm.scm_fabric_connection_flags &= ~SCM_FLAG_RDF_COMPLETED; + + rval = qla2x00_start_sp(sp); + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x700e, + "qla2x00_start_sp failed = %d\n", rval); + qla2x00_rel_sp(sp); + qla2x00_free_fcport(fcport); + rval = -EIO; + } + return rval; +} + +/* + * qla2xxx_scm_send_edc_els - Send EDC ELS to the switch + * Called by base port - post the initial login to the fabric + */ +int +qla2xxx_scm_send_edc_els(scsi_qla_host_t *vha) +{ + srb_t *sp; + fc_port_t *fcport = NULL; + int rval = 0; + + /* Allocate a dummy fcport structure, since functions + * preparing the IOCB and mailbox command retrieves port + * specific information from fcport structure. 
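+	 * The dummy port carries the fabric F_Port address (loop_id
+	 * NPH_F_PORT below) and is expected to be released again in the
+	 * SRB free/completion path once the ELS exchange finishes.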
+	 */
+	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
+	if (!fcport) {
+		rval = -ENOMEM;
+		return rval;
+	}
+
+	qla_prepare_edc_payload(vha);
+
+	/* Initialize all required fields of fcport */
+	fcport->vha = vha;
+	fcport->d_id = vha->d_id;
+	fcport->loop_id = NPH_F_PORT;
+	ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x700a,
+	    "loop-id=%x portid=%-2x%02x%02x.\n",
+	    fcport->loop_id,
+	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+	if (!sp) {
+		/* Free the dummy fcport as in the RDF path above,
+		 * otherwise it leaks here.
+		 */
+		qla2x00_free_fcport(fcport);
+		rval = -ENOMEM;
+		return rval;
+	}
+
+	sp->type = SRB_ELS_EDC;
+	sp->name = "edc_els";
+	sp->u.iocb_cmd.u.drv_els.els_req.edc_cmd = vha->hw->edc_els_payload;
+
+	qla_update_sp(sp, vha, EDC_OPCODE);
+
+	sp->free = qla2x00_scm_els_sp_free;
+	sp->done = qla2x00_scm_els_sp_done;
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x700e,
+		    "qla2x00_start_sp failed = %d\n", rval);
+		qla2x00_rel_sp(sp);
+		rval = -EIO;
+	}
+	return rval;
+}
+
+void qla2xxx_send_uscm_els(scsi_qla_host_t *vha)
+{
+	if (test_and_clear_bit(SCM_SEND_EDC, &vha->dpc_flags)) {
+		ql_log(ql_log_info, vha, 0x20ad,
+		    "Driver sending EDC for port :%8phC\n", vha->port_name);
+		qla2xxx_scm_send_edc_els(vha);
+	}
+	if (test_and_clear_bit(SCM_SEND_RDF, &vha->dpc_flags)) {
+		ql_log(ql_log_info, vha, 0x20ae,
+		    "Driver sending RDF for port :%8phC\n", vha->port_name);
+		qla2xxx_scm_send_rdf_els(vha);
+	}
+}
+
+/*
+ * Function Name: qla2xxx_prepare_els_rsp
+ *
+ * Description:
+ * Helper function to populate common fields in the ELS response
+ *
+ * PARAMETERS:
+ * rsp_els: The ELS response to be sent
+ * purex:   ELS request received by the HBA
+ */
+static void qla2xxx_prepare_els_rsp(struct els_entry_24xx *rsp_els,
+	struct purex_entry_24xx *purex)
+{
+	rsp_els->entry_type = ELS_IOCB_TYPE;
+	rsp_els->entry_count = 1;
+	rsp_els->sys_define = 0;
+	rsp_els->entry_status = 0;
+	rsp_els->handle = QLA_SKIP_HANDLE;
+	rsp_els->nport_handle = purex->nport_handle;
+	rsp_els->tx_dsd_count = 1;
+	rsp_els->vp_index = purex->vp_idx;
+	rsp_els->sof_type = EST_SOFI3;
+	rsp_els->rx_xchg_address = purex->rx_xchg_addr;
+	rsp_els->rx_dsd_count = 0;
+	rsp_els->opcode = purex->els_frame_payload[0];
+
+	rsp_els->d_id[0] = purex->s_id[0];
+	rsp_els->d_id[1] = purex->s_id[1];
+	rsp_els->d_id[2] = purex->s_id[2];
+
+	rsp_els->control_flags = cpu_to_le16(EPD_ELS_ACC);
+}
+
+/*
+ * Function Name: qla2xx_scm_process_purex_edc
+ *
+ * Description:
+ * Prepare an EDC response and send it to the switch
+ *
+ * PARAMETERS:
+ * vha:  SCSI qla host
+ * purex: EDC ELS received by HBA
+ */
+void
+qla2xx_scm_process_purex_edc(struct scsi_qla_host *vha,
+	struct purex_item *item)
+{
+	struct qla_hw_data *ha = vha->hw;
+	struct purex_entry_24xx *purex =
+		(struct purex_entry_24xx *)&item->iocb;
+	struct edc_els_resp_payload *edc_rsp_payload = ha->edc_rsp_payload;
+	dma_addr_t edc_rsp_payload_dma = ha->edc_rsp_payload_dma;
+	struct els_entry_24xx *rsp_els = NULL;
+	uint edc_rsp_payload_length = sizeof(*edc_rsp_payload);
+
+	ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x0181,
+	    "-------- ELS REQ -------\n");
+	ql_dump_buffer(ql_dbg_scm + ql_dbg_verbose, vha, 0x0182,
+	    purex, sizeof(*purex));
+
+	rsp_els = __qla2x00_alloc_iocbs(item->qpair, NULL);
+	if (!rsp_els) {
+		ql_log(ql_log_warn, vha, 0x018b,
+		    "Failed to allocate iocbs for ELS RSP\n");
+		return;
+	}
+
+	/* Prepare Response IOCB */
+	qla2xxx_prepare_els_rsp(rsp_els, purex);
+
+	rsp_els->rx_byte_count = 0;
+	rsp_els->tx_byte_count = edc_rsp_payload_length;
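+	/* tx_byte_count covers the whole EDC LS_ACC payload built below:
+	 * the LS_ACC code word, the descriptor list length, the LS request
+	 * info descriptor and the echoed EDC capability descriptor.
+	 */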
+
+	rsp_els->tx_address = edc_rsp_payload_dma;
+	rsp_els->tx_len = rsp_els->tx_byte_count;
+
+	rsp_els->rx_address = 0;
+	rsp_els->rx_len = 0;
+
+	edc_rsp_payload->resp_code = cpu_to_be32(ELS_LS_ACC << 24);	/* LS_ACC */
+	/* Send Link Service Req Info desc also */
+	edc_rsp_payload->desc_len = cpu_to_be32(sizeof(struct edc_els_descriptor) +
+	    sizeof(struct edc_els_link_srv_descriptor));
+	edc_rsp_payload->edc_ls_desc.link_srv_info_descriptor_tag =
+	    cpu_to_be32(QLA_ELS_DTAG_LS_REQ_INFO);
+	edc_rsp_payload->edc_ls_desc.ls_info_descriptor_length =
+	    cpu_to_be32(4);
+	/* FIXME: the next assignment overwrites the descriptor length just
+	 * set above; the EDC command code most likely belongs in the
+	 * descriptor's LS request payload word instead (compare the RDF
+	 * response, which places RDF_OPCODE in desc_tags[0]).
+	 */
+	edc_rsp_payload->edc_ls_desc.ls_info_descriptor_length =
+	    cpu_to_be32(EDC_OPCODE);
+
+	/* Prepare Response Payload */
+	memcpy(&edc_rsp_payload->edc_desc,
+	    &vha->hw->edc_els_payload.edc_desc, sizeof(struct edc_els_descriptor));
+
+	ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x0183,
+	    "Sending ELS Response to EDC Request...\n");
+	ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x0184,
+	    "-------- ELS RSP -------\n");
+	ql_dump_buffer(ql_dbg_scm + ql_dbg_verbose, vha, 0x0185,
+	    rsp_els, sizeof(*rsp_els));
+	ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x0186,
+	    "-------- ELS RSP PAYLOAD -------\n");
+	ql_dump_buffer(ql_dbg_scm + ql_dbg_verbose, vha, 0x0187,
+	    edc_rsp_payload, edc_rsp_payload_length);
+
+	wmb();
+	qla2x00_start_iocbs(vha, item->qpair->req);
+
+	ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x018a, "%s: done.\n", __func__);
+}
+
+/*
+ * Function Name: qla2xxx_scm_process_purex_rdf
+ *
+ * Description:
+ * Prepare an RDF response and send it to the switch
+ *
+ * PARAMETERS:
+ * vha:  SCSI qla host
+ * purex: RDF ELS received by HBA
+ */
+void
+qla2xxx_scm_process_purex_rdf(struct scsi_qla_host *vha,
+	struct purex_item *item)
+{
+	struct purex_entry_24xx *purex =
+		(struct purex_entry_24xx *)&item->iocb;
+	dma_addr_t rdf_payload_dma = vha->rdf_payload_dma;
+	struct els_entry_24xx *rsp_els = NULL;
+	struct rdf_els_payload *rdf_payload = vha->rdf_payload;
+	uint rdf_payload_length = sizeof(*rdf_payload);
+
+	ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x0191,
+	    "-------- ELS REQ -------\n");
+	ql_dump_buffer(ql_dbg_scm + ql_dbg_verbose, vha, 0x0192,
+	    purex, sizeof(*purex));
+
+	rsp_els = __qla2x00_alloc_iocbs(item->qpair, NULL);
+	if (!rsp_els) {
+		ql_log(ql_log_warn, vha, 0x019c,
+		    "Failed to allocate dma buffer ELS RSP.\n");
+		return;
+	}
+
+	/* Prepare Response IOCB */
+	qla2xxx_prepare_els_rsp(rsp_els, purex);
+
+	rsp_els->rx_byte_count = 0;
+	/* Since we send 1 desc */
+	rsp_els->tx_byte_count = rdf_payload_length - ELS_RDF_RSP_DESC_LEN;
+
+	rsp_els->tx_address = rdf_payload_dma;
+	rsp_els->tx_len = rsp_els->tx_byte_count;
+
+	rsp_els->rx_address = 0;
+	rsp_els->rx_len = 0;
+
+	/* Prepare Response Payload */
+	/* For Nx ports, the desc list len will be 12
+	 * and the LS req info will be sent as part of LS_ACC.
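+	 * Those 12 bytes make up one LS request info descriptor: a 4-byte
+	 * tag, a 4-byte length (= 4) and the 4-byte RDF command code
+	 * echoed in desc_tags[0] below.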
+ * An RDF will be re-sent to the switch + */ + rdf_payload->els_code = cpu_to_be32(ELS_LS_ACC << 24); /* LS_ACC */ + rdf_payload->desc_len = cpu_to_be32(ELS_RDF_RSP_DESC_LEN); + rdf_payload->rdf_desc.desc_tag = cpu_to_be32(QLA_ELS_DTAG_LS_REQ_INFO); + rdf_payload->rdf_desc.desc_cnt = cpu_to_be32(1); + rdf_payload->rdf_desc.desc_len = cpu_to_be32(4); + rdf_payload->rdf_desc.desc_tags[0] = cpu_to_be32(RDF_OPCODE << 24); + + + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x0193, + "Sending ELS Response to incoming RDF...\n"); + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x0194, + "-------- ELS RSP -------\n"); + ql_dump_buffer(ql_dbg_scm + ql_dbg_verbose, vha, 0x0195, + rsp_els, sizeof(*rsp_els)); + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x0196, + "-------- ELS RSP PAYLOAD -------\n"); + ql_dump_buffer(ql_dbg_scm + ql_dbg_verbose, vha, 0x0197, + rdf_payload, rdf_payload_length); + + wmb(); + qla2x00_start_iocbs(vha, item->qpair->req); + + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x019a, "%s: done.\n", __func__); + + /* Schedule an RDF */ + set_bit(SCM_SEND_RDF, &vha->dpc_flags); +} + +/* + * Function Name: qla2xxx_alloc_rdf_payload + * + * Description: + * Allocate DMA memory for RDF payload. Called for base and vports + * + * PARAMETERS: + * vha: SCSI qla host + */ +void qla2xxx_scm_alloc_rdf_payload(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + + vha->rdf_payload = dma_zalloc_coherent(&ha->pdev->dev, sizeof(struct rdf_els_payload), + &vha->rdf_payload_dma, GFP_KERNEL); + if (!vha->rdf_payload) { + ql_log(ql_log_warn, vha, 0x019b, + "Failed allocate dma buffer ELS RSP payload.\n"); + } + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x019b, "%s: rdf_payload:%px \n", + __func__, vha->rdf_payload); +} + +/* + * Function Name: qla2xxx_free_rdf_payload + * + * Description: + * Free DMA memory for RDF payload. Called for base and vports + * + * PARAMETERS: + * vha: SCSI qla host + */ +void qla2xxx_scm_free_rdf_payload(struct scsi_qla_host *vha) +{ + struct qla_hw_data *ha = vha->hw; + + ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x019c, "%s: Freeing:%px\n", + __func__, vha->rdf_payload); + if (vha->rdf_payload) + dma_free_coherent(&ha->pdev->dev, sizeof(struct rdf_els_payload), + vha->rdf_payload, vha->rdf_payload_dma); +} + +/* Clear all events when clearing SCM Stats */ +void +qla_scm_clear_previous_event(struct scsi_qla_host *vha) +{ + fc_port_t *fcport = NULL; + struct qla_hw_data *ha = vha->hw; + + ha->scm.current_events = SCM_EVENT_NONE; + list_for_each_entry(fcport, &vha->vp_fcports, list) { + fcport->scm.current_events = SCM_EVENT_NONE; + } +} + +/* Move all targets to NORMAL VL. 
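+ * (i.e. each session's virtual lane goes back to VL_NORMAL with the
+ * priority range negotiated in the FLOGI ACC, see
+ * qla_scm_tgt_clear_vl_state() below).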
+ * Called on a port bounce/ISP reset + */ +void +qla_scm_host_clear_vl_state(struct scsi_qla_host *vha) +{ + fc_port_t *fcport = NULL; + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + qla_scm_tgt_clear_vl_state(fcport); + } +} + +void +qla_scm_tgt_clear_vl_state(fc_port_t *fcport) +{ + struct scsi_qla_host *vha = fcport->vha; + + fcport->vl.v_lane = VL_NORMAL; + fcport->vl.prio_hi = vha->hw->flogi_acc.rx_vl[VL_NORMAL].prio_hi; + fcport->vl.prio_lo = vha->hw->flogi_acc.rx_vl[VL_NORMAL].prio_lo; +} + +/* Clear all SCM related state and throttling for all the targets */ +void +qla_scm_clear_all_tgt_sess(struct scsi_qla_host *vha) +{ + fc_port_t *fcport = NULL; + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + qla_scm_clear_session(fcport); + } +} + +/* Clear all SCM related stats/state and throttling for the remote port */ +void +qla_scm_clear_session(fc_port_t *fcport) +{ + if (fcport->vha->hw->flags.scm_enabled) { + qla2xxx_scmr_clear_congn(&fcport->sfc); + qla2xxx_scmr_clear_throttle(&fcport->sfc); + qla_scm_tgt_clear_vl_state(fcport); + fcport->scm.current_events = SCM_EVENT_NONE; + } +} + +/* Clear all SCM related state and throttling for the host */ +void +qla_scm_clear_host(struct scsi_qla_host *vha) +{ + if (vha->hw->flags.scm_enabled) { + qla2xxx_scmr_clear_congn(&vha->hw->sfc); + qla2xxx_scmr_clear_throttle(&vha->hw->sfc); + qla_scm_clear_previous_event(vha); + qla_scm_host_clear_vl_state(vha); + } +} + +bool +qla_scm_chk_throttle_cmd_opcode(srb_t *sp) +{ + struct srb_iocb *nvme; + struct nvmefc_fcp_req *fd; + struct nvme_fc_cmd_iu *cmd; + + if (sp->type == SRB_SCSI_CMD) { + switch (sp->u.scmd.cmd->cmnd[0]) { + case WRITE_10: + case WRITE_12: + case WRITE_16: + case READ_10: + case READ_12: + case READ_16: + return true; + break; + default: + return false; + } + } else if (sp->type == SRB_NVME_CMD) { + nvme = &sp->u.iocb_cmd; + fd = nvme->u.nvme.desc; + cmd = fd->cmdaddr; + + if (cmd->sqe.rw.opcode == nvme_cmd_write || + cmd->sqe.rw.opcode == nvme_cmd_read) + return true; + } + return false; +} + +uint8_t +qla_get_throttling_state(struct qla_scmr_flow_control *sfc) +{ + uint8_t io_throttle = 0; + + if (sfc->profile.scmr_profile == 0) /* Monitor */ + io_throttle = QLA_THROTTLE_DISABLED; + else { + if (sfc->dir == QLA_DIR_NONE) /* Not throttled */ + io_throttle = QLA_THROTTLE_NONE; + else + io_throttle = QLA_THROTTLE_ACTIVE; + } + + return io_throttle; +} + +DECLARE_ENUM2STR_LOOKUP(qla_get_li_event_type, ql_fpin_li_event_types, + QL_FPIN_LI_EVT_TYPES_INIT); +static void +qla_link_integrity_tgt_stats_update(struct fpin_descriptor *fpin_desc, + fc_port_t *fcport) +{ + uint16_t event; + uint32_t event_count; + const char * li_type; + + event = be16_to_cpu(fpin_desc->link_integrity.event_type); + event_count = be32_to_cpu(fpin_desc->link_integrity.event_count); + li_type = qla_get_li_event_type(event); + ql_log(ql_log_info, fcport->vha, 0x502d, + "Link Integrity Event Type: %s(%x) for Port %8phN\n", + li_type, event, fcport->port_name); + + fcport->scm.link_integrity.event_type = event; + fcport->scm.link_integrity.event_modifier = + be16_to_cpu(fpin_desc->link_integrity.event_modifier); + fcport->scm.link_integrity.event_threshold = + be32_to_cpu(fpin_desc->link_integrity.event_threshold); + fcport->scm.link_integrity.event_count = event_count; + fcport->scm.last_event_timestamp = qla_get_real_seconds(); + + fcport->scm.current_events |= SCM_EVENT_LINK_INTEGRITY; + switch (event) { + case QL_FPIN_LI_UNKNOWN: + fcport->scm.stats.li_failure_unknown += event_count; + 
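+		/* These LI counters accumulate the event_count carried in
+		 * the FPIN descriptor rather than counting notifications,
+		 * so a single FPIN can account for several events.
+		 */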
break; + case QL_FPIN_LI_LINK_FAILURE: + fcport->scm.stats.li_link_failure_count += event_count; + break; + case QL_FPIN_LI_LOSS_OF_SYNC: + fcport->scm.stats.li_loss_of_sync_count += event_count; + break; + case QL_FPIN_LI_LOSS_OF_SIG: + fcport->scm.stats.li_loss_of_signals_count += event_count; + break; + case QL_FPIN_LI_PRIM_SEQ_ERR: + fcport->scm.stats.li_prim_seq_err_count += event_count; + break; + case QL_FPIN_LI_INVALID_TX_WD: + fcport->scm.stats.li_invalid_tx_word_count += event_count; + break; + case QL_FPIN_LI_INVALID_CRC: + fcport->scm.stats.li_invalid_crc_count += event_count; + break; + case QL_FPIN_LI_DEVICE_SPEC: + fcport->scm.stats.li_device_specific += event_count; + break; + } +} + +static void +qla_link_integrity_host_stats_update(struct fpin_descriptor *fpin_desc, + struct qla_hw_data *ha) +{ + uint16_t event; + uint32_t event_count; + const char *li_type; + + event = be16_to_cpu(fpin_desc->link_integrity.event_type); + event_count = be32_to_cpu(fpin_desc->link_integrity.event_count); + li_type = qla_get_li_event_type(event); + + ha->scm.link_integrity.event_type = event; + ha->scm.link_integrity.event_modifier = + be16_to_cpu(fpin_desc->link_integrity.event_modifier); + ha->scm.link_integrity.event_threshold = + be32_to_cpu(fpin_desc->link_integrity.event_threshold); + ha->scm.link_integrity.event_count = event_count; + ha->scm.last_event_timestamp = qla_get_real_seconds(); + + ha->scm.current_events |= SCM_EVENT_LINK_INTEGRITY; + switch (event) { + case QL_FPIN_LI_UNKNOWN: + ha->scm.stats.li_failure_unknown += event_count; + break; + case QL_FPIN_LI_LINK_FAILURE: + ha->scm.stats.li_link_failure_count += event_count; + break; + case QL_FPIN_LI_LOSS_OF_SYNC: + ha->scm.stats.li_loss_of_sync_count += event_count; + break; + case QL_FPIN_LI_LOSS_OF_SIG: + ha->scm.stats.li_loss_of_signals_count += event_count; + break; + case QL_FPIN_LI_PRIM_SEQ_ERR: + ha->scm.stats.li_prim_seq_err_count += event_count; + break; + case QL_FPIN_LI_INVALID_TX_WD: + ha->scm.stats.li_invalid_tx_word_count += event_count; + break; + case QL_FPIN_LI_INVALID_CRC: + ha->scm.stats.li_invalid_crc_count += event_count; + break; + case QL_FPIN_LI_DEVICE_SPEC: + ha->scm.stats.li_device_specific += event_count; + break; + } +} + + +static void +qla_scm_process_link_integrity_d(struct scsi_qla_host *vha, + struct fpin_descriptor *fpin_desc, + int length) +{ + uint16_t event; + const char * li_type; + fc_port_t *fcport = NULL; + struct qla_hw_data *ha = vha->hw; + fc_port_t *d_fcport = NULL, *a_fcport = NULL; + + fcport = qla2x00_find_fcport_by_wwpn(vha, + fpin_desc->link_integrity.detecting_port_name, + 0); + if (fcport) { + d_fcport = fcport; + qla_link_integrity_tgt_stats_update(fpin_desc, fcport); + } + + fcport = qla2x00_find_fcport_by_wwpn(vha, + fpin_desc->link_integrity.attached_port_name, + 0); + if (fcport) { + a_fcport = fcport; + qla_link_integrity_tgt_stats_update(fpin_desc, fcport); + } + + if (memcmp(vha->port_name, fpin_desc->link_integrity.attached_port_name, + WWN_SIZE) == 0) { + event = be16_to_cpu(fpin_desc->link_integrity.event_type); + li_type = qla_get_li_event_type(event); + ql_log(ql_log_info, vha, 0x5093, + "Link Integrity Event Type: %s(%x) for HBA WWN %8phN\n", + li_type, event, vha->port_name); + + qla_link_integrity_host_stats_update(fpin_desc, ha); + } +} + +DECLARE_ENUM2STR_LOOKUP_DELI_EVENT + +static void +qla_delivery_tgt_stats_update(struct fpin_descriptor *fpin_desc, + fc_port_t *fcport) +{ + uint32_t event; + const char * deli_type; + + event = 
be32_to_cpu(fpin_desc->delivery.delivery_reason_code); + deli_type = qla_get_dn_event_type(event); + ql_log(ql_log_info, fcport->vha, 0x5095, + "Delivery Notification Reason Code: %s(%x) for Port %8phN\n", + deli_type, event, fcport->port_name); + + fcport->scm.current_events |= SCM_EVENT_DELIVERY; + fcport->scm.delivery.delivery_reason = + be32_to_cpu(fpin_desc->delivery.delivery_reason_code); + switch (event) { + case FPIN_DELI_UNKNOWN: + fcport->scm.stats.dn_unknown++; + break; + case FPIN_DELI_TIMEOUT: + fcport->scm.stats.dn_timeout++; + break; + case FPIN_DELI_UNABLE_TO_ROUTE: + fcport->scm.stats.dn_unable_to_route++; + break; + case FPIN_DELI_DEVICE_SPEC: + fcport->scm.stats.dn_device_specific++; + break; + } + fcport->scm.last_event_timestamp = qla_get_real_seconds(); +} + +static void +qla_delivery_host_stats_update(struct fpin_descriptor *fpin_desc, + struct qla_hw_data *ha) +{ + uint32_t event; + const char *deli_type; + + event = be32_to_cpu(fpin_desc->delivery.delivery_reason_code); + deli_type = qla_get_dn_event_type(event); + + ha->scm.current_events |= SCM_EVENT_DELIVERY; + ha->scm.delivery.delivery_reason = + be32_to_cpu(fpin_desc->delivery.delivery_reason_code); + switch (event) { + case FPIN_DELI_UNKNOWN: + ha->scm.stats.dn_unknown++; + break; + case FPIN_DELI_TIMEOUT: + ha->scm.stats.dn_timeout++; + break; + case FPIN_DELI_UNABLE_TO_ROUTE: + ha->scm.stats.dn_unable_to_route++; + break; + case FPIN_DELI_DEVICE_SPEC: + ha->scm.stats.dn_device_specific++; + break; + } + ha->scm.last_event_timestamp = qla_get_real_seconds(); +} + +/* + * Process Delivery Notification Descriptor + */ +static void +qla_scm_process_delivery_notification_d(struct scsi_qla_host *vha, + struct fpin_descriptor *fpin_desc) +{ + uint32_t event; + const char * deli_type; + fc_port_t *fcport = NULL; + struct qla_hw_data *ha = vha->hw; + + fcport = qla2x00_find_fcport_by_wwpn(vha, + fpin_desc->delivery.detecting_port_name, 0); + if (fcport) + qla_delivery_tgt_stats_update(fpin_desc, fcport); + + fcport = qla2x00_find_fcport_by_wwpn(vha, + fpin_desc->delivery.attached_port_name, 0); + if (fcport) + qla_delivery_tgt_stats_update(fpin_desc, fcport); + + if (memcmp(vha->port_name, fpin_desc->delivery.attached_port_name, + WWN_SIZE) == 0) { + event = be32_to_cpu(fpin_desc->delivery.delivery_reason_code); + deli_type = qla_get_dn_event_type(event); + ql_log(ql_log_info, vha, 0x5096, + "Delivery Notification Reason Code: %s(%x) for HBA WWN %8phN\n", + deli_type, event, vha->port_name); + qla_delivery_host_stats_update(fpin_desc, ha); + } +} + +static void +qla_scm_set_target_device_state(fc_port_t *fcport, + struct fpin_descriptor *fpin_desc) +{ + struct qla_scmr_flow_control *sfc = &fcport->sfc; + u64 delta; + + sfc->throttle_period = sfc->event_period_buffer + sfc->event_period; + delta = (2 * sfc->event_period * HZ); + if (delta < HZ) + delta = HZ; + + switch (be16_to_cpu(fpin_desc->peer_congestion.event_type)) { + case FPIN_CONGN_CLEAR: + atomic_set(&sfc->scmr_congn_signal, QLA_SIG_CLEAR); + ql_log(ql_log_info, fcport->vha, 0x5097, + "Port %8phN Slow Device: Cleared\n", fcport->port_name); + break; + case FPIN_CONGN_LOST_CREDIT: + break; + case FPIN_CONGN_CREDIT_STALL: + sfc->expiration_jiffies = jiffies + delta; + atomic_set(&sfc->scmr_congn_signal, QLA_SIG_CREDIT_STALL); + ql_log(ql_log_info, fcport->vha, 0x508c, + "Port %8phN Slow Device: Set\n", fcport->port_name); + break; + case FPIN_CONGN_OVERSUBSCRIPTION: + sfc->expiration_jiffies = jiffies + delta; + atomic_set(&sfc->scmr_congn_signal, 
QLA_SIG_OVERSUBSCRIPTION); + ql_log(ql_log_info, fcport->vha, 0x508c, + "Port %8phN Slow Device: Set\n", fcport->port_name); + break; + default: + break; + + } +} + +DECLARE_ENUM2STR_LOOKUP_CONGN_EVENT + +static void +qla_peer_congestion_tgt_stats_update(struct fpin_descriptor *fpin_desc, + fc_port_t *fcport) +{ + uint16_t event; + uint32_t event_period_secs = 0; + const char * congn_type; + + event = be16_to_cpu(fpin_desc->peer_congestion.event_type); + congn_type = qla_get_congn_event_type(event); + ql_log(ql_log_info, fcport->vha, 0x5098, + "Peer Congestion Event Type: %s(%x) for Port %8phN\n", + congn_type, event, fcport->port_name); + + fcport->scm.last_event_timestamp = qla_get_real_seconds(); + fcport->scm.peer_congestion.event_type = event; + fcport->scm.peer_congestion.event_modifier = + be16_to_cpu(fpin_desc->peer_congestion.event_modifier); + fcport->scm.peer_congestion.event_period = + be32_to_cpu(fpin_desc->peer_congestion.event_period); + event_period_secs = + be32_to_cpu(fpin_desc->peer_congestion.event_period) / 1000; + if (event_period_secs) + fcport->sfc.event_period = event_period_secs; + else + fcport->sfc.event_period = 1; + + fcport->sfc.event_period_buffer = QLA_SCMR_BUFFER; + + // What is the API to get system time ? + fcport->scm.current_events |= SCM_EVENT_PEER_CONGESTION; + switch (event) { + case FPIN_CONGN_CLEAR: + fcport->scm.stats.cn_clear++; + break; + case FPIN_CONGN_LOST_CREDIT: + fcport->scm.stats.cn_lost_credit++; + break; + case FPIN_CONGN_CREDIT_STALL: + fcport->scm.stats.cn_credit_stall++; + break; + case FPIN_CONGN_OVERSUBSCRIPTION: + fcport->scm.stats.cn_oversubscription++; + break; + case FPIN_CONGN_DEVICE_SPEC: + fcport->scm.stats.cn_device_specific++; + break; + } + qla_scm_set_target_device_state(fcport, fpin_desc); +} + +/* + * Process Peer-Congestion Notification Descriptor + */ +static void +qla_scm_process_peer_congestion_notification_d(struct scsi_qla_host *vha, + struct fpin_descriptor *fpin_desc, + int length) +{ + fc_port_t *fcport = NULL; + fc_port_t *d_fcport = NULL, *a_fcport = NULL; + + fcport = qla2x00_find_fcport_by_wwpn(vha, + fpin_desc->peer_congestion.detecting_port_name, 0); + if (fcport) { + d_fcport = fcport; + qla_peer_congestion_tgt_stats_update(fpin_desc, fcport); + } + + fcport = qla2x00_find_fcport_by_wwpn(vha, + fpin_desc->peer_congestion.attached_port_name, 0); + if (fcport) { + a_fcport = fcport; + qla_peer_congestion_tgt_stats_update(fpin_desc, fcport); + } +} + +/* + * qla_scm_process_pun_notification_d() - + * Process Priority Update Notification Descriptor + */ + +static void +qla_scm_process_pun_notification_d(struct scsi_qla_host *vha, + struct fpin_descriptor *fpin_desc) +{ + uint32_t event_period_secs = 0; + uint32_t num_prio_mappings = 0; + int i, j; + uint16_t num_devices; + uint8_t pr_low; + uint8_t pr_high; + uint8_t port_name[WWN_SIZE]; + fc_port_t *fcport = NULL; + struct pun_wwn_list *plist; + + event_period_secs = + be32_to_cpu(fpin_desc->pun.event_period) / 1000; + num_prio_mappings = + be32_to_cpu(fpin_desc->pun.num_prio_map_records); + for (i = 0; i < num_prio_mappings; i++) { + num_devices = be16_to_cpu(fpin_desc->pun.prio_map_record.num_devices); + pr_low = fpin_desc->pun.prio_map_record.pr_low; + pr_high = fpin_desc->pun.prio_map_record.pr_high; + plist = &fpin_desc->pun.prio_map_record.port_list; + for (j = 0; j < num_devices; j++) { + memcpy(port_name, plist->port_name, WWN_SIZE); + fcport = qla2x00_find_fcport_by_wwpn(vha, port_name, 0); + if (fcport) { + fcport->scm.stats.pun_count++; + 
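+				/* Record the switch-assigned priority range;
+				 * it is used for virtual lane selection for
+				 * this remote port (compare
+				 * qla_scm_tgt_clear_vl_state()).
+				 */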
fcport->vl.prio_hi = pr_high;
+				fcport->vl.prio_lo = pr_low;
+				ql_log(ql_log_info, vha, 0x5099,
+				    "Prio range for %8phN, Low:0x%x, High: 0x%x\n",
+				    fcport->port_name, fcport->vl.prio_lo, fcport->vl.prio_hi);
+			} else {
+				ql_log(ql_log_warn, vha, 0x5099,
+				    "PUN for invalid port %8phN\n",
+				    port_name);
+			}
+			plist++;
+		}
+
+		/* FIXME: a priority map record is variable length (it embeds
+		 * num_devices port names), so a fixed-size stride is only
+		 * correct for single-record notifications.  Advance by bytes
+		 * rather than by sizeof() elements of *fpin_desc.
+		 */
+		fpin_desc = (struct fpin_descriptor *)((uint8_t *)fpin_desc +
+		    sizeof(struct priority_map_record));
+	}
+}
+
+/*
+ * qla_scm_process_congestion_notification_d() -
+ * Process Congestion Notification Descriptor
+ * @vha: SCSI qla host
+ * @fpin_desc: Congestion notification descriptor
+ */
+static void
+qla_scm_process_congestion_notification_d(struct scsi_qla_host *vha,
+	struct fpin_descriptor *fpin_desc)
+{
+	u64 delta;
+	uint16_t event;
+	uint32_t event_period_secs = 0;
+	const char *congn_type;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_scmr_flow_control *sfc = &ha->sfc;
+
+	event = be16_to_cpu(fpin_desc->congestion.event_type);
+	congn_type = qla_get_congn_event_type(event);
+	ql_log(ql_log_info, vha, 0x5099,
+	    "Congestion Event Type: %s(%x)\n", congn_type, event);
+
+	ha->scm.congestion.event_type = event;
+	ha->scm.congestion.event_modifier =
+	    be16_to_cpu(fpin_desc->congestion.event_modifier);
+	ha->scm.congestion.event_period =
+	    be32_to_cpu(fpin_desc->congestion.event_period);
+	event_period_secs =
+	    be32_to_cpu(fpin_desc->congestion.event_period) / 1000;
+	if (event_period_secs)
+		sfc->event_period = event_period_secs;
+	else
+		sfc->event_period = 1;
+	ha->scm.last_event_timestamp = qla_get_real_seconds();
+
+	ha->scm.congestion.severity =
+	    fpin_desc->congestion.severity;
+
+	sfc->throttle_period = sfc->event_period_buffer + sfc->event_period;
+	delta = (2 * sfc->event_period * HZ);
+	if (delta < HZ)
+		delta = HZ;
+
+	ha->scm.current_events |= SCM_EVENT_CONGESTION;
+	switch (be16_to_cpu(fpin_desc->congestion.event_type)) {
+	case FPIN_CONGN_CLEAR:
+		atomic_set(&sfc->scmr_congn_signal, QLA_SIG_CLEAR);
+		ha->scm.stats.cn_clear++;
+		break;
+	case FPIN_CONGN_CREDIT_STALL:
+		if (qla_scmr_get_sig(sfc) == QLA_SIG_NONE) {
+			atomic_set(&sfc->scmr_congn_signal,
+			    QLA_SIG_CREDIT_STALL);
+			sfc->expiration_jiffies = jiffies + delta;
+		}
+		ha->scm.stats.cn_credit_stall++;
+		break;
+	case FPIN_CONGN_OVERSUBSCRIPTION:
+		if (qla_scmr_get_sig(sfc) == QLA_SIG_NONE) {
+			atomic_set(&sfc->scmr_congn_signal,
+			    QLA_SIG_OVERSUBSCRIPTION);
+			sfc->expiration_jiffies = jiffies + delta;
+		}
+		ha->scm.stats.cn_oversubscription++;
+		break;
+	case FPIN_CONGN_LOST_CREDIT:
+		ha->scm.stats.cn_lost_credit++;
+		break;
+	default:
+		break;
+	}
+
+	if (fpin_desc->congestion.severity ==
+	    SCM_CONGESTION_SEVERITY_WARNING) {
+		sfc->level = QLA_CONG_LOW;
+		ha->scm.sev.cn_warning++;
+	} else if (fpin_desc->congestion.severity ==
+	    SCM_CONGESTION_SEVERITY_ERROR) {
+		sfc->level = QLA_CONG_HIGH;
+		ha->scm.sev.cn_alarm++;
+	}
+}
+
+void qla27xx_process_purex_fpin(struct scsi_qla_host *vha, struct purex_item *item)
+{
+	struct fpin_descriptor *fpin_desc;
+	uint16_t fpin_desc_len, total_fpin_desc_len;
+	uint32_t fpin_offset = 0;
+	void *pkt = &item->iocb;
+	uint16_t pkt_size = item->size;
+
+	ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x508d,
+	    "%s: Enter\n", __func__);
+
+	ql_dbg(ql_dbg_scm + ql_dbg_verbose, vha, 0x508e,
+	    "-------- ELS REQ -------\n");
+	ql_dump_buffer(ql_dbg_scm + ql_dbg_verbose, vha, 0x508f,
+	    pkt, pkt_size);
+
+	fpin_desc = (struct fpin_descriptor *)((uint8_t *)pkt +
+	    FPIN_ELS_DESCRIPTOR_LIST_OFFSET);
+
+	fpin_desc_len = pkt_size - FPIN_ELS_DESCRIPTOR_LIST_OFFSET;
+	if (fpin_desc_len != be32_to_cpu(fpin_desc->descriptor_length) + 8) {
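+		/* A length mismatch is only logged, not fatal; the extra
+		 * 8 bytes expected here are presumably the first
+		 * descriptor's own tag and length words, which
+		 * descriptor_length does not include.
+		 */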
ql_dbg(ql_dbg_scm, vha, 0x5099, + "desc len=%d, actual len=%d\n", + be32_to_cpu(fpin_desc->descriptor_length), fpin_desc_len); + } + + total_fpin_desc_len = 0; + while (fpin_offset <= fpin_desc_len) { + fpin_desc = (struct fpin_descriptor *)((uint8_t *)fpin_desc + + fpin_offset); + if (fpin_desc_len < (total_fpin_desc_len + FPIN_DESCRIPTOR_HEADER_SIZE)) { + ql_dbg(ql_dbg_scm, vha, 0x5099, + "fpin_desc_len =%d, total_fpin_desc_len =%d\n", + fpin_desc_len, total_fpin_desc_len); + break; + } + + switch (be32_to_cpu(fpin_desc->descriptor_tag)) { + case SCM_NOTIFICATION_TYPE_LINK_INTEGRITY: + qla_scm_process_link_integrity_d(vha, + fpin_desc, + fpin_desc_len); + break; + case SCM_NOTIFICATION_TYPE_DELIVERY: + qla_scm_process_delivery_notification_d(vha, fpin_desc); + break; + case SCM_NOTIFICATION_TYPE_PEER_CONGESTION: + qla_scm_process_peer_congestion_notification_d(vha, + fpin_desc, + fpin_desc_len); + break; + case SCM_NOTIFICATION_TYPE_CONGESTION: + qla_scm_process_congestion_notification_d(vha, + fpin_desc); + break; + case SCM_NOTIFICATION_TYPE_PUN: + qla_scm_process_pun_notification_d(vha, fpin_desc); + break; + } + fpin_offset = be32_to_cpu(fpin_desc->descriptor_length) + + FPIN_ELS_DESCRIPTOR_LIST_OFFSET; + total_fpin_desc_len += fpin_offset; + } + fc_host_fpin_rcv(vha->host, pkt_size, (char *)pkt); +} diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h index a5f3000ae53bd..2fb7ebfbbc381 100644 --- a/drivers/scsi/qla2xxx/qla_settings.h +++ b/drivers/scsi/qla2xxx/qla_settings.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. */ #define MAX_RETRIES_OF_ISP_ABORT 5 diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 0fa9c529fca11..1479a824a77ca 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -1,7 +1,8 @@ -// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. 
*/ #include "qla_def.h" @@ -25,24 +26,24 @@ qla2x00_lock_nvram_access(struct qla_hw_data *ha) struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) { - data = rd_reg_word(®->nvram); + data = RD_REG_WORD(®->nvram); while (data & NVR_BUSY) { udelay(100); - data = rd_reg_word(®->nvram); + data = RD_REG_WORD(®->nvram); } /* Lock resource */ - wrt_reg_word(®->u.isp2300.host_semaphore, 0x1); - rd_reg_word(®->u.isp2300.host_semaphore); + WRT_REG_WORD(®->u.isp2300.host_semaphore, 0x1); + RD_REG_WORD(®->u.isp2300.host_semaphore); udelay(5); - data = rd_reg_word(®->u.isp2300.host_semaphore); + data = RD_REG_WORD(®->u.isp2300.host_semaphore); while ((data & BIT_0) == 0) { /* Lock failed */ udelay(100); - wrt_reg_word(®->u.isp2300.host_semaphore, 0x1); - rd_reg_word(®->u.isp2300.host_semaphore); + WRT_REG_WORD(®->u.isp2300.host_semaphore, 0x1); + RD_REG_WORD(®->u.isp2300.host_semaphore); udelay(5); - data = rd_reg_word(®->u.isp2300.host_semaphore); + data = RD_REG_WORD(®->u.isp2300.host_semaphore); } } } @@ -57,8 +58,8 @@ qla2x00_unlock_nvram_access(struct qla_hw_data *ha) struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha)) { - wrt_reg_word(®->u.isp2300.host_semaphore, 0); - rd_reg_word(®->u.isp2300.host_semaphore); + WRT_REG_WORD(®->u.isp2300.host_semaphore, 0); + RD_REG_WORD(®->u.isp2300.host_semaphore); } } @@ -72,15 +73,15 @@ qla2x00_nv_write(struct qla_hw_data *ha, uint16_t data) { struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; - wrt_reg_word(®->nvram, data | NVR_SELECT | NVR_WRT_ENABLE); - rd_reg_word(®->nvram); /* PCI Posting. */ + WRT_REG_WORD(®->nvram, data | NVR_SELECT | NVR_WRT_ENABLE); + RD_REG_WORD(®->nvram); /* PCI Posting. */ NVRAM_DELAY(); - wrt_reg_word(®->nvram, data | NVR_SELECT | NVR_CLOCK | + WRT_REG_WORD(®->nvram, data | NVR_SELECT | NVR_CLOCK | NVR_WRT_ENABLE); - rd_reg_word(®->nvram); /* PCI Posting. */ + RD_REG_WORD(®->nvram); /* PCI Posting. */ NVRAM_DELAY(); - wrt_reg_word(®->nvram, data | NVR_SELECT | NVR_WRT_ENABLE); - rd_reg_word(®->nvram); /* PCI Posting. */ + WRT_REG_WORD(®->nvram, data | NVR_SELECT | NVR_WRT_ENABLE); + RD_REG_WORD(®->nvram); /* PCI Posting. */ NVRAM_DELAY(); } @@ -119,21 +120,21 @@ qla2x00_nvram_request(struct qla_hw_data *ha, uint32_t nv_cmd) /* Read data from NVRAM. */ for (cnt = 0; cnt < 16; cnt++) { - wrt_reg_word(®->nvram, NVR_SELECT | NVR_CLOCK); - rd_reg_word(®->nvram); /* PCI Posting. */ + WRT_REG_WORD(®->nvram, NVR_SELECT | NVR_CLOCK); + RD_REG_WORD(®->nvram); /* PCI Posting. */ NVRAM_DELAY(); data <<= 1; - reg_data = rd_reg_word(®->nvram); + reg_data = RD_REG_WORD(®->nvram); if (reg_data & NVR_DATA_IN) data |= BIT_0; - wrt_reg_word(®->nvram, NVR_SELECT); - rd_reg_word(®->nvram); /* PCI Posting. */ + WRT_REG_WORD(®->nvram, NVR_SELECT); + RD_REG_WORD(®->nvram); /* PCI Posting. */ NVRAM_DELAY(); } /* Deselect chip. */ - wrt_reg_word(®->nvram, NVR_DESELECT); - rd_reg_word(®->nvram); /* PCI Posting. */ + WRT_REG_WORD(®->nvram, NVR_DESELECT); + RD_REG_WORD(®->nvram); /* PCI Posting. */ NVRAM_DELAY(); return data; @@ -170,8 +171,8 @@ qla2x00_nv_deselect(struct qla_hw_data *ha) { struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; - wrt_reg_word(®->nvram, NVR_DESELECT); - rd_reg_word(®->nvram); /* PCI Posting. */ + WRT_REG_WORD(®->nvram, NVR_DESELECT); + RD_REG_WORD(®->nvram); /* PCI Posting. 
*/ NVRAM_DELAY(); } @@ -182,7 +183,7 @@ qla2x00_nv_deselect(struct qla_hw_data *ha) * @data: word to program */ static void -qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, __le16 data) +qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data) { int count; uint16_t word; @@ -201,7 +202,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, __le16 data) /* Write data */ nv_cmd = (addr << 16) | NV_WRITE_OP; - nv_cmd |= (__force u16)data; + nv_cmd |= data; nv_cmd <<= 5; for (count = 0; count < 27; count++) { if (nv_cmd & BIT_31) @@ -215,8 +216,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, __le16 data) qla2x00_nv_deselect(ha); /* Wait for NVRAM to become ready */ - wrt_reg_word(®->nvram, NVR_SELECT); - rd_reg_word(®->nvram); /* PCI Posting. */ + WRT_REG_WORD(®->nvram, NVR_SELECT); + RD_REG_WORD(®->nvram); /* PCI Posting. */ wait_cnt = NVR_WAIT_CNT; do { if (!--wait_cnt) { @@ -225,7 +226,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, __le16 data) break; } NVRAM_DELAY(); - word = rd_reg_word(®->nvram); + word = RD_REG_WORD(®->nvram); } while ((word & NVR_DATA_IN) == 0); qla2x00_nv_deselect(ha); @@ -240,7 +241,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, __le16 data) static int qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr, - __le16 data, uint32_t tmo) + uint16_t data, uint32_t tmo) { int ret, count; uint16_t word; @@ -260,7 +261,7 @@ qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr, /* Write data */ nv_cmd = (addr << 16) | NV_WRITE_OP; - nv_cmd |= (__force u16)data; + nv_cmd |= data; nv_cmd <<= 5; for (count = 0; count < 27; count++) { if (nv_cmd & BIT_31) @@ -274,11 +275,11 @@ qla2x00_write_nvram_word_tmo(struct qla_hw_data *ha, uint32_t addr, qla2x00_nv_deselect(ha); /* Wait for NVRAM to become ready */ - wrt_reg_word(®->nvram, NVR_SELECT); - rd_reg_word(®->nvram); /* PCI Posting. */ + WRT_REG_WORD(®->nvram, NVR_SELECT); + RD_REG_WORD(®->nvram); /* PCI Posting. */ do { NVRAM_DELAY(); - word = rd_reg_word(®->nvram); + word = RD_REG_WORD(®->nvram); if (!--tmo) { ret = QLA_FUNCTION_FAILED; break; @@ -307,7 +308,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha) int ret, stat; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; uint32_t word, wait_cnt; - __le16 wprot, wprot_old; + uint16_t wprot, wprot_old; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); /* Clear NVRAM write protection. */ @@ -317,7 +318,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha) stat = qla2x00_write_nvram_word_tmo(ha, ha->nvram_base, cpu_to_le16(0x1234), 100000); wprot = cpu_to_le16(qla2x00_get_nvram_word(ha, ha->nvram_base)); - if (stat != QLA_SUCCESS || wprot != cpu_to_le16(0x1234)) { + if (stat != QLA_SUCCESS || wprot != 0x1234) { /* Write enable. */ qla2x00_nv_write(ha, NVR_DATA_OUT); qla2x00_nv_write(ha, 0); @@ -346,8 +347,8 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha) qla2x00_nv_deselect(ha); /* Wait for NVRAM to become ready. */ - wrt_reg_word(®->nvram, NVR_SELECT); - rd_reg_word(®->nvram); /* PCI Posting. */ + WRT_REG_WORD(®->nvram, NVR_SELECT); + RD_REG_WORD(®->nvram); /* PCI Posting. 
*/ wait_cnt = NVR_WAIT_CNT; do { if (!--wait_cnt) { @@ -356,7 +357,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha) break; } NVRAM_DELAY(); - word = rd_reg_word(®->nvram); + word = RD_REG_WORD(®->nvram); } while ((word & NVR_DATA_IN) == 0); if (wait_cnt) @@ -406,8 +407,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat) qla2x00_nv_deselect(ha); /* Wait for NVRAM to become ready. */ - wrt_reg_word(®->nvram, NVR_SELECT); - rd_reg_word(®->nvram); /* PCI Posting. */ + WRT_REG_WORD(®->nvram, NVR_SELECT); + RD_REG_WORD(®->nvram); /* PCI Posting. */ wait_cnt = NVR_WAIT_CNT; do { if (!--wait_cnt) { @@ -416,7 +417,7 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat) break; } NVRAM_DELAY(); - word = rd_reg_word(®->nvram); + word = RD_REG_WORD(®->nvram); } while ((word & NVR_DATA_IN) == 0); } @@ -455,11 +456,11 @@ qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t *data) struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; ulong cnt = 30000; - wrt_reg_dword(®->flash_addr, addr & ~FARX_DATA_FLAG); + WRT_REG_DWORD(®->flash_addr, addr & ~FARX_DATA_FLAG); while (cnt--) { - if (rd_reg_dword(®->flash_addr) & FARX_DATA_FLAG) { - *data = rd_reg_dword(®->flash_data); + if (RD_REG_DWORD(®->flash_addr) & FARX_DATA_FLAG) { + *data = RD_REG_DWORD(®->flash_data); return QLA_SUCCESS; } udelay(10); @@ -498,11 +499,11 @@ qla24xx_write_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t data) struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; ulong cnt = 500000; - wrt_reg_dword(®->flash_data, data); - wrt_reg_dword(®->flash_addr, addr | FARX_DATA_FLAG); + WRT_REG_DWORD(®->flash_data, data); + WRT_REG_DWORD(®->flash_addr, addr | FARX_DATA_FLAG); while (cnt--) { - if (!(rd_reg_dword(®->flash_addr) & FARX_DATA_FLAG)) + if (!(RD_REG_DWORD(®->flash_addr) & FARX_DATA_FLAG)) return QLA_SUCCESS; udelay(10); cond_resched(); @@ -548,12 +549,11 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) { const char *loc, *locations[] = { "DEF", "PCI" }; uint32_t pcihdr, pcids; - uint16_t cnt, chksum; - __le16 *wptr; + uint16_t cnt, chksum, *wptr; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct qla_flt_location *fltl = (void *)req->ring; - uint32_t *dcode = (uint32_t *)req->ring; + uint32_t *dcode = (void *)req->ring; uint8_t *buf = (void *)req->ring, *bcode, last_image; /* @@ -610,7 +610,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start) if (memcmp(fltl->sig, "QFLT", 4)) goto end; - wptr = (__force __le16 *)req->ring; + wptr = (void *)req->ring; cnt = sizeof(*fltl) / sizeof(*wptr); for (chksum = 0; cnt--; wptr++) chksum += le16_to_cpu(*wptr); @@ -669,10 +669,9 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) struct qla_hw_data *ha = vha->hw; uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 
1 : 0; - struct qla_flt_header *flt = ha->flt; - struct qla_flt_region *region = &flt->region[0]; - __le16 *wptr; - uint16_t cnt, chksum; + struct qla_flt_header *flt = (void *)ha->flt; + struct qla_flt_region *region = (void *)&flt[1]; + uint16_t *wptr, cnt, chksum; uint32_t start; /* Assign FCP prio region since older adapters may not have FLT, or @@ -682,8 +681,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) fcp_prio_cfg0[def] : fcp_prio_cfg1[def]; ha->flt_region_flt = flt_addr; - wptr = (__force __le16 *)ha->flt; - ha->isp_ops->read_optrom(vha, flt, flt_addr << 2, + wptr = (uint16_t *)ha->flt; + ha->isp_ops->read_optrom(vha, (void *)flt, flt_addr << 2, (sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE)); if (le16_to_cpu(*wptr) == 0xffff) @@ -950,7 +949,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha) struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; uint16_t cnt, chksum; - __le16 *wptr = (__force __le16 *)req->ring; + uint16_t *wptr = (void *)req->ring; struct qla_fdt_layout *fdt = (struct qla_fdt_layout *)req->ring; uint8_t man_id, flash_id; uint16_t mid = 0, fid = 0; @@ -1043,14 +1042,14 @@ static void qla2xxx_get_idc_param(scsi_qla_host_t *vha) { #define QLA82XX_IDC_PARAM_ADDR 0x003e885c - __le32 *wptr; + uint32_t *wptr; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; if (!(IS_P3P_TYPE(ha))) return; - wptr = (__force __le32 *)req->ring; + wptr = (uint32_t *)req->ring; ha->isp_ops->read_optrom(vha, req->ring, QLA82XX_IDC_PARAM_ADDR, 8); if (*wptr == cpu_to_le32(0xffffffff)) { @@ -1096,7 +1095,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) { #define NPIV_CONFIG_SIZE (16*1024) void *data; - __le16 *wptr; + uint16_t *wptr; uint16_t cnt, chksum; int i; struct qla_npiv_header hdr; @@ -1198,9 +1197,9 @@ qla24xx_unprotect_flash(scsi_qla_host_t *vha) return qla81xx_fac_do_write_enable(vha, 1); /* Enable flash write. */ - wrt_reg_dword(®->ctrl_status, - rd_reg_dword(®->ctrl_status) | CSRX_FLASH_ENABLE); - rd_reg_dword(®->ctrl_status); /* PCI Posting. */ + WRT_REG_DWORD(®->ctrl_status, + RD_REG_DWORD(®->ctrl_status) | CSRX_FLASH_ENABLE); + RD_REG_DWORD(®->ctrl_status); /* PCI Posting. */ if (!ha->fdt_wrt_disable) goto done; @@ -1241,8 +1240,8 @@ qla24xx_protect_flash(scsi_qla_host_t *vha) skip_wrt_protect: /* Disable flash write. 
*/ - wrt_reg_dword(®->ctrl_status, - rd_reg_dword(®->ctrl_status) & ~CSRX_FLASH_ENABLE); + WRT_REG_DWORD(®->ctrl_status, + RD_REG_DWORD(®->ctrl_status) & ~CSRX_FLASH_ENABLE); return QLA_SUCCESS; } @@ -1266,7 +1265,7 @@ qla24xx_erase_sector(scsi_qla_host_t *vha, uint32_t fdata) } static int -qla24xx_write_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr, +qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, uint32_t dwords) { int ret; @@ -1338,7 +1337,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr, } ql_log(ql_log_warn, vha, 0x7097, - "Failed burst-write at %x (%p/%#llx)....\n", + "Failed burst-write at %x (%px/%#llx)....\n", flash_data_addr(ha, faddr), optrom, (u64)optrom_dma); @@ -1353,7 +1352,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, __le32 *dwptr, uint32_t faddr, /* Slow write */ ret = qla24xx_write_flash_dword(ha, - flash_data_addr(ha, faddr), le32_to_cpu(*dwptr)); + flash_data_addr(ha, faddr), cpu_to_le32(*dwptr)); if (ret) { ql_dbg(ql_dbg_user, vha, 0x7006, "Failed slow write %x (%x)\n", faddr, *dwptr); @@ -1380,11 +1379,11 @@ qla2x00_read_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, uint32_t bytes) { uint32_t i; - __le16 *wptr; + uint16_t *wptr; struct qla_hw_data *ha = vha->hw; /* Word reads to NVRAM via registers. */ - wptr = buf; + wptr = (uint16_t *)buf; qla2x00_lock_nvram_access(ha); for (i = 0; i < bytes >> 1; i++, naddr++) wptr[i] = cpu_to_le16(qla2x00_get_nvram_word(ha, @@ -1457,7 +1456,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, { struct qla_hw_data *ha = vha->hw; struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; - __le32 *dwptr = buf; + uint32_t *dwptr = buf; uint32_t i; int ret; @@ -1467,9 +1466,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, return ret; /* Enable flash write. */ - wrt_reg_dword(®->ctrl_status, - rd_reg_dword(®->ctrl_status) | CSRX_FLASH_ENABLE); - rd_reg_dword(®->ctrl_status); /* PCI Posting. */ + WRT_REG_DWORD(®->ctrl_status, + RD_REG_DWORD(®->ctrl_status) | CSRX_FLASH_ENABLE); + RD_REG_DWORD(®->ctrl_status); /* PCI Posting. */ /* Disable NVRAM write-protection. */ qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0); @@ -1479,7 +1478,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, naddr = nvram_data_addr(ha, naddr); bytes >>= 2; for (i = 0; i < bytes; i++, naddr++, dwptr++) { - if (qla24xx_write_flash_dword(ha, naddr, le32_to_cpu(*dwptr))) { + if (qla24xx_write_flash_dword(ha, naddr, cpu_to_le32(*dwptr))) { ql_dbg(ql_dbg_user, vha, 0x709a, "Unable to program nvram address=%x data=%x.\n", naddr, *dwptr); @@ -1491,9 +1490,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, void *buf, uint32_t naddr, qla24xx_write_flash_dword(ha, nvram_conf_addr(ha, 0x101), 0x8c); /* Disable flash write. */ - wrt_reg_dword(®->ctrl_status, - rd_reg_dword(®->ctrl_status) & ~CSRX_FLASH_ENABLE); - rd_reg_dword(®->ctrl_status); /* PCI Posting. */ + WRT_REG_DWORD(®->ctrl_status, + RD_REG_DWORD(®->ctrl_status) & ~CSRX_FLASH_ENABLE); + RD_REG_DWORD(®->ctrl_status); /* PCI Posting. 
*/ return ret; } @@ -1589,8 +1588,8 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha) gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe)); gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod)); } else { - gpio_enable = rd_reg_word(®->gpioe); - gpio_data = rd_reg_word(®->gpiod); + gpio_enable = RD_REG_WORD(®->gpioe); + gpio_data = RD_REG_WORD(®->gpiod); } /* Set the modified gpio_enable values */ @@ -1599,8 +1598,8 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha) if (ha->pio_address) { WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable); } else { - wrt_reg_word(®->gpioe, gpio_enable); - rd_reg_word(®->gpioe); + WRT_REG_WORD(®->gpioe, gpio_enable); + RD_REG_WORD(®->gpioe); } qla2x00_flip_colors(ha, &led_color); @@ -1615,8 +1614,8 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha) if (ha->pio_address) { WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data); } else { - wrt_reg_word(®->gpiod, gpio_data); - rd_reg_word(®->gpiod); + WRT_REG_WORD(®->gpiod, gpio_data); + RD_REG_WORD(®->gpiod); } spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -1646,8 +1645,8 @@ qla2x00_beacon_on(struct scsi_qla_host *vha) gpio_enable = RD_REG_WORD_PIO(PIO_REG(ha, gpioe)); gpio_data = RD_REG_WORD_PIO(PIO_REG(ha, gpiod)); } else { - gpio_enable = rd_reg_word(®->gpioe); - gpio_data = rd_reg_word(®->gpiod); + gpio_enable = RD_REG_WORD(®->gpioe); + gpio_data = RD_REG_WORD(®->gpiod); } gpio_enable |= GPIO_LED_MASK; @@ -1655,8 +1654,8 @@ qla2x00_beacon_on(struct scsi_qla_host *vha) if (ha->pio_address) { WRT_REG_WORD_PIO(PIO_REG(ha, gpioe), gpio_enable); } else { - wrt_reg_word(®->gpioe, gpio_enable); - rd_reg_word(®->gpioe); + WRT_REG_WORD(®->gpioe, gpio_enable); + RD_REG_WORD(®->gpioe); } /* Clear out previously set LED colour. */ @@ -1664,8 +1663,8 @@ qla2x00_beacon_on(struct scsi_qla_host *vha) if (ha->pio_address) { WRT_REG_WORD_PIO(PIO_REG(ha, gpiod), gpio_data); } else { - wrt_reg_word(®->gpiod, gpio_data); - rd_reg_word(®->gpiod); + WRT_REG_WORD(®->gpiod, gpio_data); + RD_REG_WORD(®->gpiod); } spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -1732,13 +1731,13 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha) /* Save the Original GPIOD. */ spin_lock_irqsave(&ha->hardware_lock, flags); - gpio_data = rd_reg_dword(®->gpiod); + gpio_data = RD_REG_DWORD(®->gpiod); /* Enable the gpio_data reg for update. */ gpio_data |= GPDX_LED_UPDATE_MASK; - wrt_reg_dword(®->gpiod, gpio_data); - gpio_data = rd_reg_dword(®->gpiod); + WRT_REG_DWORD(®->gpiod, gpio_data); + gpio_data = RD_REG_DWORD(®->gpiod); /* Set the color bits. */ qla24xx_flip_colors(ha, &led_color); @@ -1750,8 +1749,8 @@ qla24xx_beacon_blink(struct scsi_qla_host *vha) gpio_data |= led_color; /* Set the modified gpio_data values. */ - wrt_reg_dword(®->gpiod, gpio_data); - gpio_data = rd_reg_dword(®->gpiod); + WRT_REG_DWORD(®->gpiod, gpio_data); + gpio_data = RD_REG_DWORD(®->gpiod); spin_unlock_irqrestore(&ha->hardware_lock, flags); } @@ -1882,12 +1881,12 @@ qla24xx_beacon_on(struct scsi_qla_host *vha) goto skip_gpio; spin_lock_irqsave(&ha->hardware_lock, flags); - gpio_data = rd_reg_dword(®->gpiod); + gpio_data = RD_REG_DWORD(®->gpiod); /* Enable the gpio_data reg for update. */ gpio_data |= GPDX_LED_UPDATE_MASK; - wrt_reg_dword(®->gpiod, gpio_data); - rd_reg_dword(®->gpiod); + WRT_REG_DWORD(®->gpiod, gpio_data); + RD_REG_DWORD(®->gpiod); spin_unlock_irqrestore(&ha->hardware_lock, flags); } @@ -1930,12 +1929,12 @@ qla24xx_beacon_off(struct scsi_qla_host *vha) /* Give control back to firmware. 
*/ spin_lock_irqsave(&ha->hardware_lock, flags); - gpio_data = rd_reg_dword(®->gpiod); + gpio_data = RD_REG_DWORD(®->gpiod); /* Disable the gpio_data reg for update. */ gpio_data &= ~GPDX_LED_UPDATE_MASK; - wrt_reg_dword(®->gpiod, gpio_data); - rd_reg_dword(®->gpiod); + WRT_REG_DWORD(®->gpiod, gpio_data); + RD_REG_DWORD(®->gpiod); spin_unlock_irqrestore(&ha->hardware_lock, flags); set_fw_options: @@ -1971,10 +1970,10 @@ qla2x00_flash_enable(struct qla_hw_data *ha) uint16_t data; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; - data = rd_reg_word(®->ctrl_status); + data = RD_REG_WORD(®->ctrl_status); data |= CSR_FLASH_ENABLE; - wrt_reg_word(®->ctrl_status, data); - rd_reg_word(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->ctrl_status, data); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ } /** @@ -1987,10 +1986,10 @@ qla2x00_flash_disable(struct qla_hw_data *ha) uint16_t data; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; - data = rd_reg_word(®->ctrl_status); + data = RD_REG_WORD(®->ctrl_status); data &= ~(CSR_FLASH_ENABLE); - wrt_reg_word(®->ctrl_status, data); - rd_reg_word(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->ctrl_status, data); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ } /** @@ -2009,7 +2008,7 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr) uint16_t bank_select; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; - bank_select = rd_reg_word(®->ctrl_status); + bank_select = RD_REG_WORD(®->ctrl_status); if (IS_QLA2322(ha) || IS_QLA6322(ha)) { /* Specify 64K address range: */ @@ -2017,11 +2016,11 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr) bank_select &= ~0xf8; bank_select |= addr >> 12 & 0xf0; bank_select |= CSR_FLASH_64K_BANK; - wrt_reg_word(®->ctrl_status, bank_select); - rd_reg_word(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->ctrl_status, bank_select); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ - wrt_reg_word(®->flash_address, (uint16_t)addr); - data = rd_reg_word(®->flash_data); + WRT_REG_WORD(®->flash_address, (uint16_t)addr); + data = RD_REG_WORD(®->flash_data); return (uint8_t)data; } @@ -2029,13 +2028,13 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr) /* Setup bit 16 of flash address. */ if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) { bank_select |= CSR_FLASH_64K_BANK; - wrt_reg_word(®->ctrl_status, bank_select); - rd_reg_word(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->ctrl_status, bank_select); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ } else if (((addr & BIT_16) == 0) && (bank_select & CSR_FLASH_64K_BANK)) { bank_select &= ~(CSR_FLASH_64K_BANK); - wrt_reg_word(®->ctrl_status, bank_select); - rd_reg_word(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->ctrl_status, bank_select); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ } /* Always perform IO mapped accesses to the FLASH registers. 
*/ @@ -2050,7 +2049,7 @@ qla2x00_read_flash_byte(struct qla_hw_data *ha, uint32_t addr) data2 = RD_REG_WORD_PIO(PIO_REG(ha, flash_data)); } while (data != data2); } else { - wrt_reg_word(®->flash_address, (uint16_t)addr); + WRT_REG_WORD(®->flash_address, (uint16_t)addr); data = qla2x00_debounce_register(®->flash_data); } @@ -2069,20 +2068,20 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data) uint16_t bank_select; struct device_reg_2xxx __iomem *reg = &ha->iobase->isp; - bank_select = rd_reg_word(®->ctrl_status); + bank_select = RD_REG_WORD(®->ctrl_status); if (IS_QLA2322(ha) || IS_QLA6322(ha)) { /* Specify 64K address range: */ /* clear out Module Select and Flash Address bits [19:16]. */ bank_select &= ~0xf8; bank_select |= addr >> 12 & 0xf0; bank_select |= CSR_FLASH_64K_BANK; - wrt_reg_word(®->ctrl_status, bank_select); - rd_reg_word(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->ctrl_status, bank_select); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ - wrt_reg_word(®->flash_address, (uint16_t)addr); - rd_reg_word(®->ctrl_status); /* PCI Posting. */ - wrt_reg_word(®->flash_data, (uint16_t)data); - rd_reg_word(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->flash_address, (uint16_t)addr); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->flash_data, (uint16_t)data); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ return; } @@ -2090,13 +2089,13 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data) /* Setup bit 16 of flash address. */ if ((addr & BIT_16) && ((bank_select & CSR_FLASH_64K_BANK) == 0)) { bank_select |= CSR_FLASH_64K_BANK; - wrt_reg_word(®->ctrl_status, bank_select); - rd_reg_word(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->ctrl_status, bank_select); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ } else if (((addr & BIT_16) == 0) && (bank_select & CSR_FLASH_64K_BANK)) { bank_select &= ~(CSR_FLASH_64K_BANK); - wrt_reg_word(®->ctrl_status, bank_select); - rd_reg_word(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->ctrl_status, bank_select); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ } /* Always perform IO mapped accesses to the FLASH registers. */ @@ -2104,10 +2103,10 @@ qla2x00_write_flash_byte(struct qla_hw_data *ha, uint32_t addr, uint8_t data) WRT_REG_WORD_PIO(PIO_REG(ha, flash_address), (uint16_t)addr); WRT_REG_WORD_PIO(PIO_REG(ha, flash_data), (uint16_t)data); } else { - wrt_reg_word(®->flash_address, (uint16_t)addr); - rd_reg_word(®->ctrl_status); /* PCI Posting. */ - wrt_reg_word(®->flash_data, (uint16_t)data); - rd_reg_word(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->flash_address, (uint16_t)addr); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ + WRT_REG_WORD(®->flash_data, (uint16_t)data); + RD_REG_WORD(®->ctrl_status); /* PCI Posting. */ } } @@ -2290,12 +2289,12 @@ qla2x00_read_flash_data(struct qla_hw_data *ha, uint8_t *tmp_buf, midpoint = length / 2; - wrt_reg_word(®->nvram, 0); - rd_reg_word(®->nvram); + WRT_REG_WORD(®->nvram, 0); + RD_REG_WORD(®->nvram); for (ilength = 0; ilength < length; saddr++, ilength++, tmp_buf++) { if (ilength == midpoint) { - wrt_reg_word(®->nvram, NVR_SELECT); - rd_reg_word(®->nvram); + WRT_REG_WORD(®->nvram, NVR_SELECT); + RD_REG_WORD(®->nvram); } data = qla2x00_read_flash_byte(ha, saddr); if (saddr % 100) @@ -2320,11 +2319,11 @@ qla2x00_suspend_hba(struct scsi_qla_host *vha) /* Pause RISC. 
*/ spin_lock_irqsave(&ha->hardware_lock, flags); - wrt_reg_word(®->hccr, HCCR_PAUSE_RISC); - rd_reg_word(®->hccr); + WRT_REG_WORD(®->hccr, HCCR_PAUSE_RISC); + RD_REG_WORD(®->hccr); if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) { for (cnt = 0; cnt < 30000; cnt++) { - if ((rd_reg_word(®->hccr) & HCCR_RISC_PAUSE) != 0) + if ((RD_REG_WORD(®->hccr) & HCCR_RISC_PAUSE) != 0) break; udelay(100); } @@ -2363,12 +2362,12 @@ qla2x00_read_optrom_data(struct scsi_qla_host *vha, void *buf, midpoint = ha->optrom_size / 2; qla2x00_flash_enable(ha); - wrt_reg_word(®->nvram, 0); - rd_reg_word(®->nvram); /* PCI Posting. */ + WRT_REG_WORD(®->nvram, 0); + RD_REG_WORD(®->nvram); /* PCI Posting. */ for (addr = offset, data = buf; addr < length; addr++, data++) { if (addr == midpoint) { - wrt_reg_word(®->nvram, NVR_SELECT); - rd_reg_word(®->nvram); /* PCI Posting. */ + WRT_REG_WORD(®->nvram, NVR_SELECT); + RD_REG_WORD(®->nvram); /* PCI Posting. */ } *data = qla2x00_read_flash_byte(ha, addr); @@ -2400,7 +2399,7 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf, sec_number = 0; /* Reset ISP chip. */ - wrt_reg_word(®->ctrl_status, CSR_ISP_SOFT_RESET); + WRT_REG_WORD(®->ctrl_status, CSR_ISP_SOFT_RESET); pci_read_config_word(ha->pdev, PCI_COMMAND, &wd); /* Go with write. */ @@ -2457,7 +2456,6 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf, break; } fallthrough; - case 0x1f: /* Atmel flash. */ /* 512k sector size. */ if (flash_id == 0x13) { @@ -2466,7 +2464,6 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf, break; } fallthrough; - case 0x01: /* AMD flash. */ if (flash_id == 0x38 || flash_id == 0x40 || flash_id == 0x4f) { @@ -2549,8 +2546,8 @@ qla2x00_write_optrom_data(struct scsi_qla_host *vha, void *buf, } } } else if (addr == ha->optrom_size / 2) { - wrt_reg_word(®->nvram, NVR_SELECT); - rd_reg_word(®->nvram); + WRT_REG_WORD(®->nvram, NVR_SELECT); + RD_REG_WORD(®->nvram); } if (flash_id == 0xda && man_id == 0xc1) { @@ -2611,7 +2608,7 @@ qla24xx_read_optrom_data(struct scsi_qla_host *vha, void *buf, set_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); /* Go with read. */ - qla24xx_read_flash_data(vha, buf, offset >> 2, length >> 2); + qla24xx_read_flash_data(vha, (void *)buf, offset >> 2, length >> 2); /* Resume HBA. 
*/ clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags); @@ -2634,14 +2631,14 @@ qla28xx_extract_sfub_and_verify(struct scsi_qla_host *vha, uint32_t *buf, sizeof(struct secure_flash_update_block)); for (i = 0; i < (sizeof(struct secure_flash_update_block) >> 2); i++) - check_sum += p[i]; + check_sum += le32_to_cpu(p[i]); check_sum = (~check_sum) + 1; - if (check_sum != p[i]) { + if (check_sum != le32_to_cpu(p[i])) { ql_log(ql_log_warn, vha, 0x7097, "SFUB checksum failed, 0x%x, 0x%x\n", - check_sum, p[i]); + check_sum, le32_to_cpu(p[i])); return QLA_COMMAND_ERROR; } @@ -2653,15 +2650,18 @@ qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start, struct qla_flt_region *region) { struct qla_hw_data *ha = vha->hw; - struct qla_flt_header *flt = ha->flt; - struct qla_flt_region *flt_reg = &flt->region[0]; + struct qla_flt_header *flt; + struct qla_flt_region *flt_reg; uint16_t cnt; int rval = QLA_FUNCTION_FAILED; if (!ha->flt) return QLA_FUNCTION_FAILED; + flt = (struct qla_flt_header *)ha->flt; + flt_reg = (struct qla_flt_region *)&flt[1]; cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region); + for (; cnt; cnt--, flt_reg++) { if (le32_to_cpu(flt_reg->start) == start) { memcpy((uint8_t *)region, flt_reg, @@ -2692,7 +2692,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, struct qla_flt_region region; bool reset_to_rom = false; uint32_t risc_size, risc_attr = 0; - __be32 *fw_array = NULL; + uint32_t *fw_array = NULL; /* Retrieve region info - must be a start address passed in */ rval = qla28xx_get_flash_region(vha, offset, ®ion); @@ -2721,14 +2721,14 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, if (ha->flags.secure_adapter && region.attribute) { ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, - "Region %x is secure\n", region.code); + "Region %x is secure\n", le16_to_cpu(region.code)); switch (le16_to_cpu(region.code)) { case FLT_REG_FW: case FLT_REG_FW_SEC_27XX: case FLT_REG_MPI_PRI_28XX: case FLT_REG_MPI_SEC_28XX: - fw_array = (__force __be32 *)dwptr; + fw_array = dwptr; /* 1st fw array */ risc_size = be32_to_cpu(fw_array[3]); @@ -2762,7 +2762,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, case FLT_REG_PEP_PRI_28XX: case FLT_REG_PEP_SEC_28XX: - fw_array = (__force __be32 *)dwptr; + fw_array = dwptr; /* 1st fw array */ risc_size = be32_to_cpu(fw_array[3]); @@ -2775,7 +2775,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, default: ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, "Secure region %x not supported\n", - region.code); + le16_to_cpu(region.code)); rval = QLA_COMMAND_ERROR; goto done; } @@ -2926,7 +2926,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, flash_data_addr(ha, faddr), dburst); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7097, - "Failed burst write at %x (%p/%#llx)...\n", + "Failed burst write at %x (%px/%#llx)...\n", flash_data_addr(ha, faddr), optrom, (u64)optrom_dma); break; @@ -2946,7 +2946,6 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, qla81xx_fac_semaphore_access(vha, FAC_SEMAPHORE_UNLOCK); ql_log(ql_log_warn, vha, 0x7099, "Failed protect flash\n"); - rval = QLA_COMMAND_ERROR; } if (reset_to_rom == true) { @@ -2958,7 +2957,6 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xffff, "Adapter did not come out of reset\n"); - rval = 
QLA_COMMAND_ERROR; } }
@@ -2983,11 +2981,11 @@ qla24xx_write_optrom_data(struct scsi_qla_host *vha, void *buf,
 /* Go with write. */
 if (IS_QLA28XX(ha))
- rval = qla28xx_write_flash_data(vha, buf, offset >> 2,
- length >> 2);
+ rval = qla28xx_write_flash_data(vha, (uint32_t *)buf,
+ offset >> 2, length >> 2);
 else
- rval = qla24xx_write_flash_data(vha, buf, offset >> 2,
- length >> 2);
+ rval = qla24xx_write_flash_data(vha, (uint32_t *)buf,
+ offset >> 2, length >> 2);
 clear_bit(MBX_UPDATE_FLASH_ACTIVE, &ha->mbx_cmd_flags);
 scsi_unblock_requests(vha->host);
@@ -3515,8 +3513,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
 ql_dump_buffer(ql_dbg_init, vha, 0x005f, dcode, 32);
 } else {
 for (i = 0; i < 4; i++)
- ha->fw_revision[i] =
- be32_to_cpu((__force __be32)dcode[4+i]);
+ ha->fw_revision[i] = be32_to_cpu(dcode[4+i]);
 ql_dbg(ql_dbg_init, vha, 0x0060,
 "Firmware revision (flash) %u.%u.%u (%x).\n",
 ha->fw_revision[0], ha->fw_revision[1],
@@ -3531,7 +3528,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
 memset(ha->gold_fw_version, 0, sizeof(ha->gold_fw_version));
 faddr = ha->flt_region_gold_fw;
- qla24xx_read_flash_data(vha, dcode, ha->flt_region_gold_fw, 8);
+ qla24xx_read_flash_data(vha, (void *)dcode, ha->flt_region_gold_fw, 8);
 if (qla24xx_risc_firmware_invalid(dcode)) {
 ql_log(ql_log_warn, vha, 0x0056,
 "Unrecognized golden fw at %#x.\n", faddr);
@@ -3540,8 +3537,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
 for (i = 0; i < 4; i++)
- ha->gold_fw_version[i] =
- be32_to_cpu((__force __be32)dcode[4+i]);
+ ha->gold_fw_version[i] = be32_to_cpu(dcode[4+i]);
 return ret;
 }
@@ -3621,7 +3617,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
 /* read remaining FCP CMD config data from flash */
 fcp_prio_addr += (FCP_PRIO_CFG_HDR_SIZE >> 2);
- len = ha->fcp_prio_cfg->num_entries * sizeof(struct qla_fcp_prio_entry);
+ len = ha->fcp_prio_cfg->num_entries * FCP_PRIO_CFG_ENTRY_SIZE;
 max_len = FCP_PRIO_CFG_SIZE - FCP_PRIO_CFG_HDR_SIZE;
 ha->isp_ops->read_optrom(vha, &ha->fcp_prio_cfg->entry[0],
@@ -3638,3 +3634,101 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
 ha->fcp_prio_cfg = NULL;
 return QLA_FUNCTION_FAILED;
 }
+
+int qla2x00_sys_ld_info(bsg_job_t *bsg_job)
+{
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct qla_lockdown_info *i;
+ int rval;
+ u16 mpi_ld_status[4], sz;
+ u8 *f, ld_feature;
+ dma_addr_t fdma;
+
+ if (!IS_SYS_LOCKDOWN_CAPABLE(vha->hw)) {
+ ql_dbg(ql_dbg_user, vha, 0x7045,
+ "%s: unsupported adapter\n", __func__);
+ return -EINVAL;
+ }
+ if (bsg_job->reply_payload.payload_len < sizeof(*i)) {
+ ql_dbg(ql_dbg_user, vha, 0x7047,
+ "%s: rsp payload too small %d < %zu\n",
+ __func__, bsg_job->reply_payload.payload_len, sizeof(*i));
+ return -EINVAL;
+ }
+
+ sz = max(FW_FEATURES_SIZE, (int)sizeof(*i));
+ f = dma_alloc_coherent(&vha->hw->pdev->dev, sz,
+ &fdma, GFP_KERNEL);
+ if (!f) {
+ ql_dbg(ql_dbg_user, vha, 0x702e,
+ "DMA alloc failed for feature buf.\n");
+ return -ENOMEM;
+ }
+ i = (struct qla_lockdown_info *)f;
+
+ rval = qla_get_features(vha, fdma, FW_FEATURES_SIZE);
+ ld_feature = f[0];
+ /*
+ * Check whether lockdown functionality is supported
+ * before requesting the lockdown feature.
+ *
+ * BIT_0 (Firmware flash Lockdown feature supported).
+ * If this bit is set, it reflects that the current level of FC
+ * firmware supports the flash lockdown feature.
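+ *
+ * If the feature query fails or BIT_0 is clear, the reply
+ * buffer is zeroed before it is copied back, so the caller
+ * receives a well-defined "not supported" response.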
+ */
+
+ if (rval != QLA_SUCCESS || (!(ld_feature & BIT_0))) {
+ ql_log(ql_log_warn, vha, 0x0067,
+ "Lockdown features are not supported rval=%d ld_feature=%u\n",
+ rval, (u16)ld_feature);
+ memset(f, 0, sz);
+ rval = 0;
+ goto done_release;
+ }
+ memset(f, 0, sz);
+
+ i->isp_fw_lockdown = ld_feature & BIT_0;
+
+ /*
+ * BIT_1 (MPI flash Lockdown feature supported). If this bit
+ * is set, it reflects that the current level of MPI firmware
+ * supports the flash lockdown feature.
+ */
+
+ if (!(ld_feature & BIT_1)) {
+ rval = 0;
+ goto done_release;
+ }
+
+ rval = qla_mpipt_get_status(vha, MPIPT_LOCKDOWN_STATUS,
+ mpi_ld_status, sizeof(mpi_ld_status));
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_user, vha, 0x703b,
+ "Unable to read mpi status=%x, %x, %x, %x.\n",
+ mpi_ld_status[0], mpi_ld_status[1], mpi_ld_status[2],
+ mpi_ld_status[3]);
+ rval = 0;
+ goto done_release;
+ }
+
+ i->mpi_fw_lockdown.lockdown_support = mpi_ld_status[0];
+ i->mpi_fw_lockdown.config_disable_flags = mpi_ld_status[1];
+ i->mpi_fw_lockdown.fw_update_disable_flags = mpi_ld_status[2];
+ i->mpi_fw_lockdown.mpi_disable_flags = mpi_ld_status[3];
+
+done_release:
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, i,
+ sizeof(*i));
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+ bsg_reply->result = DID_OK << 16;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ dma_free_coherent(&vha->hw->pdev->dev, sz, f, fdma);
+
+ return rval;
+}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index ba823e8eb902b..f74576bd728ce 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -27,6 +27,8 @@
 #include
 #include
 #include
+#include
+#include
 #include "qla_def.h"
 #include "qla_target.h"
@@ -184,8 +186,7 @@ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
 return QLA_SUCCESS;
 }
-static inline
-struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
+struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
 be_id_t d_id)
 {
 struct scsi_qla_host *host;
@@ -198,7 +199,7 @@ struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
 key = be_to_port_id(d_id).b24;
- host = btree_lookup32(&vha->hw->tgt.host_map, key);
+ host = btree_lookup32(&vha->hw->host_map, key);
 if (!host)
 ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
 "Unable to find host %06x\n", key);
@@ -206,22 +207,6 @@ struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
 return host;
 }
-static inline
-struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
- uint16_t vp_idx)
-{
- struct qla_hw_data *ha = vha->hw;
-
- if (vha->vp_idx == vp_idx)
- return vha;
-
- BUG_ON(ha->tgt.tgt_vp_map == NULL);
- if (likely(test_bit(vp_idx, ha->vp_idx_map)))
- return ha->tgt.tgt_vp_map[vp_idx].vha;
-
- return NULL;
-}
-
 static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
 {
 unsigned long flags;
@@ -292,27 +277,27 @@ static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
 list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
 if (u->aborted) {
 ql_dbg(ql_dbg_async, vha, 0x502e,
- "Freeing unknown %s %p, because of Abort\n",
+ "Freeing unknown %s %px, because of Abort\n",
 "ATIO_TYPE7", u);
 qlt_send_term_exchange(vha->hw->base_qpair, NULL,
 &u->atio, ha_locked, 0);
 goto abort;
 }
- host = qlt_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
+ host = 
qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id); if (host != NULL) { ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f, - "Requeuing unknown ATIO_TYPE7 %p\n", u); + "Requeuing unknown ATIO_TYPE7 %px\n", u); qlt_24xx_atio_pkt(host, &u->atio, ha_locked); } else if (tgt->tgt_stop) { ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a, - "Freeing unknown %s %p, because tgt is being stopped\n", + "Freeing unknown %s %px, because tgt is being stopped\n", "ATIO_TYPE7", u); qlt_send_term_exchange(vha->hw->base_qpair, NULL, &u->atio, ha_locked, 0); } else { ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d, - "Reschedule u %p, vha %p, host %p\n", u, vha, host); + "Reschedule u %px, vha %px, host %px\n", u, vha, host); if (!queued) { queued = 1; schedule_delayed_work(&vha->unknown_atio_work, @@ -348,7 +333,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, switch (atio->u.raw.entry_type) { case ATIO_TYPE7: { - struct scsi_qla_host *host = qlt_find_host_by_d_id(vha, + struct scsi_qla_host *host = qla_find_host_by_d_id(vha, atio->u.isp24.fcp_hdr.d_id); if (unlikely(NULL == host)) { ql_dbg(ql_dbg_tgt, vha, 0xe03e, @@ -378,8 +363,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, qlt_issue_marker(vha, ha_locked); if ((entry->u.isp24.vp_index != 0xFF) && - (entry->u.isp24.nport_handle != cpu_to_le16(0xFFFF))) { - host = qlt_find_host_by_vp_idx(vha, + (entry->u.isp24.nport_handle != 0xFFFF)) { + host = qla_find_host_by_vp_idx(vha, entry->u.isp24.vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe03f, @@ -403,7 +388,7 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, { struct abts_recv_from_24xx *entry = (struct abts_recv_from_24xx *)atio; - struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, + struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha, entry->vp_index); unsigned long flags; @@ -446,7 +431,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, case CTIO_TYPE7: { struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt; - struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, + struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha, entry->vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe041, @@ -465,7 +450,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, struct imm_ntfy_from_isp *entry = (struct imm_ntfy_from_isp *)pkt; - host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index); + host = qla_find_host_by_vp_idx(vha, entry->u.isp24.vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe042, "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) " @@ -483,7 +468,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, struct nack_to_isp *entry = (struct nack_to_isp *)pkt; if (0xFF != entry->u.isp24.vp_index) { - host = qlt_find_host_by_vp_idx(vha, + host = qla_find_host_by_vp_idx(vha, entry->u.isp24.vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe043, @@ -503,7 +488,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, { struct abts_recv_from_24xx *entry = (struct abts_recv_from_24xx *)pkt; - struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, + struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha, entry->vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe044, @@ -520,7 +505,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, { struct abts_resp_to_24xx *entry = (struct abts_resp_to_24xx *)pkt; - struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, + struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha, 
entry->vp_index); if (unlikely(!host)) { ql_dbg(ql_dbg_tgt, vha, 0xe045, @@ -577,6 +562,18 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res) sp->fcport->logout_on_delete = 1; sp->fcport->plogi_nack_done_deadline = jiffies + HZ; sp->fcport->send_els_logo = 0; + + if (sp->fcport->flags & FCF_FCSP_DEVICE) { + ql_dbg(ql_dbg_edif, vha, 0x20ef, + "%s %8phC edif: PLOGI- AUTH WAIT\n", __func__, + sp->fcport->port_name); + qla2x00_set_fcport_disc_state(sp->fcport, + DSC_LOGIN_AUTH_PEND); + qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE, + sp->fcport->d_id.b24); + qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, sp->fcport->d_id.b24, + 0, sp->fcport); + } break; case SRB_NACK_PRLI: @@ -594,8 +591,7 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); } else { sp->fcport->login_retry = 0; - qla2x00_set_fcport_disc_state(sp->fcport, - DSC_LOGIN_COMPLETE); + qla2x00_set_fcport_disc_state(sp->fcport, DSC_LOGIN_COMPLETE); sp->fcport->deleted = 0; sp->fcport->logout_on_delete = 1; } @@ -609,7 +605,7 @@ static void qla2x00_async_nack_sp_done(srb_t *sp, int res) } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - sp->free(sp); + kref_put(&sp->cmd_kref, qla2x00_sp_release); } int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, @@ -624,6 +620,9 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, case SRB_NACK_PLOGI: fcport->fw_login_state = DSC_LS_PLOGI_PEND; c = "PLOGI"; + if (vha->hw->flags.edif_enabled && + (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) + fcport->flags |= FCF_FCSP_DEVICE; break; case SRB_NACK_PRLI: fcport->fw_login_state = DSC_LS_PRLI_PEND; @@ -642,12 +641,10 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, sp->type = type; sp->name = "nack"; - - sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout; - qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha)+2); + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2, + qla2x00_async_nack_sp_done); sp->u.iocb_cmd.u.nack.ntfy = ntfy; - sp->done = qla2x00_async_nack_sp_done; ql_dbg(ql_dbg_disc, vha, 0x20f4, "Async-%s %8phC hndl %x %s\n", @@ -660,7 +657,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, return rval; done_free_sp: - sp->free(sp); + kref_put(&sp->cmd_kref, qla2x00_sp_release); done: fcport->flags &= ~FCF_ASYNC_SENT; return rval; @@ -680,7 +677,7 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) mutex_unlock(&vha->vha_tgt.tgt_mutex); if (t) { ql_log(ql_log_info, vha, 0xd034, - "%s create sess success %p", __func__, t); + "%s create sess success %px", __func__, t); /* create sess has an extra kref */ vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport); } @@ -693,7 +690,13 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e) void qla24xx_delete_sess_fn(struct work_struct *work) { fc_port_t *fcport = container_of(work, struct fc_port, del_work); - struct qla_hw_data *ha = fcport->vha->hw; + struct qla_hw_data *ha = NULL; + + if (fcport == NULL || fcport->vha == NULL || fcport->vha->hw == NULL) { + return; + } + + ha = fcport->vha->hw; if (fcport->se_sess) { ha->tgt.tgt_ops->shutdown_sess(fcport); @@ -755,7 +758,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) sess->local ? 
"local " : "", sess->port_name, sess->loop_id); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007, - "Reappeared sess %p\n", sess); + "Reappeared sess %px\n", sess); ha->tgt.tgt_ops->update_sess(sess, fcport->d_id, fcport->loop_id, @@ -870,8 +873,8 @@ qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla, pla->ref_count++; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097, - "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC" - " s_id %02x:%02x:%02x, ref=%d pla %p link %d\n", + "Linking sess %px [%d] wwn %8phC with PLOGI ACK to wwn %8phC" + " s_id %02x:%02x:%02x, ref=%d pla %px link %d\n", sess, link, sess->port_name, iocb->u.isp24.port_name, iocb->u.isp24.port_id[2], iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[0], @@ -917,6 +920,11 @@ qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo) qlt_port_logo_t *tmp; int res; + if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) { + res = 0; + goto out; + } + mutex_lock(&vha->vha_tgt.tgt_mutex); list_for_each_entry(tmp, &vha->logo_list, list) { @@ -937,6 +945,7 @@ qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo) list_del(&logo->list); mutex_unlock(&vha->vha_tgt.tgt_mutex); +out: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098, "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n", logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa, @@ -957,7 +966,7 @@ void qlt_free_session_done(struct work_struct *work) sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; ql_dbg(ql_dbg_disc, vha, 0xf084, - "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" + "%s: se_sess %px / sess %px from port %8phC loop_id %#04x" " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n", __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, sess->d_id.b.domain, sess->d_id.b.area, sess->d_id.b.al_pa, @@ -972,8 +981,11 @@ void qlt_free_session_done(struct work_struct *work) logo.id = sess->d_id; logo.cmd_count = 0; - if (!own) + INIT_LIST_HEAD(&logo.list); + if (!own) { qlt_send_first_logo(vha, &logo); + msleep(100); + } sess->send_els_logo = 0; } @@ -983,21 +995,30 @@ void qlt_free_session_done(struct work_struct *work) if (!own || (own && (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) { + sess->logout_completed = 0; rc = qla2x00_post_async_logout_work(vha, sess, NULL); if (rc != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0xf085, - "Schedule logo failed sess %p rc %d\n", + "Schedule logo failed sess %px rc %d\n", sess, rc); else logout_started = true; } else if (own && (own->iocb.u.isp24.status_subcode == ELS_PRLI) && ha->flags.rida_fmt2) { + sess->prlo_rc = MBS_COMMAND_COMPLETE; + + ql_dbg(ql_dbg_disc, vha, 0xf084, + "%s: se_sess %px / sess %px from port %8phC " + "loop_id %#04x Schedule PRLO", + __func__, sess->se_sess, sess, + sess->port_name, sess->loop_id); + rc = qla2x00_post_async_prlo_work(vha, sess, NULL); if (rc != QLA_SUCCESS) ql_log(ql_log_warn, vha, 0xf085, - "Schedule PRLO failed sess %p rc %d\n", + "Schedule PRLO failed sess %px rc %d\n", sess, rc); else logout_started = true; @@ -1009,6 +1030,24 @@ void qlt_free_session_done(struct work_struct *work) sess->nvme_flag |= NVME_FLAG_DELETING; qla_nvme_unregister_remote_port(sess); } + + if (ha->flags.edif_enabled && + (!own || (own && + (own->iocb.u.isp24.status_subcode == ELS_PLOGI)))) { + sess->edif.authok = 0; + if (!ha->flags.host_shutting_down) { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s wwpn %8phC calling qla2x00_release_all_sadb" + "\n", __func__, sess->port_name); + qla2x00_release_all_sadb(vha, sess); + } else { + ql_dbg(ql_dbg_edif, vha, 0x911e, + "%s bypassing 
release_all_sadb\n", __func__);
+ }
+
+ qla_edif_clear_appdata(vha, sess);
+ qla_edif_sess_down(vha, sess);
+ }
 }
 /*
@@ -1024,18 +1063,22 @@ void qlt_free_session_done(struct work_struct *work)
 while (!READ_ONCE(sess->logout_completed)) {
 if (!traced) {
 ql_dbg(ql_dbg_disc, vha, 0xf086,
- "%s: waiting for sess %p logout\n",
+ "%s: waiting for sess %px logout\n",
 __func__, sess);
 traced = true;
 }
 msleep(100);
 cnt++;
- if (cnt > 200)
+ /* Wait for logout to complete before advancing. Otherwise, a
+ * straddling logout can interfere with relogin.
+ */
+ if (cnt > 230)
 break;
 }
 ql_dbg(ql_dbg_disc, vha, 0xf087,
- "%s: sess %p logout completed\n", __func__, sess);
+ "%s: sess %px logout completed\n", __func__, sess);
+ sess->logout_completed = 0;
 }
 if (sess->logo_ack_needed) {
@@ -1083,7 +1126,7 @@ void qlt_free_session_done(struct work_struct *work)
 if (con) {
 iocb = &con->iocb;
 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
- "se_sess %p / sess %p port %8phC is gone,"
+ "se_sess %px / sess %px port %8phC is gone,"
 " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
 sess->se_sess, sess, sess->port_name,
 own ? "releasing own PLOGI" : "no own PLOGI pending",
@@ -1093,7 +1136,7 @@ void qlt_free_session_done(struct work_struct *work)
 sess->plogi_link[QLT_PLOGI_LINK_CONFLICT] = NULL;
 } else {
 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
- "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
+ "se_sess %px / sess %px port %8phC is gone, %s (ref=%d)\n",
 sess->se_sess, sess, sess->port_name,
 own ? "releasing own PLOGI" : "no own PLOGI pending",
@@ -1114,7 +1157,7 @@ void qlt_free_session_done(struct work_struct *work)
 qla2x00_dfs_remove_rport(vha, sess);
 ql_dbg(ql_dbg_disc, vha, 0xf001,
- "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
+ "Unregistration of sess %px %8phC finished fcp_cnt %d\n",
 sess, sess->port_name, vha->fcport_count);
 if (tgt && (tgt->sess_count == 0))
@@ -1147,7 +1190,7 @@ void qlt_unreg_sess(struct fc_port *sess)
 unsigned long flags;
 ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
- "%s sess %p for deletion %8phC\n",
+ "%s sess %px for deletion %8phC\n",
 __func__, sess, sess->port_name);
 spin_lock_irqsave(&sess->vha->work_lock, flags);
@@ -1156,9 +1199,8 @@ void qlt_unreg_sess(struct fc_port *sess)
 return;
 }
 sess->free_pending = 1;
- /*
- * Use FCF_ASYNC_SENT flag to block other cmds used in sess
- * management from being sent.
+ /* Use FCF_ASYNC_SENT flag to block other cmds used in sess
+ * management from being sent.
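+ * The flag is set while vha->work_lock is held and free_pending
+ * has already been checked, so a concurrent qlt_unreg_sess()
+ * call bails out early.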
*/ sess->flags |= FCF_ASYNC_SENT; spin_unlock_irqrestore(&sess->vha->work_lock, flags); @@ -1171,6 +1213,7 @@ void qlt_unreg_sess(struct fc_port *sess) sess->last_rscn_gen = sess->rscn_gen; sess->last_login_gen = sess->login_gen; + INIT_WORK(&sess->free_work, qlt_free_session_done); queue_work(sess->vha->hw->wq, &sess->free_work); } EXPORT_SYMBOL(qlt_unreg_sess); @@ -1198,14 +1241,14 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd) } ql_dbg(ql_dbg_tgt, vha, 0xe000, - "Using sess for qla_tgt_reset: %p\n", sess); + "Using sess for qla_tgt_reset: %px\n", sess); if (!sess) { res = -ESRCH; return res; } ql_dbg(ql_dbg_tgt, vha, 0xe047, - "scsi(%ld): resetting (session %p from port %8phC mcmd %x, " + "scsi(%ld): resetting (session %px from port %8phC mcmd %x, " "loop_id %d)\n", vha->host_no, sess, sess->port_name, mcmd, loop_id); @@ -1233,7 +1276,7 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) case DSC_DELETED: if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] && !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) { - if (tgt && tgt->tgt_stop && tgt->sess_count == 0) + if (tgt && tgt->tgt_stop && (tgt->sess_count == 0)) wake_up_all(&tgt->waitQ); if (sess->vha->fcport_count == 0) @@ -1274,9 +1317,12 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) qla24xx_chk_fcp_state(sess); ql_dbg(ql_dbg_disc, sess->vha, 0xe001, - "Scheduling sess %p for deletion %8phC\n", - sess, sess->port_name); + "Scheduling sess %px for deletion %8phC fc4_type %x\n", + sess, sess->port_name, sess->fc4_type); + + qla_scm_clear_session(sess); + INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); } @@ -1406,7 +1452,7 @@ static struct fc_port *qlt_create_sess( } ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, - "Adding sess %p se_sess %p to tgt %p sess_count %d\n", + "Adding sess %px se_sess %px to tgt %px sess_count %d\n", sess, sess->se_sess, vha->vha_tgt.qla_tgt, vha->vha_tgt.qla_tgt->sess_count); @@ -1450,14 +1496,14 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen) if (max_gen - sess->generation < 0) { spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092, - "Ignoring stale deletion request for se_sess %p / sess %p" + "Ignoring stale deletion request for se_sess %px / sess %px" " for port %8phC, req_gen %d, sess_gen %d\n", sess->se_sess, sess, sess->port_name, max_gen, sess->generation); return; } - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %px", sess); sess->local = 1; spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); @@ -1475,7 +1521,7 @@ static inline int test_tgt_sess_count(struct qla_tgt *tgt) */ spin_lock_irqsave(&ha->tgt.sess_lock, flags); ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002, - "tgt %p, sess_count=%d\n", + "tgt %px, sess_count=%d\n", tgt, tgt->sess_count); res = (tgt->sess_count == 0); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); @@ -1501,7 +1547,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt) return -EPERM; } - ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n", + ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%px)\n", vha->host_no, vha); /* * Mutex needed to sync with qla_tgt_fc_port_[added,deleted]. 
@@ -1514,7 +1560,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt) mutex_unlock(&qla_tgt_mutex); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009, - "Waiting for sess works (tgt %p)", tgt); + "Waiting for sess works (tgt %px)", tgt); spin_lock_irqsave(&tgt->sess_work_lock, flags); while (!list_empty(&tgt->sess_works_list)) { spin_unlock_irqrestore(&tgt->sess_work_lock, flags); @@ -1524,7 +1570,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt) spin_unlock_irqrestore(&tgt->sess_work_lock, flags); ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a, - "Waiting for tgt %p: sess_count=%d\n", tgt, tgt->sess_count); + "Waiting for tgt %px: sess_count=%d\n", tgt, tgt->sess_count); wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ); @@ -1559,14 +1605,12 @@ void qlt_stop_phase2(struct qla_tgt *tgt) return; } - mutex_lock(&tgt->ha->optrom_mutex); mutex_lock(&vha->vha_tgt.tgt_mutex); tgt->tgt_stop = 0; tgt->tgt_stopped = 1; mutex_unlock(&vha->vha_tgt.tgt_mutex); - mutex_unlock(&tgt->ha->optrom_mutex); - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n", + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %px finished\n", tgt); switch (vha->qlini_mode) { @@ -1626,7 +1670,7 @@ static void qlt_release(struct qla_tgt *tgt) vha->vha_tgt.qla_tgt = NULL; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d, - "Release of tgt %p finished\n", tgt); + "Release of tgt %px finished\n", tgt); kfree(tgt); } @@ -1647,8 +1691,8 @@ static int qlt_sched_sess_work(struct qla_tgt *tgt, int type, } ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e, - "Scheduling work (type %d, prm %p)" - " to find session for param %p (size %d, tgt %p)\n", + "Scheduling work (type %d, prm %px)" + " to find session for param %px (size %d, tgt %px)\n", type, prm, param, param_size, tgt); prm->type = type; @@ -1679,7 +1723,7 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair, if (!ha->flags.fw_started) return; - ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); + ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%px)\n", ha); pkt = (request_t *)__qla2x00_alloc_iocbs(qpair, NULL); if (!pkt) { @@ -1702,7 +1746,7 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair, nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { nack->u.isp24.flags = ntfy->u.isp24.flags & - cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); + cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); } nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id; nack->u.isp24.status = ntfy->u.isp24.status; @@ -1716,6 +1760,12 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair, nack->u.isp24.srr_reject_code_expl = srr_explan; nack->u.isp24.vp_index = ntfy->u.isp24.vp_index; + /* TODO qualify this with EDIF enable */ + if ((ntfy->u.isp24.status_subcode == ELS_PLOGI) && + (le16_to_cpu(ntfy->u.isp24.flags) & NOTIFY24XX_FLAGS_FCSP)) { + nack->u.isp24.flags |= cpu_to_le16(NOTIFY_ACK_FLAGS_FCSP); + } + ql_dbg(ql_dbg_tgt, vha, 0xe005, "qla_target(%d): Sending 24xx Notify Ack %d\n", vha->vp_idx, nack->u.isp24.status); @@ -1730,15 +1780,14 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd) struct scsi_qla_host *vha = mcmd->vha; struct qla_hw_data *ha = vha->hw; struct abts_resp_to_24xx *resp; - __le32 f_ctl; - uint32_t h; + uint32_t f_ctl, h; uint8_t *p; int rc; struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts; struct qla_qpair *qpair = mcmd->qpair; ql_dbg(ql_dbg_tgt, vha, 0xe006, - "Sending task mgmt ABTS response (ha=%p, status=%x)\n", + "Sending task mgmt ABTS response (ha=%px, status=%x)\n", ha, mcmd->fc_tm_rsp); rc = 
qlt_check_reserve_free_req(qpair, 1); @@ -1764,7 +1813,7 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd) qpair->req->outstanding_cmds[h] = (srb_t *)mcmd; } - resp->handle = make_handle(qpair->req->id, h); + resp->handle = MAKE_HANDLE(qpair->req->id, h); resp->entry_type = ABTS_RESP_24XX; resp->entry_count = 1; resp->nport_handle = abts->nport_handle; @@ -1788,7 +1837,7 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd) resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC; resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID; resp->payload.ba_acct.low_seq_cnt = 0x0000; - resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF); + resp->payload.ba_acct.high_seq_cnt = 0xFFFF; resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id; resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id; } else { @@ -1820,11 +1869,11 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair, struct scsi_qla_host *vha = qpair->vha; struct qla_hw_data *ha = vha->hw; struct abts_resp_to_24xx *resp; - __le32 f_ctl; + uint32_t f_ctl; uint8_t *p; ql_dbg(ql_dbg_tgt, vha, 0xe006, - "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x\n", + "Sending task mgmt ABTS response (ha=%px, atio=%px, status=%x\n", ha, abts, status); resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, @@ -1863,7 +1912,7 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair, resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC; resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID; resp->payload.ba_acct.low_seq_cnt = 0x0000; - resp->payload.ba_acct.high_seq_cnt = cpu_to_le16(0xFFFF); + resp->payload.ba_acct.high_seq_cnt = 0xFFFF; resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id; resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id; } else { @@ -2030,13 +2079,13 @@ static void qlt_do_tmr_work(struct work_struct *work) struct qla_tgt_mgmt_cmd *mcmd = container_of(work, struct qla_tgt_mgmt_cmd, work); struct qla_hw_data *ha = mcmd->vha->hw; - int rc; + int rc = EIO; uint32_t tag; unsigned long flags; switch (mcmd->tmr_func) { case QLA_TGT_ABTS: - tag = le32_to_cpu(mcmd->orig_iocb.abts.exchange_addr_to_abort); + tag = mcmd->orig_iocb.abts.exchange_addr_to_abort; break; default: tag = 0; @@ -2116,7 +2165,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, struct qla_tgt_cmd *abort_cmd; abort_cmd = ha->tgt.tgt_ops->find_cmd_by_tag(sess, - le32_to_cpu(abts->exchange_addr_to_abort)); + abts->exchange_addr_to_abort); if (abort_cmd && abort_cmd->qpair) { mcmd->qpair = abort_cmd->qpair; mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid; @@ -2139,7 +2188,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, { struct qla_hw_data *ha = vha->hw; struct fc_port *sess; - uint32_t tag = le32_to_cpu(abts->exchange_addr_to_abort); + uint32_t tag = abts->exchange_addr_to_abort; be_id_t s_id; int rc; unsigned long flags; @@ -2214,7 +2263,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair, uint16_t temp; ql_dbg(ql_dbg_tgt, ha, 0xe008, - "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", + "Sending task mgmt CTIO7 (ha=%px, atio=%px, resp_code=%x\n", ha, atio, resp_code); @@ -2229,7 +2278,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair, ctio->entry_type = CTIO_TYPE7; ctio->entry_count = 1; ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK; - ctio->nport_handle = cpu_to_le16(mcmd->sess->loop_id); + ctio->nport_handle = mcmd->sess->loop_id; ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); ctio->vp_index = 
ha->vp_idx;
 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -2271,7 +2320,7 @@ void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
 struct scsi_qla_host *vha = cmd->vha;
 ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
- "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
+ "Sending response CTIO7 (vha=%px, atio=%px, scsi_status=%02x, "
 "sense_key=%02x, asc=%02x, ascq=%02x",
 vha, atio, scsi_status, sense_key, asc, ascq);
@@ -2286,7 +2335,7 @@ void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
 ctio->entry_type = CTIO_TYPE7;
 ctio->entry_count = 1;
 ctio->handle = QLA_TGT_SKIP_HANDLE;
- ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id);
+ ctio->nport_handle = cmd->sess->loop_id;
 ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
 ctio->vp_index = vha->vp_idx;
 ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id);
@@ -2336,7 +2385,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
 bool free_mcmd = true;
 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
- "TM response mcmd (%p) status %#x state %#x",
+ "TM response mcmd (%px) status %#x state %#x",
 mcmd, mcmd->fc_tm_rsp, mcmd->flags);
 spin_lock_irqsave(qpair->qp_lock_ptr, flags);
@@ -2490,7 +2539,7 @@ static int qlt_check_reserve_free_req(struct qla_qpair *qpair,
 if (req->cnt < (req_cnt + 2)) {
 cnt = (uint16_t)(qpair->use_shadow_reg ? *req->out_ptr :
- rd_reg_dword_relaxed(req->req_q_out));
+ RD_REG_DWORD_RELAXED(req->req_q_out));
 if (req->ring_index < cnt)
 req->cnt = cnt - req->ring_index;
@@ -2566,6 +2615,14 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
 struct ctio7_to_24xx *pkt;
 struct atio_from_isp *atio = &prm->cmd->atio;
 uint16_t temp;
+ uint32_t byte_count = 0;
+ struct qla_tgt_cmd *cmd = prm->cmd;
+
+ byte_count = cmd->bufflen;
+ if (cmd->dma_data_direction == DMA_TO_DEVICE)
+ prm->cmd->sess->edif.rx_bytes += byte_count;
+ if (cmd->dma_data_direction == DMA_FROM_DEVICE)
+ prm->cmd->sess->edif.tx_bytes += byte_count;
 pkt = (struct ctio7_to_24xx *)qpair->req->ring_ptr;
 prm->pkt = pkt;
@@ -2586,7 +2643,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
 } else
 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
- pkt->handle = make_handle(qpair->req->id, h);
+ pkt->handle = MAKE_HANDLE(qpair->req->id, h);
 pkt->handle |= CTIO_COMPLETION_HANDLE_MARK;
 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
 pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
@@ -2598,6 +2655,11 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair,
 pkt->u.status0.ox_id = cpu_to_le16(temp);
 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
+ if (prm->cmd->edif) {
+ pkt->u.status0.edif_flags |= EF_EN_EDIF;
+ pkt->u.status0.edif_flags &= ~CF_NEW_SA;
+ }
+
 return 0;
 }
@@ -2699,28 +2761,28 @@ static void qlt_print_dif_err(struct qla_tgt_prm *prm)
 case 1:
 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
 "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
- "se_cmd=%p tag[%x]",
+ "se_cmd=%px tag[%x]",
 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
 cmd->atio.u.isp24.exchange_addr);
 break;
 case 2:
 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
 "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
- "se_cmd=%p tag[%x]",
+ "se_cmd=%px tag[%x]",
 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
 cmd->atio.u.isp24.exchange_addr);
 break;
 case 3:
 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
 "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
- "se_cmd=%p tag[%x]",
+ "se_cmd=%px tag[%x]",
 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
 cmd->atio.u.isp24.exchange_addr);
 break;
 default:
 ql_dbg(ql_dbg_tgt_dif, 
vha, 0xe010, "BE detected Dif ERR: lba[%llx|%lld] len[%x] " - "se_cmd=%p tag[%x]", + "se_cmd=%px tag[%x]", cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, cmd->atio.u.isp24.exchange_addr); break; @@ -2846,14 +2908,10 @@ static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio, cpu_to_le16(SS_SENSE_LEN_VALID); ctio->u.status1.sense_length = cpu_to_le16(prm->sense_buffer_len); - for (i = 0; i < prm->sense_buffer_len/4; i++) { - uint32_t v; + for (i = 0; i < prm->sense_buffer_len/4; i++) + ((uint32_t *)ctio->u.status1.sense_data)[i] = + cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); - v = get_unaligned_be32( - &((uint32_t *)prm->sense_buffer)[i]); - put_unaligned_le32(v, - &((uint32_t *)ctio->u.status1.sense_data)[i]); - } qlt_print_dif_err(prm); } else { @@ -3026,7 +3084,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) memset(pkt, 0, sizeof(*pkt)); ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071, - "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n", + "qla_target(%d):%s: se_cmd[%px] CRC2 prot_op[0x%x] cmd prot sg:cnt[%px:%x] lba[%llu]\n", cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op, prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba); @@ -3103,7 +3161,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) } else qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd; - pkt->handle = make_handle(qpair->req->id, h); + pkt->handle = MAKE_HANDLE(qpair->req->id, h); pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); @@ -3124,7 +3182,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) else if (cmd->dma_data_direction == DMA_FROM_DEVICE) pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); - pkt->dseg_count = cpu_to_le16(prm->tot_dsds); + pkt->dseg_count = prm->tot_dsds; /* Fibre channel byte count */ pkt->transfer_length = cpu_to_le32(transfer_length); @@ -3146,7 +3204,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts); put_unaligned_le64(crc_ctx_dma, &pkt->crc_context_address); - pkt->crc_context_len = cpu_to_le16(CRC_CONTEXT_LEN_FW); + pkt->crc_context_len = CRC_CONTEXT_LEN_FW; if (!bundling) { cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0]; @@ -3224,20 +3282,20 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || (cmd->sess && cmd->sess->deleted)) { cmd->state = QLA_TGT_STATE_PROCESSED; - return 0; + res = 0; + goto free; } ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018, - "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n", + "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%px] qp %d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ? 
1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction, &cmd->se_cmd, qpair->id); res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, &full_req_cnt); - if (unlikely(res != 0)) { - return res; - } + if (unlikely(res != 0)) + goto free; spin_lock_irqsave(qpair->qp_lock_ptr, flags); @@ -3256,8 +3314,9 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n", vha->flags.online, qla2x00_reset_active(vha), cmd->reset_count, qpair->chip_reset); + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); res = 0; - goto out_unmap_unlock; + goto free; } /* Does F/W have an IOCBs for this request */ @@ -3288,8 +3347,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, if (xmit_type & QLA_TGT_XMIT_STATUS) { pkt->u.status0.scsi_status = cpu_to_le16(prm.rq_result); - pkt->u.status0.residual = - cpu_to_le32(prm.residual); + if (!cmd->edif) + pkt->u.status0.residual = + cpu_to_le32(prm.residual); + pkt->u.status0.flags |= cpu_to_le16( CTIO7_FLAGS_SEND_STATUS); if (qlt_need_explicit_conf(cmd, 0)) { @@ -3311,7 +3372,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, qpair->req); ql_dbg_qp(ql_dbg_tgt, qpair, 0x305e, - "Building additional status packet 0x%p.\n", + "Building additional status packet 0x%px.\n", ctio); /* @@ -3360,6 +3421,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, qlt_unmap_sg(vha, cmd); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); +free: + vha->hw->tgt.tgt_ops->free_cmd(cmd); return res; } EXPORT_SYMBOL(qlt_xmit_response); @@ -3380,6 +3443,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) prm.sg = NULL; prm.req_cnt = 1; + /* Calculate number of entries and segments required */ + if (qlt_pci_map_calc_cnt(&prm) != 0) + return -EAGAIN; + if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || (cmd->sess && cmd->sess->deleted)) { /* @@ -3397,10 +3464,6 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) return 0; } - /* Calculate number of entries and segments required */ - if (qlt_pci_map_calc_cnt(&prm) != 0) - return -EAGAIN; - spin_lock_irqsave(qpair->qp_lock_ptr, flags); /* Does F/W have an IOCBs for this request */ res = qlt_check_reserve_free_req(qpair, prm.req_cnt); @@ -3462,13 +3525,13 @@ qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, cmd->trc_flags |= TRC_DIF_ERR; - cmd->a_guard = get_unaligned_be16(ap + 0); - cmd->a_app_tag = get_unaligned_be16(ap + 2); - cmd->a_ref_tag = get_unaligned_be32(ap + 4); + cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); + cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); + cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); - cmd->e_guard = get_unaligned_be16(ep + 0); - cmd->e_app_tag = get_unaligned_be16(ep + 2); - cmd->e_ref_tag = get_unaligned_be32(ep + 4); + cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); + cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); + cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); ql_dbg(ql_dbg_tgt_dif, vha, 0xf075, "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state); @@ -3478,7 +3541,7 @@ qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, /* check appl tag */ if (cmd->e_app_tag != cmd->a_app_tag) { ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d, - "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", + "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%px ox_id[%04x]", cmd->cdb[0], lba, 
(lba+cmd->num_blks), cmd->num_blks, cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, @@ -3494,7 +3557,7 @@ qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, /* check ref tag */ if (cmd->e_ref_tag != cmd->a_ref_tag) { ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e, - "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ", + "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%px ox_id[%04x] ", cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, @@ -3511,7 +3574,7 @@ qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, /* check guard */ if (cmd->e_guard != cmd->a_guard) { ql_dbg(ql_dbg_tgt_dif, vha, 0xe012, - "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]", + "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%px ox_id[%04x]", cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag, cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd, @@ -3560,7 +3623,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, int ret = 0; ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, - "Sending TERM ELS CTIO (ha=%p)\n", ha); + "Sending TERM ELS CTIO (ha=%px)\n", ha); pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); if (pkt == NULL) { @@ -3580,7 +3643,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle; if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) { nack->u.isp24.flags = ntfy->u.isp24.flags & - cpu_to_le16(NOTIFY24XX_FLAGS_PUREX_IOCB); + __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB); } /* terminate */ @@ -3625,7 +3688,7 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair, int ret = 0; uint16_t temp; - ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha); + ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%px)\n", ha); if (cmd) vha = cmd->vha; @@ -3641,7 +3704,7 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair, if (cmd != NULL) { if (cmd->state < QLA_TGT_STATE_PROCESSED) { ql_dbg(ql_dbg_tgt, vha, 0xe051, - "qla_target(%d): Terminating cmd %p with " + "qla_target(%d): Terminating cmd %px with " "incorrect state %d\n", vha->vp_idx, cmd, cmd->state); } else @@ -3654,7 +3717,7 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair, ctio24 = (struct ctio7_to_24xx *)pkt; ctio24->entry_type = CTIO_TYPE7; - ctio24->nport_handle = cpu_to_le16(CTIO7_NHANDLE_UNRECOGNIZED); + ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED; ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); ctio24->vp_index = vha->vp_idx; ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); @@ -3767,15 +3830,12 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd) unsigned long flags; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, - "qla_target(%d): terminating exchange for aborted cmd=%p " - "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, + "qla_target(%d): terminating exchange for aborted cmd=%px " + "(se_cmd=%px, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, se_cmd->tag); spin_lock_irqsave(&cmd->cmd_lock, flags); if (cmd->aborted) { - if (cmd->sg_mapped) - qlt_unmap_sg(vha, cmd); - spin_unlock_irqrestore(&cmd->cmd_lock, flags); /* * It's normal to see 
2 calls in this path: @@ -3783,10 +3843,10 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd) * 2) TCM TMR - drain_state_list */ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016, - "multiple abort. %p transport_state %x, t_state %x, " + "multiple abort. %px transport_state %x, t_state %x, " "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); - return -EIO; + return EIO; } cmd->aborted = 1; cmd->trc_flags |= TRC_ABORT; @@ -3797,17 +3857,28 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd) } EXPORT_SYMBOL(qlt_abort_cmd); +#ifndef TGT_FREE_TAG +static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd) +{ + struct se_session *se_sess = cmd->se_cmd.se_sess; + percpu_ida_free(&se_sess->sess_tag_pool, cmd->se_cmd.map_tag); +} +#endif + void qlt_free_cmd(struct qla_tgt_cmd *cmd) { struct fc_port *sess = cmd->sess; ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074, - "%s: se_cmd[%p] ox_id %04x\n", + "%s: se_cmd[%px] ox_id %04x\n", __func__, &cmd->se_cmd, be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); BUG_ON(cmd->cmd_in_wq); + if (cmd->sg_mapped) + qlt_unmap_sg(cmd->vha, cmd); + if (!cmd->q_full) qlt_decr_num_pend_cmds(cmd->vha); @@ -3821,7 +3892,7 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd) return; } cmd->jiffies_at_free = get_jiffies_64(); - cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd); + QL_TGT_FREE_TAG(cmd); } EXPORT_SYMBOL(qlt_free_cmd); @@ -3837,7 +3908,7 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio, if (cmd->se_cmd.prot_op) ql_dbg(ql_dbg_tgt_dif, vha, 0xe013, "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] " - "se_cmd=%p tag[%x] op %#x/%s", + "se_cmd=%px tag[%x] op %#x/%s", cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, cmd->atio.u.isp24.exchange_addr, @@ -3892,7 +3963,7 @@ static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha, return NULL; } - cmd = req->outstanding_cmds[h]; + cmd = (void *) req->outstanding_cmds[h]; if (unlikely(cmd == NULL)) { ql_dbg(ql_dbg_async, vha, 0xe053, "qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n", @@ -3936,6 +4007,12 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, if (cmd == NULL) return; + if ((le16_to_cpu(((struct ctio7_from_24xx *)ctio)->flags) & CTIO7_FLAGS_DATA_OUT) && + cmd->sess) { + qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess, + (struct ctio7_from_24xx*)ctio); + } + se_cmd = &cmd->se_cmd; cmd->cmd_sent_to_fw = 0; @@ -3960,7 +4037,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, /* They are OK */ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058, "qla_target(%d): CTIO with " - "status %#x received, state %x, se_cmd %p, " + "status %#x received, state %x, se_cmd %px, " "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, " "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx, status, cmd->state, se_cmd); @@ -3974,7 +4051,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, "qla_target(%d): CTIO with %s status %x " - "received (state %x, se_cmd %p)\n", vha->vp_idx, + "received (state %x, se_cmd %px)\n", vha->vp_idx, logged_out ? 
"PORT LOGGED OUT" : "PORT UNAVAILABLE", status, cmd->state, se_cmd); @@ -3997,7 +4074,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, (struct ctio_crc_from_fw *)ctio; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, "qla_target(%d): CTIO with DIF_ERROR status %x " - "received (state %x, ulp_cmd %p) actual_dif[0x%llx] " + "received (state %x, ulp_cmd %px) actual_dif[0x%llx] " "expect_dif[0x%llx]\n", vha->vp_idx, status, cmd->state, se_cmd, *((u64 *)&crc->actual_dif[0]), @@ -4006,9 +4083,20 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, qlt_handle_dif_error(qpair, cmd, ctio); return; } + + case CTIO_FAST_AUTH_ERR: + case CTIO_FAST_INCOMP_PAD_LEN: + case CTIO_FAST_INVALID_REQ: + case CTIO_FAST_SPI_ERR: + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, + "qla_target(%d): CTIO with EDIF error status 0x%x " + "received (state %x, se_cmd %px\n", + vha->vp_idx, status, cmd->state, se_cmd); + break; + default: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, - "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n", + "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %px\n", vha->vp_idx, status, cmd->state, se_cmd); break; } @@ -4041,7 +4129,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, } else if (cmd->aborted) { cmd->trc_flags |= TRC_CTIO_ABORTED; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, - "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); + "Aborted command %px (tag %lld) finished\n", cmd, se_cmd->tag); } else { cmd->trc_flags |= TRC_CTIO_STRANGE; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, @@ -4117,7 +4205,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) spin_lock_init(&cmd->cmd_lock); cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; - cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr); + cmd->se_cmd.tag = atio->u.isp24.exchange_addr; if (atio->u.isp24.fcp_cmnd.rddata && atio->u.isp24.fcp_cmnd.wrdata) { @@ -4145,7 +4233,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) return; out_term: - ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd); + ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %px", cmd); /* * cmd has not sent to target yet, so pass NULL as the second * argument to qlt_send_term_exchange() and free the memory here. 
@@ -4155,7 +4243,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0); qlt_decr_num_pend_cmds(vha); - cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd); + QL_TGT_FREE_TAG(cmd); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); ha->tgt.tgt_ops->put_sess(sess); @@ -4282,18 +4370,35 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, struct fc_port *sess, struct atio_from_isp *atio) { - struct qla_tgt_cmd *cmd; + struct qla_tgt_cmd *cmd = NULL; +#ifdef TGT_SBITMAP_QUE + struct se_session *se_sess = sess->se_sess; + int tag, cpu; +#endif +#ifdef TGT_SBITMAP_QUE + tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); + if (tag < 0) + return NULL; + + cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; +#else cmd = vha->hw->tgt.tgt_ops->get_cmd(sess); +#endif if (!cmd) return NULL; + memset(cmd, 0, sizeof(struct qla_tgt_cmd)); cmd->cmd_type = TYPE_TGT_CMD; memcpy(&cmd->atio, atio, sizeof(*atio)); cmd->state = QLA_TGT_STATE_NEW; cmd->tgt = vha->vha_tgt.qla_tgt; qlt_incr_num_pend_cmds(vha); cmd->vha = vha; +#ifdef TGT_SBITMAP_QUE + cmd->se_cmd.map_tag = tag; + cmd->se_cmd.map_cpu = cpu; +#endif cmd->sess = sess; cmd->loop_id = sess->loop_id; cmd->conf_compl_supported = sess->conf_compl_supported; @@ -4306,6 +4411,7 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha, qlt_assign_qpair(vha, cmd); cmd->reset_count = vha->hw->base_qpair->chip_reset; cmd->vp_idx = vha->vp_idx; + cmd->edif = sess->edif.enable; return cmd; } @@ -4323,7 +4429,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, if (unlikely(tgt->tgt_stop)) { ql_dbg(ql_dbg_io, vha, 0x3061, - "New command while device %p is shutting down\n", tgt); + "New command while device %px is shutting down\n", tgt); return -ENODEV; } @@ -4339,7 +4445,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, * session deletion, but it's still in sess_del_work wq */ if (sess->deleted) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002, - "New command while old session %p is being deleted\n", + "New command while old session %px is being deleted\n", sess); return -EFAULT; } @@ -4546,7 +4652,7 @@ void qlt_logo_completion_handler(fc_port_t *fcport, int rc) { if (rc != MBS_COMMAND_COMPLETE) { ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093, - "%s: se_sess %p / sess %p from" + "%s: se_sess %px / sess %px from" " port %8phC loop_id %#04x s_id %02x:%02x:%02x" " LOGO failed: %#x\n", __func__, @@ -4590,7 +4696,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, if (port_id.b24 == other_sess->d_id.b24) { if (loop_id != other_sess->loop_id) { ql_dbg(ql_dbg_disc, vha, 0x1000c, - "Invalidating sess %p loop_id %d wwn %llx.\n", + "Invalidating sess %px loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); /* @@ -4606,7 +4712,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, * kill the session, but don't free the loop_id */ ql_dbg(ql_dbg_disc, vha, 0xf01b, - "Invalidating sess %p loop_id %d wwn %llx.\n", + "Invalidating sess %px loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); other_sess->keep_nport_handle = 1; @@ -4621,7 +4727,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, if ((loop_id == other_sess->loop_id) && (loop_id != FC_NO_LOOP_ID)) { ql_dbg(ql_dbg_disc, vha, 0x1000d, - "Invalidating sess %p loop_id %d wwn %llx.\n", + "Invalidating sess %px loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); /* Same loop_id but different s_id @@ -4720,6 +4826,22 @@ static int 
qlt_handle_login(struct scsi_qla_host *vha,
 res = 1;
 goto out;
 }
+ if (vha->hw->flags.edif_enabled) {
+ if (DBELL_INACTIVE(vha)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d Term INOT due to app not started lid=%d, NportID %06X ",
+ __func__, __LINE__, loop_id, port_id.b24);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ } else if (iocb->u.isp24.status_subcode == ELS_PLOGI &&
+ !(iocb->u.isp24.flags & NOTIFY24XX_FLAGS_FCSP)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d Term INOT due to non-secure lid=%d, NportID %06X ",
+ __func__, __LINE__, loop_id, port_id.b24);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ }
+ }
 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
 if (!pla) {
@@ -4740,7 +4862,7 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
 if (!sess) {
 pla->ref_count++;
 ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post new sess\n",
+ "%s %d %8phC post new sess 1\n",
 __func__, __LINE__, iocb->u.isp24.port_name);
 if (iocb->u.isp24.status_subcode == ELS_PLOGI)
 qla24xx_post_newsess_work(vha, &port_id,
@@ -4786,6 +4908,21 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
 sess->d_id = port_id;
 sess->login_gen++;
+ sess->loop_id = loop_id;
+
+ if (iocb->u.isp24.status_subcode == ELS_PLOGI) {
+ /* remote port has assigned Port ID */
+ if (N2N_TOPO(vha->hw) && fcport_is_bigger(sess))
+ vha->d_id = sess->d_id;
+
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC - send port online\n",
+ __func__, sess->port_name);
+
+ qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
+ sess->d_id.b24);
+ }
 if (iocb->u.isp24.status_subcode == ELS_PRLI) {
 sess->fw_login_state = DSC_LS_PRLI_PEND;
@@ -4803,9 +4940,9 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
 else
 sess->port_type = FCT_TARGET;
- } else
+ } else {
 sess->fw_login_state = DSC_LS_PLOGI_PEND;
-
+ }
 ql_dbg(ql_dbg_disc, vha, 0x20f9,
 "%s %d %8phC DS %d\n",
@@ -4813,7 +4950,6 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
 switch (sess->disc_state) {
 case DSC_DELETED:
- case DSC_LOGIN_PEND:
 qlt_plogi_ack_unref(vha, pla);
 break;
@@ -4898,6 +5034,16 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
 sess = qla2x00_find_fcport_by_wwpn(vha,
 iocb->u.isp24.port_name, 1);
+ if (vha->hw->flags.edif_enabled && sess &&
+ (!(sess->flags & FCF_FCSP_DEVICE) ||
+ !sess->edif.authok)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC Term PRLI due to unauthorized PRLI\n",
+ __func__, __LINE__, iocb->u.isp24.port_name);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ break;
+ }
+
 if (sess && sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]) {
 ql_dbg(ql_dbg_disc, vha, 0xffff,
 "%s %d %8phC Term PRLI due to PLOGI ACK not completed\n",
@@ -4932,7 +5078,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
 break;
 default:
 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
- "PRLI with conflicting sess %p port %8phC\n",
+ "PRLI with conflicting sess %px port %8phC\n",
 conflict_sess, conflict_sess->port_name);
 conflict_sess->fw_login_state =
 DSC_LS_PORT_UNAVAIL;
@@ -4946,6 +5092,16 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
 bool delete = false;
 int sec;
+ if (vha->hw->flags.edif_enabled && sess &&
+ (!(sess->flags & FCF_FCSP_DEVICE) ||
+ !sess->edif.authok)) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC Term PRLI due to unauthorized PRLI\n",
+ __func__, __LINE__, iocb->u.isp24.port_name);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ break;
+ }
+
 spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
 switch (sess->fw_login_state) {
 case DSC_LS_PLOGI_PEND: 
@@ -4993,7 +5149,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, * while last one finishes. */ ql_log(ql_log_warn, sess->vha, 0xf095, - "sess %p PRLI received, before plogi ack.\n", + "sess %px PRLI received, before plogi ack.\n", sess); qlt_send_term_imm_notif(vha, iocb, 1); res = 0; @@ -5005,7 +5161,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, * since we have deleted the old session during PLOGI */ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096, - "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n", + "PRLI (loop_id %#04x) for existing sess %px (loop_id %#04x)\n", sess->loop_id, sess, iocb->u.isp24.nport_handle); sess->local = 0; @@ -5076,7 +5232,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); ql_dbg(ql_dbg_disc, vha, 0x20fc, - "%s: logo %llx res %d sess %p ", + "%s: logo %llx res %d sess %px ", __func__, wwn, res, sess); if (res == 0) { /* @@ -5109,7 +5265,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, iocb->u.isp24.port_name, 1); if (sess) { ql_dbg(ql_dbg_disc, vha, 0x20fd, - "sess %p lid %d|%d DS %d LS %d\n", + "sess %px lid %d|%d DS %d LS %d\n", sess, sess->loop_id, loop_id, sess->disc_state, sess->fw_login_state); } @@ -5309,7 +5465,7 @@ static int __qlt_send_busy(struct qla_qpair *qpair, ctio24 = (struct ctio7_to_24xx *)pkt; ctio24->entry_type = CTIO_TYPE7; - ctio24->nport_handle = cpu_to_le16(sess->loop_id); + ctio24->nport_handle = sess->loop_id; ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); ctio24->vp_index = vha->vp_idx; ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); @@ -5322,14 +5478,13 @@ static int __qlt_send_busy(struct qla_qpair *qpair, * CTIO from fw w/o se_cmd doesn't provide enough info to retry it, * if the explicit conformation is used. 
*/ - ctio24->u.status1.ox_id = - cpu_to_le16(be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id)); + ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); ctio24->u.status1.scsi_status = cpu_to_le16(status); - ctio24->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); + ctio24->u.status1.residual = get_datalen_for_atio(atio); if (ctio24->u.status1.residual != 0) - ctio24->u.status1.scsi_status |= cpu_to_le16(SS_RESIDUAL_UNDER); + ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER; /* Memory Barrier */ wmb(); @@ -5352,12 +5507,16 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; struct qla_hw_data *ha = vha->hw; struct fc_port *sess; + struct se_session *se_sess; struct qla_tgt_cmd *cmd; +#ifdef TGT_SBITMAP_QUE + int tag, cpu; +#endif unsigned long flags; if (unlikely(tgt->tgt_stop)) { ql_dbg(ql_dbg_io, vha, 0x300a, - "New command while device %p is shutting down\n", tgt); + "New command while device %px is shutting down\n", tgt); return; } @@ -5382,8 +5541,11 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, if (!sess) return; - cmd = ha->tgt.tgt_ops->get_cmd(sess); - if (!cmd) { + se_sess = sess->se_sess; + +#ifdef TGT_SBITMAP_QUE + tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); + if (tag < 0) { ql_dbg(ql_dbg_io, vha, 0x3009, "qla_target(%d): %s: Allocation of cmd failed\n", vha->vp_idx, __func__); @@ -5398,6 +5560,12 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, return; } + cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; +#else + cmd = vha->hw->tgt.tgt_ops->get_cmd(sess); +#endif + memset(cmd, 0, sizeof(struct qla_tgt_cmd)); + qlt_incr_num_pend_cmds(vha); INIT_LIST_HEAD(&cmd->cmd_list); memcpy(&cmd->atio, atio, sizeof(*atio)); @@ -5407,6 +5575,9 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, cmd->reset_count = ha->base_qpair->chip_reset; cmd->q_full = 1; cmd->qpair = ha->base_qpair; +#ifdef TGT_SBITMAP_QUE + cmd->se_cmd.map_cpu = cpu; +#endif if (qfull) { cmd->q_full = 1; @@ -5472,7 +5643,7 @@ qlt_free_qfull_cmds(struct qla_qpair *qpair) be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id)); else ql_dbg(ql_dbg_io, vha, 0x3008, - "%s: Unexpected cmd in QFull list %p\n", __func__, + "%s: Unexpected cmd in QFull list %px\n", __func__, cmd); list_del(&cmd->cmd_list); @@ -5545,7 +5716,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, if (unlikely(tgt == NULL)) { ql_dbg(ql_dbg_tgt, vha, 0x3064, - "ATIO pkt, but no tgt (ha %p)", ha); + "ATIO pkt, but no tgt (ha %px)", ha); return; } /* @@ -5558,7 +5729,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, switch (atio->u.raw.entry_type) { case ATIO_TYPE7: if (unlikely(atio->u.isp24.exchange_addr == - cpu_to_le32(ATIO_EXCHANGE_ADDRESS_UNKNOWN))) { + ATIO_EXCHANGE_ADDRESS_UNKNOWN)) { ql_dbg(ql_dbg_io, vha, 0x3065, "qla_target(%d): ATIO_TYPE7 " "received with UNKNOWN exchange address, " @@ -5670,7 +5841,7 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha, /* found existing exchange */ qpair->retry_term_cnt++; if (qpair->retry_term_cnt >= 5) { - rc = -EIO; + rc = EIO; qpair->retry_term_cnt = 0; ql_log(ql_log_warn, vha, 0xffff, "Unable to send ABTS Respond. 
Dumping firmware.\n"); @@ -5678,9 +5849,9 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha, vha, 0xffff, (uint8_t *)entry, sizeof(*entry)); if (qpair == ha->base_qpair) - ha->isp_ops->fw_dump(vha); + ha->isp_ops->fw_dump(vha, 1); else - qla2xxx_dump_fw(vha); + ha->isp_ops->fw_dump(vha, 0); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); @@ -5721,8 +5892,8 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha, entry->compl_status); if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) { - if (le32_to_cpu(entry->error_subcode1) == 0x1E && - le32_to_cpu(entry->error_subcode2) == 0) { + if ((entry->error_subcode1 == 0x1E) && + (entry->error_subcode2 == 0)) { if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) { ha->tgt.tgt_ops->free_mcmd(mcmd); return; @@ -5751,7 +5922,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, if (unlikely(tgt == NULL)) { ql_dbg(ql_dbg_tgt, vha, 0xe05d, - "qla_target(%d): Response pkt %x received, but no tgt (ha %p)\n", + "qla_target(%d): Response pkt %x received, but no tgt (ha %px)\n", vha->vp_idx, pkt->entry_type, vha->hw); return; } @@ -5936,10 +6107,11 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b, "qla_target(%d): Async LOOP_UP occurred " "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, - mailbox[0], mailbox[1], mailbox[2], mailbox[3]); + le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), + le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); if (tgt->link_reinit_iocb_pending) { qlt_send_notify_ack(ha->base_qpair, - &tgt->link_reinit_iocb, + (void *)&tgt->link_reinit_iocb, 0, 0, 0, 0, 0, 0); tgt->link_reinit_iocb_pending = 0; } @@ -5953,16 +6125,18 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c, "qla_target(%d): Async event %#x occurred " "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code, - mailbox[0], mailbox[1], mailbox[2], mailbox[3]); + le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), + le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); break; case MBA_REJECTED_FCP_CMD: ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017, "qla_target(%d): Async event LS_REJECT occurred (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, - mailbox[0], mailbox[1], mailbox[2], mailbox[3]); + le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), + le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); - if (mailbox[3] == 1) { + if (le16_to_cpu(mailbox[3]) == 1) { /* exchange starvation. 
*/ vha->hw->exch_starvation++; if (vha->hw->exch_starvation > 5) { @@ -5986,9 +6160,10 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha, "qla_target(%d): Port update async event %#x " "occurred: updating the ports database (m[0]=%x, m[1]=%x, " "m[2]=%x, m[3]=%x)", vha->vp_idx, code, - mailbox[0], mailbox[1], mailbox[2], mailbox[3]); + le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]), + le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3])); - login_code = mailbox[2]; + login_code = le16_to_cpu(mailbox[2]); if (login_code == 0x4) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e, "Async MB 2: Got PLOGI Complete\n"); @@ -6285,7 +6460,7 @@ static void qlt_sess_work_fn(struct work_struct *work) struct scsi_qla_host *vha = tgt->vha; unsigned long flags; - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt); + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %px)", tgt); spin_lock_irqsave(&tgt->sess_work_lock, flags); while (!list_empty(&tgt->sess_works_list)) { @@ -6337,7 +6512,7 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) } ql_dbg(ql_dbg_tgt, base_vha, 0xe03b, - "Registering target for host %ld(%p).\n", base_vha->host_no, ha); + "Registering target for host %ld(%px).\n", base_vha->host_no, ha); BUG_ON(base_vha->vha_tgt.qla_tgt != NULL); @@ -6432,22 +6607,22 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) /* free left over qfull cmds */ qlt_init_term_exchange(vha); - ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)", + ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%px)", vha->host_no, ha); qlt_release(vha->vha_tgt.qla_tgt); return 0; } -void qlt_remove_target_resources(struct qla_hw_data *ha) +void qla_remove_hostmap(struct qla_hw_data *ha) { struct scsi_qla_host *node; u32 key = 0; - btree_for_each_safe32(&ha->tgt.host_map, key, node) - btree_remove32(&ha->tgt.host_map, key); + btree_for_each_safe32(&ha->host_map, key, node) + btree_remove32(&ha->host_map, key); - btree_destroy32(&ha->tgt.host_map); + btree_destroy32(&ha->host_map); } static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, @@ -6665,14 +6840,9 @@ static void qlt_disable_vha(struct scsi_qla_host *vha) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); - - /* - * We are expecting the offline state. - * QLA_FUNCTION_FAILED means that adapter is offline. 
- */ if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) ql_dbg(ql_dbg_tgt, vha, 0xe081, - "adapter is offline\n"); + "qla2x00_wait_for_hba_online() failed\n"); } /* @@ -6738,7 +6908,7 @@ qlt_init_atio_q_entries(struct scsi_qla_host *vha) return; for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) { - pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED); + pkt->u.raw.signature = ATIO_PROCESSED; pkt++; } @@ -6773,7 +6943,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", &pkt->u.isp24.fcp_hdr.s_id, be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), - pkt->u.isp24.exchange_addr, pkt); + le32_to_cpu(pkt->u.isp24.exchange_addr), pkt); adjust_corrupted_atio(pkt); qlt_send_term_exchange(ha->base_qpair, NULL, pkt, @@ -6791,14 +6961,14 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) } else ha->tgt.atio_ring_ptr++; - pkt->u.raw.signature = cpu_to_le32(ATIO_PROCESSED); + pkt->u.raw.signature = ATIO_PROCESSED; pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr; } wmb(); } /* Adjust ring index */ - wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); + WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); } void @@ -6811,19 +6981,19 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha) if (!QLA_TGT_MODE_ENABLED()) return; - wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0); - wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0); - rd_reg_dword(ISP_ATIO_Q_OUT(vha)); + WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0); + WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0); + RD_REG_DWORD(ISP_ATIO_Q_OUT(vha)); if (ha->flags.msix_enabled) { if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { if (IS_QLA2071(ha)) { /* 4 ports Baker: Enable Interrupt Handshake */ icb->msix_atio = 0; - icb->firmware_options_2 |= cpu_to_le32(BIT_26); + icb->firmware_options_2 |= BIT_26; } else { icb->msix_atio = cpu_to_le16(msix->entry); - icb->firmware_options_2 &= cpu_to_le32(~BIT_26); + icb->firmware_options_2 &= ~BIT_26; } ql_dbg(ql_dbg_init, vha, 0xf072, "Registering ICB vector 0x%x for atio que.\n", @@ -6833,7 +7003,7 @@ qlt_24xx_config_rings(struct scsi_qla_host *vha) /* INTx|MSI */ if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { icb->msix_atio = 0; - icb->firmware_options_2 |= cpu_to_le32(BIT_26); + icb->firmware_options_2 |= BIT_26; ql_dbg(ql_dbg_init, vha, 0xf072, "%s: Use INTx for ATIOQ.\n", __func__); } @@ -7075,8 +7245,7 @@ qlt_modify_vp_config(struct scsi_qla_host *vha, void qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) { - int rc; - + mutex_init(&base_vha->vha_tgt.tgt_mutex); if (!QLA_TGT_MODE_ENABLED()) return; @@ -7088,7 +7257,6 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out; } - mutex_init(&base_vha->vha_tgt.tgt_mutex); mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex); INIT_LIST_HEAD(&base_vha->unknown_atio_list); @@ -7097,12 +7265,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) qlt_clear_mode(base_vha); - rc = btree_init32(&ha->tgt.host_map); - if (rc) - ql_log(ql_log_info, base_vha, 0xd03d, - "Unable to initialize ha->host_map btree\n"); - - qlt_update_vp_map(base_vha, SET_VP_IDX); + qla_update_vp_map(base_vha, SET_VP_IDX); } irqreturn_t @@ -7181,17 +7344,10 @@ qlt_mem_alloc(struct qla_hw_data *ha) if (!QLA_TGT_MODE_ENABLED()) return 0; - ha->tgt.tgt_vp_map = kcalloc(MAX_MULTI_ID_FABRIC, - sizeof(struct qla_tgt_vp_map), - GFP_KERNEL); - if (!ha->tgt.tgt_vp_map) - return 
-ENOMEM; - ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp), &ha->tgt.atio_dma, GFP_KERNEL); if (!ha->tgt.atio_ring) { - kfree(ha->tgt.tgt_vp_map); return -ENOMEM; } return 0; @@ -7210,69 +7366,6 @@ qlt_mem_free(struct qla_hw_data *ha) } ha->tgt.atio_ring = NULL; ha->tgt.atio_dma = 0; - kfree(ha->tgt.tgt_vp_map); - ha->tgt.tgt_vp_map = NULL; -} - -/* vport_slock to be held by the caller */ -void -qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) -{ - void *slot; - u32 key; - int rc; - - if (!QLA_TGT_MODE_ENABLED()) - return; - - key = vha->d_id.b24; - - switch (cmd) { - case SET_VP_IDX: - vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; - break; - case SET_AL_PA: - slot = btree_lookup32(&vha->hw->tgt.host_map, key); - if (!slot) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf018, - "Save vha in host_map %p %06x\n", vha, key); - rc = btree_insert32(&vha->hw->tgt.host_map, - key, vha, GFP_ATOMIC); - if (rc) - ql_log(ql_log_info, vha, 0xd03e, - "Unable to insert s_id into host_map: %06x\n", - key); - return; - } - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019, - "replace existing vha in host_map %p %06x\n", vha, key); - btree_update32(&vha->hw->tgt.host_map, key, vha); - break; - case RESET_VP_IDX: - vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; - break; - case RESET_AL_PA: - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a, - "clear vha in host_map %p %06x\n", vha, key); - slot = btree_lookup32(&vha->hw->tgt.host_map, key); - if (slot) - btree_remove32(&vha->hw->tgt.host_map, key); - vha->d_id.b24 = 0; - break; - } -} - -void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id) -{ - - if (!vha->d_id.b24) { - vha->d_id = id; - qlt_update_vp_map(vha, SET_AL_PA); - } else if (vha->d_id.b24 != id.b24) { - qlt_update_vp_map(vha, RESET_AL_PA); - vha->d_id = id; - qlt_update_vp_map(vha, SET_AL_PA); - } } static int __init qlt_parse_ini_mode(void) diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 1e94586c7eb21..fac49967ef7c0 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -116,6 +116,7 @@ (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \ QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0)) #endif +#endif #define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \ ? le16_to_cpu((iocb)->u.isp2x.target.extended) \ @@ -134,37 +135,37 @@ struct nack_to_isp { uint8_t entry_status; /* Entry Status. */ union { struct { - __le32 sys_define_2; /* System defined. */ + uint32_t sys_define_2; /* System defined. 
*/ target_id_t target; uint8_t target_id; uint8_t reserved_1; - __le16 flags; - __le16 resp_code; - __le16 status; - __le16 task_flags; - __le16 seq_id; - __le16 srr_rx_id; - __le32 srr_rel_offs; - __le16 srr_ui; - __le16 srr_flags; - __le16 srr_reject_code; + uint16_t flags; + uint16_t resp_code; + uint16_t status; + uint16_t task_flags; + uint16_t seq_id; + uint16_t srr_rx_id; + uint32_t srr_rel_offs; + uint16_t srr_ui; + uint16_t srr_flags; + uint16_t srr_reject_code; uint8_t srr_reject_vendor_uniq; uint8_t srr_reject_code_expl; uint8_t reserved_2[24]; } isp2x; struct { uint32_t handle; - __le16 nport_handle; + uint16_t nport_handle; uint16_t reserved_1; - __le16 flags; - __le16 srr_rx_id; - __le16 status; + uint16_t flags; + uint16_t srr_rx_id; + uint16_t status; uint8_t status_subcode; uint8_t fw_handle; - __le32 exchange_address; - __le32 srr_rel_offs; - __le16 srr_ui; - __le16 srr_flags; + uint32_t exchange_address; + uint32_t srr_rel_offs; + uint16_t srr_ui; + uint16_t srr_flags; uint8_t reserved_4[19]; uint8_t vp_index; uint8_t srr_reject_vendor_uniq; @@ -174,8 +175,9 @@ struct nack_to_isp { } isp24; } u; uint8_t reserved[2]; - __le16 ox_id; + uint16_t ox_id; } __packed; +#define NOTIFY_ACK_FLAGS_FCSP BIT_5 #define NOTIFY_ACK_FLAGS_TERMINATE BIT_3 #define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0 #define NOTIFY_ACK_SRR_FLAGS_REJECT 1 @@ -205,16 +207,16 @@ struct ctio_to_2xxx { uint8_t entry_status; /* Entry Status. */ uint32_t handle; /* System defined handle */ target_id_t target; - __le16 rx_id; - __le16 flags; - __le16 status; - __le16 timeout; /* 0 = 30 seconds, 0xFFFF = disable */ - __le16 dseg_count; /* Data segment count. */ - __le32 relative_offset; - __le32 residual; - __le16 reserved_1[3]; - __le16 scsi_status; - __le32 transfer_length; + uint16_t rx_id; + uint16_t flags; + uint16_t status; + uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */ + uint16_t dseg_count; /* Data segment count. */ + uint32_t relative_offset; + uint32_t residual; + uint16_t reserved_1[3]; + uint16_t scsi_status; + uint32_t transfer_length; struct dsd32 dsd[3]; } __packed; #define ATIO_PATH_INVALID 0x07 @@ -238,12 +240,15 @@ struct ctio_to_2xxx { #define CTIO_PORT_LOGGED_OUT 0x29 #define CTIO_PORT_CONF_CHANGED 0x2A #define CTIO_SRR_RECEIVED 0x45 +#define CTIO_FAST_AUTH_ERR 0x63 +#define CTIO_FAST_INCOMP_PAD_LEN 0x65 +#define CTIO_FAST_INVALID_REQ 0x66 +#define CTIO_FAST_SPI_ERR 0x67 #endif #ifndef CTIO_RET_TYPE #define CTIO_RET_TYPE 0x17 /* CTIO return entry */ #define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */ -#endif struct fcp_hdr { uint8_t r_ctl; @@ -257,8 +262,8 @@ struct fcp_hdr { uint16_t seq_cnt; __be16 ox_id; uint16_t rx_id; - __le32 parameter; -}; + uint32_t parameter; +} __packed; struct fcp_hdr_le { le_id_t d_id; @@ -267,13 +272,13 @@ struct fcp_hdr_le { uint8_t cs_ctl; uint8_t f_ctl[3]; uint8_t type; - __le16 seq_cnt; + uint16_t seq_cnt; uint8_t df_ctl; uint8_t seq_id; - __le16 rx_id; - __le16 ox_id; - __le32 parameter; -}; + uint16_t rx_id; + uint16_t ox_id; + uint32_t parameter; +} __packed; #define F_CTL_EXCH_CONTEXT_RESP BIT_23 #define F_CTL_SEQ_CONTEXT_RESIP BIT_22 @@ -306,7 +311,7 @@ struct atio7_fcp_cmnd { * BUILD_BUG_ON in qlt_init(). */ uint8_t add_cdb[4]; - /* __le32 data_length; */ + /* uint32_t data_length; */ } __packed; /* @@ -316,31 +321,31 @@ struct atio7_fcp_cmnd { struct atio_from_isp { union { struct { - __le16 entry_hdr; + uint16_t entry_hdr; uint8_t sys_define; /* System defined. */ uint8_t entry_status; /* Entry Status. 
*/ - __le32 sys_define_2; /* System defined. */ + uint32_t sys_define_2; /* System defined. */ target_id_t target; - __le16 rx_id; - __le16 flags; - __le16 status; + uint16_t rx_id; + uint16_t flags; + uint16_t status; uint8_t command_ref; uint8_t task_codes; uint8_t task_flags; uint8_t execution_codes; uint8_t cdb[MAX_CMDSZ]; - __le32 data_length; - __le16 lun; + uint32_t data_length; + uint16_t lun; uint8_t initiator_port_name[WWN_SIZE]; /* on qla23xx */ - __le16 reserved_32[6]; - __le16 ox_id; + uint16_t reserved_32[6]; + uint16_t ox_id; } isp2x; struct { - __le16 entry_hdr; + uint16_t entry_hdr; uint8_t fcp_cmnd_len_low; uint8_t fcp_cmnd_len_high:4; uint8_t attr:4; - __le32 exchange_addr; + uint32_t exchange_addr; #define ATIO_EXCHANGE_ADDRESS_UNKNOWN 0xFFFFFFFF struct fcp_hdr fcp_hdr; struct atio7_fcp_cmnd fcp_cmnd; @@ -352,7 +357,7 @@ struct atio_from_isp { #define FCP_CMD_LENGTH_MASK 0x0fff #define FCP_CMD_LENGTH_MIN 0x38 uint8_t data[56]; - __le32 signature; + uint32_t signature; #define ATIO_PROCESSED 0xDEADDEAD /* Signature */ } raw; } u; @@ -379,7 +384,8 @@ static inline int get_datalen_for_atio(struct atio_from_isp *atio) { int len = atio->u.isp24.fcp_cmnd.add_cdb_len; - return get_unaligned_be32(&atio->u.isp24.fcp_cmnd.add_cdb[len * 4]); + return (be32_to_cpu(get_unaligned((uint32_t *) + &atio->u.isp24.fcp_cmnd.add_cdb[len * 4]))); } #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ @@ -395,36 +401,45 @@ struct ctio7_to_24xx { uint8_t sys_define; /* System defined. */ uint8_t entry_status; /* Entry Status. */ uint32_t handle; /* System defined handle */ - __le16 nport_handle; + uint16_t nport_handle; #define CTIO7_NHANDLE_UNRECOGNIZED 0xFFFF - __le16 timeout; - __le16 dseg_count; /* Data segment count. */ + uint16_t timeout; + uint16_t dseg_count; /* Data segment count. */ uint8_t vp_index; uint8_t add_flags; le_id_t initiator_id; uint8_t reserved; - __le32 exchange_addr; + uint32_t exchange_addr; union { struct { - __le16 reserved1; + uint16_t reserved1; __le16 flags; - __le32 residual; + union { + struct { + uint8_t rsvd1; + uint8_t edif_flags; +#define EF_EN_EDIF BIT_0 +#define EF_NEW_SA BIT_1 + uint16_t rsvd2; + }; + uint32_t residual; + }; __le16 ox_id; - __le16 scsi_status; - __le32 relative_offset; - __le32 reserved2; - __le32 transfer_length; - __le32 reserved3; + uint16_t scsi_status; + uint32_t relative_offset; + uint32_t reserved2; + uint32_t transfer_length; + uint32_t reserved3; struct dsd64 dsd; } status0; struct { - __le16 sense_length; + uint16_t sense_length; __le16 flags; - __le32 residual; + uint32_t residual; __le16 ox_id; - __le16 scsi_status; - __le16 response_len; - __le16 reserved; + uint16_t scsi_status; + uint16_t response_len; + uint16_t reserved; uint8_t sense_data[24]; } status1; } u; @@ -440,18 +455,18 @@ struct ctio7_from_24xx { uint8_t sys_define; /* System defined. */ uint8_t entry_status; /* Entry Status. */ uint32_t handle; /* System defined handle */ - __le16 status; - __le16 timeout; - __le16 dseg_count; /* Data segment count. */ + uint16_t status; + uint16_t timeout; + uint16_t dseg_count; /* Data segment count. 
*/ uint8_t vp_index; uint8_t reserved1[5]; - __le32 exchange_address; - __le16 reserved2; - __le16 flags; - __le32 residual; - __le16 ox_id; - __le16 reserved3; - __le32 relative_offset; + uint32_t exchange_address; + uint16_t edif_sa_index; /* edif sa_index used for target write data */ + uint16_t flags; + uint32_t residual; + uint16_t ox_id; + uint16_t reserved3; + uint32_t relative_offset; uint8_t reserved4[24]; } __packed; @@ -489,29 +504,29 @@ struct ctio_crc2_to_fw { uint8_t entry_status; /* Entry Status. */ uint32_t handle; /* System handle. */ - __le16 nport_handle; /* N_PORT handle. */ + uint16_t nport_handle; /* N_PORT handle. */ __le16 timeout; /* Command timeout. */ - __le16 dseg_count; /* Data segment count. */ + uint16_t dseg_count; /* Data segment count. */ uint8_t vp_index; uint8_t add_flags; /* additional flags */ #define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3 le_id_t initiator_id; /* initiator ID */ uint8_t reserved1; - __le32 exchange_addr; /* rcv exchange address */ - __le16 reserved2; + uint32_t exchange_addr; /* rcv exchange address */ + uint16_t reserved2; __le16 flags; /* refer to CTIO7 flags values */ - __le32 residual; + uint32_t residual; __le16 ox_id; - __le16 scsi_status; + uint16_t scsi_status; __le32 relative_offset; - __le32 reserved5; + uint32_t reserved5; __le32 transfer_length; /* total fc transfer length */ - __le32 reserved6; + uint32_t reserved6; __le64 crc_context_address __packed; /* Data segment address. */ - __le16 crc_context_len; /* Data segment length. */ - __le16 reserved_1; /* MUST be set to 0. */ + uint16_t crc_context_len; /* Data segment length. */ + uint16_t reserved_1; /* MUST be set to 0. */ }; /* CTIO Type CRC_x Status IOCB */ @@ -522,20 +537,20 @@ struct ctio_crc_from_fw { uint8_t entry_status; /* Entry Status. */ uint32_t handle; /* System handle. */ - __le16 status; - __le16 timeout; /* Command timeout. */ - __le16 dseg_count; /* Data segment count. */ - __le32 reserved1; - __le16 state_flags; + uint16_t status; + uint16_t timeout; /* Command timeout. */ + uint16_t dseg_count; /* Data segment count. */ + uint32_t reserved1; + uint16_t state_flags; #define CTIO_CRC_SF_DIF_CHOPPED BIT_4 - __le32 exchange_address; /* rcv exchange address */ - __le16 reserved2; - __le16 flags; - __le32 resid_xfer_length; - __le16 ox_id; + uint32_t exchange_address; /* rcv exchange address */ + uint16_t reserved2; + uint16_t flags; + uint32_t resid_xfer_length; + uint16_t ox_id; uint8_t reserved3[12]; - __le16 runt_guard; /* reported runt blk guard */ + uint16_t runt_guard; /* reported runt blk guard */ uint8_t actual_dif[8]; uint8_t expected_dif[8]; } __packed; @@ -558,29 +573,29 @@ struct abts_recv_from_24xx { uint8_t sys_define; /* System defined. */ uint8_t entry_status; /* Entry Status. 
*/ uint8_t reserved_1[6]; - __le16 nport_handle; + uint16_t nport_handle; uint8_t reserved_2[2]; uint8_t vp_index; uint8_t reserved_3:4; uint8_t sof_type:4; - __le32 exchange_address; + uint32_t exchange_address; struct fcp_hdr_le fcp_hdr_le; uint8_t reserved_4[16]; - __le32 exchange_addr_to_abort; + uint32_t exchange_addr_to_abort; } __packed; #define ABTS_PARAM_ABORT_SEQ BIT_0 struct ba_acc_le { - __le16 reserved; + uint16_t reserved; uint8_t seq_id_last; uint8_t seq_id_valid; #define SEQ_ID_VALID 0x80 #define SEQ_ID_INVALID 0x00 - __le16 rx_id; - __le16 ox_id; - __le16 high_seq_cnt; - __le16 low_seq_cnt; + uint16_t rx_id; + uint16_t ox_id; + uint16_t high_seq_cnt; + uint16_t low_seq_cnt; } __packed; struct ba_rjt_le { @@ -604,21 +619,21 @@ struct abts_resp_to_24xx { uint8_t sys_define; /* System defined. */ uint8_t entry_status; /* Entry Status. */ uint32_t handle; - __le16 reserved_1; - __le16 nport_handle; - __le16 control_flags; + uint16_t reserved_1; + uint16_t nport_handle; + uint16_t control_flags; #define ABTS_CONTR_FLG_TERM_EXCHG BIT_0 uint8_t vp_index; uint8_t reserved_3:4; uint8_t sof_type:4; - __le32 exchange_address; + uint32_t exchange_address; struct fcp_hdr_le fcp_hdr_le; union { struct ba_acc_le ba_acct; struct ba_rjt_le ba_rjt; } __packed payload; - __le32 reserved_4; - __le32 exchange_addr_to_abort; + uint32_t reserved_4; + uint32_t exchange_addr_to_abort; } __packed; /* @@ -634,21 +649,21 @@ struct abts_resp_from_24xx_fw { uint8_t sys_define; /* System defined. */ uint8_t entry_status; /* Entry Status. */ uint32_t handle; - __le16 compl_status; + uint16_t compl_status; #define ABTS_RESP_COMPL_SUCCESS 0 #define ABTS_RESP_COMPL_SUBCODE_ERROR 0x31 - __le16 nport_handle; - __le16 reserved_1; + uint16_t nport_handle; + uint16_t reserved_1; uint8_t reserved_2; uint8_t reserved_3:4; uint8_t sof_type:4; - __le32 exchange_address; + uint32_t exchange_address; struct fcp_hdr_le fcp_hdr_le; uint8_t reserved_4[8]; - __le32 error_subcode1; + uint32_t error_subcode1; #define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM 0x1E - __le32 error_subcode2; - __le32 exchange_addr_to_abort; + uint32_t error_subcode2; + uint32_t exchange_addr_to_abort; } __packed; /********************************************************************\ @@ -671,8 +686,9 @@ struct qla_tgt_func_tmpl { void (*handle_data)(struct qla_tgt_cmd *); int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, u64, uint16_t, uint32_t); +#ifndef TGT_SBITMAP_QUE struct qla_tgt_cmd *(*get_cmd)(struct fc_port *); - void (*rel_cmd)(struct qla_tgt_cmd *); +#endif void (*free_cmd)(struct qla_tgt_cmd *); void (*free_mcmd)(struct qla_tgt_mgmt_cmd *); void (*free_session)(struct fc_port *); @@ -874,6 +890,7 @@ struct qla_tgt_cmd { unsigned int term_exchg:1; unsigned int cmd_sent_to_fw:1; unsigned int cmd_in_wq:1; + unsigned int edif:1; /* * This variable may be set from outside the LIO and I/O completion @@ -923,6 +940,9 @@ struct qla_tgt_cmd { uint64_t jiffies_at_alloc; uint64_t jiffies_at_free; +#ifdef TGT_FREE_TAG + void (*rel_cmd)(struct qla_tgt_cmd *); +#endif enum trace_flags trc_flags; }; @@ -990,6 +1010,9 @@ struct qla_tgt_prm { #define QLA_TGT_XMIT_STATUS 2 #define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) +#define PRLO_ACK_NEEDED(_sess) \ + (_sess->logo_ack_needed && \ + ((struct imm_ntfy_from_isp*)_sess->iocb)->u.isp24.status_subcode == ELS_PRLO) extern struct qla_tgt_data qla_target; @@ -1006,7 +1029,6 @@ extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); extern void qlt_fc_port_deleted(struct 
scsi_qla_host *, fc_port_t *, int); extern int __init qlt_init(void); extern void qlt_exit(void); -extern void qlt_update_vp_map(struct scsi_qla_host *, int); extern void qlt_free_session_done(struct work_struct *); /* * This macro is used during early initializations when host->active_mode diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index 26c13a953b975..08ef27fc38e3c 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -1,7 +1,8 @@ -// SPDX-License-Identifier: GPL-2.0-only /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" #include "qla_tmpl.h" @@ -47,7 +48,7 @@ qla27xx_read8(void __iomem *window, void *buf, ulong *len) uint8_t value = ~0; if (buf) { - value = rd_reg_byte(window); + value = RD_REG_BYTE(window); } qla27xx_insert32(value, buf, len); } @@ -58,7 +59,7 @@ qla27xx_read16(void __iomem *window, void *buf, ulong *len) uint16_t value = ~0; if (buf) { - value = rd_reg_word(window); + value = RD_REG_WORD(window); } qla27xx_insert32(value, buf, len); } @@ -69,7 +70,7 @@ qla27xx_read32(void __iomem *window, void *buf, ulong *len) uint32_t value = ~0; if (buf) { - value = rd_reg_dword(window); + value = RD_REG_DWORD(window); } qla27xx_insert32(value, buf, len); } @@ -98,7 +99,7 @@ qla27xx_write_reg(__iomem struct device_reg_24xx *reg, if (buf) { void __iomem *window = (void __iomem *)reg + offset; - wrt_reg_dword(window, data); + WRT_REG_DWORD(window, data); } } @@ -435,8 +436,13 @@ qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha, { ql_dbg(ql_dbg_misc, vha, 0xd20a, "%s: reset risc [%lx]\n", __func__, *len); - if (buf) - WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS); + if (buf) { + if (qla24xx_soft_reset(vha->hw) != QLA_SUCCESS) { + ql_dbg(ql_dbg_async, vha, 0x5001, + "%s: unable to soft reset\n", __func__); + return INVALID_ENTRY; + } + } return qla27xx_next_entry(ent); } @@ -879,7 +885,7 @@ qla27xx_driver_info(struct qla27xx_fwdt_template *tmp) WARN_ON_ONCE(sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu", - v + 0, v + 1, v + 2, v + 3) != 4); + v+0, v+1, v+2, v+3) != 4); tmp->driver_info[0] = cpu_to_le32( v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]); @@ -891,12 +897,12 @@ static void qla27xx_firmware_info(struct scsi_qla_host *vha, struct qla27xx_fwdt_template *tmp) { - tmp->firmware_version[0] = cpu_to_le32(vha->hw->fw_major_version); - tmp->firmware_version[1] = cpu_to_le32(vha->hw->fw_minor_version); - tmp->firmware_version[2] = cpu_to_le32(vha->hw->fw_subminor_version); + tmp->firmware_version[0] = vha->hw->fw_major_version; + tmp->firmware_version[1] = vha->hw->fw_minor_version; + tmp->firmware_version[2] = vha->hw->fw_subminor_version; tmp->firmware_version[3] = cpu_to_le32( vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes); - tmp->firmware_version[4] = cpu_to_le32( + tmp->firmware_version[4] =cpu_to_le32( vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0]); } @@ -1002,8 +1008,10 @@ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked) { ulong flags = 0; +#ifndef __CHECKER__ if (!hardware_locked) spin_lock_irqsave(&vha->hw->hardware_lock, flags); +#endif if (!vha->hw->mpi_fw_dump) { ql_log(ql_log_warn, vha, 0x02f3, "-> mpi_fwdump no buffer\n"); } else { @@ -1017,14 +1025,13 @@ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked) buf += fwdt->dump_size; walk_template_only = true; ql_log(ql_log_warn, vha, 0x02f4, - "-> MPI firmware already 
dumped -- dump saving to temporary buffer %p.\n", - buf); + "-> MPI firmware already dumped -- dump saving to temporary buffer %px.\n", + buf); } - ql_log(ql_log_warn, vha, 0x02f5, "-> fwdt1 running...\n"); if (!fwdt->template) { ql_log(ql_log_warn, vha, 0x02f6, - "-> fwdt1 no template\n"); + "-> fwdt1 no template\n"); goto bailout; } len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf); @@ -1032,8 +1039,8 @@ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked) goto bailout; } else if (len != fwdt->dump_size) { ql_log(ql_log_warn, vha, 0x02f7, - "-> fwdt1 fwdump residual=%+ld\n", - fwdt->dump_size - len); + "-> fwdt1 fwdump residual=%+ld\n", + fwdt->dump_size - len); } vha->hw->stat.num_mpi_reset++; if (walk_template_only) @@ -1043,26 +1050,34 @@ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked) vha->hw->mpi_fw_dumped = 1; ql_log(ql_log_warn, vha, 0x02f8, - "-> MPI firmware dump saved to buffer (%lu/%p)\n", - vha->host_no, vha->hw->mpi_fw_dump); + "-> MPI firmware dump saved to buffer (%lu/%px)\n", + vha->host_no, vha->hw->mpi_fw_dump); qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); } bailout: +#ifndef __CHECKER__ if (!hardware_locked) spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); +#endif } + void -qla27xx_fwdump(scsi_qla_host_t *vha) +qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked) { - lockdep_assert_held(&vha->hw->hardware_lock); + ulong flags = 0; + +#ifndef __CHECKER__ + if (!hardware_locked) + spin_lock_irqsave(&vha->hw->hardware_lock, flags); +#endif if (!vha->hw->fw_dump) { ql_log(ql_log_warn, vha, 0xd01e, "-> fwdump no buffer\n"); } else if (vha->hw->fw_dumped) { ql_log(ql_log_warn, vha, 0xd01f, - "-> Firmware already dumped (%p) -- ignoring request\n", + "-> Firmware already dumped (%px) -- ignoring request\n", vha->hw->fw_dump); } else { struct fwdt *fwdt = vha->hw->fwdt; @@ -1072,24 +1087,30 @@ qla27xx_fwdump(scsi_qla_host_t *vha) ql_log(ql_log_warn, vha, 0xd011, "-> fwdt0 running...\n"); if (!fwdt->template) { ql_log(ql_log_warn, vha, 0xd012, - "-> fwdt0 no template\n"); - return; + "-> fwdt0 no template\n"); + goto bailout; } len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf); if (len == 0) { - return; + goto bailout; } else if (len != fwdt->dump_size) { ql_log(ql_log_warn, vha, 0xd013, - "-> fwdt0 fwdump residual=%+ld\n", - fwdt->dump_size - len); + "-> fwdt0 fwdump residual=%+ld\n", + fwdt->dump_size - len); } vha->hw->fw_dump_len = len; - vha->hw->fw_dumped = true; + vha->hw->fw_dumped = 1; ql_log(ql_log_warn, vha, 0xd015, - "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n", + "-> Firmware dump saved to buffer (%lu/%px) <%lx>\n", vha->host_no, vha->hw->fw_dump, vha->hw->fw_dump_cap_flags); qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); } + +bailout: +#ifndef __CHECKER__ + if (!hardware_locked) + spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); +#endif } diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h index 6e0987edfcebc..fb8ab3bc86c26 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.h +++ b/drivers/scsi/qla2xxx/qla_tmpl.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. 
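NOTE: the reworked qla27xx_fwdump() above now takes a hardware_locked flag and mirrors qla27xx_mpi_fwdump(): the hardware_lock is acquired only when the caller does not already hold it, and the conditional lock/unlock pair is hidden from sparse with #ifndef __CHECKER__, since sparse cannot model conditional locking. The skeleton of the pattern, with a hypothetical function name:

	/* Sketch of the conditional-locking pattern used by both dumpers. */
	static void example_fwdump(scsi_qla_host_t *vha, int hardware_locked)
	{
		ulong flags = 0;

	#ifndef __CHECKER__
		if (!hardware_locked)
			spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	#endif

		/* ... walk the dump template with the lock held ... */

	#ifndef __CHECKER__
		if (!hardware_locked)
			spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
	#endif
	}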
*/ #ifndef __QLA_DMP27_H__ @@ -17,16 +18,16 @@ struct __packed qla27xx_fwdt_template { __le32 entry_count; uint32_t template_version; - __le32 capture_timestamp; + uint32_t capture_timestamp; uint32_t template_checksum; uint32_t reserved_2; - __le32 driver_info[3]; + uint32_t driver_info[3]; uint32_t saved_state[16]; uint32_t reserved_3[8]; - __le32 firmware_version[5]; + uint32_t firmware_version[5]; }; #define TEMPLATE_TYPE_FWDUMP 99 diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index c2d4da52f4a92..d35fcc7194e6a 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -1,14 +1,14 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ /* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2014 QLogic Corporation + * + * See LICENSE.qla2xxx for copyright and licensing details. */ /* * Driver version */ -#define QLA2XXX_VERSION "10.02.00.103-k" - +#define QLA2XXX_VERSION "10.02.08.00.a7-k" #define QLA_DRIVER_MAJOR_VER 10 #define QLA_DRIVER_MINOR_VER 2 -#define QLA_DRIVER_PATCH_VER 0 -#define QLA_DRIVER_BETA_VER 103 +#define QLA_DRIVER_PATCH_VER 8 +#define QLA_DRIVER_BETA_VER 0 diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 7405fab324c82..763358b0a470d 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -16,1971 +16,14 @@ #include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "qla_def.h" -#include "qla_target.h" -#include "tcm_qla2xxx.h" - -static struct workqueue_struct *tcm_qla2xxx_free_wq; - -/* - * Parse WWN. - * If strict, we require lower-case hex and colon separators to be sure - * the name is the same as what would be generated by ft_format_wwn() - * so the name and wwn are mapped one-to-one. 
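NOTE: "strict" in the comment above means the (now removed) parser accepts only the exact form produced by tcm_qla2xxx_format_wwn(): eight lower-case hex byte values separated by colons. An illustrative round-trip showing the contract of the two removed helpers:

	/* Illustrative only; exercises the removed helpers' contract. */
	u64 wwn = 0x5001438012345678ULL;
	char buf[32];
	u64 parsed;

	/* buf becomes "50:01:43:80:12:34:56:78" */
	tcm_qla2xxx_format_wwn(buf, sizeof(buf), wwn);

	/* strict parse (last argument 1) maps it back to the same u64 */
	if (tcm_qla2xxx_parse_wwn(buf, &parsed, 1) < 0)
		pr_err("round-trip failed\n");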
- */ -static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict) -{ - const char *cp; - char c; - u32 nibble; - u32 byte = 0; - u32 pos = 0; - u32 err; - - *wwn = 0; - for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) { - c = *cp; - if (c == '\n' && cp[1] == '\0') - continue; - if (strict && pos++ == 2 && byte++ < 7) { - pos = 0; - if (c == ':') - continue; - err = 1; - goto fail; - } - if (c == '\0') { - err = 2; - if (strict && byte != 8) - goto fail; - return cp - name; - } - err = 3; - if (isdigit(c)) - nibble = c - '0'; - else if (isxdigit(c) && (islower(c) || !strict)) - nibble = tolower(c) - 'a' + 10; - else - goto fail; - *wwn = (*wwn << 4) | nibble; - } - err = 4; -fail: - pr_debug("err %u len %zu pos %u byte %u\n", - err, cp - name, pos, byte); - return -1; -} - -static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn) -{ - u8 b[8]; - - put_unaligned_be64(wwn, b); - return snprintf(buf, len, - "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x", - b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]); -} - -/* - * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn - */ -static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm) -{ - unsigned int i, j; - u8 wwn[8]; - - memset(wwn, 0, sizeof(wwn)); - - /* Validate and store the new name */ - for (i = 0, j = 0; i < 16; i++) { - int value; - - value = hex_to_bin(*ns++); - if (value >= 0) - j = (j << 4) | value; - else - return -EINVAL; - - if (i % 2) { - wwn[i/2] = j & 0xff; - j = 0; - } - } - - *nm = wwn_to_u64(wwn); - return 0; -} - -/* - * This parsing logic follows drivers/scsi/scsi_transport_fc.c: - * store_fc_host_vport_create() - */ -static int tcm_qla2xxx_npiv_parse_wwn( - const char *name, - size_t count, - u64 *wwpn, - u64 *wwnn) -{ - unsigned int cnt = count; - int rc; - - *wwpn = 0; - *wwnn = 0; - - /* count may include a LF at end of string */ - if (name[cnt-1] == '\n' || name[cnt-1] == 0) - cnt--; - - /* validate we have enough characters for WWPN */ - if ((cnt != (16+1+16)) || (name[16] != ':')) - return -EINVAL; - - rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn); - if (rc != 0) - return rc; - - rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn); - if (rc != 0) - return rc; - - return 0; -} - -static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg) -{ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - struct tcm_qla2xxx_lport *lport = tpg->lport; - - return lport->lport_naa_name; -} - -static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg) -{ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - return tpg->lport_tpgt; -} - -static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg) -{ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - - return tpg->tpg_attrib.generate_node_acls; -} - -static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg) -{ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - - return tpg->tpg_attrib.cache_dynamic_acls; -} - -static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg) -{ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - - return tpg->tpg_attrib.demo_mode_write_protect; -} - -static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg) -{ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - - return 
tpg->tpg_attrib.prod_mode_write_protect; -} - -static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg) -{ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - - return tpg->tpg_attrib.demo_mode_login_only; -} - -static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg) -{ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - - return tpg->tpg_attrib.fabric_prot_type; -} - -static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg) -{ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - - return tpg->lport_tpgt; -} - -static void tcm_qla2xxx_complete_mcmd(struct work_struct *work) -{ - struct qla_tgt_mgmt_cmd *mcmd = container_of(work, - struct qla_tgt_mgmt_cmd, free_work); - - transport_generic_free_cmd(&mcmd->se_cmd, 0); -} - -/* - * Called from qla_target_template->free_mcmd(), and will call - * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops - * release callback. qla_hw_data->hardware_lock is expected to be held - */ -static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) -{ - if (!mcmd) - return; - INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd); - queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work); -} - -static void tcm_qla2xxx_complete_free(struct work_struct *work) -{ - struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); - - cmd->cmd_in_wq = 0; - - WARN_ON(cmd->trc_flags & TRC_CMD_FREE); - - /* To do: protect all tgt_counters manipulations with proper locking. */ - cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++; - cmd->trc_flags |= TRC_CMD_FREE; - cmd->cmd_sent_to_fw = 0; - - transport_generic_free_cmd(&cmd->se_cmd, 0); -} - -static struct qla_tgt_cmd *tcm_qla2xxx_get_cmd(struct fc_port *sess) -{ - struct se_session *se_sess = sess->se_sess; - struct qla_tgt_cmd *cmd; - int tag, cpu; - - tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu); - if (tag < 0) - return NULL; - - cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag]; - memset(cmd, 0, sizeof(struct qla_tgt_cmd)); - cmd->se_cmd.map_tag = tag; - cmd->se_cmd.map_cpu = cpu; - - return cmd; -} - -static void tcm_qla2xxx_rel_cmd(struct qla_tgt_cmd *cmd) -{ - target_free_tag(cmd->sess->se_sess, &cmd->se_cmd); -} - -/* - * Called from qla_target_template->free_cmd(), and will call - * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops - * release callback. 
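NOTE: tcm_qla2xxx_get_cmd() above is the standard target-core per-session tag-pool pattern, and it is the same pattern the TGT_SBITMAP_QUE path adds to qlt_alloc_qfull_cmd() earlier in this patch: take a tag from se_sess->sess_tag_pool, index into the preallocated sess_cmd_map, and zero the slot. Condensed from the code above:

	/* Condensed from tcm_qla2xxx_get_cmd(); the tag is returned to
	 * the pool later via target_free_tag() in tcm_qla2xxx_rel_cmd(). */
	int tag, cpu;
	struct qla_tgt_cmd *cmd;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0)
		return NULL;			/* pool exhausted */

	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
	memset(cmd, 0, sizeof(struct qla_tgt_cmd));
	cmd->se_cmd.map_tag = tag;		/* lets target_free_tag() work */
	cmd->se_cmd.map_cpu = cpu;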
qla_hw_data->hardware_lock is expected to be held - */ -static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) -{ - cmd->qpair->tgt_counters.core_qla_free_cmd++; - cmd->cmd_in_wq = 1; - - WARN_ON(cmd->trc_flags & TRC_CMD_DONE); - cmd->trc_flags |= TRC_CMD_DONE; - - INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); - queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); -} - -/* - * Called from struct target_core_fabric_ops->check_stop_free() context - */ -static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd) -{ - struct qla_tgt_cmd *cmd; - - if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) { - cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - cmd->trc_flags |= TRC_CMD_CHK_STOP; - } - - return target_put_sess_cmd(se_cmd); -} - -/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying - * fabric descriptor @se_cmd command to release - */ -static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd) -{ - struct qla_tgt_cmd *cmd; - - if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) { - struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd, - struct qla_tgt_mgmt_cmd, se_cmd); - qlt_free_mcmd(mcmd); - return; - } - cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - - if (WARN_ON(cmd->cmd_sent_to_fw)) - return; - - qlt_free_cmd(cmd); -} - -static void tcm_qla2xxx_release_session(struct kref *kref) -{ - struct fc_port *sess = container_of(kref, - struct fc_port, sess_kref); - - qlt_unreg_sess(sess); -} - -static void tcm_qla2xxx_put_sess(struct fc_port *sess) -{ - if (!sess) - return; - - kref_put(&sess->sess_kref, tcm_qla2xxx_release_session); -} - -static void tcm_qla2xxx_close_session(struct se_session *se_sess) -{ - struct fc_port *sess = se_sess->fabric_sess_ptr; - struct scsi_qla_host *vha; - unsigned long flags; - - BUG_ON(!sess); - vha = sess->vha; - - spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); - target_sess_cmd_list_set_waiting(se_sess); - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); - - sess->explicit_logout = 1; - tcm_qla2xxx_put_sess(sess); -} - -static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess) -{ - return 0; -} - -static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) -{ - struct qla_tgt_cmd *cmd = container_of(se_cmd, - struct qla_tgt_cmd, se_cmd); - - if (cmd->aborted) { - /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task - * can get ahead of this cmd. tcm_qla2xxx_aborted_task - * already kick start the free. - */ - pr_debug("write_pending aborted cmd[%p] refcount %d " - "transport_state %x, t_state %x, se_cmd_flags %x\n", - cmd, kref_read(&cmd->se_cmd.cmd_kref), - cmd->se_cmd.transport_state, - cmd->se_cmd.t_state, - cmd->se_cmd.se_cmd_flags); - transport_generic_request_failure(&cmd->se_cmd, - TCM_CHECK_CONDITION_ABORT_CMD); - return 0; - } - cmd->trc_flags |= TRC_XFR_RDY; - cmd->bufflen = se_cmd->data_length; - cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); - - cmd->sg_cnt = se_cmd->t_data_nents; - cmd->sg = se_cmd->t_data_sg; - - cmd->prot_sg_cnt = se_cmd->t_prot_nents; - cmd->prot_sg = se_cmd->t_prot_sg; - cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size; - se_cmd->pi_err = 0; - - /* - * qla_target.c:qlt_rdy_to_xfer() will call dma_map_sg() to setup - * the SGL mappings into PCIe memory for incoming FCP WRITE data. 
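NOTE: the FCP WRITE (data-out) path through this glue: ->write_pending() above hands the command's SGL to qlt_rdy_to_xfer(), which DMA-maps it and sends XFER_RDY to the initiator; when the data CTIO later completes, qla_target.c invokes ->handle_data() (below), whose work item either fails the command (abort or DIF error) or calls target_execute_cmd(). The setup step, condensed into a hypothetical function (error handling elided):

	/* Condensed write-pending path; example_write_pending is a
	 * placeholder name for the setup shown above. */
	static int example_write_pending(struct se_cmd *se_cmd)
	{
		struct qla_tgt_cmd *cmd =
			container_of(se_cmd, struct qla_tgt_cmd, se_cmd);

		cmd->bufflen = se_cmd->data_length;
		cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
		cmd->sg_cnt = se_cmd->t_data_nents;
		cmd->sg = se_cmd->t_data_sg;

		/* LLD DMA-maps the SGL and sends XFER_RDY to the initiator */
		return qlt_rdy_to_xfer(cmd);
	}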
- */ - return qlt_rdy_to_xfer(cmd); -} - -static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl) -{ - return; -} - -static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd) -{ - if (!(se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { - struct qla_tgt_cmd *cmd = container_of(se_cmd, - struct qla_tgt_cmd, se_cmd); - return cmd->state; - } - - return 0; -} - -/* - * Called from process context in qla_target.c:qlt_do_work() code - */ -static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, - unsigned char *cdb, uint32_t data_length, int fcp_task_attr, - int data_dir, int bidi) -{ - struct se_cmd *se_cmd = &cmd->se_cmd; - struct se_session *se_sess; - struct fc_port *sess; -#ifdef CONFIG_TCM_QLA2XXX_DEBUG - struct se_portal_group *se_tpg; - struct tcm_qla2xxx_tpg *tpg; -#endif - int flags = TARGET_SCF_ACK_KREF; - - if (bidi) - flags |= TARGET_SCF_BIDI_OP; - - if (se_cmd->cpuid != WORK_CPU_UNBOUND) - flags |= TARGET_SCF_USE_CPUID; - - sess = cmd->sess; - if (!sess) { - pr_err("Unable to locate struct fc_port from qla_tgt_cmd\n"); - return -EINVAL; - } - - se_sess = sess->se_sess; - if (!se_sess) { - pr_err("Unable to locate active struct se_session\n"); - return -EINVAL; - } - -#ifdef CONFIG_TCM_QLA2XXX_DEBUG - se_tpg = se_sess->se_tpg; - tpg = container_of(se_tpg, struct tcm_qla2xxx_tpg, se_tpg); - if (unlikely(tpg->tpg_attrib.jam_host)) { - /* return, and dont run target_submit_cmd,discarding command */ - return 0; - } -#endif - - cmd->qpair->tgt_counters.qla_core_sbt_cmd++; - return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0], - cmd->unpacked_lun, data_length, fcp_task_attr, - data_dir, flags); -} - -static void tcm_qla2xxx_handle_data_work(struct work_struct *work) -{ - struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); - - /* - * Ensure that the complete FCP WRITE payload has been received. - * Otherwise return an exception via CHECK_CONDITION status. 
- */ - cmd->cmd_in_wq = 0; - cmd->cmd_sent_to_fw = 0; - if (cmd->aborted) { - transport_generic_request_failure(&cmd->se_cmd, - TCM_CHECK_CONDITION_ABORT_CMD); - return; - } - - cmd->qpair->tgt_counters.qla_core_ret_ctio++; - if (!cmd->write_data_transferred) { - switch (cmd->dif_err_code) { - case DIF_ERR_GRD: - cmd->se_cmd.pi_err = - TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; - break; - case DIF_ERR_REF: - cmd->se_cmd.pi_err = - TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; - break; - case DIF_ERR_APP: - cmd->se_cmd.pi_err = - TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; - break; - case DIF_ERR_NONE: - default: - break; - } - - if (cmd->se_cmd.pi_err) - transport_generic_request_failure(&cmd->se_cmd, - cmd->se_cmd.pi_err); - else - transport_generic_request_failure(&cmd->se_cmd, - TCM_CHECK_CONDITION_ABORT_CMD); - - return; - } - - return target_execute_cmd(&cmd->se_cmd); -} - -/* - * Called from qla_target.c:qlt_do_ctio_completion() - */ -static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) -{ - cmd->trc_flags |= TRC_DATA_IN; - cmd->cmd_in_wq = 1; - INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work); - queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); -} - -static int tcm_qla2xxx_chk_dif_tags(uint32_t tag) -{ - return 0; -} - -static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd, - uint16_t *pfw_prot_opts) -{ - struct se_cmd *se_cmd = &cmd->se_cmd; - - if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) - *pfw_prot_opts |= PO_DISABLE_GUARD_CHECK; - - if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)) - *pfw_prot_opts |= PO_DIS_APP_TAG_VALD; - - return 0; -} - -/* - * Called from qla_target.c:qlt_issue_task_mgmt() - */ -static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun, - uint16_t tmr_func, uint32_t tag) -{ - struct fc_port *sess = mcmd->sess; - struct se_cmd *se_cmd = &mcmd->se_cmd; - int transl_tmr_func = 0; - int flags = TARGET_SCF_ACK_KREF; - - switch (tmr_func) { - case QLA_TGT_ABTS: - pr_debug("%ld: ABTS received\n", sess->vha->host_no); - transl_tmr_func = TMR_ABORT_TASK; - flags |= TARGET_SCF_LOOKUP_LUN_FROM_TAG; - break; - case QLA_TGT_2G_ABORT_TASK: - pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no); - transl_tmr_func = TMR_ABORT_TASK; - break; - case QLA_TGT_CLEAR_ACA: - pr_debug("%ld: CLEAR_ACA received\n", sess->vha->host_no); - transl_tmr_func = TMR_CLEAR_ACA; - break; - case QLA_TGT_TARGET_RESET: - pr_debug("%ld: TARGET_RESET received\n", sess->vha->host_no); - transl_tmr_func = TMR_TARGET_WARM_RESET; - break; - case QLA_TGT_LUN_RESET: - pr_debug("%ld: LUN_RESET received\n", sess->vha->host_no); - transl_tmr_func = TMR_LUN_RESET; - break; - case QLA_TGT_CLEAR_TS: - pr_debug("%ld: CLEAR_TS received\n", sess->vha->host_no); - transl_tmr_func = TMR_CLEAR_TASK_SET; - break; - case QLA_TGT_ABORT_TS: - pr_debug("%ld: ABORT_TS received\n", sess->vha->host_no); - transl_tmr_func = TMR_ABORT_TASK_SET; - break; - default: - pr_debug("%ld: Unknown task mgmt fn 0x%x\n", - sess->vha->host_no, tmr_func); - return -ENOSYS; - } - - return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd, - transl_tmr_func, GFP_ATOMIC, tag, flags); -} - -static struct qla_tgt_cmd *tcm_qla2xxx_find_cmd_by_tag(struct fc_port *sess, - uint64_t tag) -{ - struct qla_tgt_cmd *cmd = NULL; - struct se_cmd *secmd; - unsigned long flags; - - if (!sess->se_sess) - return NULL; - - spin_lock_irqsave(&sess->se_sess->sess_cmd_lock, flags); - list_for_each_entry(secmd, &sess->se_sess->sess_cmd_list, se_cmd_list) { - /* skip task management functions, including 
tmr->task_cmd */ - if (secmd->se_cmd_flags & SCF_SCSI_TMR_CDB) - continue; - - if (secmd->tag == tag) { - cmd = container_of(secmd, struct qla_tgt_cmd, se_cmd); - break; - } - } - spin_unlock_irqrestore(&sess->se_sess->sess_cmd_lock, flags); - - return cmd; -} - -static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) -{ - struct qla_tgt_cmd *cmd = container_of(se_cmd, - struct qla_tgt_cmd, se_cmd); - - if (cmd->aborted) { - /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task - * can get ahead of this cmd. tcm_qla2xxx_aborted_task - * already kick start the free. - */ - pr_debug("queue_data_in aborted cmd[%p] refcount %d " - "transport_state %x, t_state %x, se_cmd_flags %x\n", - cmd, kref_read(&cmd->se_cmd.cmd_kref), - cmd->se_cmd.transport_state, - cmd->se_cmd.t_state, - cmd->se_cmd.se_cmd_flags); - return 0; - } - - cmd->trc_flags |= TRC_XMIT_DATA; - cmd->bufflen = se_cmd->data_length; - cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); - - cmd->sg_cnt = se_cmd->t_data_nents; - cmd->sg = se_cmd->t_data_sg; - cmd->offset = 0; - - cmd->prot_sg_cnt = se_cmd->t_prot_nents; - cmd->prot_sg = se_cmd->t_prot_sg; - cmd->blk_sz = se_cmd->se_dev->dev_attrib.block_size; - se_cmd->pi_err = 0; - - /* - * Now queue completed DATA_IN the qla2xxx LLD and response ring - */ - return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS, - se_cmd->scsi_status); -} - -static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) -{ - struct qla_tgt_cmd *cmd = container_of(se_cmd, - struct qla_tgt_cmd, se_cmd); - int xmit_type = QLA_TGT_XMIT_STATUS; - - if (cmd->aborted) { - /* - * Cmd can loop during Q-full. tcm_qla2xxx_aborted_task - * can get ahead of this cmd. tcm_qla2xxx_aborted_task - * already kick start the free. - */ - pr_debug( - "queue_data_in aborted cmd[%p] refcount %d transport_state %x, t_state %x, se_cmd_flags %x\n", - cmd, kref_read(&cmd->se_cmd.cmd_kref), - cmd->se_cmd.transport_state, cmd->se_cmd.t_state, - cmd->se_cmd.se_cmd_flags); - return 0; - } - cmd->bufflen = se_cmd->data_length; - cmd->sg = NULL; - cmd->sg_cnt = 0; - cmd->offset = 0; - cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); - cmd->trc_flags |= TRC_XMIT_STATUS; - - if (se_cmd->data_direction == DMA_FROM_DEVICE) { - /* - * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen - * for qla_tgt_xmit_response LLD code - */ - if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { - se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT; - se_cmd->residual_count = 0; - } - se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; - se_cmd->residual_count += se_cmd->data_length; - - cmd->bufflen = 0; - } - /* - * Now queue status response to qla2xxx LLD code and response ring - */ - return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status); -} - -static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) -{ - struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; - struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd, - struct qla_tgt_mgmt_cmd, se_cmd); - - pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n", - mcmd, se_tmr->function, se_tmr->response); - /* - * Do translation between TCM TM response codes and - * QLA2xxx FC TM response codes. 
- */ - switch (se_tmr->response) { - case TMR_FUNCTION_COMPLETE: - mcmd->fc_tm_rsp = FC_TM_SUCCESS; - break; - case TMR_TASK_DOES_NOT_EXIST: - mcmd->fc_tm_rsp = FC_TM_BAD_CMD; - break; - case TMR_FUNCTION_REJECTED: - mcmd->fc_tm_rsp = FC_TM_REJECT; - break; - case TMR_LUN_DOES_NOT_EXIST: - default: - mcmd->fc_tm_rsp = FC_TM_FAILED; - break; - } - /* - * Queue the TM response to QLA2xxx LLD to build a - * CTIO response packet. - */ - qlt_xmit_tm_rsp(mcmd); -} - -static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) -{ - struct qla_tgt_cmd *cmd = container_of(se_cmd, - struct qla_tgt_cmd, se_cmd); - - if (qlt_abort_cmd(cmd)) - return; -} - -static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, - struct tcm_qla2xxx_nacl *, struct fc_port *); -/* - * Expected to be called with struct qla_hw_data->tgt.sess_lock held - */ -static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess) -{ - struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; - struct se_portal_group *se_tpg = se_nacl->se_tpg; - struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; - struct tcm_qla2xxx_lport *lport = container_of(se_wwn, - struct tcm_qla2xxx_lport, lport_wwn); - struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, - struct tcm_qla2xxx_nacl, se_node_acl); - void *node; - - pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id); - - node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id); - if (WARN_ON(node && (node != se_nacl))) { - /* - * The nacl no longer matches what we think it should be. - * Most likely a new dynamic acl has been added while - * someone dropped the hardware lock. It clearly is a - * bug elsewhere, but this bit can't make things worse. - */ - btree_insert32(&lport->lport_fcport_map, nacl->nport_id, - node, GFP_ATOMIC); - } - - pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", - se_nacl, nacl->nport_wwnn, nacl->nport_id); - /* - * Now clear the se_nacl and session pointers from our HW lport lookup - * table mapping for this initiator's fabric S_ID and LOOP_ID entries. - * - * This is done ahead of callbacks into tcm_qla2xxx_free_session() -> - * target_wait_for_sess_cmds() before the session waits for outstanding - * I/O to complete, to avoid a race between session shutdown execution - * and incoming ATIOs or TMRs picking up a stale se_node_act reference. 
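NOTE: the lport_fcport_map manipulated above, like the host_map that qla_remove_hostmap() tears down in qla_target.c, is a 32-bit kernel btree (lib/btree) keyed by the 24-bit N_Port ID. A sketch of the API pattern as used throughout this patch -- the function name and error handling here are placeholders:

	#include <linux/btree.h>

	/* Hypothetical example of the btree usage pattern above. */
	static int example_map_port(struct scsi_qla_host *vha)
	{
		struct btree_head32 map;
		u32 key = vha->d_id.b24;	/* 24-bit N_Port ID as key */

		if (btree_init32(&map))
			return -ENOMEM;
		/* GFP_ATOMIC: these maps are updated under spinlocks */
		if (btree_insert32(&map, key, vha, GFP_ATOMIC)) {
			btree_destroy32(&map);
			return -ENOMEM;
		}
		WARN_ON(btree_lookup32(&map, key) != vha); /* NULL if absent */
		btree_remove32(&map, key);
		btree_destroy32(&map);
		return 0;
	}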
- */ - tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess); -} - -static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess) -{ - target_sess_cmd_list_set_waiting(sess->se_sess); -} - -static int tcm_qla2xxx_init_nodeacl(struct se_node_acl *se_nacl, - const char *name) -{ - struct tcm_qla2xxx_nacl *nacl = - container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); - u64 wwnn; - - if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0) - return -EINVAL; - - nacl->nport_wwnn = wwnn; - tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn); - - return 0; -} - -/* Start items for tcm_qla2xxx_tpg_attrib_cit */ - -#define DEF_QLA_TPG_ATTRIB(name) \ - \ -static ssize_t tcm_qla2xxx_tpg_attrib_##name##_show( \ - struct config_item *item, char *page) \ -{ \ - struct se_portal_group *se_tpg = attrib_to_tpg(item); \ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ - struct tcm_qla2xxx_tpg, se_tpg); \ - \ - return sprintf(page, "%d\n", tpg->tpg_attrib.name); \ -} \ - \ -static ssize_t tcm_qla2xxx_tpg_attrib_##name##_store( \ - struct config_item *item, const char *page, size_t count) \ -{ \ - struct se_portal_group *se_tpg = attrib_to_tpg(item); \ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \ - struct tcm_qla2xxx_tpg, se_tpg); \ - struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \ - unsigned long val; \ - int ret; \ - \ - ret = kstrtoul(page, 0, &val); \ - if (ret < 0) { \ - pr_err("kstrtoul() failed with" \ - " ret: %d\n", ret); \ - return -EINVAL; \ - } \ - \ - if ((val != 0) && (val != 1)) { \ - pr_err("Illegal boolean value %lu\n", val); \ - return -EINVAL; \ - } \ - \ - a->name = val; \ - \ - return count; \ -} \ -CONFIGFS_ATTR(tcm_qla2xxx_tpg_attrib_, name) - -DEF_QLA_TPG_ATTRIB(generate_node_acls); -DEF_QLA_TPG_ATTRIB(cache_dynamic_acls); -DEF_QLA_TPG_ATTRIB(demo_mode_write_protect); -DEF_QLA_TPG_ATTRIB(prod_mode_write_protect); -DEF_QLA_TPG_ATTRIB(demo_mode_login_only); -#ifdef CONFIG_TCM_QLA2XXX_DEBUG -DEF_QLA_TPG_ATTRIB(jam_host); -#endif - -static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = { - &tcm_qla2xxx_tpg_attrib_attr_generate_node_acls, - &tcm_qla2xxx_tpg_attrib_attr_cache_dynamic_acls, - &tcm_qla2xxx_tpg_attrib_attr_demo_mode_write_protect, - &tcm_qla2xxx_tpg_attrib_attr_prod_mode_write_protect, - &tcm_qla2xxx_tpg_attrib_attr_demo_mode_login_only, -#ifdef CONFIG_TCM_QLA2XXX_DEBUG - &tcm_qla2xxx_tpg_attrib_attr_jam_host, -#endif - NULL, -}; - -/* End items for tcm_qla2xxx_tpg_attrib_cit */ - -static ssize_t tcm_qla2xxx_tpg_enable_show(struct config_item *item, - char *page) -{ - struct se_portal_group *se_tpg = to_tpg(item); - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - - return snprintf(page, PAGE_SIZE, "%d\n", - atomic_read(&tpg->lport_tpg_enabled)); -} - -static ssize_t tcm_qla2xxx_tpg_enable_store(struct config_item *item, - const char *page, size_t count) -{ - struct se_portal_group *se_tpg = to_tpg(item); - struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; - struct tcm_qla2xxx_lport *lport = container_of(se_wwn, - struct tcm_qla2xxx_lport, lport_wwn); - struct scsi_qla_host *vha = lport->qla_vha; - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - unsigned long op; - int rc; - - rc = kstrtoul(page, 0, &op); - if (rc < 0) { - pr_err("kstrtoul() returned %d\n", rc); - return -EINVAL; - } - if ((op != 1) && (op != 0)) { - pr_err("Illegal value for tpg_enable: %lu\n", op); - return -EINVAL; - } - if (op) { - if (atomic_read(&tpg->lport_tpg_enabled)) - return 
-EEXIST; - - atomic_set(&tpg->lport_tpg_enabled, 1); - qlt_enable_vha(vha); - } else { - if (!atomic_read(&tpg->lport_tpg_enabled)) - return count; - - atomic_set(&tpg->lport_tpg_enabled, 0); - qlt_stop_phase1(vha->vha_tgt.qla_tgt); - qlt_stop_phase2(vha->vha_tgt.qla_tgt); - } - - return count; -} - -static ssize_t tcm_qla2xxx_tpg_dynamic_sessions_show(struct config_item *item, - char *page) -{ - return target_show_dynamic_sessions(to_tpg(item), page); -} - -static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_store(struct config_item *item, - const char *page, size_t count) -{ - struct se_portal_group *se_tpg = to_tpg(item); - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - unsigned long val; - int ret = kstrtoul(page, 0, &val); - - if (ret) { - pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret); - return ret; - } - if (val != 0 && val != 1 && val != 3) { - pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val); - return -EINVAL; - } - tpg->tpg_attrib.fabric_prot_type = val; - - return count; -} - -static ssize_t tcm_qla2xxx_tpg_fabric_prot_type_show(struct config_item *item, - char *page) -{ - struct se_portal_group *se_tpg = to_tpg(item); - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - - return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type); -} - -CONFIGFS_ATTR(tcm_qla2xxx_tpg_, enable); -CONFIGFS_ATTR_RO(tcm_qla2xxx_tpg_, dynamic_sessions); -CONFIGFS_ATTR(tcm_qla2xxx_tpg_, fabric_prot_type); - -static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = { - &tcm_qla2xxx_tpg_attr_enable, - &tcm_qla2xxx_tpg_attr_dynamic_sessions, - &tcm_qla2xxx_tpg_attr_fabric_prot_type, - NULL, -}; - -static struct se_portal_group *tcm_qla2xxx_make_tpg(struct se_wwn *wwn, - const char *name) -{ - struct tcm_qla2xxx_lport *lport = container_of(wwn, - struct tcm_qla2xxx_lport, lport_wwn); - struct tcm_qla2xxx_tpg *tpg; - unsigned long tpgt; - int ret; - - if (strstr(name, "tpgt_") != name) - return ERR_PTR(-EINVAL); - if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX) - return ERR_PTR(-EINVAL); - - if ((tpgt != 1)) { - pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n"); - return ERR_PTR(-ENOSYS); - } - - tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL); - if (!tpg) { - pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n"); - return ERR_PTR(-ENOMEM); - } - tpg->lport = lport; - tpg->lport_tpgt = tpgt; - /* - * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic - * NodeACLs - */ - tpg->tpg_attrib.generate_node_acls = 1; - tpg->tpg_attrib.demo_mode_write_protect = 1; - tpg->tpg_attrib.cache_dynamic_acls = 1; - tpg->tpg_attrib.demo_mode_login_only = 1; - tpg->tpg_attrib.jam_host = 0; - - ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); - if (ret < 0) { - kfree(tpg); - return NULL; - } - - lport->tpg_1 = tpg; - - return &tpg->se_tpg; -} - -static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg) -{ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - struct tcm_qla2xxx_lport *lport = tpg->lport; - struct scsi_qla_host *vha = lport->qla_vha; - /* - * Call into qla2x_target.c LLD logic to shutdown the active - * FC Nexuses and disable target mode operation for this qla_hw_data - */ - if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop) - qlt_stop_phase1(vha->vha_tgt.qla_tgt); - - core_tpg_deregister(se_tpg); - /* - * Clear local TPG=1 pointer for non NPIV mode. 
- */ - lport->tpg_1 = NULL; - kfree(tpg); -} - -static ssize_t tcm_qla2xxx_npiv_tpg_enable_show(struct config_item *item, - char *page) -{ - return tcm_qla2xxx_tpg_enable_show(item, page); -} - -static ssize_t tcm_qla2xxx_npiv_tpg_enable_store(struct config_item *item, - const char *page, size_t count) -{ - struct se_portal_group *se_tpg = to_tpg(item); - struct se_wwn *se_wwn = se_tpg->se_tpg_wwn; - struct tcm_qla2xxx_lport *lport = container_of(se_wwn, - struct tcm_qla2xxx_lport, lport_wwn); - struct scsi_qla_host *vha = lport->qla_vha; - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - unsigned long op; - int rc; - - rc = kstrtoul(page, 0, &op); - if (rc < 0) { - pr_err("kstrtoul() returned %d\n", rc); - return -EINVAL; - } - if ((op != 1) && (op != 0)) { - pr_err("Illegal value for tpg_enable: %lu\n", op); - return -EINVAL; - } - if (op) { - if (atomic_read(&tpg->lport_tpg_enabled)) - return -EEXIST; - - atomic_set(&tpg->lport_tpg_enabled, 1); - qlt_enable_vha(vha); - } else { - if (!atomic_read(&tpg->lport_tpg_enabled)) - return count; - - atomic_set(&tpg->lport_tpg_enabled, 0); - qlt_stop_phase1(vha->vha_tgt.qla_tgt); - qlt_stop_phase2(vha->vha_tgt.qla_tgt); - } - - return count; -} - -CONFIGFS_ATTR(tcm_qla2xxx_npiv_tpg_, enable); - -static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = { - &tcm_qla2xxx_npiv_tpg_attr_enable, - NULL, -}; - -static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(struct se_wwn *wwn, - const char *name) -{ - struct tcm_qla2xxx_lport *lport = container_of(wwn, - struct tcm_qla2xxx_lport, lport_wwn); - struct tcm_qla2xxx_tpg *tpg; - unsigned long tpgt; - int ret; - - if (strstr(name, "tpgt_") != name) - return ERR_PTR(-EINVAL); - if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX) - return ERR_PTR(-EINVAL); - - tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL); - if (!tpg) { - pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n"); - return ERR_PTR(-ENOMEM); - } - tpg->lport = lport; - tpg->lport_tpgt = tpgt; - - /* - * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic - * NodeACLs - */ - tpg->tpg_attrib.generate_node_acls = 1; - tpg->tpg_attrib.demo_mode_write_protect = 1; - tpg->tpg_attrib.cache_dynamic_acls = 1; - tpg->tpg_attrib.demo_mode_login_only = 1; - - ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP); - if (ret < 0) { - kfree(tpg); - return NULL; - } - lport->tpg_1 = tpg; - return &tpg->se_tpg; -} - -/* - * Expected to be called with struct qla_hw_data->tgt.sess_lock held - */ -static struct fc_port *tcm_qla2xxx_find_sess_by_s_id(scsi_qla_host_t *vha, - const be_id_t s_id) -{ - struct tcm_qla2xxx_lport *lport; - struct se_node_acl *se_nacl; - struct tcm_qla2xxx_nacl *nacl; - u32 key; - - lport = vha->vha_tgt.target_lport_ptr; - if (!lport) { - pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); - dump_stack(); - return NULL; - } - - key = sid_to_key(s_id); - pr_debug("find_sess_by_s_id: 0x%06x\n", key); - - se_nacl = btree_lookup32(&lport->lport_fcport_map, key); - if (!se_nacl) { - pr_debug("Unable to locate s_id: 0x%06x\n", key); - return NULL; - } - pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n", - se_nacl, se_nacl->initiatorname); - - nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); - if (!nacl->fc_port) { - pr_err("Unable to locate struct fc_port\n"); - return NULL; - } - - return nacl->fc_port; -} - -/* - * Expected to be called with struct qla_hw_data->tgt.sess_lock held - */ -static void 
tcm_qla2xxx_set_sess_by_s_id( - struct tcm_qla2xxx_lport *lport, - struct se_node_acl *new_se_nacl, - struct tcm_qla2xxx_nacl *nacl, - struct se_session *se_sess, - struct fc_port *fc_port, - be_id_t s_id) -{ - u32 key; - void *slot; - int rc; - - key = sid_to_key(s_id); - pr_debug("set_sess_by_s_id: %06x\n", key); - - slot = btree_lookup32(&lport->lport_fcport_map, key); - if (!slot) { - if (new_se_nacl) { - pr_debug("Setting up new fc_port entry to new_se_nacl\n"); - nacl->nport_id = key; - rc = btree_insert32(&lport->lport_fcport_map, key, - new_se_nacl, GFP_ATOMIC); - if (rc) - printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n", - (int)key); - } else { - pr_debug("Wiping nonexisting fc_port entry\n"); - } - - fc_port->se_sess = se_sess; - nacl->fc_port = fc_port; - return; - } - - if (nacl->fc_port) { - if (new_se_nacl == NULL) { - pr_debug("Clearing existing nacl->fc_port and fc_port entry\n"); - btree_remove32(&lport->lport_fcport_map, key); - nacl->fc_port = NULL; - return; - } - pr_debug("Replacing existing nacl->fc_port and fc_port entry\n"); - btree_update32(&lport->lport_fcport_map, key, new_se_nacl); - fc_port->se_sess = se_sess; - nacl->fc_port = fc_port; - return; - } - - if (new_se_nacl == NULL) { - pr_debug("Clearing existing fc_port entry\n"); - btree_remove32(&lport->lport_fcport_map, key); - return; - } - - pr_debug("Replacing existing fc_port entry w/o active nacl->fc_port\n"); - btree_update32(&lport->lport_fcport_map, key, new_se_nacl); - fc_port->se_sess = se_sess; - nacl->fc_port = fc_port; - - pr_debug("Setup nacl->fc_port %p by s_id for se_nacl: %p, initiatorname: %s\n", - nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname); -} - -/* - * Expected to be called with struct qla_hw_data->tgt.sess_lock held - */ -static struct fc_port *tcm_qla2xxx_find_sess_by_loop_id( - scsi_qla_host_t *vha, - const uint16_t loop_id) -{ - struct tcm_qla2xxx_lport *lport; - struct se_node_acl *se_nacl; - struct tcm_qla2xxx_nacl *nacl; - struct tcm_qla2xxx_fc_loopid *fc_loopid; - - lport = vha->vha_tgt.target_lport_ptr; - if (!lport) { - pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); - dump_stack(); - return NULL; - } - - pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id); - - fc_loopid = lport->lport_loopid_map + loop_id; - se_nacl = fc_loopid->se_nacl; - if (!se_nacl) { - pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n", - loop_id); - return NULL; - } - - nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl); - - if (!nacl->fc_port) { - pr_err("Unable to locate struct fc_port\n"); - return NULL; - } - - return nacl->fc_port; -} - -/* - * Expected to be called with struct qla_hw_data->tgt.sess_lock held - */ -static void tcm_qla2xxx_set_sess_by_loop_id( - struct tcm_qla2xxx_lport *lport, - struct se_node_acl *new_se_nacl, - struct tcm_qla2xxx_nacl *nacl, - struct se_session *se_sess, - struct fc_port *fc_port, - uint16_t loop_id) -{ - struct se_node_acl *saved_nacl; - struct tcm_qla2xxx_fc_loopid *fc_loopid; - - pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id); - - fc_loopid = &((struct tcm_qla2xxx_fc_loopid *) - lport->lport_loopid_map)[loop_id]; - - saved_nacl = fc_loopid->se_nacl; - if (!saved_nacl) { - pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n"); - fc_loopid->se_nacl = new_se_nacl; - if (fc_port->se_sess != se_sess) - fc_port->se_sess = se_sess; - if (nacl->fc_port != fc_port) - nacl->fc_port = fc_port; - return; - } - - if (nacl->fc_port) { - if (new_se_nacl == NULL) { - 
pr_debug("Clearing nacl->fc_port and fc_loopid->se_nacl\n"); - fc_loopid->se_nacl = NULL; - nacl->fc_port = NULL; - return; - } - - pr_debug("Replacing existing nacl->fc_port and fc_loopid->se_nacl\n"); - fc_loopid->se_nacl = new_se_nacl; - if (fc_port->se_sess != se_sess) - fc_port->se_sess = se_sess; - if (nacl->fc_port != fc_port) - nacl->fc_port = fc_port; - return; - } - - if (new_se_nacl == NULL) { - pr_debug("Clearing fc_loopid->se_nacl\n"); - fc_loopid->se_nacl = NULL; - return; - } - - pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->fc_port\n"); - fc_loopid->se_nacl = new_se_nacl; - if (fc_port->se_sess != se_sess) - fc_port->se_sess = se_sess; - if (nacl->fc_port != fc_port) - nacl->fc_port = fc_port; - - pr_debug("Setup nacl->fc_port %p by loop_id for se_nacl: %p, initiatorname: %s\n", - nacl->fc_port, new_se_nacl, new_se_nacl->initiatorname); -} - -/* - * Should always be called with qla_hw_data->tgt.sess_lock held. - */ -static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, - struct tcm_qla2xxx_nacl *nacl, struct fc_port *sess) -{ - struct se_session *se_sess = sess->se_sess; - - tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, - sess, port_id_to_be_id(sess->d_id)); - tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, - sess, sess->loop_id); -} - -static void tcm_qla2xxx_free_session(struct fc_port *sess) -{ - struct qla_tgt *tgt = sess->tgt; - struct qla_hw_data *ha = tgt->ha; - scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); - struct se_session *se_sess; - struct tcm_qla2xxx_lport *lport; - - BUG_ON(in_interrupt()); - - se_sess = sess->se_sess; - if (!se_sess) { - pr_err("struct fc_port->se_sess is NULL\n"); - dump_stack(); - return; - } - - lport = vha->vha_tgt.target_lport_ptr; - if (!lport) { - pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); - dump_stack(); - return; - } - target_wait_for_sess_cmds(se_sess); - - target_remove_session(se_sess); -} - -static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg, - struct se_session *se_sess, void *p) -{ - struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, - struct tcm_qla2xxx_tpg, se_tpg); - struct tcm_qla2xxx_lport *lport = tpg->lport; - struct qla_hw_data *ha = lport->qla_vha->hw; - struct se_node_acl *se_nacl = se_sess->se_node_acl; - struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, - struct tcm_qla2xxx_nacl, se_node_acl); - struct fc_port *qlat_sess = p; - uint16_t loop_id = qlat_sess->loop_id; - unsigned long flags; - - /* - * And now setup se_nacl and session pointers into HW lport internal - * mappings for fabric S_ID and LOOP_ID. - */ - spin_lock_irqsave(&ha->tgt.sess_lock, flags); - tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess, qlat_sess, - port_id_to_be_id(qlat_sess->d_id)); - tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, - se_sess, qlat_sess, loop_id); - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); - - return 0; -} - -/* - * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl() - * to locate struct se_node_acl - */ -static int tcm_qla2xxx_check_initiator_node_acl( - scsi_qla_host_t *vha, - unsigned char *fc_wwpn, - struct fc_port *qlat_sess) -{ - struct qla_hw_data *ha = vha->hw; - struct tcm_qla2xxx_lport *lport; - struct tcm_qla2xxx_tpg *tpg; - struct se_session *se_sess; - unsigned char port_name[36]; - int num_tags = (ha->cur_fw_xcb_count) ? 
ha->cur_fw_xcb_count : - TCM_QLA2XXX_DEFAULT_TAGS; - - lport = vha->vha_tgt.target_lport_ptr; - if (!lport) { - pr_err("Unable to locate struct tcm_qla2xxx_lport\n"); - dump_stack(); - return -EINVAL; - } - /* - * Locate the TPG=1 reference.. - */ - tpg = lport->tpg_1; - if (!tpg) { - pr_err("Unable to locate struct tcm_qla2xxx_lport->tpg_1\n"); - return -EINVAL; - } - /* - * Format the FCP Initiator port_name into colon-separated values to - * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs. - */ - memset(&port_name, 0, 36); - snprintf(port_name, sizeof(port_name), "%8phC", fc_wwpn); - /* - * Locate our struct se_node_acl either from an explicit NodeACL created - * via ConfigFS, or via running in TPG demo mode. - */ - se_sess = target_setup_session(&tpg->se_tpg, num_tags, - sizeof(struct qla_tgt_cmd), - TARGET_PROT_ALL, port_name, - qlat_sess, tcm_qla2xxx_session_cb); - if (IS_ERR(se_sess)) - return PTR_ERR(se_sess); - - return 0; -} - -static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id, - uint16_t loop_id, bool conf_compl_supported) -{ - struct qla_tgt *tgt = sess->tgt; - struct qla_hw_data *ha = tgt->ha; - scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); - struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr; - struct se_node_acl *se_nacl = sess->se_sess->se_node_acl; - struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl, - struct tcm_qla2xxx_nacl, se_node_acl); - u32 key; - - - if (sess->loop_id != loop_id || sess->d_id.b24 != s_id.b24) - pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n", - sess, sess->port_name, - sess->loop_id, loop_id, sess->d_id.b.domain, - sess->d_id.b.area, sess->d_id.b.al_pa, s_id.b.domain, - s_id.b.area, s_id.b.al_pa); - - if (sess->loop_id != loop_id) { - /* - * Because we can shuffle loop IDs around and we - * update different sessions non-atomically, we might - * have overwritten this session's old loop ID - * already, and we might end up overwriting some other - * session that will be updated later. So we have to - * be extra careful and we can't warn about those things... - */ - if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl) - lport->lport_loopid_map[sess->loop_id].se_nacl = NULL; - - lport->lport_loopid_map[loop_id].se_nacl = se_nacl; - - sess->loop_id = loop_id; - } - - if (sess->d_id.b24 != s_id.b24) { - key = (((u32) sess->d_id.b.domain << 16) | - ((u32) sess->d_id.b.area << 8) | - ((u32) sess->d_id.b.al_pa)); - - if (btree_lookup32(&lport->lport_fcport_map, key)) - WARN(btree_remove32(&lport->lport_fcport_map, key) != - se_nacl, "Found wrong se_nacl when updating s_id %x:%x:%x\n", - sess->d_id.b.domain, sess->d_id.b.area, - sess->d_id.b.al_pa); - else - WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n", - sess->d_id.b.domain, sess->d_id.b.area, - sess->d_id.b.al_pa); - - key = (((u32) s_id.b.domain << 16) | - ((u32) s_id.b.area << 8) | - ((u32) s_id.b.al_pa)); - - if (btree_lookup32(&lport->lport_fcport_map, key)) { - WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n", - s_id.b.domain, s_id.b.area, s_id.b.al_pa); - btree_update32(&lport->lport_fcport_map, key, se_nacl); - } else { - btree_insert32(&lport->lport_fcport_map, key, se_nacl, - GFP_ATOMIC); - } - - sess->d_id = s_id; - nacl->nport_id = key; - } - - sess->conf_compl_supported = conf_compl_supported; - -} - -/* - * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
- */ -static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { - .find_cmd_by_tag = tcm_qla2xxx_find_cmd_by_tag, - .handle_cmd = tcm_qla2xxx_handle_cmd, - .handle_data = tcm_qla2xxx_handle_data, - .handle_tmr = tcm_qla2xxx_handle_tmr, - .get_cmd = tcm_qla2xxx_get_cmd, - .rel_cmd = tcm_qla2xxx_rel_cmd, - .free_cmd = tcm_qla2xxx_free_cmd, - .free_mcmd = tcm_qla2xxx_free_mcmd, - .free_session = tcm_qla2xxx_free_session, - .update_sess = tcm_qla2xxx_update_sess, - .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl, - .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id, - .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id, - .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, - .put_sess = tcm_qla2xxx_put_sess, - .shutdown_sess = tcm_qla2xxx_shutdown_sess, - .get_dif_tags = tcm_qla2xxx_dif_tags, - .chk_dif_tags = tcm_qla2xxx_chk_dif_tags, -}; - -static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport) -{ - int rc; - - rc = btree_init32(&lport->lport_fcport_map); - if (rc) { - pr_err("Unable to initialize lport->lport_fcport_map btree\n"); - return rc; - } - - lport->lport_loopid_map = - vzalloc(array_size(65536, - sizeof(struct tcm_qla2xxx_fc_loopid))); - if (!lport->lport_loopid_map) { - pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n", - sizeof(struct tcm_qla2xxx_fc_loopid) * 65536); - btree_destroy32(&lport->lport_fcport_map); - return -ENOMEM; - } - pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n", - sizeof(struct tcm_qla2xxx_fc_loopid) * 65536); - return 0; -} - -static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha, - void *target_lport_ptr, - u64 npiv_wwpn, u64 npiv_wwnn) -{ - struct qla_hw_data *ha = vha->hw; - struct tcm_qla2xxx_lport *lport = - (struct tcm_qla2xxx_lport *)target_lport_ptr; - /* - * Setup tgt_ops, local pointer to vha and target_lport_ptr - */ - ha->tgt.tgt_ops = &tcm_qla2xxx_template; - vha->vha_tgt.target_lport_ptr = target_lport_ptr; - lport->qla_vha = vha; - - return 0; -} - -static struct se_wwn *tcm_qla2xxx_make_lport( - struct target_fabric_configfs *tf, - struct config_group *group, - const char *name) -{ - struct tcm_qla2xxx_lport *lport; - u64 wwpn; - int ret = -ENODEV; - - if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0) - return ERR_PTR(-EINVAL); - - lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); - if (!lport) { - pr_err("Unable to allocate struct tcm_qla2xxx_lport\n"); - return ERR_PTR(-ENOMEM); - } - lport->lport_wwpn = wwpn; - tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN, - wwpn); - sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn); - - ret = tcm_qla2xxx_init_lport(lport); - if (ret != 0) - goto out; - - ret = qlt_lport_register(lport, wwpn, 0, 0, - tcm_qla2xxx_lport_register_cb); - if (ret != 0) - goto out_lport; - - return &lport->lport_wwn; -out_lport: - vfree(lport->lport_loopid_map); - btree_destroy32(&lport->lport_fcport_map); -out: - kfree(lport); - return ERR_PTR(ret); -} - -static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn) -{ - struct tcm_qla2xxx_lport *lport = container_of(wwn, - struct tcm_qla2xxx_lport, lport_wwn); - struct scsi_qla_host *vha = lport->qla_vha; - struct se_node_acl *node; - u32 key = 0; - - /* - * Call into qla2x_target.c LLD logic to complete the - * shutdown of struct qla_tgt after the call to - * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above.. 
- */ - if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped) - qlt_stop_phase2(vha->vha_tgt.qla_tgt); - - qlt_lport_deregister(vha); - - vfree(lport->lport_loopid_map); - btree_for_each_safe32(&lport->lport_fcport_map, key, node) - btree_remove32(&lport->lport_fcport_map, key); - btree_destroy32(&lport->lport_fcport_map); - kfree(lport); -} - -static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha, - void *target_lport_ptr, - u64 npiv_wwpn, u64 npiv_wwnn) -{ - struct fc_vport *vport; - struct Scsi_Host *sh = base_vha->host; - struct scsi_qla_host *npiv_vha; - struct tcm_qla2xxx_lport *lport = - (struct tcm_qla2xxx_lport *)target_lport_ptr; - struct tcm_qla2xxx_lport *base_lport = - (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr; - struct fc_vport_identifiers vport_id; - - if (qla_ini_mode_enabled(base_vha)) { - pr_err("qla2xxx base_vha not enabled for target mode\n"); - return -EPERM; - } - - if (!base_lport || !base_lport->tpg_1 || - !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) { - pr_err("qla2xxx base_lport or tpg_1 not available\n"); - return -EPERM; - } - - memset(&vport_id, 0, sizeof(vport_id)); - vport_id.port_name = npiv_wwpn; - vport_id.node_name = npiv_wwnn; - vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; - vport_id.vport_type = FC_PORTTYPE_NPIV; - vport_id.disable = false; - - vport = fc_vport_create(sh, 0, &vport_id); - if (!vport) { - pr_err("fc_vport_create failed for qla2xxx_npiv\n"); - return -ENODEV; - } - /* - * Setup local pointer to NPIV vhba + target_lport_ptr - */ - npiv_vha = (struct scsi_qla_host *)vport->dd_data; - npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr; - lport->qla_vha = npiv_vha; - scsi_host_get(npiv_vha->host); - return 0; -} - - -static struct se_wwn *tcm_qla2xxx_npiv_make_lport( - struct target_fabric_configfs *tf, - struct config_group *group, - const char *name) -{ - struct tcm_qla2xxx_lport *lport; - u64 phys_wwpn, npiv_wwpn, npiv_wwnn; - char *p, tmp[128]; - int ret; - - snprintf(tmp, 128, "%s", name); - - p = strchr(tmp, '@'); - if (!p) { - pr_err("Unable to locate NPIV '@' separator\n"); - return ERR_PTR(-EINVAL); - } - *p++ = '\0'; - - if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0) - return ERR_PTR(-EINVAL); - - if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1, - &npiv_wwpn, &npiv_wwnn) < 0) - return ERR_PTR(-EINVAL); - - lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL); - if (!lport) { - pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n"); - return ERR_PTR(-ENOMEM); - } - lport->lport_npiv_wwpn = npiv_wwpn; - lport->lport_npiv_wwnn = npiv_wwnn; - sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn); - - ret = tcm_qla2xxx_init_lport(lport); - if (ret != 0) - goto out; - - ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn, - tcm_qla2xxx_lport_register_npiv_cb); - if (ret != 0) - goto out_lport; - - return &lport->lport_wwn; -out_lport: - vfree(lport->lport_loopid_map); - btree_destroy32(&lport->lport_fcport_map); -out: - kfree(lport); - return ERR_PTR(ret); -} - -static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn) -{ - struct tcm_qla2xxx_lport *lport = container_of(wwn, - struct tcm_qla2xxx_lport, lport_wwn); - struct scsi_qla_host *npiv_vha = lport->qla_vha; - struct qla_hw_data *ha = npiv_vha->hw; - scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); - - scsi_host_put(npiv_vha->host); - /* - * Notify libfc that we want to release the vha->fc_vport - */ - fc_vport_terminate(npiv_vha->fc_vport); - 
scsi_host_put(base_vha->host); - kfree(lport); -} - - -static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item, - char *page) -{ - return sprintf(page, - "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on %s\n", - QLA2XXX_VERSION, utsname()->sysname, - utsname()->machine, utsname()->release); -} - -CONFIGFS_ATTR_RO(tcm_qla2xxx_wwn_, version); - -static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = { - &tcm_qla2xxx_wwn_attr_version, - NULL, -}; - -static const struct target_core_fabric_ops tcm_qla2xxx_ops = { - .module = THIS_MODULE, - .fabric_name = "qla2xxx", - .node_acl_size = sizeof(struct tcm_qla2xxx_nacl), - /* - * XXX: Limit assumes single page per scatter-gather-list entry. - * Current maximum is ~4.9 MB per se_cmd->t_data_sg with PAGE_SIZE=4096 - */ - .max_data_sg_nents = 1200, - .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, - .tpg_get_tag = tcm_qla2xxx_get_tag, - .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, - .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, - .tpg_check_demo_mode_write_protect = - tcm_qla2xxx_check_demo_write_protect, - .tpg_check_prod_mode_write_protect = - tcm_qla2xxx_check_prod_write_protect, - .tpg_check_prot_fabric_only = tcm_qla2xxx_check_prot_fabric_only, - .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, - .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, - .check_stop_free = tcm_qla2xxx_check_stop_free, - .release_cmd = tcm_qla2xxx_release_cmd, - .close_session = tcm_qla2xxx_close_session, - .sess_get_index = tcm_qla2xxx_sess_get_index, - .sess_get_initiator_sid = NULL, - .write_pending = tcm_qla2xxx_write_pending, - .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs, - .get_cmd_state = tcm_qla2xxx_get_cmd_state, - .queue_data_in = tcm_qla2xxx_queue_data_in, - .queue_status = tcm_qla2xxx_queue_status, - .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, - .aborted_task = tcm_qla2xxx_aborted_task, - /* - * Setup function pointers for generic logic in - * target_core_fabric_configfs.c - */ - .fabric_make_wwn = tcm_qla2xxx_make_lport, - .fabric_drop_wwn = tcm_qla2xxx_drop_lport, - .fabric_make_tpg = tcm_qla2xxx_make_tpg, - .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, - .fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl, - - .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, - .tfc_tpg_base_attrs = tcm_qla2xxx_tpg_attrs, - .tfc_tpg_attrib_attrs = tcm_qla2xxx_tpg_attrib_attrs, -}; - -static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = { - .module = THIS_MODULE, - .fabric_name = "qla2xxx_npiv", - .node_acl_size = sizeof(struct tcm_qla2xxx_nacl), - .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn, - .tpg_get_tag = tcm_qla2xxx_get_tag, - .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode, - .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache, - .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode, - .tpg_check_prod_mode_write_protect = - tcm_qla2xxx_check_prod_write_protect, - .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only, - .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index, - .check_stop_free = tcm_qla2xxx_check_stop_free, - .release_cmd = tcm_qla2xxx_release_cmd, - .close_session = tcm_qla2xxx_close_session, - .sess_get_index = tcm_qla2xxx_sess_get_index, - .sess_get_initiator_sid = NULL, - .write_pending = tcm_qla2xxx_write_pending, - .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs, - .get_cmd_state = tcm_qla2xxx_get_cmd_state, - .queue_data_in = tcm_qla2xxx_queue_data_in, - .queue_status = tcm_qla2xxx_queue_status, - 
.queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp, - .aborted_task = tcm_qla2xxx_aborted_task, - /* - * Setup function pointers for generic logic in - * target_core_fabric_configfs.c - */ - .fabric_make_wwn = tcm_qla2xxx_npiv_make_lport, - .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport, - .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg, - .fabric_drop_tpg = tcm_qla2xxx_drop_tpg, - .fabric_init_nodeacl = tcm_qla2xxx_init_nodeacl, - - .tfc_wwn_attrs = tcm_qla2xxx_wwn_attrs, - .tfc_tpg_base_attrs = tcm_qla2xxx_npiv_tpg_attrs, -}; - -static int tcm_qla2xxx_register_configfs(void) -{ - int ret; - - pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on %s\n", - QLA2XXX_VERSION, utsname()->sysname, - utsname()->machine, utsname()->release); - - ret = target_register_template(&tcm_qla2xxx_ops); - if (ret) - return ret; - - ret = target_register_template(&tcm_qla2xxx_npiv_ops); - if (ret) - goto out_fabric; - - tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free", - WQ_MEM_RECLAIM, 0); - if (!tcm_qla2xxx_free_wq) { - ret = -ENOMEM; - goto out_fabric_npiv; - } - - return 0; - -out_fabric_npiv: - target_unregister_template(&tcm_qla2xxx_npiv_ops); -out_fabric: - target_unregister_template(&tcm_qla2xxx_ops); - return ret; -} - -static void tcm_qla2xxx_deregister_configfs(void) -{ - destroy_workqueue(tcm_qla2xxx_free_wq); - - target_unregister_template(&tcm_qla2xxx_ops); - target_unregister_template(&tcm_qla2xxx_npiv_ops); -} static int __init tcm_qla2xxx_init(void) { - int ret; - - BUILD_BUG_ON(sizeof(struct abts_recv_from_24xx) != 64); - BUILD_BUG_ON(sizeof(struct abts_resp_from_24xx_fw) != 64); - BUILD_BUG_ON(sizeof(struct atio7_fcp_cmnd) != 32); - BUILD_BUG_ON(sizeof(struct atio_from_isp) != 64); - BUILD_BUG_ON(sizeof(struct ba_acc_le) != 12); - BUILD_BUG_ON(sizeof(struct ba_rjt_le) != 4); - BUILD_BUG_ON(sizeof(struct ctio7_from_24xx) != 64); - BUILD_BUG_ON(sizeof(struct ctio7_to_24xx) != 64); - BUILD_BUG_ON(sizeof(struct ctio_crc2_to_fw) != 64); - BUILD_BUG_ON(sizeof(struct ctio_crc_from_fw) != 64); - BUILD_BUG_ON(sizeof(struct ctio_to_2xxx) != 64); - BUILD_BUG_ON(sizeof(struct fcp_hdr) != 24); - BUILD_BUG_ON(sizeof(struct fcp_hdr_le) != 24); - BUILD_BUG_ON(sizeof(struct nack_to_isp) != 64); - - ret = tcm_qla2xxx_register_configfs(); - if (ret < 0) - return ret; - return 0; } static void __exit tcm_qla2xxx_exit(void) { - tcm_qla2xxx_deregister_configfs(); } MODULE_DESCRIPTION("TCM QLA24XX+ series NPIV enabled fabric driver"); -- Gitee From c53a62f85eb33dc8ac496d551bf0bbb4f0e02cc3 Mon Sep 17 00:00:00 2001 From: xiaosu3109 Date: Sun, 30 Oct 2022 15:08:06 +0800 Subject: [PATCH 2/2] new qla2xxx driver 10.02.08, modify Kconfig file --- drivers/scsi/qla2xxx/Kconfig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 764501838e214..802c373fd6d92 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig @@ -6,7 +6,7 @@ config SCSI_QLA_FC depends on NVME_FC || !NVME_FC select FW_LOADER select BTREE - ---help--- + help This qla2xxx driver supports all QLogic Fibre Channel PCI and PCIe host adapters. 
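Note on this follow-up patch: Kconfig's legacy "---help---" spelling was converted treewide to the bare "help" keyword around Linux v5.8, and parser support for the old form was removed later, so the "---help---" lines reintroduced by PATCH 1/2 fail to parse on recent kernels. A purely illustrative sketch of the accepted form (EXAMPLE_OPTION is not a real symbol in this driver):

config EXAMPLE_OPTION
	bool "Illustrative Kconfig entry, not part of qla2xxx"
	default n
	help
	  Help text is introduced by the bare "help" keyword and
	  indented beneath it; the "---help---" spelling is no longer
	  accepted by current Kconfig.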
@@ -37,14 +37,14 @@ config TCM_QLA2XXX depends on LIBFC select BTREE default n - ---help--- + help Say Y here to enable the TCM_QLA2XXX fabric module for QLogic 24xx+ series target mode HBAs if TCM_QLA2XXX config TCM_QLA2XXX_DEBUG bool "TCM_QLA2XXX fabric module DEBUG mode for QLogic 24xx+ series target mode HBAs" default n - ---help--- + help Say Y here to enable the TCM_QLA2XXX fabric module DEBUG for QLogic 24xx+ series target mode HBAs This will include code to enable the SCSI command jammer endif -- Gitee
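A note for readers tracing the removed tcm_qla2xxx session-lookup code above: each lport kept two parallel lookup structures — a flat 65536-entry lport_loopid_map array indexed by FC loop_id, and a btree32 (lport_fcport_map) keyed by the 24-bit FC S_ID packed into a u32 as (domain << 16) | (area << 8) | al_pa. The stand-alone sketch below shows only that key packing; struct port_id_ex and sid_to_key_ex are illustrative names, not driver symbols.

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative sketch only, not driver code: mirrors the u32 key the
 * removed lookup helpers build for btree_lookup32()/btree_insert32()
 * on lport->lport_fcport_map.
 */
struct port_id_ex {	/* hypothetical stand-in for the driver's port_id_t */
	uint8_t domain;
	uint8_t area;
	uint8_t al_pa;
};

static uint32_t sid_to_key_ex(struct port_id_ex id)
{
	return ((uint32_t)id.domain << 16) |
	       ((uint32_t)id.area << 8) |
	       (uint32_t)id.al_pa;
}

int main(void)
{
	struct port_id_ex id = { .domain = 0x01, .area = 0x02, .al_pa = 0x0e };

	/* prints "fcport_map key: 0x01020e" */
	printf("fcport_map key: 0x%06x\n", sid_to_key_ex(id));
	return 0;
}

The same packing appears in tcm_qla2xxx_update_sess() above, which removes the btree entry under the old key and re-inserts the se_node_acl under the new one when a session's S_ID changes.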