From 9e634321b5b46b9535ae7a82ff72e3ccf777ba36 Mon Sep 17 00:00:00 2001
From: hemist
Date: Fri, 11 Aug 2023 16:10:56 +0800
Subject: [PATCH] 1) fix: running sysak memleak -t slab -n kmalloc-4096 -i 30
 hangs; 2) fix: the sysak memleak -i argument is not validated; 3) add:
 validate the input when the -n argument of sysak memleak -t slab/page
 specifies a kmalloc cache
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
 config-host.mak | 6 + source/lib/internal/kernel_module/LICENSE | 339 ++++ source/lib/internal/kernel_module/Makefile | 72 ++ source/lib/internal/kernel_module/README.md | 11 + .../internal/kernel_module/common/blackbox.c | 749 ++++++++++++++++++ .../internal/kernel_module/common/chrdev.c | 154 ++++ .../lib/internal/kernel_module/common/event.c | 3 + .../lib/internal/kernel_module/common/hook.c | 177 +++++ .../internal/kernel_module/common/internal.h | 49 ++ .../internal/kernel_module/common/ksymbol.c | 40 + .../lib/internal/kernel_module/common/proc.c | 60 ++ .../lib/internal/kernel_module/common/stack.c | 3 + source/lib/internal/kernel_module/entry.c | 68 ++ .../internal/kernel_module/include/blackbox.h | 62 ++ .../internal/kernel_module/include/common.h | 16 + .../lib/internal/kernel_module/include/hook.h | 17 + .../internal/kernel_module/include/ksymbol.h | 5 + .../internal/kernel_module/include/memleak.h | 21 + .../lib/internal/kernel_module/include/proc.h | 129 +++ .../include/3.10.0-1062.1.1.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1062.1.1.el7.x86_64/virtio_blk.h | 175 ++++ .../include/3.10.0-1062.1.2.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1062.1.2.el7.x86_64/virtio_blk.h | 175 ++++ .../3.10.0-1062.12.1.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1062.12.1.el7.x86_64/virtio_blk.h | 175 ++++ .../3.10.0-1062.18.1.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1062.18.1.el7.x86_64/virtio_blk.h | 175 ++++ .../include/3.10.0-1062.4.1.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1062.4.1.el7.x86_64/virtio_blk.h | 175 ++++ .../include/3.10.0-1062.4.2.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1062.4.2.el7.x86_64/virtio_blk.h | 175 ++++ .../include/3.10.0-1062.4.3.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1062.4.3.el7.x86_64/virtio_blk.h | 175 ++++ .../include/3.10.0-1062.7.1.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1062.7.1.el7.x86_64/virtio_blk.h | 175 ++++ .../include/3.10.0-1062.9.1.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1062.9.1.el7.x86_64/virtio_blk.h | 175 ++++ .../include/3.10.0-1062.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1062.el7.x86_64/virtio_blk.h | 175 ++++ .../3.10.0-1127.10.1.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1127.10.1.el7.x86_64/virtio_blk.h | 175 ++++ .../3.10.0-1127.13.1.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1127.13.1.el7.x86_64/virtio_blk.h | 175 ++++ .../3.10.0-1127.18.2.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1127.18.2.el7.x86_64/virtio_blk.h | 175 ++++ .../3.10.0-1127.19.1.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1127.19.1.el7.x86_64/virtio_blk.h | 175 ++++ .../include/3.10.0-1127.8.2.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1127.8.2.el7.x86_64/virtio_blk.h | 175 ++++ .../include/3.10.0-1127.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1127.el7.x86_64/virtio_blk.h | 175 ++++ .../3.10.0-1160.11.1.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1160.11.1.el7.x86_64/virtio_blk.h | 175 ++++ 
.../3.10.0-1160.15.2.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1160.15.2.el7.x86_64/virtio_blk.h | 175 ++++ .../include/3.10.0-1160.2.1.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1160.2.1.el7.x86_64/virtio_blk.h | 175 ++++ .../include/3.10.0-1160.2.2.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1160.2.2.el7.x86_64/virtio_blk.h | 175 ++++ .../include/3.10.0-1160.6.1.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1160.6.1.el7.x86_64/virtio_blk.h | 175 ++++ .../include/3.10.0-1160.el7.x86_64/nvme.h | 60 ++ .../3.10.0-1160.el7.x86_64/virtio_blk.h | 175 ++++ .../include/3.10.0-862.14.4.el7.x86_64/nvme.h | 59 ++ .../3.10.0-862.14.4.el7.x86_64/virtio_blk.h | 175 ++++ .../include/3.10.0-957.21.3.el7.x86_64/nvme.h | 59 ++ .../3.10.0-957.21.3.el7.x86_64/virtio_blk.h | 175 ++++ .../include/4.19.24-9.al7.x86_64/nvme.h | 59 ++ .../include/4.19.24-9.al7.x86_64/virtio_blk.h | 219 +++++ .../include/4.19.81-17.1.al7.x86_64/nvme.h | 59 ++ .../4.19.81-17.1.al7.x86_64/virtio_blk.h | 219 +++++ .../include/4.19.81-17.2.al7.x86_64/nvme.h | 59 ++ .../4.19.81-17.2.al7.x86_64/virtio_blk.h | 219 +++++ .../4.19.91-013.ali4000.an7.x86_64/nvme.h | 61 ++ .../virtio_blk.h | 268 +++++++ .../include/4.19.91-18.al7.x86_64/nvme.h | 59 ++ .../4.19.91-18.al7.x86_64/virtio_blk.h | 226 ++++++ .../include/4.19.91-19.1.al7.x86_64/nvme.h | 59 ++ .../4.19.91-19.1.al7.x86_64/virtio_blk.h | 225 ++++++ .../include/4.19.91-19.2.al7.x86_64/nvme.h | 59 ++ .../4.19.91-19.2.al7.x86_64/virtio_blk.h | 225 ++++++ .../include/4.19.91-21.2.al7.x86_64/nvme.h | 61 ++ .../4.19.91-21.2.al7.x86_64/virtio_blk.h | 268 +++++++ .../include/4.19.91-21.al7.x86_64/nvme.h | 61 ++ .../4.19.91-21.al7.x86_64/virtio_blk.h | 268 +++++++ .../include/4.19.91-22.1.al7.x86_64/nvme.h | 61 ++ .../4.19.91-22.1.al7.x86_64/virtio_blk.h | 268 +++++++ .../include/4.19.91-22.2.al7.x86_64/nvme.h | 61 ++ .../4.19.91-22.2.al7.x86_64/virtio_blk.h | 268 +++++++ .../include/4.19.91-23.4.an8.x86_64/nvme.h | 61 ++ .../4.19.91-23.4.an8.x86_64/virtio_blk.h | 268 +++++++ .../include/4.19.91-23.al7.x86_64/nvme.h | 61 ++ .../4.19.91-23.al7.x86_64/virtio_blk.h | 268 +++++++ .../include/4.19.91-24.1.al7.x86_64/nvme.h | 61 ++ .../4.19.91-24.1.al7.x86_64/virtio_blk.h | 268 +++++++ .../include/4.19.91-24.8.an8.x86_64/nvme.h | 61 ++ .../4.19.91-24.8.an8.x86_64/virtio_blk.h | 268 +++++++ .../include/4.19.91-24.al7.x86_64/nvme.h | 61 ++ .../4.19.91-24.al7.x86_64/virtio_blk.h | 265 +++++++ .../include/4.19.91-25.an8.x86_64/nvme.h | 61 ++ .../4.19.91-25.an8.x86_64/virtio_blk.h | 268 +++++++ .../include/4.19.91-26.1.al7.x86_64/nvme.h | 61 ++ .../4.19.91-26.1.al7.x86_64/virtio_blk.h | 265 +++++++ .../include/4.19.91-26.6.al7.x86_64/nvme.h | 61 ++ .../4.19.91-26.6.al7.x86_64/virtio_blk.h | 265 +++++++ .../include/4.19.91-26.al7.x86_64/nvme.h | 61 ++ .../4.19.91-26.al7.x86_64/virtio_blk.h | 265 +++++++ .../include/4.19.91-26.an8.x86_64/nvme.h | 61 ++ .../4.19.91-26.an8.x86_64/virtio_blk.h | 268 +++++++ .../include/4.19.91-27.al7.x86_64/nvme.h | 61 ++ .../4.19.91-27.al7.x86_64/virtio_blk.h | 265 +++++++ .../include/5.10.112-11.1.al8.x86_64/nvme.h | 63 ++ .../5.10.112-11.1.al8.x86_64/virtio_blk.h | 310 ++++++++ .../include/5.10.112-11.2.al8.x86_64/nvme.h | 63 ++ .../5.10.112-11.2.al8.x86_64/virtio_blk.h | 310 ++++++++ .../include/5.10.112-11.al8.x86_64/nvme.h | 63 ++ .../5.10.112-11.al8.x86_64/virtio_blk.h | 310 ++++++++ .../include/5.10.134-12.1.al8.x86_64/nvme.h | 63 ++ .../5.10.134-12.1.al8.x86_64/virtio_blk.h | 310 ++++++++ .../include/5.10.134-12.2.al8.x86_64/nvme.h | 63 ++ .../5.10.134-12.2.al8.x86_64/virtio_blk.h | 310 
++++++++ .../include/5.10.134-12.al8.x86_64/nvme.h | 63 ++ .../5.10.134-12.al8.x86_64/virtio_blk.h | 310 ++++++++ .../include/5.10.134-13.1.al8.x86_64/nvme.h | 63 ++ .../5.10.134-13.1.al8.x86_64/virtio_blk.h | 310 ++++++++ .../include/5.10.134-13.al8.aarch64/nvme.h | 63 ++ .../5.10.134-13.al8.aarch64/virtio_blk.h | 310 ++++++++ .../include/5.10.134-13.al8.x86_64/nvme.h | 63 ++ .../5.10.134-13.al8.x86_64/virtio_blk.h | 310 ++++++++ .../include/5.10.84-10.1.al8.x86_64/nvme.h | 63 ++ .../5.10.84-10.1.al8.x86_64/virtio_blk.h | 310 ++++++++ .../include/5.10.84-10.2.al8.x86_64/nvme.h | 63 ++ .../5.10.84-10.2.al8.x86_64/virtio_blk.h | 310 ++++++++ .../include/5.10.84-10.3.al8.x86_64/nvme.h | 63 ++ .../5.10.84-10.3.al8.x86_64/virtio_blk.h | 310 ++++++++ .../include/5.10.84-10.4.al8.x86_64/nvme.h | 63 ++ .../5.10.84-10.4.al8.x86_64/virtio_blk.h | 310 ++++++++ .../include/5.10.84-10.al8.x86_64/nvme.h | 63 ++ .../5.10.84-10.al8.x86_64/virtio_blk.h | 310 ++++++++ .../include/5.17.0-1.an23.x86_64/nvme.h | 61 ++ .../include/5.17.0-1.an23.x86_64/virtio_blk.h | 304 +++++++ .../include/5.19.0-1_rc1.an23.x86_64/nvme.h | 61 ++ .../5.19.0-1_rc1.an23.x86_64/virtio_blk.h | 302 +++++++ .../kernel_module/modules/iosdiag/iosdiag.c | 420 ++++++++++ .../kernel_module/modules/iosdiag/iosdiag.h | 109 +++ .../kernel_module/modules/iosdiag/nvme.c | 65 ++ .../kernel_module/modules/iosdiag/rq_hang.c | 350 ++++++++ .../kernel_module/modules/iosdiag/scsi.c | 35 + .../modules/iosdiag/virtio_blk.c | 148 ++++ .../kernel_module/modules/memhunter/common.c | 227 ++++++ .../kernel_module/modules/memhunter/common.h | 133 ++++ .../modules/memhunter/filecache.c | 190 +++++ .../kernel_module/modules/memhunter/memcg.c | 513 ++++++++++++ .../kernel_module/modules/memhunter/memcg.h | 54 ++ .../modules/memhunter/memcg_dia.c | 30 + .../modules/memhunter/memcontrol_7.h | 284 +++++++ .../modules/memhunter/memhunter.c | 58 ++ .../kernel_module/modules/memleak/hashlist.c | 397 ++++++++++ .../kernel_module/modules/memleak/mem.h | 188 +++++ .../kernel_module/modules/memleak/memleak.c | 717 +++++++++++++++++ .../kernel_module/modules/memleak/objects.c | 322 ++++++++ .../kernel_module/modules/memleak/user.h | 54 ++ .../modules/mmaptrace/mmaptrace.c | 596 ++++++++++++++ .../kernel_module/modules/sched/noschedule.c | 696 ++++++++++++++++ .../modules/sched/trace_irqoff.c | 634 +++++++++++++++ .../modules/sched/trace_runqlat.c | 675 ++++++++++++++++ .../modules/schedtrace/schedtrace.c | 259 ++++++ .../kernel_module/modules/signal/trace_sig.c | 220 +++++ .../modules/task_ctl/task_ctrl.c | 169 ++++ .../kernel_module/modules/test_module/test.c | 26 + .../modules/ulockcheck/ulockcheck.c | 696 ++++++++++++++++ .../lib/internal/kernel_module/sysak_mods.c | 100 +++ .../lib/internal/kernel_module/sysak_mods.h | 21 + source/sysak | Bin 0 -> 38920 bytes source/tools/detect/mem/memleak/main.c | 84 +- sysak-module.zip | Bin 0 -> 298046 bytes 176 files changed, 28973 insertions(+), 2 deletions(-) create mode 100644 config-host.mak create mode 100644 source/lib/internal/kernel_module/LICENSE create mode 100644 source/lib/internal/kernel_module/Makefile create mode 100644 source/lib/internal/kernel_module/README.md create mode 100644 source/lib/internal/kernel_module/common/blackbox.c create mode 100644 source/lib/internal/kernel_module/common/chrdev.c create mode 100644 source/lib/internal/kernel_module/common/event.c create mode 100644 source/lib/internal/kernel_module/common/hook.c create mode 100644 source/lib/internal/kernel_module/common/internal.h create mode 
100644 source/lib/internal/kernel_module/common/ksymbol.c create mode 100644 source/lib/internal/kernel_module/common/proc.c create mode 100644 source/lib/internal/kernel_module/common/stack.c create mode 100644 source/lib/internal/kernel_module/entry.c create mode 100644 source/lib/internal/kernel_module/include/blackbox.h create mode 100644 source/lib/internal/kernel_module/include/common.h create mode 100644 source/lib/internal/kernel_module/include/hook.h create mode 100644 source/lib/internal/kernel_module/include/ksymbol.h create mode 100644 source/lib/internal/kernel_module/include/memleak.h create mode 100644 source/lib/internal/kernel_module/include/proc.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.1.1.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.1.1.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.1.2.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.1.2.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.12.1.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.12.1.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.18.1.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.18.1.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.1.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.1.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.2.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.2.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.3.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.3.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.7.1.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.7.1.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.9.1.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.9.1.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.10.1.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.10.1.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.13.1.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.13.1.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.18.2.el7.x86_64/nvme.h create mode 100644 
source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.18.2.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.19.1.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.19.1.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.8.2.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.8.2.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.11.1.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.11.1.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.15.2.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.15.2.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.2.1.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.2.1.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.2.2.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.2.2.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.6.1.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.6.1.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-862.14.4.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-862.14.4.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-957.21.3.el7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-957.21.3.el7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.24-9.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.24-9.al7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.81-17.1.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.81-17.1.al7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.81-17.2.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.81-17.2.al7.x86_64/virtio_blk.h create mode 100755 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-013.ali4000.an7.x86_64/nvme.h create mode 100755 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-013.ali4000.an7.x86_64/virtio_blk.h create mode 100644 
source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-18.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-18.al7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-19.1.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-19.1.al7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-19.2.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-19.2.al7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-21.2.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-21.2.al7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-21.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-21.al7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-22.1.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-22.1.al7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-22.2.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-22.2.al7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-23.4.an8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-23.4.an8.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-23.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-23.al7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.1.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.1.al7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.8.an8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.8.an8.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.al7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-25.an8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-25.an8.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.1.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.1.al7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.6.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.6.al7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.al7.x86_64/virtio_blk.h create mode 100644 
source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.an8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.an8.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-27.al7.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-27.al7.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.1.al8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.1.al8.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.2.al8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.2.al8.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.al8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.al8.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.1.al8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.1.al8.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.2.al8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.2.al8.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.al8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.al8.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.1.al8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.1.al8.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.al8.aarch64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.al8.aarch64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.al8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.al8.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.1.al8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.1.al8.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.2.al8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.2.al8.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.3.al8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.3.al8.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.4.al8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.4.al8.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.al8.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.al8.x86_64/virtio_blk.h create mode 100644 
source/lib/internal/kernel_module/modules/iosdiag/include/5.17.0-1.an23.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.17.0-1.an23.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.19.0-1_rc1.an23.x86_64/nvme.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/include/5.19.0-1_rc1.an23.x86_64/virtio_blk.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/iosdiag.c create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/iosdiag.h create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/nvme.c create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/rq_hang.c create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/scsi.c create mode 100644 source/lib/internal/kernel_module/modules/iosdiag/virtio_blk.c create mode 100644 source/lib/internal/kernel_module/modules/memhunter/common.c create mode 100644 source/lib/internal/kernel_module/modules/memhunter/common.h create mode 100644 source/lib/internal/kernel_module/modules/memhunter/filecache.c create mode 100644 source/lib/internal/kernel_module/modules/memhunter/memcg.c create mode 100644 source/lib/internal/kernel_module/modules/memhunter/memcg.h create mode 100644 source/lib/internal/kernel_module/modules/memhunter/memcg_dia.c create mode 100644 source/lib/internal/kernel_module/modules/memhunter/memcontrol_7.h create mode 100644 source/lib/internal/kernel_module/modules/memhunter/memhunter.c create mode 100755 source/lib/internal/kernel_module/modules/memleak/hashlist.c create mode 100755 source/lib/internal/kernel_module/modules/memleak/mem.h create mode 100755 source/lib/internal/kernel_module/modules/memleak/memleak.c create mode 100644 source/lib/internal/kernel_module/modules/memleak/objects.c create mode 100644 source/lib/internal/kernel_module/modules/memleak/user.h create mode 100644 source/lib/internal/kernel_module/modules/mmaptrace/mmaptrace.c create mode 100644 source/lib/internal/kernel_module/modules/sched/noschedule.c create mode 100644 source/lib/internal/kernel_module/modules/sched/trace_irqoff.c create mode 100644 source/lib/internal/kernel_module/modules/sched/trace_runqlat.c create mode 100644 source/lib/internal/kernel_module/modules/schedtrace/schedtrace.c create mode 100755 source/lib/internal/kernel_module/modules/signal/trace_sig.c create mode 100755 source/lib/internal/kernel_module/modules/task_ctl/task_ctrl.c create mode 100755 source/lib/internal/kernel_module/modules/test_module/test.c create mode 100644 source/lib/internal/kernel_module/modules/ulockcheck/ulockcheck.c create mode 100644 source/lib/internal/kernel_module/sysak_mods.c create mode 100644 source/lib/internal/kernel_module/sysak_mods.h create mode 100755 source/sysak create mode 100644 sysak-module.zip diff --git a/config-host.mak b/config-host.mak new file mode 100644 index 00000000..a0dede6b --- /dev/null +++ b/config-host.mak @@ -0,0 +1,6 @@ +# Automatically generated by configure - do not modify +KERNEL_VERSION = 4.19.91-27.4.2.kos5.x86_64 +OBJPATH = /opt/sysak/out +BUILD_KERNEL_MODULE = YES +BUILD_LIBBPF = NO +TARGET_LIST = /opt/sysak/source/tools/detect/generic/cpuirq /opt/sysak/source/tools/detect/sched/sysmonitor /opt/sysak/source/tools/detect/sched/cpu_flamegraph /opt/sysak/source/tools/detect/mem/memleak diff --git a/source/lib/internal/kernel_module/LICENSE b/source/lib/internal/kernel_module/LICENSE new file mode 100644 index 00000000..89e08fb0 --- /dev/null 
+++ b/source/lib/internal/kernel_module/LICENSE @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. 
(Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. 
BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. 
Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/source/lib/internal/kernel_module/Makefile b/source/lib/internal/kernel_module/Makefile new file mode 100644 index 00000000..1b0ffc1b --- /dev/null +++ b/source/lib/internal/kernel_module/Makefile @@ -0,0 +1,72 @@ +ifeq ($(KERNEL_VERSION),) +KERNEL_VERSION = $(shell uname -r) +endif + +KERNEL_BUILD_PATH := /usr/src/kernels/$(KERNEL_VERSION) + +ifneq ($(KERNEL_BUILD_PATH), $(wildcard $(KERNEL_BUILD_PATH))) +KERNEL_BUILD_PATH := /lib/modules/$(KERNEL_VERSION)/build +endif + + +ifneq ($(KERNELRELEASE),) +#common +sysak-objs += ./common/chrdev.o ./common/event.o ./common/hook.o ./common/stack.o ./common/proc.o +sysak-objs += ./common/blackbox.o +sysak-objs += ./common/ksymbol.o +sysak-objs += ./entry.o sysak_mods.o + +#modules +#sysak-objs += modules/test_module/test.o +ifneq ($(findstring tracesig,$(TARGET_LIST)),) +sysak-objs += modules/signal/trace_sig.o +endif +ifneq ($(findstring memleak,$(TARGET_LIST)),) +sysak-objs += modules/memleak/memleak.o +sysak-objs += modules/memleak/objects.o +sysak-objs += modules/memleak/hashlist.o +endif +ifneq ($(findstring runlatency,$(TARGET_LIST)),) +sysak-objs += modules/sched/noschedule.o modules/sched/trace_irqoff.o modules/sched/trace_runqlat.o +endif +ifneq ($(findstring taskctl,$(TARGET_LIST)),) +sysak-objs += modules/task_ctl/task_ctrl.o +endif +ifneq ($(findstring schedtrace,$(TARGET_LIST)),) +sysak-objs += modules/schedtrace/schedtrace.o +endif +ifneq ($(findstring mmaptrace,$(TARGET_LIST)),) +sysak-objs += modules/mmaptrace/mmaptrace.o +endif +ifneq ($(findstring ulockcheck,$(TARGET_LIST)),) +sysak-objs += modules/ulockcheck/ulockcheck.o +endif +ifneq ($(findstring iosdiag,$(TARGET_LIST)),) +sysak-objs += modules/iosdiag/iosdiag.o modules/iosdiag/rq_hang.o modules/iosdiag/virtio_blk.o modules/iosdiag/nvme.o modules/iosdiag/scsi.o +endif +ifneq ($(findstring memhunter,$(TARGET_LIST)),) +sysak-objs += modules/memhunter/memhunter.o +sysak-objs += modules/memhunter/common.o +sysak-objs += modules/memhunter/memcg.o +sysak-objs += modules/memhunter/memcg_dia.o +sysak-objs += modules/memhunter/filecache.o +endif + +obj-m += sysak.o + + +EXTRA_CFLAGS := -I$(MODULE_SRC) +EXTRA_CFLAGS += -I$(MODULE_SRC)/include +ifneq ($(findstring iosdiag,$(TARGET_LIST)),) +EXTRA_CFLAGS += -I$(MODULE_SRC)/modules/iosdiag -I$(MODULE_SRC)/modules/iosdiag/include/$(KERNEL_VERSION) +endif + +else + +export MODULE_SRC=$(shell pwd) +sysak_mod: + make -C $(KERNEL_BUILD_PATH) M=$(MODULE_SRC) + +clean: + make -C $(KERNEL_BUILD_PATH) M=$(MODULE_SRC) clean +endif diff --git a/source/lib/internal/kernel_module/README.md b/source/lib/internal/kernel_module/README.md new file mode 100644 index 00000000..7b935f7b --- /dev/null +++ b/source/lib/internal/kernel_module/README.md @@ -0,0 +1,11 @@ +# sysak-module + +#### 介绍 +sysak对于部分低版本内核进行问题诊断,或对内核实现一些增强功能,需要通过内核模块的方式来进行实现基础框架。 + +#### 新增功能 +1) kernel module源码放在modules/目录下,新功能自己建子目录或单独放一个文件都可以,比如test/test_module.c +2) 
将新功能模块的名字、init函数、exit函数放入sysak_mods.c中的sysak_modules数组中完成注册 +3) 将新功能模块的实现文件加入到模块makefile,比如上面新增的test/test_module.c + vi Makefile + sysak-objs += test/test_module.o diff --git a/source/lib/internal/kernel_module/common/blackbox.c b/source/lib/internal/kernel_module/common/blackbox.c new file mode 100644 index 00000000..52f0fdc6 --- /dev/null +++ b/source/lib/internal/kernel_module/common/blackbox.c @@ -0,0 +1,749 @@ +/* + * Copyright (C) 2018 Alibaba Group + * All rights reserved. + * Written by Wetp Zhang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "proc.h" +#include "blackbox.h" +#include "internal.h" + +#define DEFAULT_BBOX_SIZE 0x2000000 +static unsigned int bbox_total_size = DEFAULT_BBOX_SIZE; +static DEFINE_SPINLOCK(bbox_alloc_lock); +static void *bbox_vmalloc_base; +static unsigned long *bbox_map; +static unsigned int bbox_max_id; +static unsigned int bbox_latest_id; + +static unsigned long *bbox_dyn_map; +static unsigned int bbox_dynamic_max; +static unsigned long bbox_dynamic_start; +RADIX_TREE(bbox_dynamic_tree, GFP_NOWAIT); + +static inline unsigned int bbox_id_to_dyn_idx(unsigned int bbox_id) +{ + return bbox_id - bbox_max_id - 1; +} + +static inline unsigned int dyn_idx_to_bbox_id(unsigned int idx) +{ + return idx + bbox_max_id + 1; +} + +static inline struct bbox_info *get_bbox(unsigned int bbox_id) +{ + unsigned idx = bbox_id; + + if (!bbox_vmalloc_base) + return NULL; + + if (idx < bbox_max_id) + return bbox_vmalloc_base + (idx * BBOX_SIZE); + + idx = bbox_id_to_dyn_idx(bbox_id); + if (idx >= bbox_dynamic_max || !bbox_dyn_map) + return NULL; + + return radix_tree_lookup(&bbox_dynamic_tree, idx); +} + +static inline int bbox_type(struct bbox_info *bbox) +{ + return bbox->flags & BBOX_TYPE_MASK; +} + +static inline void bbox_lock(struct bbox_info *bbox, + unsigned long *flags) +{ + spin_lock_irqsave(&bbox->lock, *flags); +} + +static inline void bbox_unlock(struct bbox_info *bbox, + unsigned long flags) +{ + spin_unlock_irqrestore(&bbox->lock, flags); +} + +static inline void *bbox_record_top(struct bbox_info *bbox) +{ + if (bbox->records.cnt) + return bbox->records.arr[bbox->records.cnt - 1].start; + else + return bbox->data_end; +} + +static inline int avail_size(struct bbox_info *bbox) +{ + if (bbox_type(bbox) == BBOX_TYPE_RING) + return bbox->data_end - bbox->ringbuf.write_ptr; + else + return bbox_record_top(bbox) - bbox->data_base; +} + +static ssize_t bbox_ring_write(struct bbox_info *bbox, + struct bbox_data_info *data_info) +{ + int size = data_info->size; + int tail_size = avail_size(bbox); + int bbox_size = bbox->data_end - bbox->data_base; + + if (likely(size <= tail_size)) { + memcpy(bbox->ringbuf.write_ptr, data_info->data, size); + bbox->ringbuf.write_ptr += size; + } else { + if (size > bbox_size) + size = bbox_size; + + if (tail_size > 0) + memcpy(bbox->ringbuf.write_ptr, + data_info->data, tail_size); + memcpy(bbox->data_base, + data_info->data + tail_size, size - tail_size); + bbox->ringbuf.write_ptr = bbox->data_base + (size - tail_size); + } + + return size; +} + +static ssize_t bbox_record_write(struct bbox_info *bbox, + struct bbox_data_info *data_info) +{ + struct record_info *r_info; + unsigned int size 
= data_info->size; + unsigned int slot = data_info->slot; + + if (slot >= bbox->records.cnt) + return -EINVAL; + + r_info = &bbox->records.arr[slot]; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) + ktime_get_real_ts64(&r_info->mtime); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) + ktime_get_ts64(&r_info->mtime); +#else + getnstimeofday64(&r_info->mtime); +#endif + size = min(size, r_info->size); + memcpy(r_info->start, data_info->data, size); + if (virt_addr_valid(data_info->task)) { + strncpy(r_info->tsk_comm, data_info->task->comm, TASK_COMM_LEN); + r_info->cpu = task_cpu(data_info->task); + r_info->pid = data_info->task->pid; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) + r_info->state = data_info->task->__state; +#elif LINUX_VERSION_CODE == KERNEL_VERSION(4, 18, 0) + r_info->state = data_info->task->__state; +#else + r_info->state = data_info->task->state; +#endif + } + return size; +} + +ssize_t bbox_write(unsigned int bbox_id, struct bbox_data_info *data_info) +{ + struct bbox_info *bbox; + unsigned long flags; + int ret = -EINVAL; + + if (!data_info || !data_info->data) + return ret; + + bbox = get_bbox(bbox_id); + if (!bbox) + return ret; + + bbox_lock(bbox, &flags); + + if (bbox_type(bbox) == BBOX_TYPE_RING) + ret = bbox_ring_write(bbox, data_info); + else + ret = bbox_record_write(bbox, data_info); + + bbox_unlock(bbox, flags); + return ret; +} + +static ssize_t bbox_ring_read(struct bbox_info *bbox, + struct bbox_data_info *data_info) +{ + unsigned int count = 0, avl_sz, size = data_info->size; + void *read_end = READ_ONCE(bbox->ringbuf.write_ptr); + + if (bbox->ringbuf.read_ptr > read_end) { + avl_sz = bbox->data_end - bbox->ringbuf.read_ptr; + count = min(size, avl_sz); + memcpy(data_info->data, bbox->ringbuf.read_ptr, count); + size -= count; + bbox->ringbuf.read_ptr += count; + if (bbox->ringbuf.read_ptr >= bbox->data_end) + bbox->ringbuf.read_ptr = bbox->data_base; + } + + if (!size) + return count; + + avl_sz = read_end - bbox->ringbuf.read_ptr; + size = min(avl_sz, size); + if (size) { + memcpy(data_info->data + count, bbox->ringbuf.read_ptr, size); + bbox->ringbuf.read_ptr += size; + } + + count += size; + return count; +} + +static ssize_t bbox_record_read(struct bbox_info *bbox, + struct bbox_data_info *data_info) +{ + struct record_info *r_info; + unsigned long flags; + unsigned int slot = data_info->slot; + unsigned int size = data_info->size; + + bbox_lock(bbox, &flags); + + if (slot >= bbox->records.cnt) { + bbox_unlock(bbox, flags); + return -EINVAL; + } + + r_info = &bbox->records.arr[slot]; + size = min(size, r_info->size); + memcpy(data_info->data, r_info->start, size); + memcpy(&data_info->mtime, &r_info->mtime, + sizeof(struct timespec64)); + bbox_unlock(bbox, flags); + return size; +} + +ssize_t bbox_read(unsigned int bbox_id, struct bbox_data_info *data_info) +{ + struct bbox_info *bbox; + int ret = -EINVAL; + + if (!data_info || !data_info->data || data_info->size <= 0) + return ret; + + bbox = get_bbox(bbox_id); + if (!bbox) + return ret; + + if (bbox_type(bbox) == BBOX_TYPE_RING) + ret = bbox_ring_read(bbox, data_info); + else + ret = bbox_record_read(bbox, data_info); + + return ret; +} + +void +bbox_set_record_desc(unsigned int bbox_id, unsigned int slot, const char *desc) +{ + struct bbox_info *bbox; + struct record_info *r_info; + unsigned long flags; + + bbox = get_bbox(bbox_id); + if (!bbox) + return; + + if (bbox_type(bbox) != BBOX_TYPE_RECORD) + return; + + bbox_lock(bbox, &flags); + if (slot < bbox->records.cnt) { + r_info 
= &bbox->records.arr[slot]; + r_info->desc[BBOX_RECORD_DESC_LEN - 1] = 0; + if (desc) + strncpy(r_info->desc, desc, BBOX_RECORD_DESC_LEN - 1); + else + strcpy(r_info->desc, " "); + } + bbox_unlock(bbox, flags); +} + +int bbox_alloc_record_slot(unsigned int bbox_id, unsigned int size, + unsigned int type) +{ + struct bbox_info *bbox; + struct record_info *r_info; + unsigned long flags; + int slot = -EINVAL; + + bbox = get_bbox(bbox_id); + if (!bbox) + return slot; + + if (bbox_type(bbox) != BBOX_TYPE_RECORD) + return slot; + + bbox_lock(bbox, &flags); + + slot = -ENOSPC; + if (avail_size(bbox) < (size + sizeof(struct record_info))) + goto out; + + slot = bbox->records.cnt; + r_info = &bbox->records.arr[slot]; + r_info->start = bbox_record_top(bbox) - size; + r_info->size = size; + r_info->type = type; + r_info->mtime.tv_sec = 0; + r_info->mtime.tv_nsec = 0; + r_info->cpu = -1; + r_info->pid = -1; + r_info->state = -1; + r_info->tsk_comm[0] = '\0'; + r_info->desc[0] = 0; + + bbox->data_base += sizeof(struct record_info); + bbox->records.cnt++; +out: + bbox_unlock(bbox, flags); + return slot; +} + +static inline void bbox_record_clear_one(struct bbox_info *bbox, + unsigned int slot) +{ + struct record_info *r_info; + + if (slot >= bbox->records.cnt) + return; + + r_info = &bbox->records.arr[slot]; + r_info->mtime.tv_sec = 0; + r_info->mtime.tv_nsec = 0; +} + +static void bbox_record_clear_all(struct bbox_info *bbox) +{ + int i; + + for (i = 0; i < bbox->records.cnt; i++) + bbox_record_clear_one(bbox, i); +} + +void bbox_record_clear(unsigned int bbox_id, int slot_id) +{ + unsigned long flags; + struct bbox_info *bbox = get_bbox(bbox_id); + + if (!bbox) + return; + + bbox_lock(bbox, &flags); + if (slot_id < 0) + bbox_record_clear_all(bbox); + else + bbox_record_clear_one(bbox, slot_id); + bbox_unlock(bbox, flags); +} + +static void bbox_setup(struct bbox_info *bbox, + const char *name, int flags, int size) +{ + bbox->magic = BBOX_BUFF_MAGIC; + bbox->name[BBOX_NAME_LEN - 1] = '\0'; + if (name) + strncpy(bbox->name, name, BBOX_NAME_LEN - 1); + else + strncpy(bbox->name, "bbox", BBOX_NAME_LEN - 1); + + /* set flags first, then bbox_type() below can work */ + bbox->flags = flags; + + if (bbox_type(bbox) == BBOX_TYPE_RING) { + bbox->data_base = bbox + 1; + bbox->ringbuf.write_ptr = bbox->data_base; + bbox->ringbuf.read_ptr = bbox->data_base; + } else { + bbox->records.cnt = 0; + bbox->data_base = bbox->records.arr; + } + + bbox->data_end = (void *)bbox + size; + spin_lock_init(&bbox->lock); +} + +int bbox_alloc(const char *name, int flags) +{ + struct bbox_info *bbox; + unsigned int bbox_id; + + spin_lock(&bbox_alloc_lock); + + bbox_id = find_next_zero_bit(bbox_map, bbox_max_id, bbox_latest_id); + if (bbox_id >= bbox_max_id) + bbox_id = find_first_zero_bit(bbox_map, bbox_max_id); + + if (bbox_id >= bbox_max_id) { + spin_unlock(&bbox_alloc_lock); + return -ENOSPC; + } + + set_bit(bbox_id, bbox_map); + bbox_latest_id = bbox_id; + spin_unlock(&bbox_alloc_lock); + + bbox = get_bbox(bbox_id); + if (!bbox) { + /* should never be here */ + WARN_ONCE(true, "bbox_buffer was NULL, id %d\n", bbox_id); + return -EFAULT; + } + + bbox_setup(bbox, name, flags, BBOX_SIZE); + return bbox_id; +} + +void bbox_update_name(unsigned int bbox_id, const char *name) +{ + struct bbox_info *bbox = get_bbox(bbox_id); + unsigned long flags; + + if (!bbox || !name) + return; + + bbox_lock(bbox, &flags); + memset(bbox->name, 0, BBOX_NAME_LEN); + strncpy(bbox->name, name, BBOX_NAME_LEN - 1); + bbox_unlock(bbox, flags); +} + +void 
bbox_free(unsigned int bbox_id) +{ + if (bbox_id < bbox_max_id) + clear_bit(bbox_id, bbox_map); + else { + unsigned int idx = bbox_id_to_dyn_idx(bbox_id); + struct bbox_info *bbox; + + if (!bbox_dyn_map || idx >= bbox_dynamic_max) + return; + + spin_lock(&bbox_alloc_lock); + bbox = get_bbox(bbox_id); + if (!bbox) { + spin_unlock(&bbox_alloc_lock); + return; + } + + clear_bit(idx, bbox_dyn_map); + radix_tree_delete(&bbox_dynamic_tree, idx); + spin_unlock(&bbox_alloc_lock); + vfree(bbox); + } +} + +int bbox_alloc_dynamic(const char *name, int flags, unsigned int pages) +{ + int idx, ret; + struct bbox_info *bbox; + unsigned int size = pages << PAGE_SHIFT; + + bbox = vmalloc(size); + if (!bbox) + return -ENOMEM; + + spin_lock(&bbox_alloc_lock); + idx = find_next_zero_bit(bbox_dyn_map, bbox_dynamic_max, + bbox_dynamic_start); + if (idx >= bbox_dynamic_max) + idx = find_first_zero_bit(bbox_dyn_map, bbox_dynamic_max); + if (idx >= bbox_dynamic_max) { + spin_unlock(&bbox_alloc_lock); + vfree(bbox); + return -ENOSPC; + } + + ret = radix_tree_insert(&bbox_dynamic_tree, idx, bbox); + if (ret) { + spin_unlock(&bbox_alloc_lock); + vfree(bbox); + return ret; + } + + set_bit(idx, bbox_dyn_map); + bbox_dynamic_start = idx; + spin_unlock(&bbox_alloc_lock); + + bbox_setup(bbox, name, flags, size); + return dyn_idx_to_bbox_id(idx); +} + +static void free_dynamic_bbox(void) +{ + int idx = 0; + + if (!bbox_dyn_map) + return; + + idx = find_first_bit(bbox_dyn_map, bbox_dynamic_max); + while (idx < bbox_dynamic_max) { + bbox_free(dyn_idx_to_bbox_id(idx)); + idx = find_next_bit(bbox_dyn_map, bbox_dynamic_max, idx); + } +} + +/* just think it stores raw strings. */ +static int bbox_ring_show_content(struct seq_file *seq, struct bbox_info *bbox) +{ + char buf[128]; + int ret, i; + struct bbox_data_info data; + + data.data = buf; + data.size = 128; + + while (1) { + ret = bbox_ring_read(bbox, &data); + if (ret <= 0) + break; + + for (i = 0; i < ret; i++) + seq_printf(seq, "%c", buf[i]); + } + return 0; +} + +int bbox_ring_show(struct seq_file *seq, unsigned int bbox_id) +{ + struct bbox_info *bbox = get_bbox(bbox_id); + + if (!seq || !bbox) + return -EINVAL; + + return bbox_ring_show_content(seq, bbox); +} + +static void bbox_show_time(struct seq_file *seq, struct timespec64 *ts) +{ + struct rtc_time tm; + unsigned long local_time; + + local_time = (unsigned long)(ts->tv_sec - (sys_tz.tz_minuteswest * 60)); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) + rtc_time64_to_tm(local_time, &tm); +#else + rtc_time_to_tm(local_time, &tm); + +#endif + seq_printf(seq, "\n[%04d-%02d-%02d %02d:%02d:%02d.%ld]\n", + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + tm.tm_hour, tm.tm_min, tm.tm_sec, ts->tv_nsec); +} + +static int bbox_record_show_one(struct seq_file *seq, + struct bbox_info *bbox, unsigned int slot) +{ + struct record_info *r_info; + struct bbox_data_info data; + void *buf; + int ret; + + if (slot >= bbox->records.cnt) + return -EINVAL; + + r_info = &bbox->records.arr[slot]; + /*no data had been written, ignore*/ + if (!r_info->mtime.tv_sec) + return 0; + + buf = kmalloc(r_info->size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + data.data = buf; + data.slot = slot; + data.size = r_info->size; + ret = bbox_record_read(bbox, &data); + if (ret <= 0) { + kfree(buf); + return 0; + } + + bbox_show_time(seq, &data.mtime); + + switch (r_info->type) { + case BBOX_DATA_TYPE_STRING: + seq_printf(seq, "%s\n", (char *)buf); + break; + case BBOX_DATA_TYPE_TRACE: + seq_printf(seq, + "CPU: %d PID: %d state %d comm: %s %s 
Call Trace:\n", + r_info->cpu, r_info->pid, r_info->state, + r_info->tsk_comm, r_info->desc); + while (ret > 0) { + void *ptr = *(void **)buf; + + if (ptr) + seq_printf(seq, "%pS\n", ptr); + buf += sizeof(void *); + ret -= sizeof(void *); + } + break; + case BBOX_DATA_TYPE_DATA: + seq_printf(seq, "%d bytes data:\n", ret); + while (ret > 0) { + seq_printf(seq, "%lx\n", *(unsigned long *)buf); + buf += sizeof(long); + ret -= sizeof(long); + } + break; + default: + break; + } + + kfree(data.data); + return 0; +} + +static int bbox_record_show_all(struct seq_file *seq, struct bbox_info *bbox) +{ + int i; + + seq_printf(seq, "[%s] capacity: %d\n", bbox->name, bbox->records.cnt); + + for (i = 0; i < bbox->records.cnt; i++) { + bbox_record_show_one(seq, bbox, i); + cond_resched(); + } + + return 0; +} + +int bbox_record_show(struct seq_file *seq, unsigned int bbox_id, int slot_id) +{ + struct bbox_info *bbox = get_bbox(bbox_id); + + if (!seq || !bbox) + return -EINVAL; + + if (slot_id < 0) + return bbox_record_show_all(seq, bbox); + else + return bbox_record_show_one(seq, bbox, slot_id); +} + +static int bbox_seq_show(struct seq_file *seq, void *v) +{ + struct bbox_info *bbox = v; + + seq_printf(seq, "Bbox %s:\n", bbox->name); + + if (bbox_type(bbox) == BBOX_TYPE_RING) + bbox_ring_show_content(seq, bbox); + else + bbox_record_show_all(seq, bbox); + + seq_puts(seq, "\n"); + return 0; +} + +static void *bbox_seq_start(struct seq_file *seq, loff_t *pos) +{ + *pos = find_next_bit(bbox_map, bbox_max_id, *pos); + return get_bbox(*pos); +} + +static void *bbox_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + *pos = find_next_bit(bbox_map, bbox_max_id, *pos + 1); + return get_bbox(*pos); +} + +static void bbox_seq_stop(struct seq_file *seq, void *v) +{ +} + +static const struct seq_operations bbox_seq_ops = { + .start = bbox_seq_start, + .next = bbox_seq_next, + .stop = bbox_seq_stop, + .show = bbox_seq_show, +}; + +static int bbox_seq_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &bbox_seq_ops); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) +static const struct proc_ops proc_bbox_operations = { + .proc_open = bbox_seq_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = seq_release, +}; +#else +const struct file_operations proc_bbox_operations = { + .open = bbox_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; +#endif + +int sysak_bbox_init(void) +{ + void *addr; + unsigned int nlongs; + + bbox_max_id = bbox_total_size / BBOX_SIZE; + bbox_total_size = bbox_max_id * BBOX_SIZE; + if (!bbox_total_size) + return -EINVAL; + + nlongs = BITS_TO_LONGS(bbox_max_id); + bbox_map = kzalloc(sizeof(long) * nlongs, GFP_KERNEL); + if (!bbox_map) + return -ENOMEM; + + addr = vmalloc(bbox_total_size); + if (!addr) { + kfree(bbox_map); + return -ENOMEM; + } + + bbox_vmalloc_base = addr; + + bbox_dynamic_max = bbox_max_id * 100; + nlongs = BITS_TO_LONGS(bbox_dynamic_max); + bbox_dyn_map = kzalloc(sizeof(long) * nlongs, GFP_KERNEL); + if (!bbox_dyn_map) + printk(KERN_INFO "dynamic bbox is disabled\n"); + + sysak_proc_create("bbox", &proc_bbox_operations); + printk(KERN_INFO "pre-alloc %dB for blackbox\n", bbox_total_size); + return 0; +} + +void sysak_bbox_exit(void) +{ + free_dynamic_bbox(); + if (bbox_dyn_map) + kfree(bbox_dyn_map); + vfree(bbox_vmalloc_base); +} diff --git a/source/lib/internal/kernel_module/common/chrdev.c b/source/lib/internal/kernel_module/common/chrdev.c new file mode 100644 index 00000000..78c431df 
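[Editor's sketch, not part of this patch] The blackbox code above exposes a small record-style API (declared further down in include/blackbox.h). A minimal in-kernel usage sketch, assuming bbox_write() consumes the data/size/slot fields of struct bbox_data_info as those declarations suggest:

static int demo_bbox_id, demo_slot;

static int demo_bbox_record(void)
{
	struct bbox_data_info info = {};
	static const char msg[] = "demo event";

	/* a record-type box: fixed slots instead of a ring buffer */
	demo_bbox_id = bbox_alloc("demo", BBOX_TYPE_RECORD);
	if (demo_bbox_id < 0)
		return demo_bbox_id;

	/* reserve 64 bytes for a string record and label the slot */
	demo_slot = bbox_alloc_record_slot(demo_bbox_id, 64,
					   BBOX_DATA_TYPE_STRING);
	if (demo_slot < 0)
		return demo_slot;
	bbox_set_record_desc(demo_bbox_id, demo_slot, "demo");

	info.data = (void *)msg;
	info.size = sizeof(msg);
	info.slot = demo_slot;
	return bbox_write(demo_bbox_id, &info);
}

Records written this way can be dumped through the /proc/sysak/bbox seq_file that sysak_bbox_init() registers above.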
--- /dev/null +++ b/source/lib/internal/kernel_module/common/chrdev.c @@ -0,0 +1,154 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "common.h" + +static DEFINE_MUTEX(dev_mutex); +static int sysak_dev_major = -1; +static struct class *sysak_dev_class = NULL; +static struct device *sysak_dev = NULL; + +struct sysak_dev { + struct cdev cdev; +}; + +int __attribute__((weak)) memhunter_handler_cmd(int cmd, unsigned long arg) +{ + return -ENOSYS; +} + +int __attribute__((weak)) memleak_handler_cmd(int cmd, unsigned long arg) +{ + return -ENOSYS; +} + +static long sysak_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int ret = -EINVAL; + int type, nr; + + if (!mutex_trylock(&dev_mutex)) + return -EBUSY; + + type = _IOC_TYPE(cmd); + nr = _IOC_NR(cmd); + switch (type) { + case MEMLEAK_IOCTL_CMD: + ret = memleak_handler_cmd(nr, arg); + break; + case MEMHUNTER_IOCTL_CMD: + ret = memhunter_handler_cmd(nr, arg); + break; + default: + printk("defualt ioctl cmd =%d, nr = %d\n", type, nr); + break; + } + + mutex_unlock(&dev_mutex); + return ret; +} + +static int sysak_open(struct inode *inode, struct file *file) +{ + if (!mutex_trylock(&dev_mutex)) + return -EBUSY; + __module_get(THIS_MODULE); + printk("sysak open\n"); + mutex_unlock(&dev_mutex); + + return 0; +} + +static int sysak_release(struct inode *inode, struct file *file) +{ + + if (!mutex_trylock(&dev_mutex)) + return -EBUSY; + + printk("sysak close\n"); + module_put(THIS_MODULE); + mutex_unlock(&dev_mutex); + return 0; +} + +static const struct file_operations sysak_fops = { + .open = sysak_open, + .release = sysak_release, + .unlocked_ioctl = sysak_ioctl, +}; + +static char *sysak_devnode(struct device *dev, umode_t *mode) +{ + if (mode) + *mode = S_IRUGO | S_IRWXUGO | S_IALLUGO; + + return kstrdup("sysak", GFP_KERNEL);; +} + +int sysak_dev_init(void) +{ + int ret = 0; + + sysak_dev_major = register_chrdev(0, CHR_NAME, &sysak_fops);; + + if (sysak_dev_major < 0) { + printk("sysak: failed to register device\n"); + return sysak_dev_major; + } + + sysak_dev_class = class_create(THIS_MODULE, CHR_NAME); + if (IS_ERR(sysak_dev_class)) { + ret = PTR_ERR(sysak_dev_class); + printk(KERN_ERR "sysak: class_create err=%d", ret); + unregister_chrdev(sysak_dev_major, CHR_NAME); + + return ret; + } + sysak_dev_class->devnode = sysak_devnode; + + sysak_dev = device_create(sysak_dev_class, NULL, MKDEV(sysak_dev_major, 0), NULL, CHR_NAME); + if (IS_ERR(sysak_dev)) { + ret = PTR_ERR(sysak_dev); + printk(KERN_ERR "sysak: device_create err=%d", ret); + unregister_chrdev(sysak_dev_major, CHR_NAME); + class_destroy(sysak_dev_class); + + return ret; + } + + return 0; +} + +void sysak_dev_uninit(void) +{ + if (sysak_dev_major >= 0) + unregister_chrdev(sysak_dev_major, CHR_NAME); + + if (sysak_dev != NULL) + device_destroy(sysak_dev_class, MKDEV(sysak_dev_major, 0)); + + if (sysak_dev_class != NULL) + class_destroy(sysak_dev_class); + + sysak_dev_major = -1; + sysak_dev = NULL; + sysak_dev_class = NULL; +} diff --git a/source/lib/internal/kernel_module/common/event.c b/source/lib/internal/kernel_module/common/event.c new file mode 100644 index 00000000..9102812a --- /dev/null +++ b/source/lib/internal/kernel_module/common/event.c @@ -0,0 +1,3 @@ +/* + * event.c + */ diff --git a/source/lib/internal/kernel_module/common/hook.c b/source/lib/internal/kernel_module/common/hook.c new 
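[Editor's sketch, not part of this patch] The char device above is driven from user space through ioctl(): sysak_ioctl() uses _IOC_TYPE(cmd) to pick the sub-module (memleak, memhunter) and forwards _IOC_NR(cmd) plus the argument to the matching handler. A minimal caller, assuming the MEMLEAK_OFF definition from include/memleak.h (added later in this patch) is visible to user space:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "memleak.h"		/* MEMLEAK_OFF, MEMLEAK_IOCTL_CMD */

int main(void)
{
	int fd = open("/dev/sysak", O_RDWR);

	if (fd < 0) {
		perror("open /dev/sysak");
		return 1;
	}
	/* _IOC_TYPE(MEMLEAK_OFF) == MEMLEAK_IOCTL_CMD, so sysak_ioctl()
	 * routes this to memleak_handler_cmd(MEMLEAK_CMD_DISABLE, 0) */
	if (ioctl(fd, MEMLEAK_OFF, 0) < 0)
		perror("MEMLEAK_OFF");
	close(fd);
	return 0;
}

If the memleak sub-module is not built in, the weak memleak_handler_cmd() stub above makes this ioctl return -ENOSYS.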
file mode 100644 index 00000000..c6eeee98 --- /dev/null +++ b/source/lib/internal/kernel_module/common/hook.c @@ -0,0 +1,177 @@ +/* + * hook.c + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hook.h" + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33) +int hook_tracepoint(const char *name, void *probe, void *data) +{ + return tracepoint_probe_register(name, probe); +} + +int unhook_tracepoint(const char *name, void *probe, void *data) +{ + int ret = 0; + + do { + ret = tracepoint_probe_unregister(name, probe); + } while (ret == -ENOMEM); + + return ret; +} +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +int hook_tracepoint(const char *name, void *probe, void *data) +{ + return tracepoint_probe_register(name, probe, data); +} + +int unhook_tracepoint(const char *name, void *probe, void *data) +{ + int ret = 0; + + do { + ret = tracepoint_probe_unregister(name, probe, data); + } while (ret == -ENOMEM); + + return ret; +} +#else +static struct tracepoint *tp_ret; +static void probe_tracepoint(struct tracepoint *tp, void *priv) +{ + char *n = priv; + + if (strcmp(tp->name, n) == 0) + tp_ret = tp; +} + +static struct tracepoint *find_tracepoint(const char *name) +{ + tp_ret = NULL; + for_each_kernel_tracepoint(probe_tracepoint, (void *)name); + + return tp_ret; +} + +int hook_tracepoint(const char *name, void *probe, void *data) +{ + struct tracepoint *tp; + + tp = find_tracepoint(name); + if (!tp) + return 0; + + return tracepoint_probe_register(tp, probe, data); +} + +int unhook_tracepoint(const char *name, void *probe, void *data) +{ + struct tracepoint *tp; + int ret = 0; + + tp = find_tracepoint(name); + if (!tp) + return 0; + + do { + ret = tracepoint_probe_unregister(tp, probe, data); + } while (ret == -ENOMEM); + + return ret; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) +int hook_kprobe(struct kprobe *kp, const char *name, + kprobe_pre_handler_t pre, kprobe_post_handler_t post) +{ + kprobe_opcode_t *addr; + + if (!name || strlen(name) >= 255) + return -EINVAL; + addr = (kprobe_opcode_t *)kallsyms_lookup_name(name); + if (!addr) + return -EINVAL; + + memset(kp, 0, sizeof(struct kprobe)); + kp->symbol_name = name; + kp->pre_handler = pre; + kp->post_handler = post; + + register_kprobe(kp); + + return 0; +} + +void unhook_kprobe(struct kprobe *kp) +{ + if (kp->symbol_name != NULL) + unregister_kprobe(kp); + + memset(kp, 0, sizeof(struct kprobe)); +} + +int hook_kretprobe(struct kretprobe *ptr_kretprobe, char *kretprobe_func, + kretprobe_handler_t kretprobe_entry_handler, + kretprobe_handler_t kretprobe_ret_handler, + size_t data_size) +{ + memset(ptr_kretprobe, 0, sizeof(struct kretprobe)); + ptr_kretprobe->kp.symbol_name = kretprobe_func; + ptr_kretprobe->handler = kretprobe_ret_handler; + ptr_kretprobe->entry_handler = kretprobe_entry_handler; + ptr_kretprobe->data_size = data_size; + ptr_kretprobe->maxactive = 200; + + return register_kretprobe(ptr_kretprobe); +} + +void unhook_kretprobe(struct kretprobe *ptr_kretprobe) +{ + if (!ptr_kretprobe->kp.addr) + return; + + unregister_kretprobe(ptr_kretprobe); + memset(ptr_kretprobe, 0, sizeof(struct kretprobe)); +} +#else + +int hook_kprobe(struct kprobe *kp, const char *name, + kprobe_pre_handler_t pre, kprobe_post_handler_t post) +{ + return -ENXIO; +} + +void unhook_kprobe(struct kprobe *kp) +{ +} + +int hook_kretprobe(struct kretprobe *ptr_kretprobe, char *kretprobe_func, + 
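[Editor's sketch, not part of this patch] A minimal consumer of the hook.c wrappers above; "kfree" is only an illustrative symbol, and on kernels >= 5.6.0 the kprobe/kretprobe wrappers are compiled as stubs that return -ENXIO, so callers must tolerate failure:

static struct kprobe demo_kp;

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* runs in atomic context right before the probed function */
	return 0;
}

static int demo_hook_init(void)
{
	/* looks up the symbol and registers the probe in one call */
	return hook_kprobe(&demo_kp, "kfree", demo_pre, NULL);
}

static void demo_hook_exit(void)
{
	unhook_kprobe(&demo_kp);
}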
kretprobe_handler_t kretprobe_entry_handler, + kretprobe_handler_t kretprobe_ret_handler, + size_t data_size) +{ + return -ENXIO; +} + +void unhook_kretprobe(struct kretprobe *ptr_kretprobe) +{ +} +#endif diff --git a/source/lib/internal/kernel_module/common/internal.h b/source/lib/internal/kernel_module/common/internal.h new file mode 100644 index 00000000..43835a21 --- /dev/null +++ b/source/lib/internal/kernel_module/common/internal.h @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2018 Alibaba Group + * All rights reserved. + * Written by Wetp Zhang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include + +#define BBOX_MEM_MAX (100 << 20) /* 100M */ + +#define BBOX_SIZE PAGE_SIZE +#define BBOX_NAME_LEN 16 + +struct record_info { + void *start; + unsigned int size; + unsigned int type; + struct timespec64 mtime; + char tsk_comm[TASK_COMM_LEN]; + char desc[BBOX_RECORD_DESC_LEN]; + int cpu; + int pid; + int state; +}; + +/* bbox_info is stored at the head of a bbox */ +struct bbox_info { + u64 magic; + char name[BBOX_NAME_LEN]; + spinlock_t lock; + int flags; + void *data_base; + void *data_end; + union { + struct bbox_ring { + void *write_ptr; + void *read_ptr; + } ringbuf; + struct bbox_record { + unsigned int cnt; + struct record_info arr[0]; + } records; + }; +}; diff --git a/source/lib/internal/kernel_module/common/ksymbol.c b/source/lib/internal/kernel_module/common/ksymbol.c new file mode 100644 index 00000000..53e8b3f2 --- /dev/null +++ b/source/lib/internal/kernel_module/common/ksymbol.c @@ -0,0 +1,40 @@ + +#include +#include +#include "ksymbol.h" + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0) +#include +static struct kprobe kp = { + .symbol_name = "kallsyms_lookup_name" +}; + +typedef unsigned long (*kallsyms_lookup_name_t)(const char *name); +static kallsyms_lookup_name_t g_syms_lookup_name; +static kallsyms_lookup_name_t get_symbol_kallsyms_lookup_name(void) +{ + unsigned long syms_lookup_name; + + register_kprobe(&kp); + syms_lookup_name = (unsigned long)kp.addr; + unregister_kprobe(&kp); + return (kallsyms_lookup_name_t)syms_lookup_name; +} +#endif + +unsigned long get_func_syms_by_name(const char *name) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 7, 0) + if (!g_syms_lookup_name) { + g_syms_lookup_name = get_symbol_kallsyms_lookup_name(); + if (!g_syms_lookup_name) { + pr_err("con't get symbol of kallsyms_lookup_name\n"); + return 0; + } + } + return g_syms_lookup_name(name); +#else + return kallsyms_lookup_name(name); +#endif +} + diff --git a/source/lib/internal/kernel_module/common/proc.c b/source/lib/internal/kernel_module/common/proc.c new file mode 100644 index 00000000..814e34fe --- /dev/null +++ b/source/lib/internal/kernel_module/common/proc.c @@ -0,0 +1,60 @@ +#include "proc.h" + +static struct proc_dir_entry *sysak_root_dir; + +static bool check_sysak_root(void) +{ + if (!sysak_root_dir) { + sysak_root_dir = proc_mkdir("sysak", NULL); + if (!sysak_root_dir) + return false; + } + + return true; +} + +struct proc_dir_entry *sysak_proc_mkdir(const char *name) +{ + if (check_sysak_root()) + return proc_mkdir(name, sysak_root_dir); + + return NULL; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) +struct proc_dir_entry *sysak_proc_create(const char *name, + const struct proc_ops *proc_fops) +#else +struct proc_dir_entry 
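[Editor's sketch, not part of this patch] The ksymbol.c helper above exists because kallsyms_lookup_name() is no longer exported on kernels >= 5.7.0; it recovers the address once via a temporary kprobe and caches the result. A short usage sketch with an assumed, purely illustrative symbol name:

static int demo_resolve(void)
{
	/* "kfree" stands in for whatever symbol a sub-module needs */
	unsigned long addr = get_func_syms_by_name("kfree");

	if (!addr)
		return -ENOENT;
	pr_info("resolved kfree to %#lx\n", addr);
	return 0;
}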
*sysak_proc_create(const char *name, + const struct file_operations *proc_fops) +#endif +{ + if (check_sysak_root()) + return proc_create(name, 0644, sysak_root_dir, proc_fops); + + return NULL; +} + +void sysak_remove_proc_entry(const char *name) +{ + if (sysak_root_dir) + remove_proc_entry(name, sysak_root_dir); +} + +int sysak_remove_proc_subtree(const char *name) +{ + if (sysak_root_dir) + return remove_proc_subtree(name, sysak_root_dir); + return 0; +} + +int sysak_proc_init(void) +{ + return 0; +} + +void sysak_proc_exit(void) +{ + if (sysak_root_dir) + proc_remove(sysak_root_dir); +} diff --git a/source/lib/internal/kernel_module/common/stack.c b/source/lib/internal/kernel_module/common/stack.c new file mode 100644 index 00000000..c3b1c09a --- /dev/null +++ b/source/lib/internal/kernel_module/common/stack.c @@ -0,0 +1,3 @@ +/* + * stack.c +*/ diff --git a/source/lib/internal/kernel_module/entry.c b/source/lib/internal/kernel_module/entry.c new file mode 100644 index 00000000..8491fc9c --- /dev/null +++ b/source/lib/internal/kernel_module/entry.c @@ -0,0 +1,68 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "proc.h" +#include "sysak_mods.h" + +void sysak_module_get(int *mod_ref) +{ + if (*mod_ref) + return; + try_module_get(THIS_MODULE); + *mod_ref = 1; +} + +void sysak_module_put(int *mod_ref) +{ + if (*mod_ref) { + *mod_ref = 0; + module_put(THIS_MODULE); + } +} + +static int sysak_mod_init(void) +{ + int i, ret; + + ret = sysak_bbox_init(); + if (ret) + return ret; + sysak_proc_init(); + sysak_dev_init(); + + for (i = 0; i < sysk_module_num; i++) { + if (sysak_modules[i].init()) + printk("WARN: module %s init failed", sysak_modules[i].name); + } + + printk("sysak module loaded.\n"); + return 0; +} + +static void sysak_mod_exit(void) +{ + int i; + + sysak_dev_uninit(); + sysak_bbox_exit(); + sysak_proc_exit(); + + for (i = 0; i < sysk_module_num; i++) + sysak_modules[i].exit(); + + printk("sysak module unloaded.\n"); +} + +module_init(sysak_mod_init) +module_exit(sysak_mod_exit) +MODULE_LICENSE("GPL v2"); diff --git a/source/lib/internal/kernel_module/include/blackbox.h b/source/lib/internal/kernel_module/include/blackbox.h new file mode 100644 index 00000000..52d45e45 --- /dev/null +++ b/source/lib/internal/kernel_module/include/blackbox.h @@ -0,0 +1,62 @@ +/* + * Copyright (C) 2018 Alibaba Group + * All rights reserved. + * Written by Wetp Zhang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef BLACKBOX_H +#define BLACKBOX_H + +#include +#include + +#define BBOX_FLAG_MASK 0xffff0000 +#define BBOX_FLAG_SHIFT 16 + + +#define BBOX_TYPE_MASK 0x0000ffff +#define BBOX_TYPE_SHIFT 0 + +#define BBOX_TYPE_RING (0 << BBOX_TYPE_SHIFT) +#define BBOX_TYPE_RECORD (1 << BBOX_TYPE_SHIFT) + + +#define BBOX_DATA_TYPE_STRING 0x1 +#define BBOX_DATA_TYPE_TRACE 0x2 +#define BBOX_DATA_TYPE_DATA 0x3 + +#define BBOX_RECORD_DESC_LEN 16 + +#define BBOX_BUFF_MAGIC 0xe0e1e2e3e4e5e6e7ul + +struct bbox_data_info { + void *data; + unsigned int size; + unsigned int slot; + struct timespec64 mtime; + struct task_struct *task; +}; + +extern ssize_t bbox_write(unsigned int bbox_id, + struct bbox_data_info *data_info); +extern ssize_t bbox_read(unsigned int bbox_id, + struct bbox_data_info *data_info); +extern int bbox_alloc_record_slot(unsigned int bbox_id, unsigned int size, + unsigned int type); +extern void bbox_record_clear(unsigned int bbox_id, int slot_id); +extern int bbox_alloc(const char *name, int flags); +extern void bbox_free(unsigned int bbox_id); +extern int bbox_alloc_dynamic(const char *name, int flags, + unsigned int pages); +extern int bbox_ring_show(struct seq_file *seq, unsigned int bbox_id); +extern int bbox_record_show(struct seq_file *seq, + unsigned int bbox_id, int slot_id); +extern void bbox_set_record_desc(unsigned int bbox_id, + unsigned int slot, const char *desc); +extern void bbox_update_name(unsigned int bbox_id, const char *name); +#endif diff --git a/source/lib/internal/kernel_module/include/common.h b/source/lib/internal/kernel_module/include/common.h new file mode 100644 index 00000000..836d6eda --- /dev/null +++ b/source/lib/internal/kernel_module/include/common.h @@ -0,0 +1,16 @@ +#ifndef __COMMON__ +#define __COMMON__ + +#define NAME_LEN (128) + +#undef TASK_COMM_LEN +#define TASK_COMM_LEN (16) + +#define CHR_NAME "sysak" + +enum SYSAK_IOCTL_CMD { + MEMLEAK_IOCTL_CMD = 1, + MEMHUNTER_IOCTL_CMD = 2, +}; + +#endif diff --git a/source/lib/internal/kernel_module/include/hook.h b/source/lib/internal/kernel_module/include/hook.h new file mode 100644 index 00000000..4cad15bf --- /dev/null +++ b/source/lib/internal/kernel_module/include/hook.h @@ -0,0 +1,17 @@ +#ifndef _KERNEL_COMMON_HOOK_H +#define _KERNEL_COMMON_HOOK_H + +#include +extern int hook_tracepoint(const char *name, void *probe, void *data); +extern int unhook_tracepoint(const char *name, void *probe, void *data); + +extern int hook_kprobe(struct kprobe *kp, const char *name, + kprobe_pre_handler_t pre, kprobe_post_handler_t post); +extern void unhook_kprobe(struct kprobe *kp); + +extern int hook_kretprobe(struct kretprobe *ptr_kretprobe, char *kretprobe_func, + kretprobe_handler_t kretprobe_entry_handler, + kretprobe_handler_t kretprobe_ret_handler, + size_t data_size); +extern void unhook_kretprobe(struct kretprobe *ptr_kretprobe); +#endif diff --git a/source/lib/internal/kernel_module/include/ksymbol.h b/source/lib/internal/kernel_module/include/ksymbol.h new file mode 100644 index 00000000..7fe9a90e --- /dev/null +++ b/source/lib/internal/kernel_module/include/ksymbol.h @@ -0,0 +1,5 @@ +#ifndef _KERNEL_COMMON_SYMS_H +#define _KERNEL_COMMON_SYMS_H + +extern unsigned long get_func_syms_by_name(const char *name); +#endif diff --git a/source/lib/internal/kernel_module/include/memleak.h b/source/lib/internal/kernel_module/include/memleak.h new file mode 100644 index 00000000..d0b8057b --- /dev/null +++ b/source/lib/internal/kernel_module/include/memleak.h @@ -0,0 +1,21 @@ +#ifndef __MEMLEAK_IOCTL__ +#define 
__MEMLEAK_IOCTL__ +#include + +#include "common.h" + +#define MEMLEAK_CMD_ENALBE (0x0A) +#define MEMLEAK_CMD_SET (MEMLEAK_CMD_ENALBE + 1) +#define MEMLEAK_CMD_GET (MEMLEAK_CMD_SET + 1) +#define MEMLEAK_CMD_RESULT (MEMLEAK_CMD_GET + 1) +#define MEMLEAK_CMD_DISABLE (MEMLEAK_CMD_RESULT + 1) + +#define MEMLEAK_STATE_ON (1) +#define MEMLEAK_STATE_OFF (2) +#define MEMLEAK_STATE_INIT (3) + +#define MEMLEAK_ON _IOWR(MEMLEAK_IOCTL_CMD, MEMLEAK_CMD_ENALBE, struct memleak_settings) +#define MEMLEAK_OFF _IO(MEMLEAK_IOCTL_CMD, MEMLEAK_CMD_DISABLE) +#define MEMLEAK_RESULT _IOWR(MEMLEAK_IOCTL_CMD, MEMLEAK_CMD_RESULT, struct user_result) + +#endif diff --git a/source/lib/internal/kernel_module/include/proc.h b/source/lib/internal/kernel_module/include/proc.h new file mode 100644 index 00000000..82001686 --- /dev/null +++ b/source/lib/internal/kernel_module/include/proc.h @@ -0,0 +1,129 @@ +#ifndef _KERNEL_COMMON_PROC_H +#define _KERNEL_COMMON_PROC_H +#include +#include +#include + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +#include +#include + +int __weak kstrtobool_from_user(const char __user *s, size_t count, bool *res) +{ + /* Longest string needed to differentiate, newline, terminator */ + char buf[4]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return strtobool(buf, res); +} +#endif + + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) +#define DEFINE_PROC_ATTRIBUTE(name, __write) \ + static int name##_open(struct inode *inode, struct file *file) \ + { \ + return single_open(file, name##_show, PDE_DATA(inode)); \ + } \ + \ + static const struct file_operations name##_fops = { \ + .owner = THIS_MODULE, \ + .open = name##_open, \ + .read = seq_read, \ + .write = __write, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } + +#define DEFINE_PROC_ATTRIBUTE_RW(name) \ + static ssize_t name##_write(struct file *file, \ + const char __user *buf, \ + size_t count, loff_t *ppos) \ + { \ + return name##_store(PDE_DATA(file_inode(file)), buf, \ + count); \ + } \ + DEFINE_PROC_ATTRIBUTE(name, name##_write) + +#define DEFINE_PROC_ATTRIBUTE_RO(name) \ + DEFINE_PROC_ATTRIBUTE(name, NULL) +#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) +#define DEFINE_PROC_ATTRIBUTE(name, __write) \ + static int name##_open(struct inode *inode, struct file *file) \ + { \ + return single_open(file, name##_show, PDE_DATA(inode)); \ + } \ + \ + static const struct file_operations name##_fops = { \ + .owner = THIS_MODULE, \ + .open = name##_open, \ + .read = seq_read, \ + .write = __write, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } + +#define DEFINE_PROC_ATTRIBUTE_RW(name) \ + static ssize_t name##_write(struct file *file, \ + const char __user *buf, \ + size_t count, loff_t *ppos) \ + { \ + return name##_store(PDE_DATA(file_inode(file)), buf, \ + count); \ + } \ + DEFINE_PROC_ATTRIBUTE(name, name##_write) + +#define DEFINE_PROC_ATTRIBUTE_RO(name) \ + DEFINE_PROC_ATTRIBUTE(name, NULL) +#else +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) +#define PDE_DATA pde_data +#endif + +#define DEFINE_PROC_ATTRIBUTE(name, __write) \ + static int name##_open(struct inode *inode, struct file *file) \ + { \ + return single_open(file, name##_show, PDE_DATA(inode)); \ + } \ + \ + static const struct proc_ops name##_fops = { \ + .proc_open = name##_open, \ + .proc_read = seq_read, \ + .proc_write = __write, \ + .proc_lseek = seq_lseek, \ + .proc_release = single_release, \ + } + +#define DEFINE_PROC_ATTRIBUTE_RW(name) \ + static 
ssize_t name##_write(struct file *file, \ + const char __user *buf, \ + size_t count, loff_t *ppos) \ + { \ + return name##_store(PDE_DATA(file_inode(file)), buf, \ + count); \ + } \ + DEFINE_PROC_ATTRIBUTE(name, name##_write) + +#define DEFINE_PROC_ATTRIBUTE_RO(name) \ + DEFINE_PROC_ATTRIBUTE(name, NULL) +#endif + + +extern struct proc_dir_entry *sysak_proc_mkdir(const char *name); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) +extern struct proc_dir_entry *sysak_proc_create(const char *name, + const struct proc_ops *proc_fops); +#else +extern struct proc_dir_entry *sysak_proc_create(const char *name, + const struct file_operations *proc_fops); +#endif +extern void sysak_remove_proc_entry(const char *name); +extern int sysak_remove_proc_subtree(const char *name); + +extern int sysak_proc_init(void); +extern void sysak_proc_exit(void); +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.1.1.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.1.1.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.1.1.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.1.1.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.1.1.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.1.1.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. 
*/ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.1.2.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.1.2.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.1.2.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.1.2.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.1.2.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.1.2.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.12.1.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.12.1.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.12.1.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.12.1.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.12.1.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.12.1.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. 
*/ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.18.1.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.18.1.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.18.1.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.18.1.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.18.1.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.18.1.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. 
*/ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.1.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.1.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.1.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.1.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.1.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.1.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.2.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.2.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.2.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.2.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.2.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.2.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.3.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.3.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.3.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.3.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.3.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.4.3.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.7.1.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.7.1.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.7.1.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.7.1.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.7.1.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.7.1.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.9.1.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.9.1.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.9.1.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.9.1.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.9.1.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.9.1.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1062.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.10.1.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.10.1.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.10.1.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.10.1.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.10.1.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.10.1.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. 
*/ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.13.1.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.13.1.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.13.1.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.13.1.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.13.1.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.13.1.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. 
*/ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.18.2.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.18.2.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.18.2.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.18.2.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.18.2.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.18.2.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. 
*/ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.19.1.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.19.1.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.19.1.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.19.1.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.19.1.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.19.1.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. 
*/ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.8.2.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.8.2.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.8.2.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.8.2.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.8.2.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.8.2.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1127.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.11.1.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.11.1.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.11.1.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.11.1.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.11.1.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.11.1.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. 
*/ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.15.2.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.15.2.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.15.2.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.15.2.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.15.2.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.15.2.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. 
*/ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.2.1.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.2.1.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.2.1.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.2.1.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.2.1.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.2.1.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.2.2.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.2.2.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.2.2.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.2.2.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.2.2.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.2.2.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.6.1.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.6.1.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.6.1.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.6.1.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.6.1.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.6.1.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.el7.x86_64/nvme.h new file mode 100644 index 00000000..3190b86c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.el7.x86_64/nvme.h @@ -0,0 +1,60 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-1160.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-862.14.4.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-862.14.4.el7.x86_64/nvme.h new file mode 100644 index 00000000..c0ff7648 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-862.14.4.el7.x86_64/nvme.h @@ -0,0 +1,59 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 cqe_seen; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-862.14.4.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-862.14.4.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-862.14.4.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-957.21.3.el7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-957.21.3.el7.x86_64/nvme.h new file mode 100644 index 00000000..c0ff7648 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-957.21.3.el7.x86_64/nvme.h @@ -0,0 +1,59 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + char irqname[24]; /* nvme4294967295-65535\0 */ + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 cqe_seen; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-957.21.3.el7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-957.21.3.el7.x86_64/virtio_blk.h new file mode 100644 index 00000000..faa22a32 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/3.10.0-957.21.3.el7.x86_64/virtio_blk.h @@ -0,0 +1,175 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct request *req; + struct virtio_blk_outhdr out_hdr; + struct virtio_scsi_inhdr in_hdr; + u8 status; + struct scatterlist sg[]; +}; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + RH_KABI_DEPRECATE(unsigned int, ipi_redirect) + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
((struct virtblk_req *)data)->req : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +static inline u64 get_check_hang_time_ns(void) +{ + return sched_clock(); +} + +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + blk_mq_tagset_busy_iter(q->tag_set, fn, data); + return 0; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.24-9.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.24-9.al7.x86_64/nvme.h new file mode 100644 index 00000000..b0aaeba4 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.24-9.al7.x86_64/nvme.h @@ -0,0 +1,59 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.24-9.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.24-9.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..fb79209c --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.24-9.al7.x86_64/virtio_blk.h @@ -0,0 +1,219 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. 
*/ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 0; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.81-17.1.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.81-17.1.al7.x86_64/nvme.h new file mode 100644 index 00000000..b0aaeba4 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.81-17.1.al7.x86_64/nvme.h @@ -0,0 +1,59 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.81-17.1.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.81-17.1.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..34e5cb77 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.81-17.1.al7.x86_64/virtio_blk.h @@ -0,0 +1,219 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. 
*/ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? 
blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.81-17.2.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.81-17.2.al7.x86_64/nvme.h new file mode 100644 index 00000000..b0aaeba4 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.81-17.2.al7.x86_64/nvme.h @@ -0,0 +1,59 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.81-17.2.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.81-17.2.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..34e5cb77 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.81-17.2.al7.x86_64/virtio_blk.h @@ -0,0 +1,219 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct 
vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = 
to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-013.ali4000.an7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-013.ali4000.an7.x86_64/nvme.h new file mode 100755 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-013.ali4000.an7.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-013.ali4000.an7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-013.ali4000.an7.x86_64/virtio_blk.h new file mode 100755 index 00000000..0842a735 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-013.ali4000.an7.x86_64/virtio_blk.h @@ -0,0 +1,268 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct 
virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, + enum hctx_type type, + unsigned int cpu) +{ + return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; +} + +/* + * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue + * @q: request queue + * @flags: request command flags + * @cpu: CPU + */ +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + unsigned int flags, + unsigned int cpu) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + if ((flags & REQ_HIPRI) && + q->tag_set->nr_maps > HCTX_TYPE_POLL && + q->tag_set->map[HCTX_TYPE_POLL].nr_queues && + test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + type = HCTX_TYPE_POLL; + + else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && + q->tag_set->nr_maps > HCTX_TYPE_READ && + q->tag_set->map[HCTX_TYPE_READ].nr_queues) + type = HCTX_TYPE_READ; + + return blk_mq_map_queue_type(q, type, cpu); +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-18.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-18.al7.x86_64/nvme.h new file mode 100644 index 00000000..b0aaeba4 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-18.al7.x86_64/nvme.h @@ -0,0 +1,59 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-18.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-18.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..04f75180 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-18.al7.x86_64/virtio_blk.h @@ -0,0 +1,226 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. 
*/ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = 
to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +void get_vq_info(struct vq_info *vq_i, struct request *rq); +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-19.1.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-19.1.al7.x86_64/nvme.h new file mode 100644 index 00000000..b0aaeba4 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-19.1.al7.x86_64/nvme.h @@ -0,0 +1,59 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-19.1.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-19.1.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..1a189196 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-19.1.al7.x86_64/virtio_blk.h @@ -0,0 +1,225 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + 
struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-19.2.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-19.2.al7.x86_64/nvme.h new file mode 100644 index 00000000..b0aaeba4 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-19.2.al7.x86_64/nvme.h @@ -0,0 +1,59 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-19.2.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-19.2.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..1a189196 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-19.2.al7.x86_64/virtio_blk.h @@ -0,0 +1,225 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. 
*/ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned int index_hw; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + int cpu) +{ + return q->queue_hw_ctx[q->mq_map[cpu]]; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->mq_ctx->cpu); +} + +static inline struct request 
*desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-21.2.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-21.2.al7.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-21.2.al7.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-21.2.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-21.2.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..853fa75e --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-21.2.al7.x86_64/virtio_blk.h @@ -0,0 +1,268 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + 
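+/*
+ * Note: as with the other per-kernel-version headers under include/, the
+ * structures below (vring_virtqueue, virtio_blk, blk_mq_ctx, blk_flush_queue,
+ * ...) are local copies of kernel-private definitions so iosdiag can inspect
+ * in-flight requests; their layout is assumed to match the 4.19.91-21.2.al7
+ * build this directory is named after.
+ */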
+#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, + enum hctx_type type, + unsigned int cpu) +{ + return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; +} + +/* + * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue + * @q: request queue + * @flags: request command flags + * @cpu: CPU + */ +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + unsigned int flags, + unsigned int cpu) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + if ((flags & REQ_HIPRI) && + q->tag_set->nr_maps > HCTX_TYPE_POLL && + q->tag_set->map[HCTX_TYPE_POLL].nr_queues && + test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + type = HCTX_TYPE_POLL; + + else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && + q->tag_set->nr_maps > HCTX_TYPE_READ && + q->tag_set->map[HCTX_TYPE_READ].nr_queues) + type = HCTX_TYPE_READ; + + return blk_mq_map_queue_type(q, type, cpu); +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-21.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-21.al7.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-21.al7.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-21.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-21.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..853fa75e --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-21.al7.x86_64/virtio_blk.h @@ -0,0 +1,268 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. 
*/ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, + enum hctx_type type, + unsigned int cpu) +{ + return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; +} + +/* + * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue + * @q: request queue + * @flags: request command flags + * @cpu: CPU + */ +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + unsigned int flags, + unsigned int cpu) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + if ((flags & REQ_HIPRI) && + q->tag_set->nr_maps > HCTX_TYPE_POLL && + q->tag_set->map[HCTX_TYPE_POLL].nr_queues && + test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + type = HCTX_TYPE_POLL; + + else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && + q->tag_set->nr_maps > HCTX_TYPE_READ && + q->tag_set->map[HCTX_TYPE_READ].nr_queues) + type = HCTX_TYPE_READ; + + return blk_mq_map_queue_type(q, type, cpu); +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-22.1.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-22.1.al7.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-22.1.al7.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-22.1.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-22.1.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..853fa75e --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-22.1.al7.x86_64/virtio_blk.h @@ -0,0 +1,268 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. 
*/ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, + enum hctx_type type, + unsigned int cpu) +{ + return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; +} + +/* + * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue + * @q: request queue + * @flags: request command flags + * @cpu: CPU + */ +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + unsigned int flags, + unsigned int cpu) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + if ((flags & REQ_HIPRI) && + q->tag_set->nr_maps > HCTX_TYPE_POLL && + q->tag_set->map[HCTX_TYPE_POLL].nr_queues && + test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + type = HCTX_TYPE_POLL; + + else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && + q->tag_set->nr_maps > HCTX_TYPE_READ && + q->tag_set->map[HCTX_TYPE_READ].nr_queues) + type = HCTX_TYPE_READ; + + return blk_mq_map_queue_type(q, type, cpu); +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-22.2.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-22.2.al7.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-22.2.al7.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-22.2.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-22.2.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..853fa75e --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-22.2.al7.x86_64/virtio_blk.h @@ -0,0 +1,268 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. 
*/ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, + enum hctx_type type, + unsigned int cpu) +{ + return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; +} + +/* + * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue + * @q: request queue + * @flags: request command flags + * @cpu: CPU + */ +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + unsigned int flags, + unsigned int cpu) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + if ((flags & REQ_HIPRI) && + q->tag_set->nr_maps > HCTX_TYPE_POLL && + q->tag_set->map[HCTX_TYPE_POLL].nr_queues && + test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + type = HCTX_TYPE_POLL; + + else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && + q->tag_set->nr_maps > HCTX_TYPE_READ && + q->tag_set->map[HCTX_TYPE_READ].nr_queues) + type = HCTX_TYPE_READ; + + return blk_mq_map_queue_type(q, type, cpu); +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-23.4.an8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-23.4.an8.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-23.4.an8.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-23.4.an8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-23.4.an8.x86_64/virtio_blk.h new file mode 100644 index 00000000..853fa75e --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-23.4.an8.x86_64/virtio_blk.h @@ -0,0 +1,268 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. 
*/ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, + enum hctx_type type, + unsigned int cpu) +{ + return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; +} + +/* + * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue + * @q: request queue + * @flags: request command flags + * @cpu: CPU + */ +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + unsigned int flags, + unsigned int cpu) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + if ((flags & REQ_HIPRI) && + q->tag_set->nr_maps > HCTX_TYPE_POLL && + q->tag_set->map[HCTX_TYPE_POLL].nr_queues && + test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + type = HCTX_TYPE_POLL; + + else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && + q->tag_set->nr_maps > HCTX_TYPE_READ && + q->tag_set->map[HCTX_TYPE_READ].nr_queues) + type = HCTX_TYPE_READ; + + return blk_mq_map_queue_type(q, type, cpu); +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-23.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-23.al7.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-23.al7.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-23.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-23.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..853fa75e --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-23.al7.x86_64/virtio_blk.h @@ -0,0 +1,268 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. 
*/ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, + enum hctx_type type, + unsigned int cpu) +{ + return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; +} + +/* + * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue + * @q: request queue + * @flags: request command flags + * @cpu: CPU + */ +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + unsigned int flags, + unsigned int cpu) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + if ((flags & REQ_HIPRI) && + q->tag_set->nr_maps > HCTX_TYPE_POLL && + q->tag_set->map[HCTX_TYPE_POLL].nr_queues && + test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + type = HCTX_TYPE_POLL; + + else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && + q->tag_set->nr_maps > HCTX_TYPE_READ && + q->tag_set->map[HCTX_TYPE_READ].nr_queues) + type = HCTX_TYPE_READ; + + return blk_mq_map_queue_type(q, type, cpu); +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.1.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.1.al7.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.1.al7.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.1.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.1.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..853fa75e --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.1.al7.x86_64/virtio_blk.h @@ -0,0 +1,268 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. 
*/ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, + enum hctx_type type, + unsigned int cpu) +{ + return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; +} + +/* + * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue + * @q: request queue + * @flags: request command flags + * @cpu: CPU + */ +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + unsigned int flags, + unsigned int cpu) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + if ((flags & REQ_HIPRI) && + q->tag_set->nr_maps > HCTX_TYPE_POLL && + q->tag_set->map[HCTX_TYPE_POLL].nr_queues && + test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + type = HCTX_TYPE_POLL; + + else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && + q->tag_set->nr_maps > HCTX_TYPE_READ && + q->tag_set->map[HCTX_TYPE_READ].nr_queues) + type = HCTX_TYPE_READ; + + return blk_mq_map_queue_type(q, type, cpu); +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.8.an8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.8.an8.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.8.an8.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.8.an8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.8.an8.x86_64/virtio_blk.h new file mode 100644 index 00000000..853fa75e --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.8.an8.x86_64/virtio_blk.h @@ -0,0 +1,268 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. 
*/ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, + enum hctx_type type, + unsigned int cpu) +{ + return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; +} + +/* + * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue + * @q: request queue + * @flags: request command flags + * @cpu: CPU + */ +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + unsigned int flags, + unsigned int cpu) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + if ((flags & REQ_HIPRI) && + q->tag_set->nr_maps > HCTX_TYPE_POLL && + q->tag_set->map[HCTX_TYPE_POLL].nr_queues && + test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + type = HCTX_TYPE_POLL; + + else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && + q->tag_set->nr_maps > HCTX_TYPE_READ && + q->tag_set->map[HCTX_TYPE_READ].nr_queues) + type = HCTX_TYPE_READ; + + return blk_mq_map_queue_type(q, type, cpu); +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.al7.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.al7.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..42192d6d --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-24.al7.x86_64/virtio_blk.h @@ -0,0 +1,265 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. 
*/ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, + enum hctx_type type, + unsigned int cpu) +{ + return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; +} + +/* + * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue + * @q: request queue + * @flags: request command flags + * @cpu: CPU + */ +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + unsigned int flags, + unsigned int cpu) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + if ((flags & REQ_HIPRI) && + q->tag_set->nr_maps > HCTX_TYPE_POLL && + q->tag_set->map[HCTX_TYPE_POLL].nr_queues && + test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + type = HCTX_TYPE_POLL; + + else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && + q->tag_set->nr_maps > HCTX_TYPE_READ && + q->tag_set->map[HCTX_TYPE_READ].nr_queues) + type = HCTX_TYPE_READ; + + return blk_mq_map_queue_type(q, type, cpu); +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-25.an8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-25.an8.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-25.an8.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-25.an8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-25.an8.x86_64/virtio_blk.h new file mode 100644 index 00000000..853fa75e --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-25.an8.x86_64/virtio_blk.h @@ -0,0 +1,268 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. 
*/ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, + enum hctx_type type, + unsigned int cpu) +{ + return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; +} + +/* + * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue + * @q: request queue + * @flags: request command flags + * @cpu: CPU + */ +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + unsigned int flags, + unsigned int cpu) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + if ((flags & REQ_HIPRI) && + q->tag_set->nr_maps > HCTX_TYPE_POLL && + q->tag_set->map[HCTX_TYPE_POLL].nr_queues && + test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + type = HCTX_TYPE_POLL; + + else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && + q->tag_set->nr_maps > HCTX_TYPE_READ && + q->tag_set->map[HCTX_TYPE_READ].nr_queues) + type = HCTX_TYPE_READ; + + return blk_mq_map_queue_type(q, type, cpu); +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.1.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.1.al7.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.1.al7.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.1.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.1.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..42192d6d --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.1.al7.x86_64/virtio_blk.h @@ -0,0 +1,265 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. 
*/ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, + enum hctx_type type, + unsigned int cpu) +{ + return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; +} + +/* + * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue + * @q: request queue + * @flags: request command flags + * @cpu: CPU + */ +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + unsigned int flags, + unsigned int cpu) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + if ((flags & REQ_HIPRI) && + q->tag_set->nr_maps > HCTX_TYPE_POLL && + q->tag_set->map[HCTX_TYPE_POLL].nr_queues && + test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + type = HCTX_TYPE_POLL; + + else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && + q->tag_set->nr_maps > HCTX_TYPE_READ && + q->tag_set->map[HCTX_TYPE_READ].nr_queues) + type = HCTX_TYPE_READ; + + return blk_mq_map_queue_type(q, type, cpu); +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.6.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.6.al7.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.6.al7.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.6.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.6.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..42192d6d --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.6.al7.x86_64/virtio_blk.h @@ -0,0 +1,265 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. 
*/ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, + enum hctx_type type, + unsigned int cpu) +{ + return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; +} + +/* + * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue + * @q: request queue + * @flags: request command flags + * @cpu: CPU + */ +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + unsigned int flags, + unsigned int cpu) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + if ((flags & REQ_HIPRI) && + q->tag_set->nr_maps > HCTX_TYPE_POLL && + q->tag_set->map[HCTX_TYPE_POLL].nr_queues && + test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + type = HCTX_TYPE_POLL; + + else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && + q->tag_set->nr_maps > HCTX_TYPE_READ && + q->tag_set->map[HCTX_TYPE_READ].nr_queues) + type = HCTX_TYPE_READ; + + return blk_mq_map_queue_type(q, type, cpu); +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.al7.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.al7.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..42192d6d --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.al7.x86_64/virtio_blk.h @@ -0,0 +1,265 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. 
*/ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, + enum hctx_type type, + unsigned int cpu) +{ + return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; +} + +/* + * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue + * @q: request queue + * @flags: request command flags + * @cpu: CPU + */ +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + unsigned int flags, + unsigned int cpu) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + if ((flags & REQ_HIPRI) && + q->tag_set->nr_maps > HCTX_TYPE_POLL && + q->tag_set->map[HCTX_TYPE_POLL].nr_queues && + test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + type = HCTX_TYPE_POLL; + + else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && + q->tag_set->nr_maps > HCTX_TYPE_READ && + q->tag_set->map[HCTX_TYPE_READ].nr_queues) + type = HCTX_TYPE_READ; + + return blk_mq_map_queue_type(q, type, cpu); +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.an8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.an8.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.an8.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.an8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.an8.x86_64/virtio_blk.h new file mode 100644 index 00000000..853fa75e --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-26.an8.x86_64/virtio_blk.h @@ -0,0 +1,268 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. 
*/ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, + enum hctx_type type, + unsigned int cpu) +{ + return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; +} + +/* + * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue + * @q: request queue + * @flags: request command flags + * @cpu: CPU + */ +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + unsigned int flags, + unsigned int cpu) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + if ((flags & REQ_HIPRI) && + q->tag_set->nr_maps > HCTX_TYPE_POLL && + q->tag_set->map[HCTX_TYPE_POLL].nr_queues && + test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + type = HCTX_TYPE_POLL; + + else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && + q->tag_set->nr_maps > HCTX_TYPE_READ && + q->tag_set->map[HCTX_TYPE_READ].nr_queues) + type = HCTX_TYPE_READ; + + return blk_mq_map_queue_type(q, type, cpu); +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +//#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +//#endif +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-27.al7.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-27.al7.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-27.al7.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-27.al7.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-27.al7.x86_64/virtio_blk.h new file mode 100644 index 00000000..42192d6d --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/4.19.91-27.al7.x86_64/virtio_blk.h @@ -0,0 +1,265 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state { + void *data; /* Data for callback. 
*/ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + size_t queue_size_in_bytes; + dma_addr_t queue_dma_addr; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Per-descriptor state. */ + struct vring_desc_state desc_state[]; +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. 
*/ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { +#ifdef CONFIG_VIRTIO_BLK_SCSI + struct scsi_request sreq; /* for SCSI passthrough, must be first */ + u8 sense[SCSI_SENSE_BUFFERSIZE]; + struct virtio_scsi_inhdr in_hdr; +#endif + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_queue_delayed:1; + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + /* + * flush_rq shares tag with this rq, both can't be active + * at the same time + */ + struct request *orig_rq; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, + enum hctx_type type, + unsigned int cpu) +{ + return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; +} + +/* + * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue + * @q: request queue + * @flags: request command flags + * @cpu: CPU + */ +static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, + unsigned int flags, + unsigned int cpu) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + if ((flags & REQ_HIPRI) && + q->tag_set->nr_maps > HCTX_TYPE_POLL && + q->tag_set->map[HCTX_TYPE_POLL].nr_queues && + test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) + type = HCTX_TYPE_POLL; + + else if (((flags & REQ_OP_MASK) == REQ_OP_READ) && + q->tag_set->nr_maps > HCTX_TYPE_READ && + q->tag_set->map[HCTX_TYPE_READ].nr_queues) + type = HCTX_TYPE_READ; + + return blk_mq_map_queue_type(q, type, cpu); +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return blk_mq_map_queue(rq->q, rq->cmd_flags, rq->mq_ctx->cpu); +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = to_vvq(vq)->desc_state[head].data; + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->__deadline > rq->timeout) + return jiffies_to_usecs(rq->__deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static void blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.1.al8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.1.al8.x86_64/nvme.h new file mode 100644 index 00000000..acda33af --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.1.al8.x86_64/nvme.h @@ -0,0 +1,63 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + /* only used for poll queues: */ + spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + unsigned long flags; +#define NVMEQ_ENABLED 0 +#define NVMEQ_SQ_CMB 1 +#define NVMEQ_DELETE_ERROR 2 +#define NVMEQ_POLLED 3 + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (tail >= nvmeq->q_depth) + break; + memcpy(&cmd, nvmeq->sq_cmds + (tail << nvmeq->sqes), + sizeof(cmd)); + if (cmd.common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.1.al8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.1.al8.x86_64/virtio_blk.h new file mode 100644 index 00000000..ed80e59f --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.1.al8.x86_64/virtio_blk.h @@ -0,0 +1,310 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct vring_desc_state_split { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. 
*/ + u16 num; /* Descriptor list length. */ + u16 next; /* The next desc state in a list. */ + u16 last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra_packed { + dma_addr_t addr; /* Buffer DMA addr. */ + u32 len; /* Buffer length. */ + u16 flags; /* Descriptor flags. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + union { + /* Available for split ring */ + struct { + /* Actual memory layout for this queue. */ + struct vring vring; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* + * Last written value to avail->idx in + * guest byte order. + */ + u16 avail_idx_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_split *desc_state; + + /* DMA address and size information */ + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Device ring wrap counter. */ + bool used_wrap_counter; + + /* Avail used flags. */ + u16 avail_used_flags; + + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_state_packed *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +#define VQ_NAME_LEN 16 +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. 
*/ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[HCTX_MAX_TYPES]; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + struct lock_class_key key; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + /* + * The caller ensure that if REQ_HIPRI, poll must be enabled. + */ + if (flags & REQ_HIPRI) + type = HCTX_TYPE_POLL; + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) + type = HCTX_TYPE_READ; + return type; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return rq->mq_ctx->hctxs[blk_mq_get_hctx_type(rq->cmd_flags)]; +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = (to_vvq(vq)->packed_ring ? to_vvq(vq)->packed.desc_state[head].data : + to_vvq(vq)->split.desc_state[head].data); + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->deadline > rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static bool blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); + return true; +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.2.al8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.2.al8.x86_64/nvme.h new file mode 100644 index 00000000..acda33af --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.2.al8.x86_64/nvme.h @@ -0,0 +1,63 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + /* only used for poll queues: */ + spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + unsigned long flags; +#define NVMEQ_ENABLED 0 +#define NVMEQ_SQ_CMB 1 +#define NVMEQ_DELETE_ERROR 2 +#define NVMEQ_POLLED 3 + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (tail >= nvmeq->q_depth) + break; + memcpy(&cmd, nvmeq->sq_cmds + (tail << nvmeq->sqes), + sizeof(cmd)); + if (cmd.common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.2.al8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.2.al8.x86_64/virtio_blk.h new file mode 100644 index 00000000..ed80e59f --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.2.al8.x86_64/virtio_blk.h @@ -0,0 +1,310 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct vring_desc_state_split { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. 
*/ + u16 num; /* Descriptor list length. */ + u16 next; /* The next desc state in a list. */ + u16 last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra_packed { + dma_addr_t addr; /* Buffer DMA addr. */ + u32 len; /* Buffer length. */ + u16 flags; /* Descriptor flags. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + union { + /* Available for split ring */ + struct { + /* Actual memory layout for this queue. */ + struct vring vring; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* + * Last written value to avail->idx in + * guest byte order. + */ + u16 avail_idx_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_split *desc_state; + + /* DMA address and size information */ + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Device ring wrap counter. */ + bool used_wrap_counter; + + /* Avail used flags. */ + u16 avail_used_flags; + + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_state_packed *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +#define VQ_NAME_LEN 16 +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. 
*/ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[HCTX_MAX_TYPES]; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + struct lock_class_key key; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + /* + * The caller ensure that if REQ_HIPRI, poll must be enabled. + */ + if (flags & REQ_HIPRI) + type = HCTX_TYPE_POLL; + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) + type = HCTX_TYPE_READ; + return type; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return rq->mq_ctx->hctxs[blk_mq_get_hctx_type(rq->cmd_flags)]; +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = (to_vvq(vq)->packed_ring ? to_vvq(vq)->packed.desc_state[head].data : + to_vvq(vq)->split.desc_state[head].data); + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->deadline > rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static bool blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); + return true; +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.al8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.al8.x86_64/nvme.h new file mode 100644 index 00000000..acda33af --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.al8.x86_64/nvme.h @@ -0,0 +1,63 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + /* only used for poll queues: */ + spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + unsigned long flags; +#define NVMEQ_ENABLED 0 +#define NVMEQ_SQ_CMB 1 +#define NVMEQ_DELETE_ERROR 2 +#define NVMEQ_POLLED 3 + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (tail >= nvmeq->q_depth) + break; + memcpy(&cmd, nvmeq->sq_cmds + (tail << nvmeq->sqes), + sizeof(cmd)); + if (cmd.common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.al8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.al8.x86_64/virtio_blk.h new file mode 100644 index 00000000..ed80e59f --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.112-11.al8.x86_64/virtio_blk.h @@ -0,0 +1,310 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct vring_desc_state_split { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. 
*/ + u16 num; /* Descriptor list length. */ + u16 next; /* The next desc state in a list. */ + u16 last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra_packed { + dma_addr_t addr; /* Buffer DMA addr. */ + u32 len; /* Buffer length. */ + u16 flags; /* Descriptor flags. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + union { + /* Available for split ring */ + struct { + /* Actual memory layout for this queue. */ + struct vring vring; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* + * Last written value to avail->idx in + * guest byte order. + */ + u16 avail_idx_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_split *desc_state; + + /* DMA address and size information */ + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Device ring wrap counter. */ + bool used_wrap_counter; + + /* Avail used flags. */ + u16 avail_used_flags; + + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_state_packed *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +#define VQ_NAME_LEN 16 +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. 
*/ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[HCTX_MAX_TYPES]; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + struct lock_class_key key; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + /* + * The caller ensure that if REQ_HIPRI, poll must be enabled. + */ + if (flags & REQ_HIPRI) + type = HCTX_TYPE_POLL; + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) + type = HCTX_TYPE_READ; + return type; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return rq->mq_ctx->hctxs[blk_mq_get_hctx_type(rq->cmd_flags)]; +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = (to_vvq(vq)->packed_ring ? to_vvq(vq)->packed.desc_state[head].data : + to_vvq(vq)->split.desc_state[head].data); + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->deadline > rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static bool blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); + return true; +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.1.al8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.1.al8.x86_64/nvme.h new file mode 100644 index 00000000..acda33af --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.1.al8.x86_64/nvme.h @@ -0,0 +1,63 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + /* only used for poll queues: */ + spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + unsigned long flags; +#define NVMEQ_ENABLED 0 +#define NVMEQ_SQ_CMB 1 +#define NVMEQ_DELETE_ERROR 2 +#define NVMEQ_POLLED 3 + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (tail >= nvmeq->q_depth) + break; + memcpy(&cmd, nvmeq->sq_cmds + (tail << nvmeq->sqes), + sizeof(cmd)); + if (cmd.common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.1.al8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.1.al8.x86_64/virtio_blk.h new file mode 100644 index 00000000..ed80e59f --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.1.al8.x86_64/virtio_blk.h @@ -0,0 +1,310 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct vring_desc_state_split { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. 
*/ + u16 num; /* Descriptor list length. */ + u16 next; /* The next desc state in a list. */ + u16 last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra_packed { + dma_addr_t addr; /* Buffer DMA addr. */ + u32 len; /* Buffer length. */ + u16 flags; /* Descriptor flags. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + union { + /* Available for split ring */ + struct { + /* Actual memory layout for this queue. */ + struct vring vring; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* + * Last written value to avail->idx in + * guest byte order. + */ + u16 avail_idx_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_split *desc_state; + + /* DMA address and size information */ + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Device ring wrap counter. */ + bool used_wrap_counter; + + /* Avail used flags. */ + u16 avail_used_flags; + + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_state_packed *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +#define VQ_NAME_LEN 16 +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. 
*/ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[HCTX_MAX_TYPES]; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + struct lock_class_key key; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + /* + * The caller ensure that if REQ_HIPRI, poll must be enabled. + */ + if (flags & REQ_HIPRI) + type = HCTX_TYPE_POLL; + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) + type = HCTX_TYPE_READ; + return type; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return rq->mq_ctx->hctxs[blk_mq_get_hctx_type(rq->cmd_flags)]; +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = (to_vvq(vq)->packed_ring ? to_vvq(vq)->packed.desc_state[head].data : + to_vvq(vq)->split.desc_state[head].data); + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->deadline > rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static bool blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); + return true; +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.2.al8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.2.al8.x86_64/nvme.h new file mode 100644 index 00000000..acda33af --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.2.al8.x86_64/nvme.h @@ -0,0 +1,63 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + /* only used for poll queues: */ + spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + unsigned long flags; +#define NVMEQ_ENABLED 0 +#define NVMEQ_SQ_CMB 1 +#define NVMEQ_DELETE_ERROR 2 +#define NVMEQ_POLLED 3 + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (tail >= nvmeq->q_depth) + break; + memcpy(&cmd, nvmeq->sq_cmds + (tail << nvmeq->sqes), + sizeof(cmd)); + if (cmd.common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.2.al8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.2.al8.x86_64/virtio_blk.h new file mode 100644 index 00000000..ed80e59f --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.2.al8.x86_64/virtio_blk.h @@ -0,0 +1,310 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct vring_desc_state_split { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. 
*/ + u16 num; /* Descriptor list length. */ + u16 next; /* The next desc state in a list. */ + u16 last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra_packed { + dma_addr_t addr; /* Buffer DMA addr. */ + u32 len; /* Buffer length. */ + u16 flags; /* Descriptor flags. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + union { + /* Available for split ring */ + struct { + /* Actual memory layout for this queue. */ + struct vring vring; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* + * Last written value to avail->idx in + * guest byte order. + */ + u16 avail_idx_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_split *desc_state; + + /* DMA address and size information */ + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Device ring wrap counter. */ + bool used_wrap_counter; + + /* Avail used flags. */ + u16 avail_used_flags; + + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_state_packed *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +#define VQ_NAME_LEN 16 +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. 
*/ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[HCTX_MAX_TYPES]; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + struct lock_class_key key; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + /* + * The caller ensure that if REQ_HIPRI, poll must be enabled. + */ + if (flags & REQ_HIPRI) + type = HCTX_TYPE_POLL; + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) + type = HCTX_TYPE_READ; + return type; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return rq->mq_ctx->hctxs[blk_mq_get_hctx_type(rq->cmd_flags)]; +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = (to_vvq(vq)->packed_ring ? to_vvq(vq)->packed.desc_state[head].data : + to_vvq(vq)->split.desc_state[head].data); + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->deadline > rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static bool blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); + return true; +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.al8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.al8.x86_64/nvme.h new file mode 100644 index 00000000..acda33af --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.al8.x86_64/nvme.h @@ -0,0 +1,63 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + /* only used for poll queues: */ + spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + unsigned long flags; +#define NVMEQ_ENABLED 0 +#define NVMEQ_SQ_CMB 1 +#define NVMEQ_DELETE_ERROR 2 +#define NVMEQ_POLLED 3 + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (tail >= nvmeq->q_depth) + break; + memcpy(&cmd, nvmeq->sq_cmds + (tail << nvmeq->sqes), + sizeof(cmd)); + if (cmd.common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.al8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.al8.x86_64/virtio_blk.h new file mode 100644 index 00000000..ed80e59f --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-12.al8.x86_64/virtio_blk.h @@ -0,0 +1,310 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct vring_desc_state_split { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. 
*/ + u16 num; /* Descriptor list length. */ + u16 next; /* The next desc state in a list. */ + u16 last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra_packed { + dma_addr_t addr; /* Buffer DMA addr. */ + u32 len; /* Buffer length. */ + u16 flags; /* Descriptor flags. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + union { + /* Available for split ring */ + struct { + /* Actual memory layout for this queue. */ + struct vring vring; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* + * Last written value to avail->idx in + * guest byte order. + */ + u16 avail_idx_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_split *desc_state; + + /* DMA address and size information */ + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Device ring wrap counter. */ + bool used_wrap_counter; + + /* Avail used flags. */ + u16 avail_used_flags; + + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_state_packed *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +#define VQ_NAME_LEN 16 +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. 
*/ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[HCTX_MAX_TYPES]; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + struct lock_class_key key; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + /* + * The caller ensure that if REQ_HIPRI, poll must be enabled. + */ + if (flags & REQ_HIPRI) + type = HCTX_TYPE_POLL; + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) + type = HCTX_TYPE_READ; + return type; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return rq->mq_ctx->hctxs[blk_mq_get_hctx_type(rq->cmd_flags)]; +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = (to_vvq(vq)->packed_ring ? to_vvq(vq)->packed.desc_state[head].data : + to_vvq(vq)->split.desc_state[head].data); + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->deadline > rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static bool blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); + return true; +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.1.al8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.1.al8.x86_64/nvme.h new file mode 100644 index 00000000..acda33af --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.1.al8.x86_64/nvme.h @@ -0,0 +1,63 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + /* only used for poll queues: */ + spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + unsigned long flags; +#define NVMEQ_ENABLED 0 +#define NVMEQ_SQ_CMB 1 +#define NVMEQ_DELETE_ERROR 2 +#define NVMEQ_POLLED 3 + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (tail >= nvmeq->q_depth) + break; + memcpy(&cmd, nvmeq->sq_cmds + (tail << nvmeq->sqes), + sizeof(cmd)); + if (cmd.common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.1.al8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.1.al8.x86_64/virtio_blk.h new file mode 100644 index 00000000..ed80e59f --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.1.al8.x86_64/virtio_blk.h @@ -0,0 +1,310 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct vring_desc_state_split { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. 
*/ + u16 num; /* Descriptor list length. */ + u16 next; /* The next desc state in a list. */ + u16 last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra_packed { + dma_addr_t addr; /* Buffer DMA addr. */ + u32 len; /* Buffer length. */ + u16 flags; /* Descriptor flags. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + union { + /* Available for split ring */ + struct { + /* Actual memory layout for this queue. */ + struct vring vring; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* + * Last written value to avail->idx in + * guest byte order. + */ + u16 avail_idx_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_split *desc_state; + + /* DMA address and size information */ + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Device ring wrap counter. */ + bool used_wrap_counter; + + /* Avail used flags. */ + u16 avail_used_flags; + + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_state_packed *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +#define VQ_NAME_LEN 16 +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. 
*/ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[HCTX_MAX_TYPES]; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + struct lock_class_key key; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + /* + * The caller ensure that if REQ_HIPRI, poll must be enabled. + */ + if (flags & REQ_HIPRI) + type = HCTX_TYPE_POLL; + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) + type = HCTX_TYPE_READ; + return type; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return rq->mq_ctx->hctxs[blk_mq_get_hctx_type(rq->cmd_flags)]; +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = (to_vvq(vq)->packed_ring ? to_vvq(vq)->packed.desc_state[head].data : + to_vvq(vq)->split.desc_state[head].data); + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->deadline > rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static bool blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); + return true; +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.al8.aarch64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.al8.aarch64/nvme.h new file mode 100644 index 00000000..acda33af --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.al8.aarch64/nvme.h @@ -0,0 +1,63 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + /* only used for poll queues: */ + spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + unsigned long flags; +#define NVMEQ_ENABLED 0 +#define NVMEQ_SQ_CMB 1 +#define NVMEQ_DELETE_ERROR 2 +#define NVMEQ_POLLED 3 + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (tail >= nvmeq->q_depth) + break; + memcpy(&cmd, nvmeq->sq_cmds + (tail << nvmeq->sqes), + sizeof(cmd)); + if (cmd.common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.al8.aarch64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.al8.aarch64/virtio_blk.h new file mode 100644 index 00000000..ed80e59f --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.al8.aarch64/virtio_blk.h @@ -0,0 +1,310 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct vring_desc_state_split { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. 
*/ + u16 num; /* Descriptor list length. */ + u16 next; /* The next desc state in a list. */ + u16 last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra_packed { + dma_addr_t addr; /* Buffer DMA addr. */ + u32 len; /* Buffer length. */ + u16 flags; /* Descriptor flags. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + union { + /* Available for split ring */ + struct { + /* Actual memory layout for this queue. */ + struct vring vring; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* + * Last written value to avail->idx in + * guest byte order. + */ + u16 avail_idx_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_split *desc_state; + + /* DMA address and size information */ + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Device ring wrap counter. */ + bool used_wrap_counter; + + /* Avail used flags. */ + u16 avail_used_flags; + + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_state_packed *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +#define VQ_NAME_LEN 16 +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. 
*/ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[HCTX_MAX_TYPES]; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + struct lock_class_key key; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + /* + * The caller ensure that if REQ_HIPRI, poll must be enabled. + */ + if (flags & REQ_HIPRI) + type = HCTX_TYPE_POLL; + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) + type = HCTX_TYPE_READ; + return type; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return rq->mq_ctx->hctxs[blk_mq_get_hctx_type(rq->cmd_flags)]; +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = (to_vvq(vq)->packed_ring ? to_vvq(vq)->packed.desc_state[head].data : + to_vvq(vq)->split.desc_state[head].data); + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->deadline > rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static bool blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); + return true; +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.al8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.al8.x86_64/nvme.h new file mode 100644 index 00000000..acda33af --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.al8.x86_64/nvme.h @@ -0,0 +1,63 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + /* only used for poll queues: */ + spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + unsigned long flags; +#define NVMEQ_ENABLED 0 +#define NVMEQ_SQ_CMB 1 +#define NVMEQ_DELETE_ERROR 2 +#define NVMEQ_POLLED 3 + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (tail >= nvmeq->q_depth) + break; + memcpy(&cmd, nvmeq->sq_cmds + (tail << nvmeq->sqes), + sizeof(cmd)); + if (cmd.common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.al8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.al8.x86_64/virtio_blk.h new file mode 100644 index 00000000..ed80e59f --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.134-13.al8.x86_64/virtio_blk.h @@ -0,0 +1,310 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct vring_desc_state_split { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. 
*/ + u16 num; /* Descriptor list length. */ + u16 next; /* The next desc state in a list. */ + u16 last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra_packed { + dma_addr_t addr; /* Buffer DMA addr. */ + u32 len; /* Buffer length. */ + u16 flags; /* Descriptor flags. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + union { + /* Available for split ring */ + struct { + /* Actual memory layout for this queue. */ + struct vring vring; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* + * Last written value to avail->idx in + * guest byte order. + */ + u16 avail_idx_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_split *desc_state; + + /* DMA address and size information */ + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Device ring wrap counter. */ + bool used_wrap_counter; + + /* Avail used flags. */ + u16 avail_used_flags; + + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_state_packed *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +#define VQ_NAME_LEN 16 +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. 
*/ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[HCTX_MAX_TYPES]; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + struct lock_class_key key; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + /* + * The caller ensure that if REQ_HIPRI, poll must be enabled. + */ + if (flags & REQ_HIPRI) + type = HCTX_TYPE_POLL; + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) + type = HCTX_TYPE_READ; + return type; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return rq->mq_ctx->hctxs[blk_mq_get_hctx_type(rq->cmd_flags)]; +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = (to_vvq(vq)->packed_ring ? to_vvq(vq)->packed.desc_state[head].data : + to_vvq(vq)->split.desc_state[head].data); + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->deadline > rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static bool blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); + return true; +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.1.al8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.1.al8.x86_64/nvme.h new file mode 100644 index 00000000..acda33af --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.1.al8.x86_64/nvme.h @@ -0,0 +1,63 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + /* only used for poll queues: */ + spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + unsigned long flags; +#define NVMEQ_ENABLED 0 +#define NVMEQ_SQ_CMB 1 +#define NVMEQ_DELETE_ERROR 2 +#define NVMEQ_POLLED 3 + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (tail >= nvmeq->q_depth) + break; + memcpy(&cmd, nvmeq->sq_cmds + (tail << nvmeq->sqes), + sizeof(cmd)); + if (cmd.common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.1.al8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.1.al8.x86_64/virtio_blk.h new file mode 100644 index 00000000..ed80e59f --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.1.al8.x86_64/virtio_blk.h @@ -0,0 +1,310 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct vring_desc_state_split { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. 
*/ + u16 num; /* Descriptor list length. */ + u16 next; /* The next desc state in a list. */ + u16 last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra_packed { + dma_addr_t addr; /* Buffer DMA addr. */ + u32 len; /* Buffer length. */ + u16 flags; /* Descriptor flags. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + union { + /* Available for split ring */ + struct { + /* Actual memory layout for this queue. */ + struct vring vring; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* + * Last written value to avail->idx in + * guest byte order. + */ + u16 avail_idx_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_split *desc_state; + + /* DMA address and size information */ + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Device ring wrap counter. */ + bool used_wrap_counter; + + /* Avail used flags. */ + u16 avail_used_flags; + + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_state_packed *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +#define VQ_NAME_LEN 16 +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. 
*/ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[HCTX_MAX_TYPES]; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + struct lock_class_key key; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + /* + * The caller ensure that if REQ_HIPRI, poll must be enabled. + */ + if (flags & REQ_HIPRI) + type = HCTX_TYPE_POLL; + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) + type = HCTX_TYPE_READ; + return type; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return rq->mq_ctx->hctxs[blk_mq_get_hctx_type(rq->cmd_flags)]; +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = (to_vvq(vq)->packed_ring ? to_vvq(vq)->packed.desc_state[head].data : + to_vvq(vq)->split.desc_state[head].data); + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->deadline > rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static bool blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); + return true; +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.2.al8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.2.al8.x86_64/nvme.h new file mode 100644 index 00000000..acda33af --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.2.al8.x86_64/nvme.h @@ -0,0 +1,63 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + /* only used for poll queues: */ + spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + unsigned long flags; +#define NVMEQ_ENABLED 0 +#define NVMEQ_SQ_CMB 1 +#define NVMEQ_DELETE_ERROR 2 +#define NVMEQ_POLLED 3 + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (tail >= nvmeq->q_depth) + break; + memcpy(&cmd, nvmeq->sq_cmds + (tail << nvmeq->sqes), + sizeof(cmd)); + if (cmd.common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.2.al8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.2.al8.x86_64/virtio_blk.h new file mode 100644 index 00000000..ed80e59f --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.2.al8.x86_64/virtio_blk.h @@ -0,0 +1,310 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct vring_desc_state_split { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. 
*/ + u16 num; /* Descriptor list length. */ + u16 next; /* The next desc state in a list. */ + u16 last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra_packed { + dma_addr_t addr; /* Buffer DMA addr. */ + u32 len; /* Buffer length. */ + u16 flags; /* Descriptor flags. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + union { + /* Available for split ring */ + struct { + /* Actual memory layout for this queue. */ + struct vring vring; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* + * Last written value to avail->idx in + * guest byte order. + */ + u16 avail_idx_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_split *desc_state; + + /* DMA address and size information */ + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Device ring wrap counter. */ + bool used_wrap_counter; + + /* Avail used flags. */ + u16 avail_used_flags; + + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_state_packed *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +#define VQ_NAME_LEN 16 +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. 
*/ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[HCTX_MAX_TYPES]; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + struct lock_class_key key; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + /* + * The caller ensure that if REQ_HIPRI, poll must be enabled. + */ + if (flags & REQ_HIPRI) + type = HCTX_TYPE_POLL; + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) + type = HCTX_TYPE_READ; + return type; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return rq->mq_ctx->hctxs[blk_mq_get_hctx_type(rq->cmd_flags)]; +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = (to_vvq(vq)->packed_ring ? to_vvq(vq)->packed.desc_state[head].data : + to_vvq(vq)->split.desc_state[head].data); + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->deadline > rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static bool blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); + return true; +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.3.al8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.3.al8.x86_64/nvme.h new file mode 100644 index 00000000..acda33af --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.3.al8.x86_64/nvme.h @@ -0,0 +1,63 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + /* only used for poll queues: */ + spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + unsigned long flags; +#define NVMEQ_ENABLED 0 +#define NVMEQ_SQ_CMB 1 +#define NVMEQ_DELETE_ERROR 2 +#define NVMEQ_POLLED 3 + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (tail >= nvmeq->q_depth) + break; + memcpy(&cmd, nvmeq->sq_cmds + (tail << nvmeq->sqes), + sizeof(cmd)); + if (cmd.common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.3.al8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.3.al8.x86_64/virtio_blk.h new file mode 100644 index 00000000..ed80e59f --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.3.al8.x86_64/virtio_blk.h @@ -0,0 +1,310 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct vring_desc_state_split { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. 
*/ + u16 num; /* Descriptor list length. */ + u16 next; /* The next desc state in a list. */ + u16 last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra_packed { + dma_addr_t addr; /* Buffer DMA addr. */ + u32 len; /* Buffer length. */ + u16 flags; /* Descriptor flags. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + union { + /* Available for split ring */ + struct { + /* Actual memory layout for this queue. */ + struct vring vring; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* + * Last written value to avail->idx in + * guest byte order. + */ + u16 avail_idx_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_split *desc_state; + + /* DMA address and size information */ + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Device ring wrap counter. */ + bool used_wrap_counter; + + /* Avail used flags. */ + u16 avail_used_flags; + + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_state_packed *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +#define VQ_NAME_LEN 16 +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. 
*/ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[HCTX_MAX_TYPES]; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + struct lock_class_key key; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + /* + * The caller ensure that if REQ_HIPRI, poll must be enabled. + */ + if (flags & REQ_HIPRI) + type = HCTX_TYPE_POLL; + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) + type = HCTX_TYPE_READ; + return type; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return rq->mq_ctx->hctxs[blk_mq_get_hctx_type(rq->cmd_flags)]; +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = (to_vvq(vq)->packed_ring ? to_vvq(vq)->packed.desc_state[head].data : + to_vvq(vq)->split.desc_state[head].data); + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->deadline > rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static bool blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); + return true; +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.4.al8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.4.al8.x86_64/nvme.h new file mode 100644 index 00000000..acda33af --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.4.al8.x86_64/nvme.h @@ -0,0 +1,63 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + /* only used for poll queues: */ + spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + unsigned long flags; +#define NVMEQ_ENABLED 0 +#define NVMEQ_SQ_CMB 1 +#define NVMEQ_DELETE_ERROR 2 +#define NVMEQ_POLLED 3 + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (tail >= nvmeq->q_depth) + break; + memcpy(&cmd, nvmeq->sq_cmds + (tail << nvmeq->sqes), + sizeof(cmd)); + if (cmd.common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.4.al8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.4.al8.x86_64/virtio_blk.h new file mode 100644 index 00000000..ed80e59f --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.4.al8.x86_64/virtio_blk.h @@ -0,0 +1,310 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct vring_desc_state_split { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. 
*/ + u16 num; /* Descriptor list length. */ + u16 next; /* The next desc state in a list. */ + u16 last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra_packed { + dma_addr_t addr; /* Buffer DMA addr. */ + u32 len; /* Buffer length. */ + u16 flags; /* Descriptor flags. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + union { + /* Available for split ring */ + struct { + /* Actual memory layout for this queue. */ + struct vring vring; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* + * Last written value to avail->idx in + * guest byte order. + */ + u16 avail_idx_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_split *desc_state; + + /* DMA address and size information */ + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Device ring wrap counter. */ + bool used_wrap_counter; + + /* Avail used flags. */ + u16 avail_used_flags; + + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_state_packed *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +#define VQ_NAME_LEN 16 +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. 
*/ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[HCTX_MAX_TYPES]; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + struct lock_class_key key; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + /* + * The caller ensure that if REQ_HIPRI, poll must be enabled. + */ + if (flags & REQ_HIPRI) + type = HCTX_TYPE_POLL; + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) + type = HCTX_TYPE_READ; + return type; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return rq->mq_ctx->hctxs[blk_mq_get_hctx_type(rq->cmd_flags)]; +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = (to_vvq(vq)->packed_ring ? to_vvq(vq)->packed.desc_state[head].data : + to_vvq(vq)->split.desc_state[head].data); + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->deadline > rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static bool blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); + return true; +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.al8.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.al8.x86_64/nvme.h new file mode 100644 index 00000000..acda33af --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.al8.x86_64/nvme.h @@ -0,0 +1,63 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + void *sq_cmds; + /* only used for poll queues: */ + spinlock_t cq_poll_lock ____cacheline_aligned_in_smp; + struct nvme_completion *cqes; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 qid; + u8 cq_phase; + u8 sqes; + unsigned long flags; +#define NVMEQ_ENABLED 0 +#define NVMEQ_SQ_CMB 1 +#define NVMEQ_DELETE_ERROR 2 +#define NVMEQ_POLLED 3 + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; + struct completion delete_done; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (tail >= nvmeq->q_depth) + break; + memcpy(&cmd, nvmeq->sq_cmds + (tail << nvmeq->sqes), + sizeof(cmd)); + if (cmd.common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.al8.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.al8.x86_64/virtio_blk.h new file mode 100644 index 00000000..ed80e59f --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.10.84-10.al8.x86_64/virtio_blk.h @@ -0,0 +1,310 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct vring_desc_state_split { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. 
*/ + u16 num; /* Descriptor list length. */ + u16 next; /* The next desc state in a list. */ + u16 last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra_packed { + dma_addr_t addr; /* Buffer DMA addr. */ + u32 len; /* Buffer length. */ + u16 flags; /* Descriptor flags. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + union { + /* Available for split ring */ + struct { + /* Actual memory layout for this queue. */ + struct vring vring; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* + * Last written value to avail->idx in + * guest byte order. + */ + u16 avail_idx_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_split *desc_state; + + /* DMA address and size information */ + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Device ring wrap counter. */ + bool used_wrap_counter; + + /* Avail used flags. */ + u16 avail_used_flags; + + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_state_packed *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +#define VQ_NAME_LEN 16 +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. 
*/ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* What host tells us, plus 2 for header & tailer. */ + unsigned int sg_elems; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + u8 status; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_lists[HCTX_MAX_TYPES]; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; + + /* incremented at dispatch time */ + unsigned long rq_dispatched[2]; + unsigned long rq_merged; + + /* incremented at completion time */ + unsigned long ____cacheline_aligned_in_smp rq_completed[2]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + struct lock_class_key key; + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + /* + * The caller ensure that if REQ_HIPRI, poll must be enabled. + */ + if (flags & REQ_HIPRI) + type = HCTX_TYPE_POLL; + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) + type = HCTX_TYPE_READ; + return type; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return rq->mq_ctx->hctxs[blk_mq_get_hctx_type(rq->cmd_flags)]; +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = (to_vvq(vq)->packed_ring ? to_vvq(vq)->packed.desc_state[head].data : + to_vvq(vq)->split.desc_state[head].data); + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
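+	/*
+	 * The helper below estimates when the request was issued to the driver:
+	 * io_start_time_ns is used when I/O accounting has filled it in; otherwise
+	 * the issue time is reconstructed from rq->deadline, which the block layer
+	 * sets to (issue time in jiffies + rq->timeout), converted to nanoseconds.
+	 */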
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->deadline > rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static bool blk_mq_check_rq_hang(struct blk_mq_hw_ctx *hctx, + struct request *rq, void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); + return true; +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.17.0-1.an23.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.17.0-1.an23.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.17.0-1.an23.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.17.0-1.an23.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.17.0-1.an23.x86_64/virtio_blk.h new file mode 100644 index 00000000..dd97cab2 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.17.0-1.an23.x86_64/virtio_blk.h @@ -0,0 +1,304 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct vring_desc_state_split { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. */ +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. 
*/ + u16 num; /* Descriptor list length. */ + u16 last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra { + dma_addr_t addr; /* Descriptor DMA addr. */ + u32 len; /* Descriptor length. */ + u16 flags; /* Descriptor flags. */ + u16 next; /* The next desc state in a list. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. */ + u16 last_used_idx; + + /* Hint for event idx: already triggered no need to disable. */ + bool event_triggered; + + union { + /* Available for split ring */ + struct { + /* Actual memory layout for this queue. */ + struct vring vring; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* + * Last written value to avail->idx in + * guest byte order. + */ + u16 avail_idx_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_split *desc_state; + struct vring_desc_extra *desc_extra; + + /* DMA address and size information */ + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Device ring wrap counter. */ + bool used_wrap_counter; + + /* Avail used flags. */ + u16 avail_used_flags; + + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_extra *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +#define VQ_NAME_LEN 16 +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. */ + struct gendisk *disk; + + /* Block layer tags. 
*/ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* + * Tracks references from block_device_operations open/release and + * virtio_driver probe/remove so this object can be freed once no + * longer in use. + */ + refcount_t refs; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + u8 status; + struct sg_table sg_table; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list[HCTX_MAX_TYPES]; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + /* + * The caller ensure that if REQ_POLLED, poll must be enabled. + */ + if (flags & REQ_POLLED) + type = HCTX_TYPE_POLL; + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) + type = HCTX_TYPE_READ; + return type; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return rq->mq_ctx->hctxs[blk_mq_get_hctx_type(rq->cmd_flags)]; +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = (to_vvq(vq)->packed_ring ? to_vvq(vq)->packed.desc_state[head].data : + to_vvq(vq)->split.desc_state[head].data); + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
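+	/*
+	 * The iteration helpers at the bottom of this header wrap
+	 * blk_mq_queue_tag_busy_iter(), which is not usable directly from a module
+	 * and is therefore resolved by name at load time (see fill_ksymbols() in
+	 * iosdiag.c).  On this kernel the callback is a busy_tag_iter_fn and no
+	 * longer receives the hw ctx, hence the (rq, priv, reserved) signature of
+	 * blk_mq_check_rq_hang() below.
+	 */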
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->deadline > rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static bool blk_mq_check_rq_hang(struct request *rq, + void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); + return true; +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.19.0-1_rc1.an23.x86_64/nvme.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.19.0-1_rc1.an23.x86_64/nvme.h new file mode 100644 index 00000000..9f328608 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.19.0-1_rc1.an23.x86_64/nvme.h @@ -0,0 +1,61 @@ +#ifndef _NVME_H +#define _NVME_H + +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +struct nvme_queue { + struct device *q_dmadev; + void *nvme_dev; //struct nvme_dev *dev; + spinlock_t sq_lock; + struct nvme_command *sq_cmds; + struct nvme_command __iomem *sq_cmds_io; + spinlock_t cq_lock ____cacheline_aligned_in_smp; + volatile struct nvme_completion *cqes; + struct blk_mq_tags **tags; + dma_addr_t sq_dma_addr; + dma_addr_t cq_dma_addr; + u32 __iomem *q_db; + u16 q_depth; + s16 cq_vector; + u16 sq_tail; + u16 last_sq_tail; + u16 cq_head; + u16 last_cq_head; + u16 qid; + u8 cq_phase; + u8 polled; + u32 *dbbuf_sq_db; + u32 *dbbuf_cq_db; + u32 *dbbuf_sq_ei; + u32 *dbbuf_cq_ei; +}; + +static int get_sq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int tail = nvmeq->sq_tail; + struct nvme_command cmd; + + do { + if (nvmeq->sq_cmds_io) { + memcpy_toio(&cmd, &nvmeq->sq_cmds_io[tail], sizeof(struct nvme_command)); + if (cmd.common.command_id == rq->tag) + return tail; + } + else if (nvmeq->sq_cmds[tail].common.command_id == rq->tag) + return tail; + } while (--tail >= 0); + return -1; +} + +static unsigned long get_cmd_ctx(struct nvme_queue *nvmeq, struct request *rq) +{ + //struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/include/5.19.0-1_rc1.an23.x86_64/virtio_blk.h b/source/lib/internal/kernel_module/modules/iosdiag/include/5.19.0-1_rc1.an23.x86_64/virtio_blk.h new file mode 100644 index 00000000..da9179e2 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/include/5.19.0-1_rc1.an23.x86_64/virtio_blk.h @@ -0,0 +1,302 @@ +#ifndef _VIRTIO_BLK_H +#define _VIRTIO_BLK_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +#define VQ_NAME_LEN 16 + +struct virtio_blk_vq { + struct virtqueue *vq; + spinlock_t lock; + char name[VQ_NAME_LEN]; +} ____cacheline_aligned_in_smp; + +struct vring_desc_state_split { + void *data; /* Data for callback. */ + struct vring_desc *indir_desc; /* Indirect descriptor, if any. 
*/ +}; + +struct vring_desc_state_packed { + void *data; /* Data for callback. */ + struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */ + u16 num; /* Descriptor list length. */ + u16 last; /* The last desc state in a list. */ +}; + +struct vring_desc_extra { + dma_addr_t addr; /* Descriptor DMA addr. */ + u32 len; /* Descriptor length. */ + u16 flags; /* Descriptor flags. */ + u16 next; /* The next desc state in a list. */ +}; + +struct vring_virtqueue { + struct virtqueue vq; + + /* Is this a packed ring? */ + bool packed_ring; + + /* Is DMA API used? */ + bool use_dma_api; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen. + * for split ring, it just contains last used index + * for packed ring: + * bits up to VRING_PACKED_EVENT_F_WRAP_CTR include the last used index. + * bits from VRING_PACKED_EVENT_F_WRAP_CTR include the used wrap counter. + */ + u16 last_used_idx; + + /* Hint for event idx: already triggered no need to disable. */ + bool event_triggered; + + union { + /* Available for split ring */ + struct { + /* Actual memory layout for this queue. */ + struct vring vring; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* + * Last written value to avail->idx in + * guest byte order. + */ + u16 avail_idx_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_split *desc_state; + struct vring_desc_extra *desc_extra; + + /* DMA address and size information */ + dma_addr_t queue_dma_addr; + size_t queue_size_in_bytes; + } split; + + /* Available for packed ring */ + struct { + /* Actual memory layout for this queue. */ + struct { + unsigned int num; + struct vring_packed_desc *desc; + struct vring_packed_desc_event *driver; + struct vring_packed_desc_event *device; + } vring; + + /* Driver ring wrap counter. */ + bool avail_wrap_counter; + + /* Avail used flags. */ + u16 avail_used_flags; + + /* Index of the next avail descriptor. */ + u16 next_avail_idx; + + /* + * Last written value to driver->flags in + * guest byte order. + */ + u16 event_flags_shadow; + + /* Per-descriptor state. */ + struct vring_desc_state_packed *desc_state; + struct vring_desc_extra *desc_extra; + + /* DMA address and size information */ + dma_addr_t ring_dma_addr; + dma_addr_t driver_event_dma_addr; + dma_addr_t device_event_dma_addr; + size_t ring_size_in_bytes; + size_t event_size_in_bytes; + } packed; + }; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + + /* DMA, allocation, and size information */ + bool we_own_ring; + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif +}; + +struct virtio_blk { + /* + * This mutex must be held by anything that may run after + * virtblk_remove() sets vblk->vdev to NULL. + * + * blk-mq, virtqueue processing, and sysfs attribute code paths are + * shut down before vblk->vdev is set to NULL and therefore do not need + * to hold this mutex. + */ + struct mutex vdev_mutex; + struct virtio_device *vdev; + + /* The disk structure for the kernel. 
*/ + struct gendisk *disk; + + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + + /* Process context for config space updates */ + struct work_struct config_work; + + /* Ida index - used to track minor number allocations. */ + int index; + + /* num of vqs */ + int num_vqs; + int io_queues[HCTX_MAX_TYPES]; + struct virtio_blk_vq *vqs; +}; + +struct virtblk_req { + struct virtio_blk_outhdr out_hdr; + u8 status; + struct sg_table sg_table; + struct scatterlist sg[]; +}; +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +struct blk_mq_ctxs { + struct kobject kobj; + struct blk_mq_ctx __percpu *queue_ctx; +}; + +struct blk_mq_ctx { + struct { + spinlock_t lock; + struct list_head rq_list[HCTX_MAX_TYPES]; + } ____cacheline_aligned_in_smp; + + unsigned int cpu; + unsigned short index_hw[HCTX_MAX_TYPES]; + struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; + + struct request_queue *queue; + struct blk_mq_ctxs *ctxs; + struct kobject kobj; +} ____cacheline_aligned_in_smp; + +struct blk_flush_queue { + unsigned int flush_pending_idx:1; + unsigned int flush_running_idx:1; + blk_status_t rq_status; + unsigned long flush_pending_since; + struct list_head flush_queue[2]; + struct list_head flush_data_in_flight; + struct request *flush_rq; + + spinlock_t mq_flush_lock; +}; + +static inline int enable_detect_flush_rq(void) +{ + return 1; +} + +static inline enum hctx_type blk_mq_get_hctx_type(unsigned int flags) +{ + enum hctx_type type = HCTX_TYPE_DEFAULT; + + /* + * The caller ensure that if REQ_POLLED, poll must be enabled. + */ + if (flags & REQ_POLLED) + type = HCTX_TYPE_POLL; + else if ((flags & REQ_OP_MASK) == REQ_OP_READ) + type = HCTX_TYPE_READ; + return type; +} + +static inline struct blk_mq_hw_ctx *blk_mq_get_hctx_byrq(struct request *rq) +{ + return rq->mq_hctx; + //return rq->mq_ctx->hctxs[blk_mq_get_hctx_type(rq->cmd_flags)]; +} + +static inline struct request *desc_state_data_to_req(struct virtqueue *vq, int head) +{ + void *data = (to_vvq(vq)->packed_ring ? to_vvq(vq)->packed.desc_state[head].data : + to_vvq(vq)->split.desc_state[head].data); + return data ? blk_mq_rq_from_pdu(data) : NULL; +} + +static inline int get_rq_internal_tag(struct request *rq) +{ + return rq ? 
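+	/*
+	 * On this kernel the request caches its hardware context, so
+	 * blk_mq_get_hctx_byrq() above simply returns rq->mq_hctx instead of
+	 * mapping cmd_flags through blk_mq_get_hctx_type() as the older headers
+	 * do; the polled-I/O flag is likewise REQ_POLLED here rather than
+	 * REQ_HIPRI.
+	 */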
rq->internal_tag : -1; +} + +static inline unsigned long get_issue_driver_ns(struct request *rq) +{ + if (!rq) + return 0; + if (rq->io_start_time_ns) + return rq->io_start_time_ns; + if (rq->deadline > rq->timeout) + return jiffies_to_usecs(rq->deadline - rq->timeout) * 1000; + return 0; +} + +/* + * LINUX_VERSION_CODE >= KERNEL_VERSION(4, 17, 0) + */ +static inline u64 get_check_hang_time_ns(void) +{ + return ktime_get_ns(); +} + +extern fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter; +typedef void (*blk_mq_rq_iter)(struct request *, void *, bool); +static blk_mq_rq_iter fn_blk_mq_check_hang = NULL; +static bool blk_mq_check_rq_hang(struct request *rq, + void *priv, bool reserved) +{ + if (fn_blk_mq_check_hang) + fn_blk_mq_check_hang(rq, priv, reserved); + return true; +} + +static inline int iter_all_rq(struct request_queue *q, blk_mq_rq_iter fn, void *data) +{ + fn_blk_mq_check_hang = fn; + + sym_blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, data); + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/iosdiag/iosdiag.c b/source/lib/internal/kernel_module/modules/iosdiag/iosdiag.c new file mode 100644 index 00000000..5b42a5bc --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/iosdiag.c @@ -0,0 +1,420 @@ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" +#include + +#define DISKHANG_DIR_NAME "disk_hang" + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0) +#define get_ino_data(x) PDE_DATA(x) +#else +#define get_ino_data(x) pde_data(x) +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0) +#define DEFINE_PROC_OPS(name, _write, _mmap) \ + static const struct file_operations name##_fops = { \ + .owner = THIS_MODULE, \ + .open = name##_open, \ + .read = seq_read, \ + .write = _write, \ + .mmap = _mmap, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } +#else +#define DEFINE_PROC_OPS(name, write, mmap) \ + static const struct proc_ops name##_fops = { \ + .proc_open = name##_open, \ + .proc_read = seq_read, \ + .proc_write = write, \ + .proc_mmap = mmap, \ + } +#endif + +#define DEFINE_PROC_ATTRIBUTE(name, __write, __mmap) \ + static int name##_open(struct inode *inode, struct file *file) \ + { \ + return single_open(file, name##_show, get_ino_data(inode)); \ + } \ + DEFINE_PROC_OPS(name, __write, __mmap) + +#define DEFINE_PROC_ATTRIBUTE_RW(name) \ + static ssize_t name##_write(struct file *file, \ + const char __user *buf, \ + size_t count, loff_t *ppos) \ + { \ + return name##_store(get_ino_data(file_inode(file)), buf, \ + count); \ + } \ + DEFINE_PROC_ATTRIBUTE(name, name##_write, name##_mmap) + +static DEFINE_MUTEX(rq_hang_buffer_mutex); + +struct rq_store { + struct list_head list; + struct request *rq; +}; +static struct rq_hang_info *g_rq_hang_info; +static int g_rq_hang_idx; +static unsigned long long g_rq_hang_total; +static int g_disk_type = -1; +static int g_bio_file_info; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) +fn_queue_tag_busy_iter sym_blk_mq_queue_tag_busy_iter = NULL; +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) +typedef void (*fn_mq_free_request)(struct request *rq); +static fn_mq_free_request sym_blk_mq_free_request; +#endif +fn_get_files_struct sym_get_files_struct = NULL; +fn_put_files_struct sym_put_files_struct = NULL; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) +typedef struct block_device *(*fn_blkdev_get_no_open)(dev_t dev); +typedef void (*fn_blkdev_put_no_open)(struct 
block_device *bdev); +static fn_blkdev_get_no_open sym_blkdev_get_no_open; +static fn_blkdev_put_no_open sym_blkdev_put_no_open; +fn_fget_task sym_fget_task = NULL; +#endif + +static void set_disk_type(char *buf) +{ + if (buf[0] == 'v' && buf[1] == 'd' && (buf[2] >= 'a' && buf[2] <= 'z')) + g_disk_type = DISK_VIRTIO_BLK; + else if (buf[0] == 's' && buf[1] == 'd' && (buf[2] >= 'a' && buf[2] <= 'z')) + g_disk_type = DISK_SCSI; + else if (!strncmp(buf, "nvme", 4)) + g_disk_type = DISK_NVME; + else + g_disk_type = -1; +} + +static int get_disk_type(void) +{ + return g_disk_type; +} + +int get_bio_file_info(void) +{ + return g_bio_file_info; +} + +static void store_hang_rq(struct request *rq, unsigned long long now) +{ + int index; + + if (g_rq_hang_idx >= MAX_STORE_RQ_CNT) + return; + + g_rq_hang_total++; + index = g_rq_hang_idx; + if (fill_hang_info_from_rq(&g_rq_hang_info[index], rq, + get_disk_type())) + return; + g_rq_hang_info[index].check_hang_ns = now; + g_rq_hang_info[index].req_addr = (unsigned long)rq; + g_rq_hang_idx++; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) +static int is_flush_rq(struct request *rq) +{ + struct blk_mq_hw_ctx *hctx = blk_mq_get_hctx_byrq(rq); + + if (hctx && hctx->fq) + return hctx->fq->flush_rq == rq; + return 0; +} +#endif + +static void mq_check_rq_hang(struct request *rq, void *priv, bool reserved) +{ + int rq_hang_threshold = *((int *)priv); + u64 now = get_check_hang_time_ns(); + u64 duration; + + if (!rq) + return; + + if (g_rq_hang_idx >= MAX_STORE_RQ_CNT) + return; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) + //if (is_flush_rq(rq) && !enable_detect_flush_rq()) + if (is_flush_rq(rq)) + return; +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0) + if (!refcount_inc_not_zero(&rq->ref)) + return; +#endif +#else + if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) + return; +#endif + duration = div_u64(now - rq->start_time_ns, NSEC_PER_MSEC); + if (duration >= rq_hang_threshold) + store_hang_rq(rq, now); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) + //if (is_flush_rq(rq) && rq->end_io) + // rq->end_io(rq, 0); + //else if (refcount_dec_and_test(&rq->ref)) +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0) + if (refcount_dec_and_test(&rq->ref)) + sym_blk_mq_free_request(rq); +#endif +#endif +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) +static struct rq_store g_rq_store[MAX_STORE_RQ_CNT]; +static int sq_check_rq_hang(struct request_queue *q, int rq_hang_threshold) +{ + u64 now = get_check_hang_time_ns(); + u64 duration; + unsigned long flags; + struct request *rq, *tmp; + LIST_HEAD(rq_list); + int rq_store_idx = 0; + spinlock_t *queue_lock = q->queue_lock; + + spin_lock_irqsave(queue_lock, flags); + list_for_each_entry_safe(rq, tmp, &q->queue_head, queuelist) { + duration = div_u64(now - rq->start_time_ns, NSEC_PER_MSEC); + if (duration >= rq_hang_threshold && rq_store_idx < MAX_STORE_RQ_CNT) { + g_rq_store[rq_store_idx].rq = rq; + INIT_LIST_HEAD(&g_rq_store[rq_store_idx].list); + list_add(&g_rq_store[rq_store_idx].list, &rq_list); + rq_store_idx++; + } else + continue; + } + spin_unlock_irqrestore(queue_lock, flags); + + spin_lock_irqsave(queue_lock, flags); + list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) { + duration = div_u64(now - rq->start_time_ns, NSEC_PER_MSEC); + if (duration >= rq_hang_threshold && rq_store_idx < MAX_STORE_RQ_CNT) { + g_rq_store[rq_store_idx].rq = rq; + INIT_LIST_HEAD(&g_rq_store[rq_store_idx].list); + list_add(&g_rq_store[rq_store_idx].list, &rq_list); + rq_store_idx++; + 
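+			/*
+			 * Second pass for the legacy single-queue path: q->queue_head
+			 * holds requests still waiting for dispatch, while
+			 * q->timeout_list holds requests already issued to the driver.
+			 * Candidates from both lists are stashed in the static
+			 * g_rq_store[] under queue_lock and reported via
+			 * store_hang_rq() once the lock is dropped.
+			 */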
} else + continue; + } + spin_unlock_irqrestore(queue_lock, flags); + while(!list_empty(&rq_list)) { + struct rq_store *rqs; + rqs = list_first_entry(&rq_list, struct rq_store, list); + if (rqs->rq) + store_hang_rq(rqs->rq, now); + list_del_init(&rqs->list); + } + return 0; +} +#else +static int sq_check_rq_hang(struct request_queue *q, int rq_hang_threshold) {return 0;} +#endif + +static int rq_hang_detect(dev_t devnum, int rq_hang_threshold) +{ + int ret = 0; + struct request_queue *q; + struct block_device *bdev; + + if (!devnum || rq_hang_threshold <= 0) + return -EINVAL; +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) + bdev = bdget(devnum); +#else + bdev = sym_blkdev_get_no_open(devnum); +#endif + if (!bdev) { + printk("error: invalid devnum(%d:%d)\n", MAJOR(devnum), MINOR(devnum)); + return -EFAULT; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) || LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) + if (!bdev->bd_queue) { +#endif + if (!bdev->bd_disk || !(q = bdev_get_queue(bdev))) { + printk("error: can't get request queue for devnum(%d:%d)\n", + MAJOR(devnum), MINOR(devnum)); + ret = -EFAULT; + goto out; + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 8, 0) || LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) + } else + q = bdev->bd_queue; +#endif + + if (q->mq_ops) + ret = iter_all_rq(q, mq_check_rq_hang, &rq_hang_threshold); + else + ret = sq_check_rq_hang(q, rq_hang_threshold); +out: +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) + bdput(bdev); +#else + sym_blkdev_put_no_open(bdev); +#endif + return ret; +} + +static int rq_hang_show(struct seq_file *m, void *ptr) +{ + seq_printf(m, "total_rq_hang:%llu\n", g_rq_hang_total); + return 0; +} + +static ssize_t rq_hang_store(struct file *file, + const char __user *buf, size_t count) +{ + int ret; + char *p; + char chr[256]; + char diskname[BDEVNAME_SIZE] = {0}; + int major, minor; + int threshold = 0; + + if (count < 1) + return -EINVAL; + + if (copy_from_user(chr, buf, 256)) + return -EFAULT; + + /* echo "vdb:253:16 1000" > /proc/xxxxx */ + if ((p = strstr(chr, ":"))) { + memcpy(diskname, chr, (p - chr)); + ret = sscanf(p+1, "%d:%d %d %d", &major, &minor, &threshold, &g_bio_file_info); + if (ret < 3 || threshold <= 0 || major < 1 || minor < 0) { + printk("invalid argument \'%s\'\n", chr); + return -EINVAL; + } + } else { + printk("invalid argument \'%s\'\n", chr); + return -EINVAL; + } + mutex_lock(&rq_hang_buffer_mutex); + set_disk_type(diskname); + g_rq_hang_idx = 0; + memset(g_rq_hang_info, 0x0, sizeof(struct rq_hang_info) * MAX_STORE_RQ_CNT); + ret = rq_hang_detect(MKDEV(major, minor), threshold); + mutex_unlock(&rq_hang_buffer_mutex); + return ret ? 
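+	/*
+	 * Write format (see the sscanf above):
+	 *   "<disk>:<major>:<minor> <threshold_ms> [<bio_file_info>]"
+	 * e.g.
+	 *   echo "vdb:253:16 1000" > /proc/disk_hang/rq_hang_detect
+	 * scans vdb for requests outstanding longer than 1000 ms; the optional
+	 * fourth field enables per-bio file/task lookup.  Matching requests are
+	 * written to g_rq_hang_info, which userspace retrieves by mmap()ing this
+	 * proc file; reading it returns the running total_rq_hang counter.
+	 */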
ret : count; +} + +static int rq_hang_mmap(struct file *file, struct vm_area_struct *vma) +{ + return remap_vmalloc_range(vma, (void *)g_rq_hang_info, vma->vm_pgoff); +} +DEFINE_PROC_ATTRIBUTE_RW(rq_hang); + +static int fill_ksymbols(void) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) + char *blkdev_get_func_name = "bdget"; + char *blkdev_put_func_name = "bdput"; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) + blkdev_get_func_name = "blkdev_get_no_open"; + blkdev_put_func_name = "blkdev_put_no_open"; +#endif + sym_blkdev_get_no_open = + (fn_blkdev_get_no_open)get_func_syms_by_name(blkdev_get_func_name); + if (!sym_blkdev_get_no_open) { + pr_err("not found symbol \"%s\"\n", blkdev_get_func_name); + return -EFAULT; + } + sym_blkdev_put_no_open = + (fn_blkdev_put_no_open)get_func_syms_by_name(blkdev_put_func_name); + if (!sym_blkdev_put_no_open) { + pr_err("not found symbol \"%s\"\n", blkdev_put_func_name); + return -EFAULT; + } + sym_fget_task = + (fn_fget_task)get_func_syms_by_name("fget_task"); + if (!sym_fget_task) + pr_warn("not found symbol \"fget_task\"\n"); +#else + sym_get_files_struct = + (fn_get_files_struct)get_func_syms_by_name("get_files_struct"); + if (!sym_get_files_struct) + pr_warn("not found symbol \"get_files_struct\"\n"); + + sym_put_files_struct = + (fn_put_files_struct)get_func_syms_by_name("put_files_struct"); + if (!sym_put_files_struct) + pr_warn("not found symbol \"put_files_struct\"\n"); +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) + sym_blk_mq_queue_tag_busy_iter = + (fn_queue_tag_busy_iter)get_func_syms_by_name("blk_mq_queue_tag_busy_iter"); + if (!sym_blk_mq_queue_tag_busy_iter) { + pr_err("not found symbol \"blk_mq_queue_tag_busy_iter\"\n"); + return -EFAULT; + } +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 18, 0) + sym_blk_mq_free_request = + (fn_mq_free_request)get_func_syms_by_name("__blk_mq_free_request"); + if (!sym_blk_mq_free_request) { + pr_err("not found symbol \"__blk_mq_free_request\"\n"); + return -EFAULT; + } +#endif + return 0; +} + +int disk_hang_init(void) +{ + int ret; + struct proc_dir_entry *disk_hang_dir = NULL; + + if (fill_ksymbols()) { + pr_err("init ksymbols fail!\n"); + return -EPERM; + } + + disk_hang_dir = proc_mkdir(DISKHANG_DIR_NAME, NULL); + if (!disk_hang_dir) { + pr_err("create \"/proc/%s\" fail\n", DISKHANG_DIR_NAME); + return -ENOMEM; + } + if (!proc_create_data("rq_hang_detect", 0600, disk_hang_dir, + &rq_hang_fops, NULL)) { + pr_err("create \"/proc/%s/rq_hang_detect\" fail\n", + DISKHANG_DIR_NAME); + ret = -ENOMEM; + goto remove_proc; + } + g_rq_hang_info = vmalloc_user(sizeof(struct rq_hang_info) * MAX_STORE_RQ_CNT); + if (!g_rq_hang_info) { + pr_err("alloc memory \"rq hang info buffer\" fail\n"); + ret = -ENOMEM; + goto remove_proc; + } + memset(g_rq_hang_info, 0x0, sizeof(struct rq_hang_info) * MAX_STORE_RQ_CNT); + pr_info("iosdiag load success\n"); + return 0; +remove_proc: + remove_proc_subtree(DISKHANG_DIR_NAME, NULL); + return ret; +} + +int disk_hang_exit(void) +{ + if (g_rq_hang_info) { + vfree(g_rq_hang_info); + g_rq_hang_info = NULL; + } + remove_proc_subtree(DISKHANG_DIR_NAME, NULL); + return 0; +} + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/iosdiag.h b/source/lib/internal/kernel_module/modules/iosdiag/iosdiag.h new file mode 100644 index 00000000..87f735ca --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/iosdiag.h @@ -0,0 +1,109 @@ +#ifndef __IOSDIAG_H +#define __IOSDIAG_H +#include +#include +#include "ksymbol.h" + +#define 
MAX_STORE_RQ_CNT 128 +#define MAX_FILE_NAME_LEN 255 +#define BIO_INFO_MAX_PAGES 32 +#define MAX_REQ_BIOS 32 + +enum disk_type { + DISK_VIRTIO_BLK, + DISK_NVME, + DISK_SCSI, +}; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0) +enum rq_atomic_flags { + REQ_ATOM_COMPLETE = 0, + REQ_ATOM_STARTED, +}; +#endif +/* +struct rq_buffer { + struct request rq; + unsigned long long check_time_ns; + void *rq_addr; +}; +*/ +struct vq_info { + int qid; + int vring_num; + int last_used_idx; + int used_idx; + int used_ring_flags; + int last_avail_idx; + int avail_idx; + int avail_ring_flags; + int event; + int rq_avail_idx; + int last_kick_avail_idx; + int rq_used_idx; +}; + +struct nvme_info { + int qid; + int q_depth; //sq/cq depth + int cq_head; //nvmeq->cqes[cq_head]~nvmeq->cqes[cq_end], including req->tag? + int cq_end; + int cq_rq_idx; //rq idx in cq + //int last_cq_head; //nvmeq->sq_head or nvmeq->last_cq_head + int sq_tail; //0~nvmeq->sq_cmds[idx].command_id, including req->tag? + int sq_rq_idx; //rq idx in sq + int sq_last_db; //last sq idx host kick nvme, nvmeq->q_db + unsigned long cmd_ctx; +}; + +struct scsi_info { + int done_hander_defined; + int is_mq; +}; + +struct bio_info { + unsigned long bio_addr; + unsigned long sector; + unsigned int size; + unsigned int pid; + char comm[TASK_COMM_LEN]; + char filename[MAX_FILE_NAME_LEN]; +}; + +struct rq_hang_info { + unsigned int data_len; + unsigned long sector; + unsigned long req_addr; + unsigned long long io_start_ns; + unsigned long long io_issue_driver_ns; + unsigned long long check_hang_ns; + char op[64]; + char state[16]; + struct vq_info vq; + struct nvme_info nvme; + struct scsi_info scsi; + int tag; + int internal_tag; + int cpu; + char diskname[BDEVNAME_SIZE]; + //int errors; + //unsigned long cmd_flags; + struct bio_info first_bio; +}; +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 17, 0) +typedef void (*fn_queue_tag_busy_iter)(struct request_queue *q, busy_iter_fn *fn, void *priv); +#else +typedef void (*fn_queue_tag_busy_iter)(struct request_queue *q, busy_tag_iter_fn *fn, void *priv); +#endif +#if LINUX_VERSION_CODE > KERNEL_VERSION(5, 10, 0) +typedef struct file *(*fn_fget_task)(struct task_struct *task, unsigned int fd); +#endif +typedef struct files_struct *(*fn_get_files_struct)(struct task_struct *); +typedef void (*fn_put_files_struct)(struct files_struct *fs); + + +int fill_hang_info_from_rq(struct rq_hang_info *rq_hang_info, + struct request *rq, + int disk_type); +#endif + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/nvme.c b/source/lib/internal/kernel_module/modules/iosdiag/nvme.c new file mode 100644 index 00000000..b890579b --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/nvme.c @@ -0,0 +1,65 @@ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct nvme_queue *get_nvme_queue_by_rq(struct request *rq) +{ + struct blk_mq_hw_ctx *hctx; + + if (!rq) + return NULL; + + hctx = blk_mq_get_hctx_byrq(rq); + if (!hctx) + return NULL; + return hctx->driver_data ? 
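+	/*
+	 * The nvme pci driver publishes its struct nvme_queue through
+	 * hctx->driver_data in its init_hctx callbacks, so this cast-by-convention
+	 * only works while the struct nvme_queue defined in this header mirrors
+	 * the running kernel's layout; that is why a per-kernel-version copy of
+	 * the header is carried.
+	 */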
hctx->driver_data : NULL; +} + +static int get_cq_end(struct nvme_queue *nvmeq, struct request *rq) +{ + int head = nvmeq->cq_head; + + do { + if (nvmeq->cqes[head].command_id == -1) + return head; + } while (++head < nvmeq->q_depth); + return -1; +} + +static int get_cq_rq_idx(struct nvme_queue *nvmeq, struct request *rq) +{ + int head = 0; + + do { + if (nvmeq->cqes[head].command_id == rq->tag) + return head; + } while (++head < nvmeq->q_depth); + return -1; +} + +void get_nvme_info(struct nvme_info *nvme_i, struct request *rq) +{ + struct nvme_queue *nvmeq; + + if (!(nvmeq = get_nvme_queue_by_rq(rq))) + return; + + nvme_i->qid = nvmeq->qid; + nvme_i->q_depth = nvmeq->q_depth; + nvme_i->cq_head = nvmeq->cq_head; + nvme_i->cq_end = get_cq_end(nvmeq, rq); + nvme_i->cq_rq_idx = get_cq_rq_idx(nvmeq, rq); + nvme_i->sq_tail = nvmeq->sq_tail; + nvme_i->sq_rq_idx = get_sq_rq_idx(nvmeq, rq); + nvme_i->sq_last_db = readl(nvmeq->q_db); + nvme_i->cmd_ctx = get_cmd_ctx(nvmeq, rq); +} + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/rq_hang.c b/source/lib/internal/kernel_module/modules/iosdiag/rq_hang.c new file mode 100644 index 00000000..12a22cfb --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/rq_hang.c @@ -0,0 +1,350 @@ + +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 0) +#include +#endif +#include +#include +#include +#include +#include +#include "iosdiag.h" +#include + +struct req_op_name{ + int op; + char *op_str; +}; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) +#define REQ_OP_NAME(name) {REQ_OP_##name, #name} +#else +#define REQ_READ 0 +#define REQ_OP_NAME(name) {REQ_##name, #name} +#endif +static struct req_op_name g_op_name[] = { + REQ_OP_NAME(READ), + REQ_OP_NAME(WRITE), + REQ_OP_NAME(FLUSH), + REQ_OP_NAME(DISCARD), + REQ_OP_NAME(WRITE_SAME), +}; +#define SINGLE_OP_NAME_SIZE 16 +#define MAX_OP_NAME_SIZE ((SINGLE_OP_NAME_SIZE + 1) * 5) + +static const char *const blk_mq_rq_state_name_array[] = { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0) + [REQ_ATOM_COMPLETE] = "complete", + [REQ_ATOM_STARTED] = "in_flight", +#else + [MQ_RQ_IDLE] = "idle", + [MQ_RQ_IN_FLIGHT] = "in_flight", + [MQ_RQ_COMPLETE] = "complete", +#endif +}; + +extern fn_get_files_struct sym_get_files_struct; +extern fn_put_files_struct sym_put_files_struct; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) +extern fn_fget_task sym_fget_task; +#endif +extern int get_bio_file_info(void); + +extern void get_vq_info(struct vq_info *vq_i, struct request *rq); +extern void get_scsi_info(struct scsi_info *scsi_i, struct request *rq); +extern void get_nvme_info(struct nvme_info *nvme_i, struct request *rq); + +static char *get_disk_name(struct gendisk *hd, int partno, char *buf) +{ + if (!partno) + snprintf(buf, BDEVNAME_SIZE, "%s", hd->disk_name); + else if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) + snprintf(buf, BDEVNAME_SIZE, "%sp%d", hd->disk_name, partno); + else + snprintf(buf, BDEVNAME_SIZE, "%s%d", hd->disk_name, partno); + return buf; +} + +static void blk_rq_op_name(int op_flags, char *op_buf, int buf_len) +{ + int i = 0; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) + for (; i < (sizeof(g_op_name) / sizeof(g_op_name[0])); i++) { + if (op_flags == g_op_name[i].op) { + strcat(op_buf, g_op_name[i].op_str); + return; + } + } +#else + int len; + for (; i < (sizeof(g_op_name) / sizeof(g_op_name[0])); i++) { + if (op_flags & g_op_name[i].op) { + if ((len = strlen(op_buf)) >= buf_len) + 
return; + if (len) { + strncat(op_buf, "|", min((strlen("|") + 1),(buf_len - len))); + op_buf[buf_len - 1] = '\0'; + if ((len = strlen(op_buf)) >= buf_len) + return; + } + strncat(op_buf, g_op_name[i].op_str, + min((strlen(g_op_name[i].op_str) + 1), + (buf_len - len))); + op_buf[buf_len - 1] = '\0'; + } + } +#endif +} + +static const char *blk_mq_rq_state_name(unsigned int rq_state) +{ + if (WARN_ON_ONCE(rq_state >= + ARRAY_SIZE(blk_mq_rq_state_name_array))) + return "(?)"; + return blk_mq_rq_state_name_array[rq_state]; +} + +static char *__dentry_name(struct dentry *dentry, char *name) +{ + char *p = dentry_path_raw(dentry, name, PATH_MAX); + + if (IS_ERR(p)) { + __putname(name); + return NULL; + } + + if (p + strlen(p) + 1 != name + PATH_MAX) { + __putname(name); + return NULL; + } + + if (p > name) + strcpy(name, p); + + return name; +} + +static char *dentry_name(struct dentry *dentry) +{ + char *name = __getname(); + if (!name) + return NULL; + + return __dentry_name(dentry, name); +} + +static char *inode_name(struct inode *ino) +{ + struct dentry *dentry; + char *name; + + dentry = d_find_alias(ino); + if (!dentry) + return NULL; + + name = dentry_name(dentry); + dput(dentry); + return name; +} + +static int is_task_open_file(struct task_struct *p, struct inode *ino) +{ + struct files_struct *files; + struct file *file; + struct fdtable *fdt; + unsigned int fd; + int found = 0; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) + files = p->files; +#else + if (!sym_get_files_struct || !sym_put_files_struct) + return found; + files = sym_get_files_struct(p); +#endif + if (files) { + rcu_read_lock(); + fdt = files_fdtable(files); + fd = find_first_bit(fdt->open_fds, fdt->max_fds); + while (fd < fdt->max_fds) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) + file = fcheck_files(files, fd); +#else + if (!sym_fget_task) + break; + file = sym_fget_task(p, fd); +#endif + if (file && (file_inode(file) == ino)) + found = 1; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) + fput(file); +#endif + if (found) + break; + fd = find_next_bit(fdt->open_fds, fdt->max_fds, fd + 1); + } + rcu_read_unlock(); + sym_put_files_struct(files); + } + return found; +} + +static void get_task_info_lsof(struct inode *ino, unsigned int *pid, + char *comm) +{ + struct task_struct *p; + + rcu_read_lock(); + for_each_process(p) { + if (p->flags & PF_KTHREAD) + continue; + rcu_read_unlock(); + get_task_struct(p); + if (is_task_open_file(p, ino)) { + *pid = p->pid; + memcpy(comm, p->comm, sizeof(p->comm)); + put_task_struct(p); + return; + } + put_task_struct(p); + + cond_resched(); + rcu_read_lock(); + } + rcu_read_unlock(); +} + +static int get_inode_filename(struct inode *ino, char *name_buf, + int len) +{ + char *name; + + if (!ino->i_ino) + return -1; + + name = inode_name(ino); + if (name) { + if (strlen(name) + 1 <= len) + strlcpy(name_buf, name, strlen(name) + 1); + else { + strlcpy(name_buf, "...", 4); + strlcpy(name_buf + 3, + name + (strlen(name) + 1 - (len - 3)), + (len - 3)); + } + __putname(name); + return 0; + } + return -1; +} + +static void get_bio_info(struct bio_info *bio_i, struct bio *bio) +{ + struct bio_vec *bvec; + + if (!bio) + return; + + bio_i->bio_addr = (unsigned long)bio; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0) + bio_i->sector = bio->bi_iter.bi_sector; + bio_i->size = bio->bi_iter.bi_size; +#else + bio_i->sector = bio->bi_sector; + bio_i->size = bio->bi_size; +#endif + if (get_bio_file_info()) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) + int i; + 
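+		/*
+		 * bio_for_each_segment_all() changed in v5.1: older kernels iterate
+		 * with a plain integer index, newer ones with a struct bvec_iter_all,
+		 * hence the two variants guarded by this #if/#else.
+		 */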
bio_for_each_segment_all(bvec, bio, i) { +#else + struct bvec_iter_all iter_all; + bio_for_each_segment_all(bvec, bio, iter_all) { +#endif + struct page *page = bvec->bv_page; + + if (!page) + continue; + if (page->mapping && page->mapping->host) { + if (get_inode_filename(page->mapping->host, bio_i->filename, + sizeof(bio_i->filename))) { + continue; + } + + if (sym_get_files_struct && sym_put_files_struct) + get_task_info_lsof(page->mapping->host, &bio_i->pid, + bio_i->comm); + break; + } + } + } +} + +static void get_rq_info(struct rq_hang_info *rq_hi, struct request *rq) +{ + char op_buf[MAX_OP_NAME_SIZE]; + + rq_hi->data_len = rq->__data_len; + rq_hi->sector = rq->__sector; + strcpy(op_buf, ""); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0) + blk_rq_op_name(req_op(rq), op_buf, sizeof(op_buf)); +#else + blk_rq_op_name(rq->cmd_flags, op_buf, sizeof(op_buf)); +#endif + strncpy(rq_hi->op, op_buf, min(strlen(op_buf), sizeof(rq_hi->op) - 1)); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 16, 0) + strcpy(rq_hi->state, (blk_mq_rq_state_name((test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags) ? + REQ_ATOM_COMPLETE : REQ_ATOM_STARTED)))); +#else + strcpy(rq_hi->state, blk_mq_rq_state_name(READ_ONCE(rq->state))); +#endif + rq_hi->tag = rq->tag; + rq_hi->internal_tag = get_rq_internal_tag(rq); + if (rq->mq_ctx) + rq_hi->cpu = rq->mq_ctx->cpu; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) + else + rq_hi->cpu = rq->cpu; +#endif + rq_hi->io_start_ns = rq->start_time_ns; + rq_hi->io_issue_driver_ns = get_issue_driver_ns(rq); +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0) + if (rq->rq_disk) + get_disk_name(rq->rq_disk, rq->part ? rq->part->partno : 0, + rq_hi->diskname); +#else + if (rq->q && rq->q->disk) + get_disk_name(rq->q->disk, rq->part ? rq->part->bd_partno : 0, + rq_hi->diskname); +#endif + get_bio_info(&rq_hi->first_bio, rq->bio); +} + +int fill_hang_info_from_rq(struct rq_hang_info *rq_hi, + struct request *rq, int disk_type) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 18, 0) + if (!rq || !test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) + return -1; +#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0) + if (!rq || !refcount_read(&rq->ref)) + return -1; +#endif + get_rq_info(rq_hi, rq); + if (disk_type == DISK_VIRTIO_BLK) + get_vq_info(&rq_hi->vq, rq); + else if (disk_type == DISK_NVME) + get_nvme_info(&rq_hi->nvme, rq); + else if (disk_type == DISK_SCSI) + get_scsi_info(&rq_hi->scsi, rq); + return 0; +} + diff --git a/source/lib/internal/kernel_module/modules/iosdiag/scsi.c b/source/lib/internal/kernel_module/modules/iosdiag/scsi.c new file mode 100644 index 00000000..d9288b85 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/scsi.c @@ -0,0 +1,35 @@ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "iosdiag.h" + +void get_scsi_info(struct scsi_info *scsi_i, struct request *rq) +{ + struct scsi_cmnd *cmd; + + if (rq->q->mq_ops) { + scsi_i->is_mq = 1; + cmd = blk_mq_rq_to_pdu(rq); + } +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0) + else + cmd = rq->special; +#endif + + if (!cmd) + return; +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0) + scsi_i->done_hander_defined = cmd->scsi_done ? 
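+		/*
+		 * Kernels before 5.15 expose the per-command completion callback as
+		 * cmd->scsi_done; on newer kernels the field is not available to this
+		 * module, so the flag is simply reported as set in the #else branch
+		 * below.
+		 */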
1 : 0; +#else + scsi_i->done_hander_defined = 1; +#endif +} diff --git a/source/lib/internal/kernel_module/modules/iosdiag/virtio_blk.c b/source/lib/internal/kernel_module/modules/iosdiag/virtio_blk.c new file mode 100644 index 00000000..9b856613 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/iosdiag/virtio_blk.c @@ -0,0 +1,148 @@ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static struct vring *get_vring_by_vq(struct virtqueue *vq) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) + return &to_vvq(vq)->vring; +#else + if (to_vvq(vq)->packed_ring) + return NULL;//(struct vring *)&to_vvq(vq)->packed.vring; + + return (struct vring *)&to_vvq(vq)->split.vring; +#endif +} + +static struct virtqueue *get_virtqueue_by_rq(struct request *rq) +{ + struct virtio_blk *vblk; + int qid; + struct blk_mq_hw_ctx *hctx; + struct virtqueue *vq; + + if (!rq) + return NULL; + + hctx = blk_mq_get_hctx_byrq(rq); + if (!hctx) + return NULL; + qid = hctx->queue_num; + vblk = hctx->queue->queuedata; + if (qid >= vblk->num_vqs) + return NULL; + vq = vblk->vqs[qid].vq; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0) + if (!vq || !get_vring_by_vq(vq)) + return NULL; +#endif + return vq; +} + + +static int get_vq_id(struct request *rq) +{ + struct blk_mq_hw_ctx *hctx; + + if (!rq) + return -1; + + hctx = blk_mq_get_hctx_byrq(rq); + if (!hctx) + return -1; + return hctx->queue_num; +} + +static int get_rq_avail_idx(struct request *rq) +{ + int i; + unsigned int loop = 0; + struct vring *vring; + struct virtqueue *vq; + u16 last_used_idx; + u16 current_avail_idx; + int head; + + if (!(vq = get_virtqueue_by_rq(rq))) + return -1; + vring = get_vring_by_vq(vq); + current_avail_idx = vring->avail->idx; + last_used_idx = to_vvq(vq)->last_used_idx; + while (last_used_idx <= current_avail_idx && (loop++) < vring->num) { + i = last_used_idx & (vring->num - 1); + head = virtio16_to_cpu(vq->vdev, vring->avail->ring[i]); + if (head < vring->num) { + if (desc_state_data_to_req(vq, head) == rq) + return last_used_idx; + } else { + return -1; + } + last_used_idx++; + } + return -1; +} + +static int get_rq_used_idx(struct request *rq) +{ + int i; + unsigned int loop = 0; + struct vring *vring; + struct virtqueue *vq; + u16 last_used_idx; + u16 used_idx; + int head; + + if (!(vq = get_virtqueue_by_rq(rq))) + return -1; + vring = get_vring_by_vq(vq); + used_idx = virtio16_to_cpu(vq->vdev, vring->used->idx); + last_used_idx = to_vvq(vq)->last_used_idx; + while (last_used_idx < used_idx && (loop++) < vring->num) { + i = last_used_idx & (vring->num - 1); + head = virtio32_to_cpu(vq->vdev, vring->used->ring[i].id); + if (head < vring->num) { + if (desc_state_data_to_req(vq, head) == rq) + return last_used_idx; + } else { + return -1; + } + last_used_idx++; + } + return -1; +} + +void get_vq_info(struct vq_info *vq_i, struct request *rq) +{ + struct vring *vring; + struct virtqueue *vq; + + if (!(vq = get_virtqueue_by_rq(rq))) + return; + + vring = get_vring_by_vq(vq); + vq_i->qid = get_vq_id(rq); + vq_i->vring_num = vring->num; + vq_i->event = to_vvq(vq)->event ? 
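+	/*
+	 * When the EVENT_IDX feature is negotiated (vq->event), the 16-bit slot
+	 * just past the used ring holds avail_event, the avail index at which the
+	 * device wants to be kicked; it is reported below as last_avail_idx.
+	 * Without the feature that slot does not exist, so -1 is reported instead.
+	 */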
1 : 0; + vq_i->last_used_idx = to_vvq(vq)->last_used_idx; + vq_i->used_idx = vring->used->idx; + vq_i->used_ring_flags = vring->used->flags; + if (vq_i->event == 1) + vq_i->last_avail_idx = + *(__virtio16 *)&vring->used->ring[vring->num]; + else + vq_i->last_avail_idx = -1; + vq_i->avail_idx = vring->avail->idx; + vq_i->avail_ring_flags = vring->avail->flags; + vq_i->last_kick_avail_idx = vq_i->avail_idx - to_vvq(vq)->num_added; + vq_i->rq_avail_idx = get_rq_avail_idx(rq); + vq_i->rq_used_idx = get_rq_used_idx(rq); +} + diff --git a/source/lib/internal/kernel_module/modules/memhunter/common.c b/source/lib/internal/kernel_module/modules/memhunter/common.c new file mode 100644 index 00000000..f69daaac --- /dev/null +++ b/source/lib/internal/kernel_module/modules/memhunter/common.c @@ -0,0 +1,227 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" + +int prepend(char **buffer, int *buflen, const char *str, int namelen) +{ + *buflen -= namelen; + if (*buflen < 0) + return -1; + *buffer -= namelen; + memcpy(*buffer, str, namelen); + return 0; +} + +static struct mount *inode2mount(struct inode *inode) +{ + struct list_head *pos; + struct mount *mount = NULL; + + if(inode && !_IS_ERR(inode)){ + pos = inode->i_sb->s_mounts.next; + if(pos && !_IS_ERR(pos)){ + mount = container_of(pos, struct mount, mnt_instance); + } + } + return mount; +} + + +static int mnt_has_parent(struct mount *mnt) +{ + return !!(mnt != mnt->mnt_parent); +} + +static struct dentry *__lock_parent(struct dentry *dentry) +{ + struct dentry *parent; + rcu_read_lock(); + spin_unlock(&dentry->d_lock); +again: + parent = READ_ONCE(dentry->d_parent); + spin_lock(&parent->d_lock); + if (unlikely(parent != dentry->d_parent)) { + spin_unlock(&parent->d_lock); + goto again; + } + rcu_read_unlock(); + if (parent != dentry) + spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); + else + parent = NULL; + return parent; +} + +static inline struct dentry *lock_parent(struct dentry *dentry) +{ + struct dentry *parent = dentry->d_parent; + if (IS_ROOT(dentry)) + return NULL; + if (likely(spin_trylock(&parent->d_lock))) + return parent; + return __lock_parent(dentry); +} + +static inline void __dget_dlock(struct dentry *dentry) +{ + dentry->d_lockref.count++; +} + +static struct dentry *__d_find_alias(struct inode *inode) +{ + struct dentry *alias; + + if (S_ISDIR(inode->i_mode)) + return NULL; + +#ifndef LINUX_310 + hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) +#else + hlist_for_each_entry(alias, &inode->i_dentry, d_alias) +#endif + { + spin_lock(&alias->d_lock); + __dget_dlock(alias); + spin_unlock(&alias->d_lock); + return alias; + } + return NULL; +} + +static struct dentry *_d_find_alias(struct inode *inode) +{ + struct dentry *de = NULL; + + if (!hlist_empty(&inode->i_dentry)) { + spin_lock(&inode->i_lock); + de = __d_find_alias(inode); + spin_unlock(&inode->i_lock); + } + return de; +} + + + +static char *dentry_name(struct inode *inode, struct dentry *dentry, char *name, int len) +{ + struct mount *mnt; + struct mount *prev = NULL; + char *p; + char *tmp; + char *end; + int ret = 0; + + tmp = kmalloc(PATH_MAX, GFP_ATOMIC); + if (!tmp) + return NULL; + + end = name; + end[len - 1] = 0; + end = name + len - 1; + + mnt = inode2mount(inode); + do { + memset(tmp, 0, PATH_MAX); + p = dentry_path_raw(dentry, tmp, PATH_MAX); + 
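+		/* dentry_path_raw() yields the path relative to the current mount;
+		 * prepend it, then climb to the parent mount and repeat until the
+		 * root mount is reached, assembling the full path right-to-left. */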
//pr_err("%s-%d:inode %px mountpoint %px dentry:%px\n", p, strlen(p), inode, mnt->mnt_mountpoint, dentry); + ret = prepend(&end, &len, p, strlen(p)); + if (ret) + pr_err("prepend error\n"); + //pr_err("mnt:%px parent:%px end:%px len %d tmp:%px\n", mnt,mnt->mnt_parent, end, len, tmp); + prev = mnt; + dentry = mnt->mnt_mountpoint; + mnt = mnt->mnt_parent; + } while (mnt_has_parent(prev) && (dentry != mnt->mnt_mountpoint)); + + kfree(tmp); + memmove(name, end, strlen(end) + 1); + return name; +} + +int scan_inode_name(struct inode *inode, char *buf, int len, unsigned long *cached, int *deleted) +{ + struct dentry *dt; + struct dentry *parent; + + *cached = inode->i_data.nrpages; + dt = _d_find_alias(inode); + if (!dt) { + *deleted = 1; + return 0; + } + + spin_lock(&inode->i_lock); + spin_lock(&dt->d_lock); + parent = lock_parent(dt); + + dentry_name(inode, dt, buf, len); + *deleted = d_unlinked(dt); + + if (parent) + spin_unlock(&parent->d_lock); + spin_unlock(&dt->d_lock); + spin_unlock(&inode->i_lock); + dput(dt); + + return 0; +} + +void radix_init(struct radix_tree_root *root) +{ + INIT_RADIX_TREE(root, GFP_ATOMIC); +} + +int radix_insert(struct radix_tree_root *root, unsigned long key, void *ptr) +{ + return radix_tree_insert(root, key, ptr); +} + +void *radix_lookup(struct radix_tree_root *root, unsigned long key) +{ + return (void *)radix_tree_lookup(root, key); +} + +int radix_delete(struct radix_tree_root *root, unsigned long key) +{ + return radix_tree_delete(root, key); +} +#undef NR +#define NR (10) +int radix_delete_all(struct radix_tree_root *root, node_free_t free) +{ + int found, i; + unsigned long pos = 0; + struct radix_item *res[NR]; + + do { + found = radix_tree_gang_lookup(root, (void **)res, pos, NR); + for (i = 0; i < found; i++) { + radix_delete(root, res[i]->key); + if (free) + free(res[i]); + } + } while (found > 0); + + return 0; +} diff --git a/source/lib/internal/kernel_module/modules/memhunter/common.h b/source/lib/internal/kernel_module/modules/memhunter/common.h new file mode 100644 index 00000000..f111575e --- /dev/null +++ b/source/lib/internal/kernel_module/modules/memhunter/common.h @@ -0,0 +1,133 @@ +#ifndef __RADIX_TREE__ +#define __RADIX_TREE__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 9) +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#else +#define LINUX_310 +#include "memcontrol_7.h" +#endif + +#define NAME_LEN (1024) +struct radix_item { + unsigned long key; +}; +struct mount { + struct hlist_node mnt_hash; + struct mount *mnt_parent; + struct dentry *mnt_mountpoint; + struct vfsmount mnt; +#if LINUX_VERSION_CODE > KERNEL_VERSION(3, 14, 0) + union { + struct rcu_head mnt_rcu; + struct llist_node mnt_llist; + }; +#endif +#ifdef CONFIG_SMP + struct mnt_pcp __percpu *mnt_pcp; +#else + int mnt_count; + int mnt_writers; +#endif + struct list_head mnt_mounts; /* list of children, anchored here */ + struct list_head mnt_child; /* and going through their mnt_child */ + struct list_head mnt_instance; /* mount instance on sb->s_mounts */ +}; + +struct inode_item { + struct radix_item node; + struct list_head inode; + unsigned long i_ino; + int nr_pages; + int deleted:4; + int shmem:4; + unsigned long cached; + char *filename; +}; + +struct file_item_list { + struct list_head items_list; + char filename[NAME_LEN]; + unsigned long size; + unsigned long cached; + int deleted; +}; + +struct filecache_result_list { 
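+	/* kernel-side aggregate of one file cache scan: entry count,
+	 * filesystem name and a list of file_item_list records */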
+ int num; + char fsname[NAME_LEN]; + struct list_head file_items_list; +}; + +struct file_item { + char filename[NAME_LEN]; + unsigned long size; + unsigned long cached; + int deleted; +}; + +struct filecache_result { + int num; + char fsname[NAME_LEN]; + struct file_item *filecache_items; +}; + +typedef enum _memhunter_type { + MEMHUNTER_CACHE_TYPE_FILE = 1, + MEMHUNTER_CACHE_TYPE_MEMCG_DYING, + MEMHUNTER_CACHE_TYPE_MEMCG_ONE, +} memhunter_type; + +typedef void (*node_free_t)(void *args); + +static inline bool _IS_ERR(const void *ptr) +{ + if((unsigned long)ptr < 0xffff000000000000){ + return 1; + } + return IS_ERR_VALUE((unsigned long)ptr); +} +static inline int _page_is_file_cache(struct page *page) +{ + return !PageSwapBacked(page); +} + +static inline int page_is_shmem(struct page *page) +{ + return !!(!_page_is_file_cache(page) && !PageAnon(page)); +} +int prepend(char **buffer, int *buflen, const char *str, int namelen); +int scan_inode_name(struct inode *inode, char *buf, int len, unsigned long *cached, int *deleted); +void radix_init(struct radix_tree_root *root); +int radix_insert(struct radix_tree_root *root, unsigned long key, void *ptr); +void *radix_lookup(struct radix_tree_root *root, unsigned long key); +int radix_delete(struct radix_tree_root *root, unsigned long key); +int radix_delete_all(struct radix_tree_root *root, node_free_t free); +int filecache_scan(void); +int memcg_dying_scan(void); +int memcg_scan_one(void); +int filecache_main(unsigned long arg); +#ifdef LINUX_310 +enum { + CSS_DYING = 0, /* this CSS is dying*/ +}; +#endif +#endif diff --git a/source/lib/internal/kernel_module/modules/memhunter/filecache.c b/source/lib/internal/kernel_module/modules/memhunter/filecache.c new file mode 100644 index 00000000..29639fdb --- /dev/null +++ b/source/lib/internal/kernel_module/modules/memhunter/filecache.c @@ -0,0 +1,190 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "common.h" + +static unsigned long totalCache = 0; +static char * fileName = NULL; +struct filecache_result_list result_list; + +static int dump_filecache_result(struct filecache_result __user *result) +{ + struct file_item *tmp, *fnext; + struct filecache_result res; + int count = 0; + int i = 0; + struct file_item_list *items, *tmp2; + int ret = 0; + + if(copy_from_user(&res, result, sizeof(res))) + { + pr_err("can not copy from user %d:%d\n",count,__LINE__); + ret = copy_to_user(result, &count, sizeof(count)); + return 0; + } + if (!res.num || !res.filecache_items) + { + pr_err("num %d ,items %p \n", res.num, res.filecache_items); + ret = copy_to_user(result, &count, sizeof(count)); + return 0; + } + + i = res.num > result_list.num ? 
result_list.num : res.num; + if(i <= 0) + { + i = copy_to_user(result, &count, sizeof(count)); + return i; + } + if((tmp = vmalloc(sizeof(struct file_item) * i)) == NULL) + { + pr_err("vmalloc error %d:%d\n",count,__LINE__); + ret = copy_to_user(result, &count, sizeof(count)); + return 0; + } + + fnext = tmp; + + list_for_each_entry_safe(items, tmp2, &(result_list.file_items_list), items_list) + { + pr_err("filename:%s size:%lu cached:%lu deleted:%d\n", items->filename, items->size, items->cached, items->deleted); + strcpy(fnext->filename, items->filename); + fnext->size = items->size; + fnext->cached = items->cached; + fnext->deleted = items->deleted; + count += 1; + fnext ++; + if(count >= i) + break; + } + res.num = count; + + i = copy_to_user(result, &count, sizeof(count)); + i = copy_to_user(res.filecache_items, tmp, sizeof(struct file_item) * count); + vfree(tmp); + return i; +} + +static void scan_super_block(struct super_block *sb, void * args) +{ + struct inode *inode, *next; + unsigned long cached; + int deleted; + struct file_item_list *tmp, *tmp2 = NULL; + +#ifdef LINUX_310 + spinlock_t *sb_inode_lock = kallsyms_lookup_name("inode_sb_list_lock"); + spin_lock(sb_inode_lock); +#else + spin_lock(&sb->s_inode_list_lock); +#endif + list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { + if (S_ISREG(inode->i_mode)) { + scan_inode_name(inode, fileName, PATH_MAX, &cached, &deleted); + if (cached*4 > 1024*50) + { + pr_err("filename:%s size:%lu cached:%lu deleted:%d\n", fileName, (unsigned long)inode->i_size, cached, deleted); + tmp2 = vzalloc(sizeof(struct file_item_list)); + if(!tmp2) + { + pr_err("vzalloc error: %d",__LINE__); + break; + } + strncpy(tmp2->filename, fileName,strlen(fileName)); + tmp2->size = (unsigned long)inode->i_size; + tmp2->cached = cached; + tmp2->deleted = deleted; + result_list.num += 1; + list_add_tail(&tmp2->items_list, &(result_list.file_items_list)); + } + } + } +#ifdef LINUX_310 + spin_unlock(sb_inode_lock); +#else + spin_unlock(&sb->s_inode_list_lock); +#endif +} + +static int scan_filesystem_type(char * fs) +{ + struct file_system_type * file_system; + + file_system = get_fs_type(fs); + if (!file_system) + return 0; + + iterate_supers_type(file_system, scan_super_block, NULL); + module_put(file_system->owner); + + return 0; +} + +int filecache_init(void) +{ + fileName = __getname(); + if(!fileName) + return 0; + result_list.num = 0; + INIT_LIST_HEAD(&(result_list.file_items_list)); + return 0; +} + +void filecache_exit(void) +{ + struct file_item_list *files_all, *tmp; + if (fileName) + __putname(fileName); + if(result_list.num) + { + list_for_each_entry_safe(files_all, tmp, &(result_list.file_items_list), items_list) + vfree(files_all); + } +} + +int filecache_scan(void) +{ + filecache_init(); + scan_filesystem_type("ext4"); + pr_err("total file cached %lu\n",totalCache); + totalCache = 0; + scan_filesystem_type("tmpfs"); + pr_err("total tmpfs %lu\n",totalCache); + filecache_exit(); + return 0; +} +int filecache_main(unsigned long arg) +{ + struct filecache_result *res = (struct filecache_result*)arg; + int ret = 0; + char fsname[NAME_LEN]; + memset(fsname, 0, sizeof(fsname)); + ret = copy_from_user(fsname, res->fsname, NAME_LEN); + pr_err("fsname:%s\n",fsname); + filecache_init(); + scan_filesystem_type(fsname); + pr_err("total file cached %lu\n",totalCache); + dump_filecache_result(res); + return 0; +} diff --git a/source/lib/internal/kernel_module/modules/memhunter/memcg.c b/source/lib/internal/kernel_module/modules/memhunter/memcg.c new 
file mode 100644 index 00000000..b90043d7 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/memhunter/memcg.c @@ -0,0 +1,513 @@ +#include +#include +#include +#include +#include +#include "memcg.h" + +static struct mem_cgroup *(* _mem_cgroup_iter)(struct mem_cgroup *,struct mem_cgroup *,struct mem_cgroup_reclaim_cookie *); +static void (*_mem_cgroup_iter_break)(struct mem_cgroup*, struct mem_cgroup*); +static struct zone *(*_next_zone)(struct zone *zone); +static struct address_space *(*_page_mapping)(struct page *page); +struct pglist_data *(*_first_online_pgdat)(void); + +#define for_each_mem_cgroup(iter, start) \ + for (iter = _mem_cgroup_iter(start, NULL, NULL); \ + iter != NULL; \ + iter = _mem_cgroup_iter(start, iter, NULL)) +#define _for_each_zone(zone) \ + for (zone = (_first_online_pgdat())->node_zones; \ + zone; \ + zone = _next_zone(zone)) + +static void free_item(void *args) +{ + struct inode_item *item = (struct inode_item *)args; + kfree(item->filename); + kfree(item); +} + +static void memcg_free_item(struct memcg_item *cgitem) +{ + radix_delete_all(&cgitem->inode_root, free_item); +} + + +static int get_page_inode(struct memcg_item *cgitem, struct page *page) +{ + struct address_space *mapping; + struct inode *inode; + struct inode_item *item; + unsigned long cached; + int deleted; + int ret = 0; + char *fileName; + + fileName = kzalloc(PATH_MAX, GFP_ATOMIC); + if (!fileName) + return 0; + get_page(page); + if (PageAnon(page)) { + cgitem->anon++; + goto _skip; + } + if (!_page_is_file_cache(page)) + cgitem->shmem++; + else + cgitem->file++; +#ifdef LINUX_310 + mapping = _page_mapping(page); +#else + mapping = page_mapping(page); +#endif + if (!mapping) + goto _skip; + inode = mapping->host; + if (!inode) + goto _skip; + + item = radix_lookup(&cgitem->inode_root, (unsigned long)inode); + if (item) { + item->nr_pages++; + goto _skip; + } + scan_inode_name(inode, fileName, PATH_MAX, &cached, &deleted); + if (!cached) + goto _skip; + + item = kzalloc(sizeof(*item), GFP_ATOMIC); + if (!item) + goto _skip; + item->i_ino = inode->i_ino; + item->nr_pages = 1; + item->filename = kmemdup(fileName, strlen(fileName) + 1, GFP_ATOMIC); + item->cached = cached; + item->deleted = deleted; + item->shmem = page_is_shmem(page); + item->node.key = (unsigned long)inode; + list_add_tail(&item->inode, &cgitem->head); + ret = radix_insert(&cgitem->inode_root, (unsigned long)inode, (void*)item); + if(ret) + pr_info("insert file:%s error\n", item->filename); + cgitem->num_file++; +_skip: + kfree(fileName); + put_page(page); + return 0; +} + + +static void get_lru_page(struct memcg_item *item, struct lruvec *vec) +{ + struct page *page, *tmp; + enum lru_list lru; + unsigned long flags; + +#ifdef LINUX_310 + struct zone *lruzone; + lruzone = vec->zone; + if (!lruzone) { + pr_err("lru zone error for memcg:%px cg:%s\n", item->memcg, item->cgname); + return; + } + if(lruzone) + spin_lock_irqsave(&lruzone->lru_lock, flags); +#else + struct pglist_data *pgdat; + pgdat = vec->pgdat; + if (!pgdat) { + pr_err("lru pgdata error for memcg:%px cg:%s\n", item->memcg, item->cgname); + return; + } + if(pgdat) + spin_lock_irqsave(&pgdat->lru_lock, flags); +#endif + for_each_lru(lru) { + struct list_head *list = &vec->lists[lru]; + list_for_each_entry_safe(page, tmp, list, lru) { + get_page_inode(item, page); + } + } +#ifdef LINUX_310 + if (lruzone) + spin_unlock_irqrestore(&lruzone->lru_lock, flags); +#else + if (pgdat) + spin_unlock_irqrestore(&pgdat->lru_lock, flags); +#endif +} + +static void 
get_memcg_page(struct memcg_item *item) +{ +#ifdef LINUX_310 + struct mem_cgroup_per_zone *mz; + struct zone *z; + _for_each_zone(z) { + if((unsigned)zone_to_nid(z) >= nr_node_ids) + continue; + mz = &item->memcg->info.nodeinfo[zone_to_nid(z)]->zoneinfo[zone_idx(z)]; + get_lru_page(item, &mz->lruvec); + } +#else + struct mem_cgroup_per_node *mz; + int nid; + for_each_node(nid) { + mz = mem_cgroup_nodeinfo(item->memcg, nid); + get_lru_page(item, &mz->lruvec); + } +#endif +} + +static void memcg_get_name(struct mem_cgroup *memcg, char *name, unsigned int len) +{ + char *end; + int pos; + struct cgroup *cg = memcg->css.cgroup; +#ifdef LINUX_310 + if (!cg) + return; + rcu_read_lock(); + cgroup_path(cg, name, PATH_MAX); + rcu_read_unlock(); + end = name+strlen("/sys/fs/cgroup/memory"); + memmove(end, name, strlen(name)+1); + prepend(&end, &len, "/sys/fs/cgroup/memory", strlen("/sys/fs/cgroup/memory")); + pr_err("cg:name: %s, len:%d\n",name,strlen(name)); +#else + struct kernfs_node *kn; + struct kernfs_node *pkn; + if (!cg|| !cg->kn) + return; + kn = cg->kn; + + kernfs_get(kn); + end = name + len - 1; + prepend(&end, &len, "\0", 1); + pkn = kn; + while (pkn) { + pos = prepend(&end, &len, pkn->name, strlen(pkn->name)); + if (pos) + break; + if ((pkn == pkn->parent) || !pkn->parent) + break; + pos = prepend(&end, &len, "/", 1); + if (pos) + break; + pkn = pkn->parent; + } + + prepend(&end, &len, "/sys/fs/cgroup/memory", strlen("/sys/fs/cgroup/memory")); + + kernfs_put(kn); + memmove(name, end, strlen(end) + 1); +#endif +} + +static struct memcg_item *memcg_init_item(struct mem_cgroup *cg) +{ + struct memcg_item *item = NULL; + char *fileName; + + fileName = kzalloc(PATH_MAX, GFP_ATOMIC); + if (!fileName) + return NULL; + item = kzalloc(sizeof(*item), GFP_ATOMIC); + if (!item) { + goto _out; + } + memcg_get_name(cg, fileName, PATH_MAX); + item->memcg = cg; +#ifdef LINUX_310 + item->size = cg->res.usage; + item->cgname = kmemdup(fileName, strlen(fileName) + 2, GFP_ATOMIC); + if(item->cgname) + item->cgname[strlen(fileName) + 1] = '\0'; +#else + item->size = page_counter_read(&cg->memory); + item->cgname = kmemdup_nul(fileName, strlen(fileName) + 1, GFP_ATOMIC); +#endif + INIT_LIST_HEAD(&item->head); + INIT_LIST_HEAD(&item->offline); + radix_init(&item->inode_root); +_out: + kfree(fileName); + return item; +} + +int memcg_dump_to_user(struct memcg_info *info, struct memcg_info_user __user *result) +{ + struct memcg_item *cgitem; + struct inode_item *item; + struct memcg_info_user res; + struct memcg_item_user *cgitem_u, *tmp, *user_head; + struct memcg_item_user tmp_memcg; + struct inode_item_user *tmp2, *free_tmp; + int count_m = 0, i = 0, j = 0, count_i; + int ret = 0; + + if(copy_from_user(&res, result, sizeof(res))) + { + pr_err("can not copy from user %d:%d\n",count_m,__LINE__); + ret = copy_to_user(result, &count_m, sizeof(count_m)); + return 0; + } + + if (!res.nr || !res.items) + { + pr_err("num %d ,items %p \n", res.nr, res.items); + ret = copy_to_user(result, &count_m, sizeof(count_m)); + return 0; + } + + i = res.nr > info->nr ? 
info->nr : res.nr; + if(i == 0) + { + ret = copy_to_user(result, &count_m, sizeof(count_m)); + return 0; + } + if((tmp = vzalloc(sizeof(struct memcg_item_user) * i)) == NULL) + { + pr_err("vmalloc error %d:%d\n",count_m,__LINE__); + ret = copy_to_user(result, &count_m, sizeof(count_m)); + return 0; + } + + cgitem_u = res.items; + user_head = tmp; + + list_for_each_entry(cgitem, &info->head, offline) { + if(count_m >= i) + break; + pr_err("cg:%s memory:%lu file:%lu anon:%lu shmem:%lu num_file:%d\n", cgitem->cgname, cgitem->size, cgitem->file, cgitem->anon, cgitem->shmem, cgitem->num_file); + strncpy(tmp->cgname, cgitem->cgname,strlen(cgitem->cgname)); + tmp->size = cgitem->size; + tmp->file = cgitem->file; + tmp->anon = cgitem->anon; + tmp->shmem = cgitem->shmem; + tmp->num_file = cgitem->num_file; + + j = INODE_LIMIT > tmp->num_file ? tmp->num_file : INODE_LIMIT; + if(j <= 0) + { + if(copy_from_user(&tmp_memcg, cgitem_u, sizeof(tmp_memcg))) + { + pr_err("can not copy from user %d:%d\n",count_m,__LINE__); + ret = copy_to_user(result, &count_m, sizeof(count_m)); + vfree(tmp->inode_items); + vfree(user_head); + return 0; + } + tmp->inode_items = tmp_memcg.inode_items; + copy_to_user(cgitem_u, tmp, sizeof(struct memcg_item_user)); + tmp++; + cgitem_u++; + count_m++; + continue; + } + if((tmp2 = vzalloc(sizeof(struct inode_item_user) * j)) == NULL) + { + pr_err("vmalloc error %d:%d\n",count_m,__LINE__); + ret = copy_to_user(result, &count_m, sizeof(count_m)); + vfree(user_head); + return 0; + } + free_tmp = tmp2; + count_i = 0; + + list_for_each_entry(item, &cgitem->head,inode) { + if(count_i >= j) + break; + pr_err("ino:%lu, filename:%s cached:%lu nr_pages:%d deleted:%d shmem:%d\n", item->i_ino, item->filename, item->cached, item->nr_pages, item->deleted, item->shmem); + tmp2->i_ino = item->i_ino; + strcpy(tmp2->filename, item->filename); + tmp2->cached = item->cached; + tmp2->nr_pages = item->nr_pages; + tmp2->deleted = item->deleted; + tmp2->shmem = item->shmem; + tmp2++; + count_i++; + } + if(copy_from_user(&tmp_memcg, cgitem_u, sizeof(tmp_memcg))) + { + pr_err("can not copy from user %d:%d\n",count_m,__LINE__); + ret = copy_to_user(result, &count_m, sizeof(count_m)); + vfree(tmp->inode_items); + vfree(user_head); + return 0; + } + copy_to_user(tmp_memcg.inode_items, free_tmp, sizeof(struct inode_item_user) * j); + tmp->inode_items = tmp_memcg.inode_items; + copy_to_user(cgitem_u, tmp, sizeof(struct memcg_item_user)); + vfree(free_tmp); + tmp++; + cgitem_u++; + count_m++; + } + i = copy_to_user(result, &count_m, sizeof(count_m)); + vfree(user_head); + return i; + +} + +void memcg_dump(struct memcg_info *info) +{ + struct memcg_item *cgitem; + struct inode_item *item; + int a = 0; + list_for_each_entry(cgitem, &info->head, offline) { + pr_err("cg:%s memory:%lu file:%lu anon:%lu shmem:%lu num_file:%d\n", cgitem->cgname, cgitem->size, cgitem->file, cgitem->anon, cgitem->shmem, cgitem->num_file); + list_for_each_entry(item, &cgitem->head,inode) { + pr_err("ino:%lu, filename:%s cached:%lu nr_pages:%d deleted:%d shmem:%d\n", item->i_ino, item->filename, item->cached, item->nr_pages, item->deleted, item->shmem); + a = 1; + } + } +} + +void memcg_free_all(struct memcg_info *info) +{ + struct memcg_item *cgitem; + struct memcg_item *tmp; + + list_for_each_entry_safe(cgitem, tmp, &info->head, offline) { + memcg_free_item(cgitem); + kfree(cgitem->cgname); + kfree(cgitem); + } +} + +static int memcg_init(void) +{ + + if (_mem_cgroup_iter && _mem_cgroup_iter_break) + return 0; + + _mem_cgroup_iter = 
kallsyms_lookup_name("mem_cgroup_iter"); + if (!_mem_cgroup_iter) { + pr_err("lookup mem cgroup iter error\n"); + return -1; + } + + _mem_cgroup_iter_break = kallsyms_lookup_name("mem_cgroup_iter_break"); + if (!_mem_cgroup_iter_break) { + pr_err("lookup iter break error\n"); + return -1; + } + _next_zone = kallsyms_lookup_name("next_zone"); + if (!_next_zone) { + pr_err("next_zone error\n"); + return -1; + } + + _first_online_pgdat = kallsyms_lookup_name("first_online_pgdat"); + if (!_first_online_pgdat) { + pr_err("first_online_pgdat error\n"); + return -1; + } + _page_mapping = kallsyms_lookup_name("page_mapping"); + if (!_page_mapping) { + pr_err("page_mapping error\n"); + return -1; + } + + return 0; +} + +int memcg_scan(struct memcg_info *info, struct mem_cgroup *start, int offline) +{ + struct mem_cgroup *iter = NULL; + struct memcg_item *item; + + if (memcg_init()) + return -1; + + for_each_mem_cgroup(iter, start) { +#ifdef LINUX_310 + if (offline && !(iter->css.flags&CSS_DYING)) +#else + if (offline && (iter->css.flags&CSS_ONLINE)) +#endif + continue; + item = memcg_init_item(iter); + if (!item) + continue; + get_memcg_page(item); + info->nr++; + list_add_tail(&item->offline, &info->head); + } + return 0; +} + +struct mem_cgroup *memcg_get_by_name(char *cgname) +{ + struct mem_cgroup *iter = NULL; + char *fileName; + + if (memcg_init()) + return 0; + fileName = kzalloc(PATH_MAX, GFP_ATOMIC); + if (!fileName) { + pr_err("alloc memory failed:%pF\n", __FUNCTION__); + return 0; + } + + for_each_mem_cgroup(iter, NULL) { + memcg_get_name(iter, fileName, PATH_MAX); + if (!strncmp(fileName, cgname, strlen(cgname))) { + pr_err("filename:%s, cgname:%s\n",fileName, cgname); + _mem_cgroup_iter_break(NULL, iter); + break; + } + } + + kfree(fileName); + return iter; +} +int memcg_dying_main(unsigned long arg) +{ + pr_err("try_to dump to user\n"); + struct memcg_info info; + struct mem_cgroup *memcg = NULL; + info.nr = 0; + char cgname_u[NAME_LEN]; + struct memcg_info_user *res = (struct memcg_info_user*)arg; + + memset(cgname_u, 0, sizeof(cgname_u)); + copy_from_user(cgname_u, res->cgname, NAME_LEN); + if(strlen(cgname_u) > 0) + { + if(cgname_u[strlen(cgname_u)-1] == '/') + cgname_u[strlen(cgname_u)-1] = '\0'; + pr_err("cgname:%s\n",cgname_u); + memcg = memcg_get_by_name(cgname_u); + } + + INIT_LIST_HEAD(&info.head); + memcg_scan(&info, memcg, 1); + memcg_dump_to_user(&info, (struct memcg_info_user*)arg); + memcg_free_all(&info); + + return 0; +} +int memcg_one_main(unsigned long arg) +{ + char *cgname = "/sys/fs/cgroup/memory/agent"; + struct mem_cgroup *memcg = NULL; + struct memcg_info info; + char cgname_u[NAME_LEN]; + struct memcg_info_user *res = (struct memcg_info_user*)arg; + info.nr = 0; + + memset(cgname_u, 0, sizeof(cgname_u)); + copy_from_user(cgname_u, res->cgname, NAME_LEN); + pr_err("cgname:%s\n",cgname_u); + if(strlen(cgname_u) > 0) + if(cgname_u[strlen(cgname_u)-1] == '/') + cgname_u[strlen(cgname_u)-1] = '\0'; + INIT_LIST_HEAD(&info.head); + memcg = memcg_get_by_name(cgname_u); + if(memcg != NULL) + memcg_scan(&info, memcg, 0); + memcg_dump_to_user(&info, (struct memcg_info_user*)arg); + memcg_free_all(&info); + return 0; +} diff --git a/source/lib/internal/kernel_module/modules/memhunter/memcg.h b/source/lib/internal/kernel_module/modules/memhunter/memcg.h new file mode 100644 index 00000000..88d85a9f --- /dev/null +++ b/source/lib/internal/kernel_module/modules/memhunter/memcg.h @@ -0,0 +1,54 @@ +#ifndef __MEMCG_FILE__ +#define __MEMCG_FILE__ +#include +#include "common.h" + 
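+/* cap on the number of inode entries copied back to user space per memcg
+ * (see its use in memcg_dump_to_user) */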
+#define INODE_LIMIT (50) +struct memcg_info { + int nr ;/* number of memcg for dying*/ + struct list_head head; /* memcg offline*/ +}; + +struct memcg_item { + struct radix_tree_root inode_root; + struct list_head head; + struct list_head offline; + unsigned long anon; + unsigned long shmem; + unsigned long file; + unsigned long size; + int num_file; + struct mem_cgroup *memcg; + char *cgname; +}; + +struct inode_item_user { + unsigned long i_ino; + int nr_pages; + int deleted:4; + int shmem:4; + unsigned long cached; + char filename[NAME_LEN]; +}; +struct memcg_item_user { + struct inode_item_user *inode_items; + unsigned long anon; + unsigned long shmem; + unsigned long file; + unsigned long size; + int num_file; + char cgname[NAME_LEN]; +}; + +struct memcg_info_user { + int nr ;/* number of memcg for dying*/ + struct memcg_item_user* items; + char cgname[NAME_LEN]; +}; +void memcg_dump(struct memcg_info *info); +void memcg_free_all(struct memcg_info *info); +int memcg_scan(struct memcg_info *info, struct mem_cgroup *start, int offline); +int memcg_dying_main(unsigned long arg); +int memcg_one_main(unsigned long arg); +struct mem_cgroup *memcg_get_by_name(char *cgname); +#endif diff --git a/source/lib/internal/kernel_module/modules/memhunter/memcg_dia.c b/source/lib/internal/kernel_module/modules/memhunter/memcg_dia.c new file mode 100644 index 00000000..24a5cc86 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/memhunter/memcg_dia.c @@ -0,0 +1,30 @@ +#include +#include "common.h" +#include "memcg.h" + +int memcg_dying_scan(void) +{ + int offline = 1; + struct memcg_info info; + + INIT_LIST_HEAD(&info.head); + memcg_scan(&info, NULL, offline); + memcg_dump(&info); + memcg_free_all(&info); + + return 0; +} + +int memcg_scan_one(void) +{ + char *cgname = "/sys/fs/cgroup/memory/agent"; + struct mem_cgroup *memcg; + struct memcg_info info; + + INIT_LIST_HEAD(&info.head); + memcg = memcg_get_by_name(cgname); + memcg_scan(&info, memcg, 0); + memcg_dump(&info); + memcg_free_all(&info); + return 0; +} diff --git a/source/lib/internal/kernel_module/modules/memhunter/memcontrol_7.h b/source/lib/internal/kernel_module/modules/memhunter/memcontrol_7.h new file mode 100644 index 00000000..1ac50bd6 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/memhunter/memcontrol_7.h @@ -0,0 +1,284 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct mem_cgroup_lru_info; +enum drt_count_t { + DRT_0_50, + DRT_50_100, + DRT_100_200, + DRT_200_500, + DRT_500_1k, + DRT_1k_5k, + DRT_5k_10k, + DRT_10k_100k, + DRT_100k_INF, + DRT_COUNT, +}; +struct eventfd_ctx { + struct kref kref; + wait_queue_head_t wqh; + /* + * Every time that a write(2) is performed on an eventfd, the + * value of the __u64 being written is added to "count" and a + * wakeup is performed on "wqh". A read(2) will return the "count" + * value to userspace, and will reset "count" to zero. The kernel + * side eventfd_signal() also, adds to the "count" counter and + * issue a wakeup. + */ + __u64 count; + unsigned int flags; +}; +struct mem_cgroup_threshold { + struct eventfd_ctx *eventfd; + u64 threshold; +}; + +/* For threshold */ +struct mem_cgroup_threshold_ary { + /* An array index points to threshold just below or equal to usage. 
*/ + int current_threshold; + /* Size of entries[] */ + unsigned int size; + /* Array of thresholds */ + struct mem_cgroup_threshold entries[0]; +}; + +struct mem_cgroup_thresholds { + /* Primary thresholds array */ + struct mem_cgroup_threshold_ary *primary; + /* + * * Spare threshold array. + * * This is needed to make mem_cgroup_unregister_event() "never fail". + * * It must be able to store at least primary->size - 1 entries. + * */ + struct mem_cgroup_threshold_ary *spare; +}; + +struct mem_cgroup_reclaim_iter { + /* + * last scanned hierarchy member. Valid only if last_dead_count + * matches memcg->dead_count of the hierarchy root group. + */ + struct mem_cgroup *last_visited; + unsigned long last_dead_count; + + /* scan generation, increased every round-trip */ + unsigned int generation; +}; +struct mem_cgroup_per_zone { + struct lruvec lruvec; + unsigned long lru_size[NR_LRU_LISTS]; + + struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1]; + + struct rb_node tree_node; /* RB tree node */ + unsigned long long usage_in_excess;/* Set to the value by which */ + /* the soft limit is exceeded*/ + bool on_tree; + bool writeback; /* memcg kswapd reclaim writeback */ + bool dirty; /* memcg kswapd reclaim dirty */ + bool congested; /* memcg has many dirty pages */ + /* backed by a congested BDI */ + struct mem_cgroup *memcg; /* Back pointer, we cannot */ + /* use container_of */ + + unsigned long pages_scanned; /* since last reclaim */ + bool all_unreclaimable; /* All pages pinned */ +}; + +struct mem_cgroup_per_node { + struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES]; +}; +struct mem_cgroup_lru_info { + struct mem_cgroup_per_node *nodeinfo[0]; +}; +struct mem_cgroup { + struct cgroup_subsys_state css; + /* + ┆* the counter to account for memory usage + ┆*/ + struct res_counter res; + +#ifdef CONFIG_MEM_DELAY + /* Memory delay measurement domain */ + struct memdelay_domain *memdelay_domain; +#endif + + /* vmpressure notifications */ + struct vmpressure vmpressure; + + union { + /* + ┆* the counter to account for mem+swap usage. + ┆*/ + struct res_counter memsw; + + /* + ┆* rcu_freeing is used only when freeing struct mem_cgroup, + ┆* so put it into a union to avoid wasting more memory. + ┆* It must be disjoint from the css field. It could be + ┆* in a union with the res field, but res plays a much + ┆* larger part in mem_cgroup life than memsw, and might + ┆* be of interest, even at time of free, when debugging. + ┆* So share rcu_head with the less interesting memsw. + ┆*/ + struct rcu_head rcu_freeing; + /* + ┆* We also need some space for a worker in deferred freeing. + ┆* By the time we call it, rcu_freeing is no longer in use. + ┆*/ + struct work_struct work_freeing; + }; + + /* + ┆* the counter to account for kernel memory usage. + ┆*/ + struct res_counter kmem; + /* + ┆* Should the accounting and control be hierarchical, per subtree? + ┆*/ + bool use_hierarchy; + unsigned long kmem_account_flags; /* See KMEM_ACCOUNTED_*, below */ + + int oom_kill; + bool oom_lock; + atomic_t under_oom; + atomic_t oom_wakeups; + + atomic_t refcnt; + + int swappiness; + + int priority; + + bool oom_kill_all; + bool use_priority_oom; + /* OOM-Killer disable */ + int oom_kill_disable; + + /* set when res.limit == memsw.limit */ + bool memsw_is_minimum; + + /* protect arrays of thresholds */ + struct mutex thresholds_lock; + + /* thresholds for memory usage. RCU-protected */ + struct mem_cgroup_thresholds thresholds; + + /* thresholds for mem+swap usage. 
RCU-protected */ + struct mem_cgroup_thresholds memsw_thresholds; + + /* For oom notifier event fd */ + struct list_head oom_notify; + +#ifdef CONFIG_CGROUP_WRITEBACK + struct list_head cgwb_list; + struct wb_domain cgwb_domain; +#endif + + /* + ┆* Should we move charges of a task when a task is moved into this + ┆* mem_cgroup ? And what type of charges should we move ? + ┆*/ + unsigned long move_charge_at_immigrate; + /* + ┆* set > 0 if pages under this cgroup are moving to other cgroup. + ┆*/ + atomic_t moving_account; + /* taken only while moving_account > 0 */ + spinlock_t move_lock; + struct task_struct *move_lock_task; + unsigned long move_lock_flags; + /* + ┆* percpu counter. + ┆*/ + struct mem_cgroup_stat_cpu __percpu *stat; + spinlock_t pcp_counter_lock; + +#ifdef CONFIG_CGROUP_WRITEBACK + int dirty_ratio; + int dirty_bg_ratio; +#endif + atomic_t wmark_ratio; + atomic64_t wmark_extra; + atomic_t force_empty_ctl; + + bool kswapd_stop; /* Protected by kswapds_spinlock */ + struct mutex kswapd_mutex; + wait_queue_head_t *kswapd_wait; + + atomic_t dead_count; +#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) + struct tcp_memcontrol tcp_mem; +#endif +#if defined(CONFIG_MEMCG_KMEM) + /* analogous to slab_common's slab_caches list. per-memcg */ + struct list_head memcg_slab_caches; + /* Not a spinlock, we can take a lot of time walking the list */ + struct mutex slab_caches_mutex; + /* Index in the kmem_cache->memcg_params->memcg_caches array */ + int kmemcg_id; +#endif + + int last_scanned_node; +#if MAX_NUMNODES > 1 + nodemask_t scan_nodes; + atomic_t numainfo_events; + atomic_t numainfo_updating; +#endif + u64 direct_reclaim_time_count[DRT_COUNT]; + spinlock_t direct_reclaim_time_count_lock; + + u64 direct_reclaim_sched_time_histogram + [DRSTH_COUNT][DRSTH_TYPE_COUNT]; + spinlock_t direct_reclaim_sched_time_histogram_lock; + +#ifdef CONFIG_KIDLED + struct rw_semaphore idle_stats_rwsem; + unsigned long idle_scans; + struct kidled_scan_period scan_period; + int idle_stable_idx; + struct idle_page_stats idle_stats[KIDLED_STATS_NR_TYPE]; +#endif + + /* + * Per cgroup active and inactive list, similar to the + * per zone LRU lists. + * + * WARNING: This has to be the last element of the struct. Don't + * add new fields after this point. 
+ */ + struct mem_cgroup_lru_info info; +}; diff --git a/source/lib/internal/kernel_module/modules/memhunter/memhunter.c b/source/lib/internal/kernel_module/modules/memhunter/memhunter.c new file mode 100644 index 00000000..04b057b7 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/memhunter/memhunter.c @@ -0,0 +1,58 @@ +#include +#include +#include +#include +#include +#include +#include +#include "common.h" +#include "memcg.h" + +#define CHR_NAME "memhunter" +static DEFINE_MUTEX(dev_mutex); +static int memhunter_dev_major = -1; +static struct class *memhunter_dev_class = NULL; +static struct device *memhunter_dev = NULL; + +int memhunter_handler_cmd(unsigned int cmd, unsigned long arg) +{ + int ret = -EINVAL; + int type, nr; + printk("debug -- 1\n"); + + if (!mutex_trylock(&dev_mutex)) + return -EBUSY; + + type = _IOC_TYPE(cmd); + nr = _IOC_NR(cmd); + printk("type: %d\n", nr); + switch (nr) { + case MEMHUNTER_CACHE_TYPE_FILE: + ret = filecache_main(arg); + break; + case MEMHUNTER_CACHE_TYPE_MEMCG_DYING: + printk("dying\n"); + ret = memcg_dying_main(arg); + break; + case MEMHUNTER_CACHE_TYPE_MEMCG_ONE: + ret = memcg_one_main(arg); + break; + default: + printk("defualt ioctl cmd =%d, nr = %d\n", type, nr); + break; + } + + mutex_unlock(&dev_mutex); + return ret; +} + +static __init int memhunter_init(void) +{ + return 0; +} + +static __exit void memhunter_exit(void) +{ + return ; +} + diff --git a/source/lib/internal/kernel_module/modules/memleak/hashlist.c b/source/lib/internal/kernel_module/modules/memleak/hashlist.c new file mode 100755 index 00000000..ff7b587e --- /dev/null +++ b/source/lib/internal/kernel_module/modules/memleak/hashlist.c @@ -0,0 +1,397 @@ +#include +#include "mem.h" +#include + +static DEFINE_PER_CPU(int, nest_count); + +static inline u32 ptr_hash(const void *ptr) +{ + return jhash((void *)&ptr, sizeof(ptr), 0); +} + +static inline struct bucket *__select_bucket(struct memleak_htab *htab, u32 hash) +{ + return &htab->buckets[hash & (htab->n_buckets - 1)]; +} + +static inline struct list_head *select_bucket(struct memleak_htab *htab, u32 hash) +{ + return &__select_bucket(htab, hash)->head; +} + +static inline int _get_cpu(void) +{ + int cpu = 0; + + preempt_disable(); + cpu = smp_processor_id(); + preempt_enable(); + + return cpu; +} + + void *internal_alloc(size_t size, gfp_t flags) +{ + void *ret; + int cpu = 0; + cpu = _get_cpu(); + per_cpu(nest_count, cpu) += 1; + ret = kmalloc(size, flags); + per_cpu(nest_count, cpu) -= 1; + + return ret; +} + + void internal_kfree(void *addr) +{ + int cpu = _get_cpu(); + + per_cpu(nest_count, cpu) += 1; + kfree(addr); + per_cpu(nest_count, cpu) -= 1; +} + +int memleak_entry_reentrant(void) +{ + int cpu = _get_cpu(); + per_cpu(nest_count, cpu) += 1; + return per_cpu(nest_count, cpu) > 1; +} + +void memleak_exit_reentrant(void) +{ + int cpu = _get_cpu(); + + per_cpu(nest_count, cpu) -= 1; +} + +int memleak_hashlist_init(struct memleak_htab *htab) +{ + int i = 0; + int size; + struct alloc_desc *desc; + + htab->buckets = internal_alloc(htab->n_buckets * sizeof(struct bucket), GFP_KERNEL); + if (!htab->buckets) { + return -ENOMEM; + } + + memset(htab->buckets, 0, htab->n_buckets * sizeof(struct bucket)); + + INIT_LIST_HEAD(&htab->freelist); + + for (i = 0; i < htab->n_buckets; i++) { + INIT_LIST_HEAD(&htab->buckets[i].head); + spin_lock_init(&htab->buckets[i].lock); + + } + + htab->free = 0; + + size = sizeof(struct alloc_desc) + sizeof(u64) * htab->stack_deep; + /*prealloc one by one */ + for (i = 0; i < htab->total; 
i++) { + desc = internal_alloc(size, GFP_KERNEL | __GFP_ZERO); + if (desc) { + desc->num = htab->stack_deep; + list_add(&desc->node, &htab->freelist); + htab->free++; + } + } + + return 0; +} + +struct alloc_desc * memleak_alloc_desc(struct memleak_htab *htab) +{ + struct alloc_desc *desc; + unsigned long flags; + int size = sizeof(struct alloc_desc) + sizeof(u64) * htab->stack_deep; + + if (!htab->set.ext) + htab->stack_deep = 0; + + if (!htab->free) { + desc = internal_alloc(size, GFP_ATOMIC | __GFP_ZERO); + if (desc) + desc->num = htab->stack_deep; + return desc; + } + spin_lock_irqsave(&htab->lock, flags); + + desc = list_first_entry_or_null(&htab->freelist, struct alloc_desc, node); + if (desc) { + htab->free--; + desc->num = htab->stack_deep; + list_del_init(&desc->node); + } + + spin_unlock_irqrestore(&htab->lock, flags); + + return desc; +} + +int memleak_free_desc(struct memleak_htab *htab, struct alloc_desc *desc) +{ + unsigned long flags; + + if (!desc) + return 0; + + if (htab->free >= htab->total) { + + internal_kfree(desc); + return 0; + } + + spin_lock_irqsave(&htab->lock, flags); + + memset(desc, 0, sizeof(*desc)); + list_add(&desc->node, &htab->freelist); + htab->free++; + + spin_unlock_irqrestore(&htab->lock, flags); + + return 0; +} + +int memleak_insert_desc(struct memleak_htab *htab, struct alloc_desc *desc) +{ + unsigned long flags; + struct bucket *bucket; + + if (!desc || !desc->ptr) + return 0; + + desc->hash = ptr_hash(desc->ptr); + + bucket = __select_bucket(htab, desc->hash); + + spin_lock_irqsave(&bucket->lock, flags); + + list_add(&desc->node, &bucket->head); + bucket->nr++; + atomic_add(1, &htab->count); + spin_unlock_irqrestore(&bucket->lock,flags); + + return 0; +} + +struct alloc_desc * memleak_del_desc(struct memleak_htab *htab, const void *ptr) +{ + unsigned long flags; + struct bucket *bucket; + struct alloc_desc *tmp1, *tmp2; + struct alloc_desc *desc = NULL; + u32 hash; + + if (!ptr) + return NULL; + + hash = ptr_hash(ptr); + bucket = __select_bucket(htab, hash); + + spin_lock_irqsave(&bucket->lock, flags); + + list_for_each_entry_safe(tmp1, tmp2, &bucket->head, node) { + if (tmp1->ptr == ptr && (tmp1->hash == hash)) { + list_del_init(&tmp1->node); + desc = tmp1; + bucket->nr--; + atomic_sub(1, &htab->count); + break; + } + } + + spin_unlock_irqrestore(&bucket->lock, flags); + + + return desc; +} + +int memleak_hashlist_uninit(struct memleak_htab *htab) +{ + struct bucket *bucket; + struct alloc_desc *tmp1, *tmp2; + int i; + + htab->free = 0; + + for (i = 0; i < htab->n_buckets; i++) { + bucket = &htab->buckets[i]; + + list_for_each_entry_safe(tmp1, tmp2, &bucket->head, node) { + list_del_init(&tmp1->node); + internal_kfree(tmp1); + htab->free++; + } + } + + list_for_each_entry_safe(tmp1, tmp2, &htab->freelist, node) { + list_del_init(&tmp1->node); + internal_kfree(tmp1); + htab->free++; + } + + if (htab->free != htab->total) + pr_info("memleak free %u ,total %u\n", htab->free, htab->total); + + if (htab->buckets) + internal_kfree(htab->buckets); + + htab->buckets = NULL; + + return 0; +} + +static void memleak_dump_object(struct memleak_htab *htab, struct max_object *object) +{ + struct kmem_cache *cache = htab->check.cache; + + if (!cache || !object) + return ; + + strncpy(object->slabname, cache->name, NAME_LEN); + object->object_size = cache->size; + object->object_num = htab->check.object_num; + + if (!htab->info.object) + return ; + + object->similar_object = htab->info.object->valid_object; + object->ptr = htab->info.object->ptr; +} + + +int 
memleak_dump_leak(struct memleak_htab *htab, struct user_result __user *result) +{ + struct bucket *bucket; + struct alloc_desc *tmp1, *tmp2; + struct user_alloc_desc *desc; + struct user_result res; + struct max_object object; + void __user *tmp; + + int i = 0; + int j = 0; + int num = 0; + int count = atomic_read(&htab->count); + int ret = 0; + unsigned long long curr_ts = sched_clock(); + + if ((count <= 0) || copy_from_user(&res, result, sizeof(res))) { + pr_err("count zero %d:%d\n",count,__LINE__); + ret = copy_to_user(result, &i, sizeof(i)); + return 0; + } + + if (!res.num || !res.desc) { + pr_err("num %d ,desc %p \n", res.num, res.desc); + ret = copy_to_user(result, &i, sizeof(i)); + return 0; + } + + pr_info("total memleak number %d user %d ts=%llu\n", count, res.num, sched_clock()); + + res.num = (res.num > count) ? count : res.num; + num = res.num; + + desc = vmalloc(sizeof(*desc) * num); + if (!desc) { + pr_err("vmalloc error %d:%d\n",count,__LINE__); + ret = copy_to_user(result, &i, sizeof(i)); + return 0; + } + + tmp = res.desc; + res.desc = desc; + j = 0; + + /*copy object info */ + if (res.objects) { + memset(&object, 0, sizeof(object)); + memleak_dump_object(htab, &object); + ret = copy_to_user(res.objects, &object, sizeof(object)); + } + + for (i = 0; i < htab->n_buckets; i++) { + int z = 0; + bucket = &htab->buckets[i]; + if (bucket->nr <= 0) { + continue; + } + + list_for_each_entry_safe(tmp1, tmp2, &bucket->head, node) { + list_del_init(&tmp1->node); + if ((htab->set.type == MEMLEAK_TYPE_PAGE) && PageSlab((struct page*)tmp1->ptr)) { + goto _skip; + } + + desc->ts = (curr_ts - tmp1->ts)>>30; + desc->ptr = tmp1->ptr; + desc->pid = tmp1->pid; + desc->mark = memleak_mark_leak(htab, tmp1); + desc->order = tmp1->order; + desc->call_site = tmp1->call_site; + strcpy(desc->comm,tmp1->comm); + snprintf(desc->function, NAME_LEN, "%pS", (void *)tmp1->call_site); + desc->num = tmp1->num; + for (z = 0; z < desc->num; z++) { + snprintf(desc->backtrace[z], 128, "%pS", tmp1->backtrace[z]); + } + desc++; + j++; +_skip: + memleak_free_desc(htab, tmp1); + atomic_sub(1, &htab->count); + bucket->nr--; + if (!--num) + goto _out; + } + } + +_out: + + i = copy_to_user(result, &j, sizeof(j)); + i = copy_to_user(tmp, res.desc, sizeof(*desc) * j); + + vfree(res.desc); + pr_info("get num %d htab %d, %d\n", j, atomic_read(&htab->count), num); + return i; +} + +int memleak_clear_leak(struct memleak_htab *htab) +{ + struct bucket *bucket; + struct alloc_desc *tmp1, *tmp2; + int i; + + + if (!atomic_read(&htab->count)) { + return 0; + } + + pr_info(" clear leak %d \n", atomic_read(&htab->count)); + + + for (i = 0; i < htab->n_buckets; i++) { + + bucket = &htab->buckets[i]; + cond_resched(); + + if (bucket->nr) { + + list_for_each_entry_safe(tmp1, tmp2, &bucket->head, node) { + + list_del_init(&tmp1->node); + memleak_free_desc(htab, tmp1); + } + } + + bucket->nr = 0; + } + + atomic_set(&htab->count, 0); + + return 0; +} diff --git a/source/lib/internal/kernel_module/modules/memleak/mem.h b/source/lib/internal/kernel_module/modules/memleak/mem.h new file mode 100755 index 00000000..9ee4ccee --- /dev/null +++ b/source/lib/internal/kernel_module/modules/memleak/mem.h @@ -0,0 +1,188 @@ +#ifndef __MEMLEAK__ +#define __MEMLEAK__ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) +#include +#else +#include +#endif +#include +#include + +#include "common.h" +#include "memleak.h" 
+#include "user.h" + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) +/* Reuses the bits in struct page */ +struct slab { + unsigned long __page_flags; + +#if defined(CONFIG_SLAB) + + union { + struct list_head slab_list; + struct rcu_head rcu_head; + }; + struct kmem_cache *slab_cache; + void *freelist; /* array of free object indexes */ + void *s_mem; /* first object */ + unsigned int active; + +#elif defined(CONFIG_SLUB) + + union { + struct list_head slab_list; + struct rcu_head rcu_head; +#ifdef CONFIG_SLUB_CPU_PARTIAL + struct { + struct slab *next; + int slabs; /* Nr of slabs left */ + }; +#endif + }; +struct kmem_cache *slab_cache; + /* Double-word boundary */ + void *freelist; /* first free object */ + union { + unsigned long counters; + struct { + unsigned inuse:16; + unsigned objects:15; + unsigned frozen:1; + }; + }; + unsigned int __unused; + +#elif defined(CONFIG_SLOB) + + struct list_head slab_list; + void *__unused_1; + void *freelist; /* first free block */ + long units; + unsigned int __unused_2; + +#else +#error "Unexpected slab allocator configured" +#endif + + atomic_t __page_refcount; +#ifdef CONFIG_MEMCG + unsigned long memcg_data; +#endif +}; + +#define slab_folio(s) (_Generic((s), \ + const struct slab *: (const struct folio *)s, \ + struct slab *: (struct folio *)s)) + + +#define folio_slab(folio) (_Generic((folio), \ + const struct folio *: (const struct slab *)(folio), \ + struct folio *: (struct slab *)(folio))) + + +static inline void *slab_address(const struct slab *slab) +{ + return folio_address(slab_folio(slab)); +} +#endif +#include +#include +#include +#include +struct bucket { + struct list_head head; + u32 nr; + spinlock_t lock; +}; + +struct slab_info { + struct mutex *slab_mutex; + struct list_head *slab_caches; + struct kmem_cache *cache; + unsigned long object_num; +}; + +struct object { + struct list_head node; + void *ptr; + int valid_byte; + int valid_object; + void *page; +}; + +struct object_info { + struct list_head head; + struct object *object; + int object_size; + int size; + int num; +}; + + +struct memleak_htab { + struct bucket *buckets; + struct list_head freelist; + spinlock_t lock; + u32 n_buckets; + u32 free; + u32 total; + u32 stack_deep; + atomic_t count; + int state; + int rate; + struct slab_info check; + struct object_info info; + struct delayed_work work; + struct memleak_settings set; +}; + +struct alloc_desc { + struct list_head node; + unsigned long ts; + const void *ptr; + unsigned long long call_site; + int pid; + int order; + char comm[TASK_COMM_LEN]; + u32 hash; + u32 num; + u64 backtrace[]; +}; + +int memleak_hashlist_init(struct memleak_htab *htab); +struct alloc_desc * memleak_alloc_desc(struct memleak_htab *htab); +int memleak_free_desc(struct memleak_htab *htab, struct alloc_desc *desc); +int memleak_insert_desc(struct memleak_htab *htab, struct alloc_desc *desc); +struct alloc_desc * memleak_del_desc(struct memleak_htab *htab, const void *ptr); +int memleak_hashlist_uninit(struct memleak_htab *htab); +int memleak_entry_reentrant(void); +void memleak_exit_reentrant(void); +int memleak_dump_leak(struct memleak_htab *htab, struct user_result *result); + +void * internal_alloc(size_t size, gfp_t flags); +void internal_kfree(void *addr); + + +int memleak_clear_leak(struct memleak_htab *htab); +int memleak_trace_off(struct memleak_htab *htab); +int memleak_trace_on(struct memleak_htab *htab); + +int memleak_handler_cmd(int cmd, unsigned long arg); +int memleak_mark_leak(struct memleak_htab *htab, struct alloc_desc 
*desc); +int memleak_free_object(struct memleak_htab *htab); +int memleak_max_object(struct memleak_htab *htab); +#endif diff --git a/source/lib/internal/kernel_module/modules/memleak/memleak.c b/source/lib/internal/kernel_module/modules/memleak/memleak.c new file mode 100755 index 00000000..660da77d --- /dev/null +++ b/source/lib/internal/kernel_module/modules/memleak/memleak.c @@ -0,0 +1,717 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mem.h" +#include +#include +#include "sysak_mods.h" +#include "hook.h" + +#define HASH_SIZE (1024) +#define PRE_ALLOC (2048) + +static int inited = 0; + +static struct memleak_htab *tab; +static int memleak_ref; +static ssize_t (*show_slab_objects)(struct kmem_cache *s, char *buf); +int __memleak_init(void); +int __memleak_uninit(void); + + +static unsigned long (*__kallsyms_lookup_name)(const char *name); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,17,0) +void * virt_to_slab_cache(const void *x) +{ + struct page *page; + + if (unlikely(ZERO_OR_NULL_PTR(x))) + return NULL; + + page = virt_to_head_page(x); + if (!page || unlikely(!PageSlab(page))) { + return NULL; + } + return page->slab_cache; +} +#else +void * virt_to_slab_cache(const void *x) +{ + struct folio *folio; + struct slab *slab; + if (unlikely(ZERO_OR_NULL_PTR(x))) + return NULL; + folio = virt_to_folio(x); + if (unlikely(!folio_test_slab(folio))) { + return NULL; + } + slab = folio_slab(folio); + return slab->slab_cache; +} +#endif + +static int memleak_is_target(struct memleak_htab *htab, const void *x) +{ + void *cache; + + + if (!htab->check.cache) + return 1; + + if (unlikely(ZERO_OR_NULL_PTR(x))) + return 0; + + cache = virt_to_slab_cache(x); + return (cache == (void *)htab->check.cache); +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0) +static unsigned long get_stack_rip(unsigned long *arr, int max_entries) +{ + struct stack_trace stack_trace; + unsigned long trace[16] = {0}; + + stack_trace.nr_entries = 0; + stack_trace.skip = 3; + if (arr && max_entries) { + stack_trace.max_entries = max_entries; + stack_trace.entries = arr; + } else { + stack_trace.max_entries = 16; + stack_trace.entries = trace; + } + + save_stack_trace(&stack_trace); + + return stack_trace.nr_entries; +} +#else +static unsigned long get_stack_rip(unsigned long *arr, int max_entries) +{ + return stack_trace_save(arr,max_entries, 3); +} +#endif + +static void memleak_alloc_desc_push(struct memleak_htab *htab, unsigned long call_site, const void *ptr, int order) +{ + unsigned long flags; + struct alloc_desc *desc; + + if (!ptr || !memleak_is_target(htab, ptr)) + return; + + local_irq_save(flags); + if (memleak_entry_reentrant()) + goto _out; + + desc = memleak_alloc_desc(htab); + if (!desc) + goto _out; + + desc->call_site = call_site; + desc->ptr = ptr; + desc->order = order; + desc->ts = sched_clock(); + desc->pid = current->pid; + strcpy(desc->comm, current->comm); + + if (desc->num) { + desc->num = get_stack_rip((unsigned long *)desc->backtrace, desc->num); + } + if (!call_site && desc->num) + desc->call_site = desc->backtrace[2]; + + memleak_insert_desc(htab, desc); + +_out: + memleak_exit_reentrant(); + local_irq_restore(flags); +} + +static void memleak_alloc_desc_pop(struct memleak_htab *htab,unsigned long call_site, const void *ptr,int order) +{ + unsigned long flags; + struct alloc_desc *desc; + + if (!ptr || !memleak_is_target(htab, ptr)) + return; + + local_irq_save(flags); + + if (memleak_entry_reentrant()) + goto _out; + + desc = 
memleak_del_desc(htab, ptr); + memleak_free_desc(htab, desc); + +_out: + memleak_exit_reentrant(); + local_irq_restore(flags); +} + +#if KERNEL_VERSION(3, 10, 0) <= LINUX_VERSION_CODE +static void trace_slab_alloc(void *__data, unsigned long call_site, const void *ptr, + size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags) +#else +static void trace_slab_alloc(unsigned long call_site, const void *ptr, + size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags) +#endif +{ + memleak_alloc_desc_push(tab, call_site, ptr, 0); +} + +#if KERNEL_VERSION(3, 10, 0) <= LINUX_VERSION_CODE +static void trace_slab_free(void *ignore, unsigned long call_site, const void *ptr) +#else +static void trace_slab_free(unsigned long call_site, const void *ptr) +#endif +{ + + memleak_alloc_desc_pop(tab, call_site, ptr, 0); +} + +#ifdef CONFIG_NUMA +#if KERNEL_VERSION(3, 10, 0) <= LINUX_VERSION_CODE +static void trace_slab_alloc_node(void *__data, unsigned long call_site, const void *ptr, + size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node) +#else +static void trace_slab_alloc_node(unsigned long call_site, const void *ptr, + size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags, int node) +#endif +{ + memleak_alloc_desc_push(tab, call_site, ptr, 0); +} + +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0) +static void trace_page_alloc(void *ignore, struct page *page, + unsigned int order, gfp_t gfp_flags, int migratetype) +#else +static void trace_page_alloc(struct page *page, + unsigned int order, gfp_t gfp_flags, int migratetype) +#endif +{ + + if ((migratetype == 1) || (migratetype == 2)) { + return; + } + + memleak_alloc_desc_push(tab, 0, page, order); + +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0) +static void trace_page_free(void *ignore, struct page *page, + unsigned int order) +#else +static void trace_page_free(struct page *page, + unsigned int order) +#endif +{ + if (((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0) + return; + + memleak_alloc_desc_pop(tab, 0, page, order); +} + + + +static int slab_tracepoint_init(void) +{ + int ret = 0; + + ret = hook_tracepoint("kmem_cache_alloc", trace_slab_alloc, NULL); + if (ret) { + pr_err("memleak register kmem cache alloc tracepoint error %d\n", ret); + } + + ret = hook_tracepoint("kmem_cache_free", trace_slab_free, NULL); + if (ret) { + pr_err("memleak register kmem cache free tracepoint error %d\n", ret); + } + + ret = hook_tracepoint("kmalloc", trace_slab_alloc, NULL); + if (ret) { + pr_err("memleak register kmalloc tracepoint error %d\n", ret); + } + + ret = hook_tracepoint("kfree", trace_slab_free, NULL); + if (ret) { + pr_err("memleak register kfree tracepoint error %d\n", ret); + } + +#ifdef CONFIG_NUMA + ret = hook_tracepoint("kmalloc_node", trace_slab_alloc_node, NULL); + if (ret) { + pr_err("memleak register kmalloc node tracepoint error %d\n", ret); + } +#ifdef CONFIG_NUMA + ret = hook_tracepoint("kmem_cache_alloc_node", trace_slab_alloc_node, NULL); + if (ret) { + pr_err("memleak register kmem_cache_alloc node tracepoint error %d\n", ret); + } +#endif +#endif + return 0; +} + +static void slab_tracepoint_alloc_uninit(void) +{ + unhook_tracepoint("kmem_cache_alloc", trace_slab_alloc, NULL); + unhook_tracepoint("kmalloc", trace_slab_alloc, NULL); + +#ifdef CONFIG_NUMA + unhook_tracepoint("kmalloc_node", trace_slab_alloc_node, NULL); +#ifdef CONFIG_TRACING + unhook_tracepoint("kmem_cache_alloc_node", trace_slab_alloc_node, NULL); +#endif +#endif +} + +static void slab_tracepoint_free_uninit(void) +{ + 
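+	/* detach only the free-side slab tracepoints; the alloc-side hooks
+	 * are removed separately by slab_tracepoint_alloc_uninit() */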
unhook_tracepoint("kfree", trace_slab_free, NULL); + unhook_tracepoint("kmem_cache_free", trace_slab_free, NULL); +} + +static void page_tracepoint_init(void) +{ + int ret = 0; + + ret = hook_tracepoint("mm_page_free", trace_page_free, NULL); + if(ret) + pr_err("register mm page free error\n"); + + + ret = hook_tracepoint("mm_page_alloc", trace_page_alloc, NULL); + if(ret) + pr_err("register mm page alloc error\n"); +} + +static void page_tracepoint_alloc_uninit(void) +{ + + unhook_tracepoint("mm_page_alloc", trace_page_alloc, NULL); +} + +static void page_tracepoint_free_uninit(void) +{ + + unhook_tracepoint("mm_page_free", trace_page_free, NULL); +} + +static void memleak_tracepoint_init(struct memleak_htab *htab) +{ + if (htab->set.type == MEMLEAK_TYPE_SLAB) { + slab_tracepoint_init(); + }else if (htab->set.type == MEMLEAK_TYPE_PAGE) { + page_tracepoint_init(); + } else + pr_err("trace type error %d\n", htab->set.type); +} + +static void memleak_tracepoint_alloc_uninit(struct memleak_htab *htab) +{ + if (htab->set.type == MEMLEAK_TYPE_SLAB) { + slab_tracepoint_alloc_uninit(); + } else if (htab->set.type == MEMLEAK_TYPE_PAGE) { + page_tracepoint_alloc_uninit(); + } else + pr_err("trace alloc uninit type %d\n", htab->set.type); +} + +static void memleak_tracepoint_free_uninit(struct memleak_htab *htab) +{ + if (htab->set.type == MEMLEAK_TYPE_SLAB) { + slab_tracepoint_free_uninit(); + } else if (htab->set.type == MEMLEAK_TYPE_PAGE) { + page_tracepoint_free_uninit(); + } else + pr_err("trace free uninit type %d\n", htab->set.type); + +} + +static unsigned long str2num(char *buf) +{ + unsigned long objects = 0; + int ret; + char * tmp = buf; + + while (*buf && *++buf != ' '); + + if (!*buf) + return 0; + + *buf = 0; + ret = kstrtoul(tmp, 10, &objects); + return objects; +} + +static int memleak_get_maxslab(struct memleak_htab *htab) +{ + unsigned long size = 0; + unsigned long max = 0; + struct kmem_cache *tmp; + char *object_buffer; + void **show_slab = (void **)&show_slab_objects; + +#ifndef CONFIG_SLUB_DEBUG + return 0; +#endif + + *show_slab = (void *)__kallsyms_lookup_name("objects_show"); + if (!*show_slab) { + pr_err("Get show_slab objects error\n"); + return 0; + } + pr_err("get slab size is :%px\n",show_slab_objects); + object_buffer = (char *)__get_free_page(GFP_KERNEL); + if (!object_buffer) + return 0; + mutex_lock(htab->check.slab_mutex); + + list_for_each_entry(tmp, htab->check.slab_caches, list) { + if (tmp->flags & SLAB_RECLAIM_ACCOUNT) + continue; + + size = show_slab_objects(tmp, object_buffer); + if (size < 0) + continue; + + size = str2num(object_buffer); + if (size <= 0) + continue; + + if (size > max) { + max = size; + htab->check.cache = tmp; + htab->check.object_num = max; + } + } + + if (htab->check.cache) + pr_info("max cache %s size = %lu \n", htab->check.cache->name, max); + + mutex_unlock(htab->check.slab_mutex); + free_page(object_buffer); + return 0; +} + + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0) +#include +static struct kprobe kprobe_kallsyms_lookup_name = { + .symbol_name = "kallsyms_lookup_name" +}; +int init_symbol(void) +{ + register_kprobe(&kprobe_kallsyms_lookup_name); + __kallsyms_lookup_name = (void *)kprobe_kallsyms_lookup_name.addr; + unregister_kprobe(&kprobe_kallsyms_lookup_name); + + pr_err("kallsyms_lookup_name is %px\n", __kallsyms_lookup_name); + + if (!__kallsyms_lookup_name) { + return -EINVAL; + } + + return 0; +} +#else +int init_symbol(void) +{ + __kallsyms_lookup_name = kallsyms_lookup_name; + return 0; +} +#endif +static int 
memleak_slab_init(struct memleak_htab *htab) +{ + struct mutex *slab_mutex; + struct kmem_cache *s; + struct list_head *slab_caches; + + slab_mutex = (struct mutex *)__kallsyms_lookup_name("slab_mutex"); + slab_caches = (struct list_head *)__kallsyms_lookup_name("slab_caches"); + + if (!slab_mutex || !slab_caches) { + pr_err("memleak:can't get slab mutex/caches %p:%p\n", slab_mutex, slab_caches); + return -EIO; + } + + htab->check.slab_mutex = slab_mutex; + htab->check.slab_caches = slab_caches; + htab->check.object_num = 0; + + if (!htab->set.name[0]) { + memleak_get_maxslab(htab); + goto _out; + } + + if (!strcmp(htab->set.name, "all")) + return 0; + + mutex_lock(slab_mutex); + + list_for_each_entry(s, slab_caches, list) { + if (!strcmp(s->name, htab->set.name)) { + htab->check.cache = s; + pr_info("get slab %s,%p\n",s->name, htab->check.cache); + break; + } + } + + mutex_unlock(slab_mutex); + +_out: + return !htab->check.cache; +} + + +static int memleak_mem_init(struct memleak_htab *htab) +{ + + htab->n_buckets = HASH_SIZE; + htab->total = PRE_ALLOC; + htab->stack_deep = 16; + + return memleak_hashlist_init(tab); +} + +static void memleak_mem_uninit(struct memleak_htab *htab) +{ + memleak_hashlist_uninit(htab); +} + +static void memleak_delay_work(struct work_struct *work) +{ + struct memleak_htab *htab; + int delay = 0; + + htab = (struct memleak_htab *)container_of(work, struct memleak_htab, work.work); + + if (htab->state == MEMLEAK_STATE_INIT) { + pr_err("memleak delay work state on\n"); + memleak_tracepoint_alloc_uninit(htab); + + htab->state = MEMLEAK_STATE_ON; + delay = (htab->set.monitor_time * htab->set.rate)/100; + schedule_delayed_work(&htab->work, HZ * delay); + + } else if (htab->state == MEMLEAK_STATE_ON) { + + pr_err("memleak delay work state off\n"); + + memleak_tracepoint_free_uninit(htab); + + htab->state = MEMLEAK_STATE_OFF; + } +} + +static int memleak_trace_slab(struct memleak_htab *htab) +{ + int ret; + + htab->check.cache = NULL; + htab->check.object_num = 0; + atomic_set(&htab->count, 0); + + init_symbol(); + ret = memleak_slab_init(htab); + + memleak_max_object(htab); + + return ret; +} + +static int memleak_trace_slab_uninit(struct memleak_htab *htab) +{ + if (htab->set.type != MEMLEAK_TYPE_SLAB) + return 0; + + memleak_free_object(htab); + + htab->check.cache = NULL; + htab->check.object_num = 0; + + return 0; +} + +int memleak_trace_off(struct memleak_htab *htab) +{ + cancel_delayed_work_sync(&htab->work); + + if (htab->state == MEMLEAK_STATE_INIT) { + + memleak_tracepoint_alloc_uninit(htab); + memleak_tracepoint_free_uninit(htab); + + } else if (htab->state == MEMLEAK_STATE_ON) { + memleak_tracepoint_free_uninit(htab); + } + + htab->state = MEMLEAK_STATE_OFF; + + memleak_trace_slab_uninit(htab); + + return 0; +} + + int memleak_trace_on(struct memleak_htab *htab) +{ + int ret = 0; + int delay = 0; + + if (!htab) + return ret; + + if (!htab->set.monitor_time) + htab->set.monitor_time = MONITOR_TIME; + + if (!htab->set.rate) + htab->set.rate = MONITOR_RATE; + + if (!htab->set.type) + htab->set.type = MEMLEAK_TYPE_SLAB; + + switch (htab->set.type) { + + case MEMLEAK_TYPE_VMALLOC: + pr_info("trace vmalloc\n"); + htab->check.cache = NULL; + break; + case MEMLEAK_TYPE_PAGE: + htab->check.cache = NULL; + pr_info("trace alloc page\n"); + break; + default: + ret = memleak_trace_slab(htab); + } + + htab->state = MEMLEAK_STATE_INIT; + atomic_set(&htab->count, 0); + + memleak_tracepoint_init(htab); + + atomic_set(&htab->count, 0); + delay = htab->set.monitor_time; + 
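/* keep both alloc and free hooks for (100 - rate)% of the monitor window; memleak_delay_work() later drops the alloc hooks and leaves only the free hooks for the remaining rate% */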
delay = delay - (delay * htab->set.rate)/100; + + pr_info("delay = %d\n",delay); + schedule_delayed_work(&htab->work, HZ * delay); + + return ret; +} + +int memleak_release(void) +{ + printk("memleak release\n"); + memleak_trace_off(tab); + memleak_clear_leak(tab); + + return 0; +} + +int memleak_handler_cmd(int cmd, unsigned long arg) +{ + int ret = -EINVAL; + struct memleak_settings set; + struct memleak_htab * htab=NULL; + + if (!inited && (cmd != MEMLEAK_CMD_DISABLE)) { + inited = 1; + __memleak_init(); + } + + if (!tab) + return -EBUSY; + + htab = tab; + + if (htab->state != MEMLEAK_STATE_OFF && + (cmd == MEMLEAK_CMD_RESULT || cmd == MEMLEAK_CMD_ENALBE)) { + pr_info("htab busy wait\n"); + return -EAGAIN; + } + + switch (cmd) { + + case MEMLEAK_CMD_ENALBE: + ret = copy_from_user(&set, (void *)arg, sizeof(set)); + if (ret) + return ret; + pr_info("type = %d time = %d,slabname %s ext %d,rate=%d\n",set.type, set.monitor_time, set.name, set.ext,set.rate); + htab->set = set; + ret = memleak_trace_on(htab); + if (!ret) + sysak_module_get(&memleak_ref); + break; + + case MEMLEAK_CMD_RESULT: + pr_info("get result\n"); + ret = memleak_dump_leak(htab, (struct user_result __user*)arg); + break; + + case MEMLEAK_CMD_DISABLE: + __memleak_uninit(); + inited = 0; + sysak_module_put(&memleak_ref); + }; + + return ret; +} + + int memleak_init(void) +{ + return 0; +} + int __memleak_init(void) +{ + int ret = 0; + + tab = kzalloc(sizeof(struct memleak_htab), GFP_KERNEL); + if (!tab) { + pr_err("alloc memleak hash table failed\n"); + return -ENOMEM; + } + + spin_lock_init(&tab->lock); + INIT_DELAYED_WORK(&tab->work, memleak_delay_work); + tab->state = MEMLEAK_STATE_OFF; + + ret = memleak_mem_init(tab); + if (ret) { + kfree(tab); + ret = -ENOMEM; + tab = NULL; + } + + return ret; +} + +int memleak_uninit(void) +{ + return 0; +} + +int __memleak_uninit(void) +{ + if (!tab) + return 0; + + memleak_release(); + + memleak_mem_uninit(tab); + + kfree(tab); + tab = NULL; + + return 0; +} diff --git a/source/lib/internal/kernel_module/modules/memleak/objects.c b/source/lib/internal/kernel_module/modules/memleak/objects.c new file mode 100644 index 00000000..8285f575 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/memleak/objects.c @@ -0,0 +1,322 @@ +#include +#include +#include +#include +#include +#include +#include "mem.h" +#include +#include +#include + +#define MAX_OBJECT (1000) + +/* 400G*/ +#define MAX_MEM (400*1024*1024>>(PAGE_SHIFT-10)) + +/* 100G */ +#define MED_MEM (100*1024*1024>>(PAGE_SHIFT-10)) + +static int rate = 65; + +static inline int is_invalid_byte(unsigned char byte) +{ + return (byte == 0x00 || byte == 0xff + || byte == 0xbb || byte == 0xcc + || byte == 0x5a || byte == 0x6a); +} +static int compute_valid_num(unsigned char *src, int size) +{ + int i ; + int valid = 0; + + for (i = 0; i < size; i++) { + if (is_invalid_byte(src[i])) + continue; + valid++; + } + return valid; +} + +static int compare_one_object(struct object *object, unsigned char *dst, int size) +{ + int i ; + int valid_num = 0; + unsigned char *src = (unsigned char *)object->ptr; + if (dst == NULL || src == NULL) + return 0; + + for (i = 0; i < size; i++) { + + if (is_invalid_byte(src[i])) + continue; + if (src[i] == dst[i]) + valid_num++; + } + + return ((valid_num * 100) >= (object->valid_byte * rate)); +} + +static int find_similar_object(struct object_info *info, struct object *object, unsigned long long *arr, int num) +{ + int i, j; + int valid = 0; + int ret = 0; + int max = 0; + struct object tmp; + + + for 
(i = 0; i < num; i++) { + + valid = 0; + memset(&tmp, 0, sizeof(tmp)); + tmp.valid_byte = compute_valid_num((unsigned char *)arr[i], info->object_size); + + if (tmp.valid_byte < 4) + continue; + + tmp.ptr = (void *)arr[i]; + + for (j = 0; j < num; j++) { + + if (i == j) + continue; + + ret = compare_one_object(&tmp, (unsigned char *)(arr[j]), info->object_size); + if (ret) + valid++; + } + + if (valid >= max) { + max = valid; + *object = tmp; + object->valid_object = max; + } + + if ((object->valid_object * 2) >= num) + break; + } + + return 0; +} + +static int merge_similar_object(struct object_info *info, struct object *object, int i) +{ + int merge = 0; + struct object *tmp; + unsigned char *ptr = (unsigned char *)object->ptr; + + if (object->valid_object < i / 2) { + return 1; + } + + list_for_each_entry(tmp, &info->head, node) { + + merge = compare_one_object(tmp, ptr, info->size); + if (merge) + break; + } + + if (!info->object) + info->object = object; + + if (merge) { + tmp->valid_object += object->valid_object; + + if (tmp->valid_object > info->object->valid_object) + info->object = tmp; + + } else { + if (info->num > MAX_OBJECT) { + return 1; + } + info->num++; + list_add(&object->node, &info->head); + } + + return merge; +} + +static int scan_one_page(struct page *page, struct object_info *info) +{ + int n; + int num = PAGE_SIZE / info->size; + char unuse[num]; + int i = num; + struct object *object; + void *meta; + unsigned long long *tmp; + + void *start = page_address(page); + + memset(unuse, 0, sizeof(unuse)); + +#if 0 + for (p = page->freelist; p && p < end; p = (*(void **)p)) { + n = (p - start) / info->size ; + if (n < num) { + unuse[n] = 1; + i--; + } + } +#endif + if ( i <= (num >> 1)) + return 0; + + object = internal_alloc(sizeof(*object), GFP_ATOMIC); + if (!object) { + pr_err(" alloc object info error\n"); + return 0; + } + + memset(object, 0, sizeof(*object)); + + meta = internal_alloc(sizeof(void *) * i, GFP_ATOMIC); + if (!meta) { + internal_kfree(object); + return 0; + } + + memset(meta, 0, sizeof(void *) * i); + + tmp = (unsigned long long *)meta; + + for (n = 0; n < num; n++) { + if (unuse[n]) + continue; + *tmp = (unsigned long long )(start + n * info->size); + tmp++; + } + + + find_similar_object(info, object, (unsigned long long *)meta, i); + + object->page = (void *)start; + + n = merge_similar_object(info, object, i); + if (n) { + internal_kfree(object); + } + + internal_kfree(meta); + + return 0; +} + +int memleak_free_object(struct memleak_htab *htab) +{ + struct object *tmp1, *tmp2; + struct object_info *info = &htab->info; + + if (!htab->check.cache) + return 0; + + list_for_each_entry_safe(tmp1, tmp2, &info->head, node) { + list_del_init(&tmp1->node); + internal_kfree(tmp1); + } + + memset(info, 0, sizeof(*info)); + INIT_LIST_HEAD(&info->head); + + return 0; +} + +extern void * virt_to_slab_cache(const void *x); + +int memleak_max_object(struct memleak_htab *htab) +{ + int i = 0; + struct object_info *info = &htab->info; + struct kmem_cache *cache = htab->check.cache; + struct object *object; + struct sysinfo meminfo; + int skip = 0; + + memset(info, 0, sizeof(*info)); + INIT_LIST_HEAD(&info->head); + + if (!cache) { + pr_info("slab cache is null\n"); + return 0; + } + + if (htab->rate) + rate = htab->rate; + + info->object_size = cache->object_size; + info->size = cache->size; + + si_meminfo(&meminfo); + if (meminfo.totalram > MAX_MEM) + skip = 3; + else if (meminfo.totalram > MED_MEM) + skip = 1; + else + skip = 0; + + for_each_online_node(i) { + 
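/* walk this node's pfn range and sample in-use slab pages that belong to the target cache */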
unsigned long start_pfn = node_start_pfn(i); + unsigned long end_pfn = node_end_pfn(i); + unsigned long pfn; + unsigned long order; + for (pfn = start_pfn; pfn < end_pfn;) { + struct page *page = NULL; + + cond_resched(); + + if (!pfn_valid(pfn)) { + pfn++; + continue; + } + + page = pfn_to_page(pfn); + if (!page) { + pfn++; + continue; + } + + if (PageCompound(page)) + order = compound_order(page); + else if (PageBuddy(page)) + order = page->private; + else + order = 0; + pfn += (1 << (order >= MAX_ORDER ? 0 : order)); + + /* only scan pages belonging to this node */ + if (page_to_nid(page) != i) + continue; + /* only scan if page is in use */ + if (page_count(page) == 0) + continue; + /*only scan slab page */ + if (!PageSlab(page)) + continue; + /*only scan target slab */ + if (virt_to_slab_cache(pfn_to_kaddr(pfn)) != cache) + continue; + + scan_one_page(page, info); + pfn += skip; + } + } + + pr_info("find object %d\n", info->num); + object = info->object; + if (object) + pr_info("start %px ptr %px byte %d object %d \n", object->page, object->ptr, object->valid_byte, object->valid_object); + + return 0; +} + +int memleak_mark_leak(struct memleak_htab *htab, struct alloc_desc *desc) +{ + struct object_info *info = &htab->info; + + if (!htab->check.cache || !info->object || !desc) + return 0; + + return !!compare_one_object(info->object, (unsigned char *)desc->ptr, info->object_size); +} + diff --git a/source/lib/internal/kernel_module/modules/memleak/user.h b/source/lib/internal/kernel_module/modules/memleak/user.h new file mode 100644 index 00000000..aa623484 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/memleak/user.h @@ -0,0 +1,54 @@ +#ifndef __USER__ +#define __USER__ + +#define MONITOR_TIME (300) +#define MONITOR_RATE (20) /* 20% */ + +typedef enum _memleak_type { + MEMLEAK_TYPE_SLAB = 1, + MEMLEAK_TYPE_PAGE, + MEMLEAK_TYPE_VMALLOC, +} memleak_type; + +struct memleak_settings { + memleak_type type; + int monitor_time;/*default 300 seconds */ + int rate; + char name[NAME_LEN]; + int ext;/*extension function */ +}; + +struct max_object { + char slabname[NAME_LEN]; + void *ptr; + int object_size; + unsigned long object_num; + unsigned long similar_object; +}; +struct user_result { + int num; + struct max_object *objects; + struct user_alloc_desc *desc; +}; + +struct user_alloc_desc { + int pid; + int mark; + int order; + const void *ptr; + char comm[TASK_COMM_LEN]; + char function[NAME_LEN]; + unsigned long long call_site; + unsigned long long ts; + int num; + char backtrace[32][128]; +}; + +struct user_call_site { + unsigned long long call_site; + int nr; + int mark_nr; + char function[NAME_LEN]; +}; + +#endif diff --git a/source/lib/internal/kernel_module/modules/mmaptrace/mmaptrace.c b/source/lib/internal/kernel_module/modules/mmaptrace/mmaptrace.c new file mode 100644 index 00000000..da9aefbb --- /dev/null +++ b/source/lib/internal/kernel_module/modules/mmaptrace/mmaptrace.c @@ -0,0 +1,596 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 81) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 91) +#include +#endif +#include "proc.h" + +#ifdef CONFIG_X86 +#define MAX_SYMBOL_LEN 64 +#define PATH_LEN 256 +#define STACK_DEPTH 100 +#define STACK_DETAIL_DEPTH 20 +#define PERTASK_STACK 10 +#define LIST_LEN 10 +#define PROC_NUMBUF 128 +#define REGISTER_FAILED 1 + +extern 
struct mm_struct *get_task_mm(struct task_struct *task); + +static bool enable_mmaptrace = false; +static unsigned long mmap_len = 246 << 10; +static pid_t mmap_pid; +static int brk; + +LIST_HEAD(threads_list); +LIST_HEAD(threadvma_list); + +DECLARE_RWSEM(threadslist_sem); +DECLARE_RWSEM(vmalist_sem); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) +static struct kprobe kp_mmap = { + .symbol_name = "ksys_mmap_pgoff", +}; + +static struct kprobe kp_brk = { + .symbol_name = "do_brk_flags", +}; +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 0, 0) +static struct kprobe kp_mmap = { + .symbol_name = "vm_mmap_pgoff", +}; + +static struct kprobe kp_brk = { + .symbol_name = "do_brk_flags", +}; +#else +static struct kprobe kp_mmap = { + .symbol_name = "vm_mmap_pgoff", +}; + +static struct kprobe kp_brk = { + .symbol_name = "do_brk", +}; +#endif + +struct stack_info { + unsigned long bp; + char path[PATH_LEN]; +}; + +struct user_stack_detail { + struct list_head list; + int is_brk; +#if defined(DIAG_ARM64) + //struct user_pt_regs regs; +#else + //struct pt_regs regs; +#endif + //unsigned long ip; + //unsigned long bp; + //unsigned long sp; + struct stack_info stack[STACK_DETAIL_DEPTH]; +}; + +struct task_info{ + pid_t pid; + pid_t tgid; + struct list_head task_list; + unsigned long mmap_count; + struct list_head vma_list; + unsigned long userstack_list_len; + struct list_head userstack_list; + char comm[TASK_COMM_LEN]; +}; + +struct vma_info{ + struct list_head list; + pid_t pid; + unsigned long start; + unsigned long end; + int exectue; + char path[PATH_LEN]; +}; + +struct stack_frame_user { + const void __user *next_fp; + unsigned long ret_addr; +}; + + +static void save_mmapstack_trace_user(struct task_struct *task, struct task_info *tsk) +{ + struct list_head *vma_entry; + const struct pt_regs *regs = task_pt_regs(current); + const void __user *fp = (const void __user *)regs->sp; + int stack_len = 0 ; + int i; + + struct user_stack_detail *new_stack = kzalloc(sizeof(struct user_stack_detail),GFP_KERNEL); + if (!new_stack) + return; + new_stack->is_brk = brk; + for (i = 0; i < STACK_DEPTH; i++){ + if (stack_len > STACK_DETAIL_DEPTH) + break; + list_for_each(vma_entry, &threadvma_list){ + //struct vma_info *vma = (struct vma_info *)vma_entry; + struct vma_info *vma = container_of(vma_entry, struct vma_info, list); + unsigned long tmp; + + if (!copy_from_user(&tmp, fp+i*__SIZEOF_LONG__, __SIZEOF_LONG__)) { + if ((tmp >= vma->start) && (tmp <= vma->end)) { + new_stack->stack[stack_len].bp = tmp; + strcpy(new_stack->stack[stack_len].path,vma->path); + stack_len++; + } + } + } + } + list_add_tail(&new_stack->list, &tsk->userstack_list); +} + +static int save_calltrace(struct pt_regs *regs) +{ + struct list_head *tsk_entry; + struct task_info *new_tsk; + pid_t tgid = 0; + + //down_write(&threadslist_sem); + list_for_each(tsk_entry, &threads_list){ + struct task_info *tsk = container_of(tsk_entry, struct task_info, task_list); + tgid = tsk->tgid; + if (tsk->pid == current->pid){ + if (tsk->userstack_list_len > LIST_LEN){ + return 0; + } + save_mmapstack_trace_user(current,tsk); + return 0; + } + //save stack + } + if (tgid == current->tgid){ + new_tsk = kzalloc(sizeof(struct task_info),GFP_KERNEL); + if (!new_tsk) + return 0; + new_tsk->pid = current->pid; + new_tsk->tgid = tgid; + memcpy(new_tsk->comm,current->comm,sizeof(new_tsk->comm)); + new_tsk->mmap_count++; + INIT_LIST_HEAD(&new_tsk->userstack_list); + save_mmapstack_trace_user(current,new_tsk); + 
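/* first record for this thread: add it to the tracked-thread list */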
list_add_tail(&new_tsk->task_list,&threads_list); + } + //up_write(&threadslist_sem); + return 0; +} + +static int before_mmap_pgoff(struct kprobe *p, struct pt_regs *regs) +{ + int ret; + + brk = 0; + if (regs->si < mmap_len){ + return 0; + } + if (!current || !current->mm) + return 0; + + ret = save_calltrace(regs); + return 0; +} + +static void after_mmap_pgoff(struct kprobe *p, struct pt_regs *regs, + unsigned long flags) +{ + return; +} + +static void get_filename(char *buf, const struct path *path, size_t size) +{ + //int res = -1; + //char *end; + if (size) { + char *p = d_path(path, buf, size); + if (!IS_ERR(p)) { + strcpy(buf,p); + //end = mangle_path(buf, p, "\n"); + //if (end) + //res = end - buf; + } + } + return; +} + +static int mmaptrace_print_show(struct seq_file *m, void *v) +{ + struct list_head *tsk_entry; + struct list_head *stack_entry; + int loop_count = 0; + char *syscall_name; + int i; + + //down_read(&threadslist_sem); + if (list_empty(&threads_list)){ + //up_read(&threadslist_sem); + seq_printf(m, "task list is empty\n"); + return 0; + } + + list_for_each(tsk_entry, &threads_list){ + struct task_info *tsk = container_of(tsk_entry, struct task_info, task_list); + seq_printf(m, "pid[%d],name[%s],tgid[%d]\n", + tsk->pid, tsk->comm, tsk->tgid); + list_for_each(stack_entry, &tsk->userstack_list){ + struct user_stack_detail *user_stack = (struct user_stack_detail *)stack_entry; + loop_count++; + syscall_name = user_stack->is_brk ? "brk" : "mmap"; + seq_printf(m, "%s,用户态堆栈%d:\n", syscall_name,loop_count); + for (i = 0; i < STACK_DETAIL_DEPTH; i++) { + if (user_stack->stack[i].bp == 0) { + continue; + } + seq_printf(m,"#~ 0x%lx", user_stack->stack[i].bp); + seq_printf(m," %s\n",user_stack->stack[i].path); + } + } + } + //up_read(&threadslist_sem); + return 0; +} + +DEFINE_PROC_ATTRIBUTE_RO(mmaptrace_print); + +static int mmaptrace_pid_show(struct seq_file *m, void *v) +{ + seq_printf(m, "pid:%d, len:%ld\n", mmap_pid, mmap_len); + return 0; + +} + +static ssize_t mmaptrace_pid_store(void *priv, const char __user *buf, size_t count) +{ + struct task_struct *tsk; + struct task_info *new_tsk; + struct mm_struct *mm; + struct file *vma_file; + struct vm_area_struct *vma; + struct vma_info *new_vma; + struct pid *pid; + char buffer[PROC_NUMBUF]; + char buff[PATH_LEN]; + pid_t pid_i; + int err = -1; + + if (!enable_mmaptrace){ + pr_warn("mmaptrace disabled!"); + return count; + } + + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + return -EFAULT; + } + + err = kstrtoint(strstrip(buffer), 0, &pid_i); + if (err) + return -EINVAL; + + if (!list_empty(&threads_list)){ + struct list_head *entry; + list_for_each(entry, &threads_list){ + struct task_info *pos = (struct task_info *)entry; + if (pos->pid == pid_i) + return count; + } + } + + rcu_read_lock(); + + pid= find_get_pid(pid_i); + tsk = pid_task(pid, PIDTYPE_PID); + if (!tsk || !(tsk->mm)){ + rcu_read_unlock(); + return -EINVAL; + } + mmap_pid = pid_i; + + if (mmap_pid != 0 ){ + new_tsk = kzalloc(sizeof(struct task_info),GFP_KERNEL); + if (!new_tsk) + goto failed_tsk; + new_tsk->pid = mmap_pid; + new_tsk->tgid = tsk->tgid; + memcpy(new_tsk->comm,tsk->comm,sizeof(tsk->comm)); + new_tsk->mmap_count++; + //INIT_LIST_HEAD(&new_tsk->vma_list); + INIT_LIST_HEAD(&new_tsk->userstack_list); + + mm = get_task_mm(tsk); + + if (IS_ERR_OR_NULL(mm)){ + rcu_read_unlock(); + return -EINVAL; + } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) + 
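/* kernels since roughly v5.8 provide the mmap_lock helpers; older ones take mmap_sem directly */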
if (!mmap_read_trylock(mm)){ +#else + if (!down_read_trylock(&mm->mmap_sem)){ +#endif + rcu_read_unlock(); + return -EINTR; + } + for (vma = mm->mmap; vma; vma = vma->vm_next){ + //if (vma->vm_file && vma->vm_flags & VM_EXEC && !inode_open_for_write(file_inode(vma->vm_file))){ + if (vma->vm_file && vma->vm_flags & VM_EXEC){ + new_vma = kzalloc(sizeof(struct vma_info),GFP_KERNEL); + if (!new_vma){ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) + mmap_read_unlock(mm); +#else + up_read(&mm->mmap_sem); +#endif + goto failed_vma; + } + new_vma->start = vma->vm_start; + new_vma->pid = current->pid; + new_vma->end = vma->vm_end; + vma_file = vma->vm_file; + + if (vma_file){ + get_filename(buff, &vma_file->f_path, PATH_LEN); + } + strcpy(new_vma->path, buff); + //(&vmalist_sem); + list_add_tail(&new_vma->list,&threadvma_list); + //up_write(&vmalist_sem); + } + } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) + mmap_read_unlock(mm); +#else + up_read(&mm->mmap_sem); +#endif + //down_write(&threadslist_sem); + list_add_tail(&new_tsk->task_list, &threads_list); + //up_write(&threadslist_sem); + } + rcu_read_unlock(); + return count; +failed_vma: + kfree(new_tsk); +failed_tsk: + rcu_read_unlock(); + return -ENOMEM; +} + +DEFINE_PROC_ATTRIBUTE_RW(mmaptrace_pid); + +static ssize_t mmaptrace_len_store(void *priv, const char __user *buf, size_t count) +{ + char buffer[PROC_NUMBUF]; + unsigned long length; + int err = -1; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + return -EFAULT; + } + + err = _kstrtoul(strstrip(buffer), 0, &length); + if (err) + return -EINVAL; + mmap_len = length; + return count; +} + +static int mmaptrace_len_show(struct seq_file *m, void *v) +{ + seq_printf(m, "monitor len: %ld\n", mmap_len); + return 0; + +} + +DEFINE_PROC_ATTRIBUTE_RW(mmaptrace_len); + +static int before_do_brk(struct kprobe *p, struct pt_regs *regs) +{ + int ret; + + brk = 1; + if (regs->si < mmap_len){ + return 0; + } + + if (!current || !current->mm) + return 0; + ret = save_calltrace(regs); + return 0; +} + +static void after_do_brk(struct kprobe *p, struct pt_regs *regs, + unsigned long flags) +{ + return; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0) +static int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr) +{ + pr_info("fault_handler: p->addr = 0x%p, trap #%dn", p->addr, trapnr); + return 0; +} +#endif + +static int mmaptrace_enable(void) +{ + int ret_mmap, ret_brk; + + kp_mmap.pre_handler = before_mmap_pgoff; + kp_mmap.post_handler = after_mmap_pgoff; +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0) + kp_mmap.fault_handler = handler_fault; +#endif + + kp_brk.pre_handler = before_do_brk; + kp_brk.post_handler = after_do_brk; +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0) + kp_brk.fault_handler = handler_fault; +#endif + + ret_mmap = register_kprobe(&kp_mmap); + if (ret_mmap < 0) { + pr_err("register_kprobe mmap failed, returned %d\n", ret_mmap); + return -REGISTER_FAILED; + } + + ret_brk = register_kprobe(&kp_brk); + if (ret_brk < 0) { + unregister_kprobe(&kp_mmap); + pr_err("register_kprobe brk failed, returned %d\n", ret_brk); + return -REGISTER_FAILED; + } + + pr_info("Planted kprobe at %p\n", kp_mmap.addr); + pr_info("Planted kprobe at %p\n", kp_brk.addr); + return 0; +} + +void mmaptrace_disable(void) +{ + unregister_kprobe(&kp_mmap); + unregister_kprobe(&kp_brk); + pr_info("kprobe at %p unregistered\n", kp_mmap.addr); + pr_info("kprobe at %p 
unregistered\n", kp_brk.addr); +} + +static int mmaptrace_enable_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%d\n", (int)enable_mmaptrace); + return 0; +} + +static ssize_t mmaptrace_enable_store(void *priv, const char __user *buf, size_t count) +{ + char buffer[PROC_NUMBUF]; + int val; + int err = -1; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + return -EFAULT; + } + err = kstrtoint(strstrip(buffer), 0, &val); + + if (val == 1){ + if (!mmaptrace_enable()) + enable_mmaptrace = true; + }else if (val == 0){ + if (enable_mmaptrace){ + mmaptrace_disable(); + enable_mmaptrace = false; + } + } + return count; +} + +DEFINE_PROC_ATTRIBUTE_RW(mmaptrace_enable); + +int mmaptrace_init(void) +{ + struct proc_dir_entry *parent_dir; + struct proc_dir_entry *entry_print; + struct proc_dir_entry *entry_pid; + struct proc_dir_entry *entry_len; + struct proc_dir_entry *entry_enable; + + parent_dir = sysak_proc_mkdir("mmaptrace"); + if (!parent_dir) { + goto failed_root; + } + + entry_print = proc_create("mmaptrace_print", 0444, parent_dir, &mmaptrace_print_fops); + if(!entry_print) { + goto failed; + } + + entry_pid = proc_create("mmaptrace_pid", 0664, parent_dir, &mmaptrace_pid_fops); + if(!entry_pid) { + goto failed; + } + + entry_len = proc_create("mmaptrace_len", 0444, parent_dir, &mmaptrace_len_fops); + if(!entry_len) { + goto failed; + } + + entry_enable = proc_create("mmaptrace_enable", 0664, parent_dir, &mmaptrace_enable_fops); + if(!entry_enable) { + goto failed; + } + return 0; + +failed: + sysak_remove_proc_entry("mmaptrace"); +failed_root: + return -1; +} + +int mmaptrace_exit(void) +{ + struct list_head *tsk_entry; + struct list_head *vma_entry; + struct list_head *tsk_prev; + struct list_head *vma_prev; + + if (enable_mmaptrace){ + mmaptrace_disable(); + } + + list_for_each(tsk_entry, &threads_list){ + struct task_info *tsk = container_of(tsk_entry, struct task_info, task_list); + tsk_prev = tsk_entry->prev; + + list_del(tsk_entry); + kfree(tsk); + tsk_entry = tsk_prev; + } + + list_for_each(vma_entry, &threadvma_list){ + struct vma_info *vma = container_of(vma_entry, struct vma_info, list); + vma_prev = vma_entry->prev; + + list_del(vma_entry); + kfree(vma); + vma_entry = vma_prev; + } + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/modules/sched/noschedule.c b/source/lib/internal/kernel_module/modules/sched/noschedule.c new file mode 100644 index 00000000..4014b8ba --- /dev/null +++ b/source/lib/internal/kernel_module/modules/sched/noschedule.c @@ -0,0 +1,696 @@ +#define pr_fmt(fmt) "trace-nosched: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sysak_mods.h" +#include "proc.h" + +static unsigned long (*__kallsyms_lookup_name)(const char *name); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0) +#include +static struct kprobe kprobe_kallsyms_lookup_name = { + .symbol_name = "kallsyms_lookup_name" +}; + +static int init_symbol(void) +{ + int ret = -ENODEV; + + ret = register_kprobe(&kprobe_kallsyms_lookup_name); + if (!ret) { + __kallsyms_lookup_name = (void *)kprobe_kallsyms_lookup_name.addr; + unregister_kprobe(&kprobe_kallsyms_lookup_name); + + pr_info("kallsyms_lookup_name is %px\n", __kallsyms_lookup_name); + if (!__kallsyms_lookup_name) + return -ENODEV; + } + + return ret; +} +#else +static int init_symbol(void) +{ + 
__kallsyms_lookup_name = kallsyms_lookup_name; + return 0; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) +#include +#else +#include +#include +#include +#endif + +//#define CONFIG_DEBUG_TRACE_NOSCHED +#define NUMBER_CHARACTER 40 +#define PROC_DIR_NAME "nosch" +#define NUM_TRACEPOINTS 1 +#define MAX_TRACE_ENTRIES (SZ_1K / sizeof(void *)) +#define PER_TRACE_ENTRIES_AVERAGE 8 + +#define MAX_STACE_TRACE_ENTRIES \ + (MAX_TRACE_ENTRIES / PER_TRACE_ENTRIES_AVERAGE) + + +/** + * If we call register_trace_sched_{wakeup,wakeup_new,switch,migrate_task}() + * directly in a kernel module, the compiler will complain about undefined + * symbol of __tracepoint_sched_{wakeup, wakeup_new, switch, migrate_task} + * because the kernel do not export the tracepoint symbol. Here is a workaround + * via for_each_kernel_tracepoint() to lookup the tracepoint and save. + */ +struct tracepoint_entry { + void *probe; + const char *name; + struct tracepoint *tp; +}; + +struct stack_entry { + unsigned int nr_entries; + unsigned long *entries; +}; + +struct per_cpu_stack_trace { + u64 last_timestamp; + struct hrtimer hrtimer; + struct task_struct *skip; + + unsigned int nr_stack_entries; + unsigned int nr_entries; + struct stack_entry stack_entries[MAX_STACE_TRACE_ENTRIES]; + unsigned long entries[MAX_TRACE_ENTRIES]; + + char comms[MAX_STACE_TRACE_ENTRIES][TASK_COMM_LEN]; + pid_t pids[MAX_STACE_TRACE_ENTRIES]; + u64 duration[MAX_STACE_TRACE_ENTRIES]; + u64 stamp[MAX_STACE_TRACE_ENTRIES]; +}; + +struct noschedule_info { + struct tracepoint_entry tp_entries[NUM_TRACEPOINTS]; + unsigned int tp_initalized; + + struct per_cpu_stack_trace __percpu *stack_trace; +}; + +static int nosched_ref; + +/* Whether to enable the tracker. */ +static bool trace_enable; + +/* Default sampling period is 4 000 000ns. The minimum value is 1000000ns. */ +static u64 sampling_period = 4 * 1000 * 1000UL; + +/** + * How many nanoseconds should we record the stack trace. + * Default is 10 000 000ns. + */ +static u64 duration_threshold = 10 * 1000 * 1000UL; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) +static void (*save_stack_trace_effective)(struct pt_regs *regs, + struct stack_trace *trace); + +static inline void stack_trace_skip_hardirq_init(void) +{ + save_stack_trace_effective = + (void *)__kallsyms_lookup_name("save_stack_trace_regs"); +} + +static inline void store_stack_trace(struct pt_regs *regs, + struct stack_entry *stack_entry, + unsigned long *entries, + unsigned int max_entries, int skip) +{ + struct stack_trace stack_trace; + + stack_trace.nr_entries = 0; + stack_trace.max_entries = max_entries; + stack_trace.entries = entries; + stack_trace.skip = skip; + + if (likely(regs && save_stack_trace_effective)) + save_stack_trace_effective(regs, &stack_trace); + else + save_stack_trace(&stack_trace); + + stack_entry->entries = entries; + stack_entry->nr_entries = stack_trace.nr_entries; + + /* + * Some daft arches put -1 at the end to indicate its a full trace. + * + * this is buggy anyway, since it takes a whole extra entry so a + * complete trace that maxes out the entries provided will be reported + * as incomplete, friggin useless . 
+ */ + if (stack_entry->nr_entries != 0 && + stack_entry->entries[stack_entry->nr_entries - 1] == ULONG_MAX) + stack_entry->nr_entries--; +} +#else +static unsigned int (*stack_trace_save_skip_hardirq)(struct pt_regs *regs, + unsigned long *store, + unsigned int size, + unsigned int skipnr); + +static inline void stack_trace_skip_hardirq_init(void) +{ + stack_trace_save_skip_hardirq = + (void *)__kallsyms_lookup_name("stack_trace_save_regs"); +} + +static inline void store_stack_trace(struct pt_regs *regs, + struct stack_entry *stack_entry, + unsigned long *entries, + unsigned int max_entries, int skip) +{ + stack_entry->entries = entries; + if (regs && stack_trace_save_skip_hardirq) + stack_entry->nr_entries = stack_trace_save_skip_hardirq(regs, + entries, max_entries, skip); + else + stack_entry->nr_entries = stack_trace_save(entries, max_entries, + skip); +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +static struct tracepoint **nosch__start___tracepoints_ptrs; +static struct tracepoint **nosch__stop___tracepoints_ptrs; + +static int nosch_init_local_tracepoints(void) +{ + nosch__start___tracepoints_ptrs = (void *)__kallsyms_lookup_name("__start___tracepoints_ptrs"); + nosch__stop___tracepoints_ptrs = (void *)__kallsyms_lookup_name("__stop___tracepoints_ptrs"); + if (nosch__start___tracepoints_ptrs == NULL || nosch__stop___tracepoints_ptrs == NULL) { + return -1; + } + return 0; +} + +static void nosch_for_each_tracepoint_range(struct tracepoint * const *begin, + struct tracepoint * const *end, + void (*fct)(struct tracepoint *tp, void *priv), + void *priv) +{ + struct tracepoint * const *iter; + + if (!begin) + return; + for (iter = begin; iter < end; iter++) + fct(*iter, priv); +} + +/** + * nosch_for_each_kernel_tracepoint - iteration on all kernel tracepoints + * @fct: callback + * @priv: private data + */ +void nosch_for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), + void *priv) +{ + nosch_for_each_tracepoint_range(nosch__start___tracepoints_ptrs, + nosch__stop___tracepoints_ptrs, fct, priv); +} +#endif + +static bool __stack_trace_record(struct per_cpu_stack_trace *stack_trace, + struct pt_regs *regs, u64 duration) +{ + unsigned int nr_entries, nr_stack_entries; + struct stack_entry *stack_entry; + + nr_stack_entries = stack_trace->nr_stack_entries; + if (nr_stack_entries >= ARRAY_SIZE(stack_trace->stack_entries)) + return false; + + nr_entries = stack_trace->nr_entries; + if (nr_entries >= ARRAY_SIZE(stack_trace->entries)) + return false; + + /* Save the thread command, pid and duration. */ + strlcpy(stack_trace->comms[nr_stack_entries], current->comm, + TASK_COMM_LEN); + stack_trace->pids[nr_stack_entries] = current->pid; + stack_trace->duration[nr_stack_entries] = duration; + stack_trace->stamp[nr_stack_entries] = stack_trace->last_timestamp/1000; + + stack_entry = stack_trace->stack_entries + nr_stack_entries; + store_stack_trace(regs, stack_entry, stack_trace->entries + nr_entries, + ARRAY_SIZE(stack_trace->entries) - nr_entries, 0); + stack_trace->nr_entries += stack_entry->nr_entries; + + /** + * Ensure that the initialisation of @stack_entry is complete before we + * update the @nr_stack_entries. + */ + smp_store_release(&stack_trace->nr_stack_entries, nr_stack_entries + 1); + + if (unlikely(stack_trace->nr_entries >= + ARRAY_SIZE(stack_trace->entries))) { + pr_info("BUG: MAX_TRACE_ENTRIES too low on cpu: %d!\n", + smp_processor_id()); + + return false; + } + + return true; +} + +/* Note: Must be called with irq disabled. 
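It is invoked from the sampling hrtimer handler, which runs in hard-irq context.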
*/ +static inline bool stack_trace_record(struct per_cpu_stack_trace *stack_trace, + u64 delta) +{ + if (unlikely(delta >= duration_threshold)) + return __stack_trace_record(stack_trace, get_irq_regs(), delta); + + return false; +} + +static enum hrtimer_restart trace_nosched_hrtimer_handler(struct hrtimer *hrtimer) +{ + struct pt_regs *regs = get_irq_regs(); + struct per_cpu_stack_trace *stack_trace; + u64 now = local_clock(); + + stack_trace = container_of(hrtimer, struct per_cpu_stack_trace, + hrtimer); + /** + * Skip the idle task and make sure we are not only the + * running task on the CPU. If we are interrupted from + * user mode, it indicate that we are not executing in + * the kernel space, so we should also skip it. + */ + if (!is_idle_task(current) && regs && !user_mode(regs) && + !single_task_running()) { + u64 delta; + + delta = now - stack_trace->last_timestamp; + if (!stack_trace->skip && stack_trace_record(stack_trace, delta)) + stack_trace->skip = current; + } else { + stack_trace->last_timestamp = now; + } + + hrtimer_forward_now(hrtimer, ns_to_ktime(sampling_period)); + + return HRTIMER_RESTART; +} + +/* interrupts should be disabled from __schedule() */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +static void probe_sched_switch(void *priv, + struct task_struct *prev, + struct task_struct *next) +#else +static void probe_sched_switch(void *priv, bool preempt, + struct task_struct *prev, + struct task_struct *next) +#endif +{ + u64 now = local_clock(); + struct per_cpu_stack_trace __percpu *stack_trace = priv; + struct per_cpu_stack_trace *cpu_stack_trace = this_cpu_ptr(stack_trace); + u64 last = cpu_stack_trace->last_timestamp; + + if (unlikely(!trace_enable)) + return; + + cpu_stack_trace->last_timestamp = now; + if (unlikely(cpu_stack_trace->skip)) { + unsigned int index = cpu_stack_trace->nr_stack_entries - 1; + + cpu_stack_trace->skip = NULL; + cpu_stack_trace->duration[index] = now - last; + } + +} + +static struct noschedule_info nosched_info = { + .tp_entries = { + [0] = { + .name = "sched_switch", + .probe = probe_sched_switch, + }, + }, + .tp_initalized = 0, +}; + +static inline bool is_tracepoint_lookup_success(struct noschedule_info *info) +{ + return info->tp_initalized == ARRAY_SIZE(info->tp_entries); +} + +static void tracepoint_lookup(struct tracepoint *tp, void *priv) +{ + int i; + struct noschedule_info *info = priv; + + if (is_tracepoint_lookup_success(info)) + return; + + for (i = 0; i < ARRAY_SIZE(info->tp_entries); i++) { + if (info->tp_entries[i].tp || !info->tp_entries[i].name || + strcmp(tp->name, info->tp_entries[i].name)) + continue; + info->tp_entries[i].tp = tp; + info->tp_initalized++; + } +} + +static int threshold_show(struct seq_file *m, void *ptr) +{ + seq_printf(m, "%llu ms\n", duration_threshold/(1000*1000)); + + return 0; +} + +static ssize_t threshold_store(void *priv, const char __user *buf, size_t count) +{ + u64 val; + + if (kstrtou64_from_user(buf, count, 0, &val)) + return -EINVAL; + + duration_threshold = val; + + return count; +} +DEFINE_PROC_ATTRIBUTE_RW(threshold); + +static int enable_show(struct seq_file *m, void *ptr) +{ + seq_printf(m, "%s\n", trace_enable ? 
"enabled" : "disabled"); + + return 0; +} + +static void each_hrtimer_start(void *priv) +{ + u64 now = local_clock(); + struct per_cpu_stack_trace __percpu *stack_trace = priv; + struct hrtimer *hrtimer = this_cpu_ptr(&stack_trace->hrtimer); + + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_PINNED); + hrtimer->function = trace_nosched_hrtimer_handler; + + __this_cpu_write(stack_trace->last_timestamp, now); + + hrtimer_start_range_ns(hrtimer, ns_to_ktime(sampling_period), 0, + HRTIMER_MODE_REL_PINNED); +} + +static inline void trace_nosched_hrtimer_start(void) +{ + on_each_cpu(each_hrtimer_start, nosched_info.stack_trace, true); +} + +static inline void trace_nosched_hrtimer_cancel(void) +{ + int cpu; + + for_each_online_cpu(cpu) + hrtimer_cancel(per_cpu_ptr(&nosched_info.stack_trace->hrtimer, + cpu)); +} + +static int trace_nosched_register_tp(void) +{ + int i; + struct noschedule_info *info = &nosched_info; + + for (i = 0; i < ARRAY_SIZE(info->tp_entries); i++) { + int ret; + struct tracepoint_entry *entry = info->tp_entries + i; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + ret = tracepoint_probe_register(entry->tp->name, entry->probe, + info->stack_trace); +#else + ret = tracepoint_probe_register(entry->tp, entry->probe, + info->stack_trace); +#endif + if (ret && ret != -EEXIST) { + pr_err("sched trace: can not activate tracepoint " + "probe to %s with error code: %d\n", + entry->name, ret); + while (i--) { + entry = info->tp_entries + i; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + tracepoint_probe_unregister(entry->tp->name, + entry->probe, + info->stack_trace); +#else + tracepoint_probe_unregister(entry->tp, + entry->probe, + info->stack_trace); +#endif + } + return ret; + } + } + + return 0; +} + +static int trace_nosched_unregister_tp(void) +{ + int i; + struct noschedule_info *info = &nosched_info; + + for (i = 0; i < ARRAY_SIZE(info->tp_entries); i++) { + int ret; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + ret = tracepoint_probe_unregister(info->tp_entries[i].tp->name, + info->tp_entries[i].probe, + info->stack_trace); +#else + ret = tracepoint_probe_unregister(info->tp_entries[i].tp, + info->tp_entries[i].probe, + info->stack_trace); +#endif + if (ret && ret != -ENOENT) { + pr_err("sched trace: can not inactivate tracepoint " + "probe to %s with error code: %d\n", + info->tp_entries[i].name, ret); + return ret; + } + } + + return 0; +} + +static ssize_t enable_store(void *priv, const char __user *buf, size_t count) +{ + int enable; + + if (kstrtoint_from_user(buf, count, 16, &enable)) + return -EINVAL; + + if (!!enable == !!trace_enable) + return count; + + if (enable) { + if (!trace_nosched_register_tp()) { + trace_nosched_hrtimer_start(); + sysak_module_get(&nosched_ref); + } + else + return -EAGAIN; + } else { + trace_nosched_hrtimer_cancel(); + if (trace_nosched_unregister_tp()) + return -EAGAIN; + sysak_module_put(&nosched_ref); + } + + trace_enable = enable; + return count; +} +DEFINE_PROC_ATTRIBUTE_RW(enable); + +static void each_stack_trace_clear(void *priv) +{ + struct per_cpu_stack_trace __percpu *stack_trace = priv; + struct per_cpu_stack_trace *cpu_stack_trace = this_cpu_ptr(stack_trace); + + cpu_stack_trace->nr_entries = 0; + cpu_stack_trace->nr_stack_entries = 0; +} + +static inline void seq_print_stack_trace(struct seq_file *m, + struct stack_entry *entry) +{ + int i; + + if (WARN_ON(!entry->entries)) + return; + + for (i = 0; i < entry->nr_entries; i++) + seq_printf(m, "%*c%pS\n", 5, ' ', (void *)entry->entries[i]); +} + +static int 
stack_trace_show(struct seq_file *m, void *ptr) +{ + int cpu; + struct per_cpu_stack_trace __percpu *stack_trace = m->private; + + for_each_online_cpu(cpu) { + int i; + unsigned int nr; + struct per_cpu_stack_trace *cpu_stack_trace; + + cpu_stack_trace = per_cpu_ptr(stack_trace, cpu); + + /** + * Paired with smp_store_release() in the + * __stack_trace_record(). + */ + nr = smp_load_acquire(&cpu_stack_trace->nr_stack_entries); + if (!nr) + continue; + +// seq_printf(m, " cpu: %d\n", cpu); + + for (i = 0; i < nr; i++) { + struct stack_entry *entry; + + entry = cpu_stack_trace->stack_entries + i; + seq_printf(m, "%*ccpu:%d\tcommand:%s\tpid:%d\tlatency:%lluus\tSTAMP:%llu\n", + 5, ' ', cpu, cpu_stack_trace->comms[i], + cpu_stack_trace->pids[i], + cpu_stack_trace->duration[i] / (1000UL), + cpu_stack_trace->stamp[i]); + seq_print_stack_trace(m, entry); + seq_putc(m, '\n'); + + cond_resched(); + } + } + + return 0; +} + +static ssize_t stack_trace_store(void *priv, const char __user *buf, + size_t count) +{ + int clear; + + if (kstrtoint_from_user(buf, count, 10, &clear) || clear != 0) + return -EINVAL; + + on_each_cpu(each_stack_trace_clear, priv, true); + + return count; +} +DEFINE_PROC_ATTRIBUTE_RW(stack_trace); + +#ifdef CONFIG_DEBUG_TRACE_NOSCHED +#include + +static int nosched_test_show(struct seq_file *m, void *ptr) +{ + return 0; +} + +static ssize_t nosched_test_store(void *priv, const char __user *buf, + size_t count) +{ + int delay; + + if (kstrtoint_from_user(buf, count, 0, &delay) || delay == 0) + return -EINVAL; + + mdelay(delay); + + return count; +} +DEFINE_PROC_ATTRIBUTE_RW(nosched_test); +#endif + +int trace_noschedule_init(struct proc_dir_entry *root_dir) +{ + int ret = 0; + struct proc_dir_entry *parent_dir; + struct noschedule_info *info = &nosched_info; + + if((ret=init_symbol())) + return ret; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + if (nosch_init_local_tracepoints()) + return -ENODEV; +#endif + + stack_trace_skip_hardirq_init(); + + /* Lookup for the tracepoint that we needed */ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + nosch_for_each_kernel_tracepoint(tracepoint_lookup, info); +#else + for_each_kernel_tracepoint(tracepoint_lookup, info); +#endif + + if (!is_tracepoint_lookup_success(info)) + return -ENODEV; + + info->stack_trace = alloc_percpu(struct per_cpu_stack_trace); + if (!info->stack_trace) + return -ENOMEM; + + parent_dir = proc_mkdir(PROC_DIR_NAME, root_dir); + if (!parent_dir) + goto free_buf; + if (!proc_create_data("threshold", 0644, parent_dir, &threshold_fops, + info->stack_trace)) + goto remove_proc; + if (!proc_create_data("enable", 0644, parent_dir, &enable_fops, + info->stack_trace)) + goto remove_proc; + if (!proc_create_data("stack_trace", 0, parent_dir, &stack_trace_fops, + info->stack_trace)) + goto remove_proc; +#ifdef CONFIG_DEBUG_TRACE_NOSCHED + if (!proc_create_data("nosched_test", 0644, parent_dir, + &nosched_test_fops, info->stack_trace)) + goto remove_proc; +#endif + + return 0; +remove_proc: + remove_proc_subtree(PROC_DIR_NAME, root_dir); +free_buf: + free_percpu(info->stack_trace); + + return -ENOMEM; +} + +void trace_noschedule_exit(void) +{ + if (trace_enable) { + trace_nosched_hrtimer_cancel(); + trace_nosched_unregister_tp(); + tracepoint_synchronize_unregister(); + } + free_percpu(nosched_info.stack_trace); +} diff --git a/source/lib/internal/kernel_module/modules/sched/trace_irqoff.c b/source/lib/internal/kernel_module/modules/sched/trace_irqoff.c new file mode 100644 index 00000000..7a7b5707 --- /dev/null +++ 
b/source/lib/internal/kernel_module/modules/sched/trace_irqoff.c @@ -0,0 +1,634 @@ +#define pr_fmt(fmt) "trace-irqoff: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sysak_mods.h" +#include "proc.h" + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) +#include +#else +#include +#endif + +#define MAX_TRACE_ENTRIES (SZ_1K / sizeof(unsigned long)) +#define PER_TRACE_ENTRIES_AVERAGE (8 + 8) + +#define MAX_STACE_TRACE_ENTRIES \ + (MAX_TRACE_ENTRIES / PER_TRACE_ENTRIES_AVERAGE) + +#define MAX_LATENCY_RECORD 10 + +static int irqoff_ref; +static bool trace_enable; + +/** + * Default sampling period is 4,000,000ns. The minimum value is 1,000,000ns. + */ +static u64 sampling_period = 4 * 1000 * 1000UL; + +/** + * How many times should we record the stack trace. + * Default is 10,000,000ns. + */ +static u64 trace_irqoff_latency = 10 * 1000 * 1000UL; + +struct irqoff_trace { + unsigned int nr_entries; + unsigned long *entries; +}; + +struct stack_trace_metadata { + u64 last_timestamp; + unsigned long nr_irqoff_trace; + struct irqoff_trace trace[MAX_STACE_TRACE_ENTRIES]; + unsigned long nr_entries; + unsigned long entries[MAX_TRACE_ENTRIES]; + unsigned long latency_count[MAX_LATENCY_RECORD]; + + /* Task command names*/ + char comms[MAX_STACE_TRACE_ENTRIES][TASK_COMM_LEN]; + + /* Task pids*/ + pid_t pids[MAX_STACE_TRACE_ENTRIES]; + + struct { + u64 nsecs:63; + u64 more:1; + } latency[MAX_STACE_TRACE_ENTRIES]; + u64 stamp[MAX_STACE_TRACE_ENTRIES]; +}; + +struct per_cpu_stack_trace { + struct timer_list timer; + struct hrtimer hrtimer; + struct stack_trace_metadata hardirq_trace; + struct stack_trace_metadata softirq_trace; + + bool softirq_delayed; +}; + +static struct per_cpu_stack_trace __percpu *cpu_stack_trace; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) +static void (*save_stack_trace_skip_hardirq)(struct pt_regs *regs, + struct stack_trace *trace); + +static inline void stack_trace_skip_hardirq_init(void) +{ + save_stack_trace_skip_hardirq = + (void *)kallsyms_lookup_name("save_stack_trace_regs"); +} + +static inline void store_stack_trace(struct pt_regs *regs, + struct irqoff_trace *trace, + unsigned long *entries, + unsigned int max_entries, int skip) +{ + struct stack_trace stack_trace; + + stack_trace.nr_entries = 0; + stack_trace.max_entries = max_entries; + stack_trace.entries = entries; + stack_trace.skip = skip; + + if (regs && save_stack_trace_skip_hardirq) + save_stack_trace_skip_hardirq(regs, &stack_trace); + else + save_stack_trace(&stack_trace); + + trace->entries = entries; + trace->nr_entries = stack_trace.nr_entries; + + /* + * Some daft arches put -1 at the end to indicate its a full trace. + * + * this is buggy anyway, since it takes a whole extra entry so a + * complete trace that maxes out the entries provided will be reported + * as incomplete, friggin useless . 
+ */ + if (trace->nr_entries != 0 && + trace->entries[trace->nr_entries - 1] == ULONG_MAX) + trace->nr_entries--; +} +#else +static unsigned int (*stack_trace_save_skip_hardirq)(struct pt_regs *regs, + unsigned long *store, + unsigned int size, + unsigned int skipnr); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0) +static inline void stack_trace_skip_hardirq_init(void) +{ + stack_trace_save_skip_hardirq = + (void *)kallsyms_lookup_name("stack_trace_save_regs"); +} +#else /* LINUX_VERSION_CODE */ + +static int noop_pre_handler(struct kprobe *p, struct pt_regs *regs){ + return 0; +} + +/** + * + * We can only find the kallsyms_lookup_name's addr by using kprobes, then use + * the unexported kallsyms_lookup_name to find symbols. + */ +static void stack_trace_skip_hardirq_init(void) +{ + int ret; + struct kprobe kp; + unsigned long (*kallsyms_lookup_name_fun)(const char *name); + + + ret = -1; + kp.symbol_name = "kallsyms_lookup_name"; + kp.pre_handler = noop_pre_handler; + stack_trace_save_skip_hardirq = NULL; + + ret = register_kprobe(&kp); + if (ret < 0) { + return; + } + + kallsyms_lookup_name_fun = (void*)kp.addr; + unregister_kprobe(&kp); + + stack_trace_save_skip_hardirq = + (void *)kallsyms_lookup_name_fun("stack_trace_save_regs"); +} +#endif /* LINUX_VERSION_CODE */ + +static inline void store_stack_trace(struct pt_regs *regs, + struct irqoff_trace *trace, + unsigned long *entries, + unsigned int max_entries, int skip) +{ + trace->entries = entries; + if (regs && stack_trace_save_skip_hardirq) + trace->nr_entries = stack_trace_save_skip_hardirq(regs, entries, + max_entries, + skip); + else + trace->nr_entries = stack_trace_save(entries, max_entries, + skip); +} +#endif + +/** + * Note: Must be called with irq disabled. + */ +static bool save_trace(struct pt_regs *regs, bool hardirq, u64 latency, u64 stamp) +{ + unsigned long nr_entries, nr_irqoff_trace; + struct irqoff_trace *trace; + struct stack_trace_metadata *stack_trace; + + stack_trace = hardirq ? this_cpu_ptr(&cpu_stack_trace->hardirq_trace) : + this_cpu_ptr(&cpu_stack_trace->softirq_trace); + + nr_irqoff_trace = stack_trace->nr_irqoff_trace; + if (unlikely(nr_irqoff_trace >= MAX_STACE_TRACE_ENTRIES)) + return false; + + nr_entries = stack_trace->nr_entries; + if (unlikely(nr_entries >= MAX_TRACE_ENTRIES - 1)) + return false; + + strlcpy(stack_trace->comms[nr_irqoff_trace], current->comm, + TASK_COMM_LEN); + stack_trace->pids[nr_irqoff_trace] = current->pid; + stack_trace->latency[nr_irqoff_trace].nsecs = latency; + stack_trace->latency[nr_irqoff_trace].more = !hardirq && regs; + stack_trace->stamp[nr_irqoff_trace] = stamp; + + trace = stack_trace->trace + nr_irqoff_trace; + store_stack_trace(regs, trace, stack_trace->entries + nr_entries, + MAX_TRACE_ENTRIES - nr_entries, 0); + stack_trace->nr_entries += trace->nr_entries; + + /** + * Ensure that the initialisation of @trace is complete before we + * update the @nr_irqoff_trace. 
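+ * The reader side pairs with this via smp_load_acquire() in trace_latency_show_one().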
+ */ + smp_store_release(&stack_trace->nr_irqoff_trace, nr_irqoff_trace + 1); + + if (unlikely(stack_trace->nr_entries >= MAX_TRACE_ENTRIES - 1)) { + pr_info("BUG: MAX_TRACE_ENTRIES too low!"); + + return false; + } + + return true; +} + +static bool trace_irqoff_record(u64 delta, bool hardirq, bool skip, u64 stamp) +{ + int index = 0; + u64 throttle = sampling_period << 1; + u64 delta_old = delta; + + if (delta < throttle) + return false; + + delta >>= 1; + while (delta > throttle) { + index++; + delta >>= 1; + } + + if (unlikely(index >= MAX_LATENCY_RECORD)) + index = MAX_LATENCY_RECORD - 1; + + if (hardirq) + __this_cpu_inc(cpu_stack_trace->hardirq_trace.latency_count[index]); + else if (!skip) + __this_cpu_inc(cpu_stack_trace->softirq_trace.latency_count[index]); + + if (unlikely(delta_old >= trace_irqoff_latency)) + save_trace(skip ? get_irq_regs() : NULL, hardirq, delta_old, stamp); + + return true; +} + +static enum hrtimer_restart trace_irqoff_hrtimer_handler(struct hrtimer *hrtimer) +{ + u64 now = local_clock(), delta, stamp; + + stamp = __this_cpu_read(cpu_stack_trace->hardirq_trace.last_timestamp); + delta = now - stamp; + __this_cpu_write(cpu_stack_trace->hardirq_trace.last_timestamp, now); + + if (trace_irqoff_record(delta, true, true, stamp)) { + __this_cpu_write(cpu_stack_trace->softirq_trace.last_timestamp, + now); + } else if (!__this_cpu_read(cpu_stack_trace->softirq_delayed)) { + u64 delta_soft; + + stamp = __this_cpu_read(cpu_stack_trace->softirq_trace.last_timestamp); + delta_soft = now - stamp; + + if (unlikely(delta_soft >= trace_irqoff_latency)) { + __this_cpu_write(cpu_stack_trace->softirq_delayed, true); + trace_irqoff_record(delta_soft, false, true, stamp); + } + } + + hrtimer_forward_now(hrtimer, ns_to_ktime(sampling_period)); + + return HRTIMER_RESTART; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) +static void trace_irqoff_timer_handler(unsigned long data) +#else +static void trace_irqoff_timer_handler(struct timer_list *timer) +#endif +{ + u64 now = local_clock(), delta, stamp; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + struct timer_list *timer = (struct timer_list *)data; +#endif + + stamp = __this_cpu_read(cpu_stack_trace->softirq_trace.last_timestamp); + delta = now - stamp; + __this_cpu_write(cpu_stack_trace->softirq_trace.last_timestamp, now); + + __this_cpu_write(cpu_stack_trace->softirq_delayed, false); + + trace_irqoff_record(delta, false, false, stamp); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0) + mod_timer_pinned(timer, + jiffies + msecs_to_jiffies(sampling_period / 1000000UL)); +#else + mod_timer(timer, + jiffies + msecs_to_jiffies(sampling_period / 1000000UL)); +#endif +} + +static void smp_clear_stack_trace(void *info) +{ + int i; + struct per_cpu_stack_trace *stack_trace = info; + + stack_trace->hardirq_trace.nr_entries = 0; + stack_trace->hardirq_trace.nr_irqoff_trace = 0; + stack_trace->softirq_trace.nr_entries = 0; + stack_trace->softirq_trace.nr_irqoff_trace = 0; + + for (i = 0; i < MAX_LATENCY_RECORD; i++) { + stack_trace->hardirq_trace.latency_count[i] = 0; + stack_trace->softirq_trace.latency_count[i] = 0; + } +} + +static void smp_timers_start(void *info) +{ + u64 now = local_clock(); + struct per_cpu_stack_trace *stack_trace = info; + struct hrtimer *hrtimer = &stack_trace->hrtimer; + struct timer_list *timer = &stack_trace->timer; + + stack_trace->hardirq_trace.last_timestamp = now; + stack_trace->softirq_trace.last_timestamp = now; + + hrtimer_start_range_ns(hrtimer, ns_to_ktime(sampling_period), + 0, 
HRTIMER_MODE_REL_PINNED); + + timer->expires = jiffies + msecs_to_jiffies(sampling_period / 1000000UL); + add_timer_on(timer, smp_processor_id()); +} + + +static void seq_print_stack_trace(struct seq_file *m, struct irqoff_trace *trace) +{ + int i; + + if (WARN_ON(!trace->entries)) + return; + + for (i = 0; i < trace->nr_entries; i++) + seq_printf(m, "%*c%pS\n", 5, ' ', (void *)trace->entries[i]); +} + +static void trace_latency_show_one(struct seq_file *m, void *v, bool hardirq) +{ + int cpu; + + for_each_online_cpu(cpu) { + int i; + unsigned long nr_irqoff_trace; + struct stack_trace_metadata *stack_trace; + + stack_trace = hardirq ? + per_cpu_ptr(&cpu_stack_trace->hardirq_trace, cpu) : + per_cpu_ptr(&cpu_stack_trace->softirq_trace, cpu); + + /** + * Paired with smp_store_release() in the save_trace(). + */ + nr_irqoff_trace = smp_load_acquire(&stack_trace->nr_irqoff_trace); + + if (!nr_irqoff_trace) + continue; + + for (i = 0; i < nr_irqoff_trace; i++) { + struct irqoff_trace *trace = stack_trace->trace + i; + + seq_printf(m, "%*ccpu:%d\tcommand:%s\tpid:%d\tlatency:%lu%s\tSTAMP:%llu\n", + 5, ' ', cpu, stack_trace->comms[i], + stack_trace->pids[i], + stack_trace->latency[i].nsecs / (1000 * 1000UL), + stack_trace->latency[i].more ? "+ms" : "ms", + stack_trace->stamp[i]); + seq_print_stack_trace(m, trace); + seq_putc(m, '\n'); + + cond_resched(); + } + } +} + +static int trace_latency_show(struct seq_file *m, void *v) +{ + int cpu; + seq_printf(m, "trace_irqoff_latency: %llums\n\n", + trace_irqoff_latency / (1000 * 1000UL)); + + seq_puts(m, " hardirq:\n"); + trace_latency_show_one(m, v, true); + + seq_puts(m, " softirq:\n"); + trace_latency_show_one(m, v, false); + + for_each_online_cpu(cpu) + smp_call_function_single(cpu, smp_clear_stack_trace, + per_cpu_ptr(cpu_stack_trace, cpu), + true); + return 0; +} + + +static ssize_t trace_latency_store(void *priv, const char __user *buf, size_t count) +{ + u64 latency; + + if (kstrtou64_from_user(buf, count, 0, &latency)) + return -EINVAL; + + if (latency == 0) { + int cpu; + + for_each_online_cpu(cpu) + smp_call_function_single(cpu, smp_clear_stack_trace, + per_cpu_ptr(cpu_stack_trace, cpu), + true); + return count; + } else if (latency < (sampling_period << 1) / (1000 * 1000UL)) + return -EINVAL; + + trace_irqoff_latency = latency; + + return count; +} + +DEFINE_PROC_ATTRIBUTE_RW(trace_latency); + +static void trace_irqoff_start_timers(void) +{ + int cpu; + + for_each_online_cpu(cpu) { + struct hrtimer *hrtimer; + struct timer_list *timer; + + hrtimer = per_cpu_ptr(&cpu_stack_trace->hrtimer, cpu); + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_PINNED); + hrtimer->function = trace_irqoff_hrtimer_handler; + + timer = per_cpu_ptr(&cpu_stack_trace->timer, cpu); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0) + __setup_timer(timer, trace_irqoff_timer_handler, + (unsigned long)timer, TIMER_IRQSAFE); +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0) + timer->flags = TIMER_PINNED | TIMER_IRQSAFE; + setup_timer(timer, trace_irqoff_timer_handler, + (unsigned long)timer); +#else + timer_setup(timer, trace_irqoff_timer_handler, + TIMER_PINNED | TIMER_IRQSAFE); +#endif + + smp_call_function_single(cpu, smp_timers_start, + per_cpu_ptr(cpu_stack_trace, cpu), + true); + } +} + +static void trace_irqoff_cancel_timers(void) +{ + int cpu; + + for_each_online_cpu(cpu) { + struct hrtimer *hrtimer; + struct timer_list *timer; + + hrtimer = per_cpu_ptr(&cpu_stack_trace->hrtimer, cpu); + hrtimer_cancel(hrtimer); + + timer = 
per_cpu_ptr(&cpu_stack_trace->timer, cpu); + del_timer_sync(timer); + } +} + +static int enable_show(struct seq_file *m, void *ptr) +{ + seq_printf(m, "%s\n", trace_enable ? "enabled" : "disabled"); + + return 0; +} + +static ssize_t enable_store(void *priv, const char __user *buf, size_t count) +{ + bool enable; + + if (kstrtobool_from_user(buf, count, &enable)) + return -EINVAL; + + if (!!enable == !!trace_enable) + return count; + + if (enable) { + trace_irqoff_start_timers(); + sysak_module_get(&irqoff_ref); + } + else { + trace_irqoff_cancel_timers(); + sysak_module_put(&irqoff_ref); + } + + trace_enable = enable; + + return count; +} +DEFINE_PROC_ATTRIBUTE_RW(enable); + +static int sampling_period_show(struct seq_file *m, void *ptr) +{ + seq_printf(m, "%llums\n", sampling_period / (1000 * 1000UL)); + + return 0; +} + +static ssize_t sampling_period_store(void *priv, const char __user *buf, size_t count) +{ + unsigned long period; + + if (trace_enable) + return -EINVAL; + + if (kstrtoul_from_user(buf, count, 0, &period)) + return -EINVAL; + + period *= 1000 * 1000UL; + if (period > (trace_irqoff_latency >> 1)) + trace_irqoff_latency = period << 1; + + sampling_period = period; + + return count; +} +DEFINE_PROC_ATTRIBUTE_RW(sampling_period); + + +extern int trace_noschedule_init(struct proc_dir_entry *root_dir); +extern void trace_noschedule_exit(void); +extern int trace_runqlat_init(struct proc_dir_entry *root_dir); +extern void trace_runqlat_exit(void); + +int trace_irqoff_init(void) +{ + int ret; + struct proc_dir_entry *root_dir = NULL; + struct proc_dir_entry *parent_dir; + + cpu_stack_trace = alloc_percpu(struct per_cpu_stack_trace); + if (!cpu_stack_trace) + return -ENOMEM; + + stack_trace_skip_hardirq_init(); + + root_dir = sysak_proc_mkdir("runlatency"); + if (!root_dir) { + ret = -ENOMEM; + goto free_percpu; + } + + parent_dir = proc_mkdir("irqoff", root_dir); + if (!parent_dir) { + ret = -ENOMEM; + goto remove_root; + } + + if (!proc_create("latency", S_IRUSR | S_IWUSR, parent_dir, + &trace_latency_fops)){ + ret = -ENOMEM; + goto remove_proc; + } + + if (!proc_create("enable", S_IRUSR | S_IWUSR, parent_dir, &enable_fops)){ + ret = -ENOMEM; + goto remove_proc; + } + + + if (!proc_create("period", S_IRUSR | S_IWUSR, parent_dir, + &sampling_period_fops)){ + ret = -ENOMEM; + goto remove_proc; + } + + ret = trace_noschedule_init(root_dir); + if (ret){ + goto remove_proc; + } + + ret = trace_runqlat_init(root_dir); + if (ret){ + trace_noschedule_exit(); + goto remove_proc; + } + + return 0; + +remove_proc: + remove_proc_subtree("irqoff", root_dir); +remove_root: + sysak_remove_proc_entry("runlatency"); +free_percpu: + free_percpu(cpu_stack_trace); + + return -ENOMEM; +} + +void trace_irqoff_exit(void) +{ + if (trace_enable) + trace_irqoff_cancel_timers(); + trace_noschedule_exit(); + trace_runqlat_exit(); + free_percpu(cpu_stack_trace); +} + diff --git a/source/lib/internal/kernel_module/modules/sched/trace_runqlat.c b/source/lib/internal/kernel_module/modules/sched/trace_runqlat.c new file mode 100644 index 00000000..7879f77a --- /dev/null +++ b/source/lib/internal/kernel_module/modules/sched/trace_runqlat.c @@ -0,0 +1,675 @@ +#define pr_fmt(fmt) "runqlat: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "sysak_mods.h" +#include "proc.h" + +static unsigned long (*__kallsyms_lookup_name)(const char *name); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,7,0) +#include +static 
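/*
 * Hypothetical user-space driver for the procfs interface created by
 * trace_irqoff_init() above. The /proc/sysak prefix is an assumption
 * (sysak_proc_mkdir() decides the real root); the file names and the
 * ordering constraints come from sampling_period_store() and
 * trace_latency_store().
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* period is rejected while tracing is enabled, so set it first (ms) */
	write_str("/proc/sysak/runlatency/irqoff/period", "10");
	/* report threshold, must be at least twice the period (ms) */
	write_str("/proc/sysak/runlatency/irqoff/latency", "50");
	write_str("/proc/sysak/runlatency/irqoff/enable", "1");

	sleep(10);

	/* reading "latency" prints the per-CPU traces and then clears them */
	fd = open("/proc/sysak/runlatency/irqoff/latency", O_RDONLY);
	if (fd >= 0) {
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, (size_t)n, stdout);
		close(fd);
	}

	write_str("/proc/sysak/runlatency/irqoff/enable", "0");
	return 0;
}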
struct kprobe kprobe_kallsyms_lookup_name = { + .symbol_name = "kallsyms_lookup_name" +}; + +static int init_symbol(void) +{ + int ret = -ENODEV; + + ret = register_kprobe(&kprobe_kallsyms_lookup_name); + if (!ret) { + __kallsyms_lookup_name = (void *)kprobe_kallsyms_lookup_name.addr; + unregister_kprobe(&kprobe_kallsyms_lookup_name); + + pr_info("kallsyms_lookup_name is %px\n", __kallsyms_lookup_name); + if (!__kallsyms_lookup_name) + return -ENODEV; + } + + return ret; +} +#else +static int init_symbol(void) +{ + __kallsyms_lookup_name = kallsyms_lookup_name; + return 0; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0) +#include +#else +#include +#include +#endif + +#define MAX_TRACE_ENTRIES 128 +#define PER_TRACE_ENTRY_TASKS 16 +#define MAX_TRACE_ENTRY_TASKS \ + (MAX_TRACE_ENTRIES * PER_TRACE_ENTRY_TASKS) + +/* 20ms */ +#define THRESHOLD_DEFAULT (20*1000*1000UL) + +#define INVALID_PID -1 +#define INVALID_CPU -1 +#define PROBE_TRACEPOINTS 4 + +/** + * If we call register_trace_sched_{wakeup,wakeup_new,switch,migrate_task}() + * directly in a kernel module, the compiler will complain about undefined + * symbol of __tracepoint_sched_{wakeup, wakeup_new, switch, migrate_task} + * because the kernel do not export the tracepoint symbol. Here is a workaround + * via for_each_kernel_tracepoint() to lookup the tracepoint and save. + */ +struct tracepoints_probe { + struct tracepoint *tps[PROBE_TRACEPOINTS]; + const char *tp_names[PROBE_TRACEPOINTS]; + void *tp_probes[PROBE_TRACEPOINTS]; + void *priv; + int num_initalized; +}; + +struct task_entry { + u64 runtime; + pid_t pid; + char comm[TASK_COMM_LEN]; +}; + +struct trace_entry { + int cpu; + pid_t pid; + char comm[TASK_COMM_LEN]; + u64 latency; + u64 rq_start; + unsigned int nr_tasks; + struct task_entry *entries; +}; + +struct runqlat_info { + int cpu; /* The target CPU */ + pid_t pid; /* Trace this pid only */ + char comm[TASK_COMM_LEN]; /* target task's comm */ + u64 rq_start; + u64 run_start; + u64 threshold; + struct task_struct *curr; + + unsigned int nr_trace; + struct trace_entry *trace_entries; + + unsigned int nr_task; + struct task_entry *task_entries; + + arch_spinlock_t lock; +}; + +static struct runqlat_info runqlat_info = { + .pid = INVALID_PID, + .cpu = INVALID_CPU, + .threshold = THRESHOLD_DEFAULT, + .lock = __ARCH_SPIN_LOCK_UNLOCKED, +}; + +static int runqlat_ref; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +static struct tracepoint **runq__start___tracepoints_ptrs; +static struct tracepoint **runq__stop___tracepoints_ptrs; + +static int runq_init_local_tracepoints(void) +{ + runq__start___tracepoints_ptrs = (void *)kallsyms_lookup_name("__start___tracepoints_ptrs"); + runq__stop___tracepoints_ptrs = (void *)kallsyms_lookup_name("__stop___tracepoints_ptrs"); + if (runq__start___tracepoints_ptrs == NULL || runq__stop___tracepoints_ptrs == NULL) { + return -1; + } + return 0; +} + +static void runq_for_each_tracepoint_range(struct tracepoint * const *begin, + struct tracepoint * const *end, + void (*fct)(struct tracepoint *tp, void *priv), + void *priv) +{ + struct tracepoint * const *iter; + + if (!begin) + return; + for (iter = begin; iter < end; iter++) + fct(*iter, priv); +} + +/** + * for_each_kernel_tracepoint - iteration on all kernel tracepoints + * @fct: callback + * @priv: private data + */ +void runq_for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), + void *priv) +{ + runq_for_each_tracepoint_range(runq__start___tracepoints_ptrs, + runq__stop___tracepoints_ptrs, fct, 
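/*
 * Hypothetical wrapper around the pointer that init_symbol() resolves above;
 * once __kallsyms_lookup_name is set, any other unexported symbol can be
 * looked up the same way. Sketch only, not part of the patch.
 */
static void *runq_lookup_symbol(const char *name)
{
	unsigned long addr = 0;

	if (__kallsyms_lookup_name)
		addr = __kallsyms_lookup_name(name);
	if (!addr)
		pr_warn("symbol %s not found\n", name);

	return (void *)addr;
}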
priv); +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +static void probe_sched_wakeup(void *priv, struct task_struct *p, int success) +#else +static void probe_sched_wakeup(void *priv, struct task_struct *p) +#endif +{ + struct runqlat_info *info = priv; + + if (p->pid != info->pid) + return; + + /* interrupts should be off from try_to_wake_up() */ + arch_spin_lock(&info->lock); + if (unlikely(p->pid != info->pid)) { + arch_spin_unlock(&info->lock); + return; + } + + info->rq_start = local_clock(); + info->run_start = info->rq_start; + info->cpu = task_cpu(p); + arch_spin_unlock(&info->lock); +} + +static inline void runqlat_info_reset(struct runqlat_info *info) +{ + info->rq_start = 0; + info->run_start = 0; + info->cpu = INVALID_CPU; + info->curr = NULL; +} + +/* Must be called with @info->lock held */ +static void record_task(struct runqlat_info *info, struct task_struct *p, + u64 runtime) + __must_hold(&info->lock) +{ + struct task_entry *task; + struct trace_entry *trace; + + task = info->task_entries + info->nr_task; + trace = info->trace_entries + info->nr_trace; + + if (trace->nr_tasks == 0) + trace->entries = task; + WARN_ON_ONCE(trace->entries != task - trace->nr_tasks); + trace->nr_tasks++; + + task->pid = p->pid; + task->runtime = runtime; + strncpy(task->comm, p->comm, TASK_COMM_LEN); + + info->nr_task++; + if (unlikely(info->nr_task >= MAX_TRACE_ENTRY_TASKS)) { + pr_info("BUG: MAX_TRACE_ENTRY_TASKS too low!"); + runqlat_info_reset(info); + /* Force disable trace */ + info->pid = INVALID_PID; + } +} + +/* Must be called with @info->lock held */ +static bool record_task_commit(struct runqlat_info *info, u64 latency) + __must_hold(&info->lock) +{ + struct trace_entry *trace; + + trace = info->trace_entries + info->nr_trace; + if (trace->nr_tasks == 0) + return false; + + if (latency >= info->threshold) { + trace->latency = latency; + trace->rq_start = info->rq_start; + trace->cpu = info->cpu; + trace->pid = info->pid; + strncpy(trace->comm, info->comm, TASK_COMM_LEN); + info->nr_trace++; + if (unlikely(info->nr_trace >= MAX_TRACE_ENTRIES)) { + pr_info("BUG: MAX_TRACE_ENTRIES too low!"); + runqlat_info_reset(info); + /* Force disable trace */ + info->pid = INVALID_PID; + } + } else { + info->nr_task -= trace->nr_tasks; + trace->nr_tasks = 0; + trace->entries = NULL; + } + + return true; +} + +/* interrupts should be off from __schedule() */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +static void probe_sched_switch(void *priv, + struct task_struct *prev, + struct task_struct *next) +#else +static void probe_sched_switch(void *priv, bool preempt, + struct task_struct *prev, + struct task_struct *next) +#endif +{ + struct runqlat_info *info = priv; + int cpu = smp_processor_id(); + arch_spinlock_t *lock = &info->lock; + + if (info->pid == INVALID_PID) + return; + + if (info->cpu != INVALID_CPU && info->cpu != cpu) + return; + + if (READ_ONCE(info->cpu) == INVALID_CPU) { + if (READ_ONCE(info->pid) != prev->pid || +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) + prev->__state != TASK_RUNNING) +#elif LINUX_VERSION_CODE == KERNEL_VERSION(4, 18, 0) + prev->__state != TASK_RUNNING) +#else + prev->state != TASK_RUNNING) + return; +#endif + + arch_spin_lock(lock); + /* We could race with grabbing lock */ + if (unlikely(info->cpu != INVALID_CPU || + info->pid != prev->pid)) { + arch_spin_unlock(lock); + return; + } + info->rq_start = cpu_clock(cpu); + info->run_start = info->rq_start; + info->cpu = task_cpu(prev); + + /* update curr for migrate task probe using*/ 
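/*
 * Sketch of the accounting that probe_sched_switch() drives through
 * record_task()/record_task_commit() above (hypothetical names, no locking):
 * while the traced pid is runnable but off-CPU, every slice executed by
 * another task is charged to that task, and the whole sample is kept only if
 * the total runqueue latency reaches the threshold.
 */
struct rq_wait {
	u64 rq_start;	/* traced task became runnable          */
	u64 run_start;	/* start of the slice currently charged */
	u64 threshold;
};

/* Wakeup (or preemption while still runnable) opens a new measurement. */
static void rq_wait_begin(struct rq_wait *w, u64 now)
{
	w->rq_start = now;
	w->run_start = now;
}

/* Another task ran from run_start to @now while the target was waiting. */
static u64 rq_wait_charge(struct rq_wait *w, u64 now)
{
	u64 slice = now - w->run_start;	/* what record_task() would log */

	w->run_start = now;
	return slice;
}

/* The traced task finally got the CPU at @now: keep or discard the sample. */
static bool rq_wait_commit(struct rq_wait *w, u64 now)
{
	return now - w->rq_start >= w->threshold;
}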
+ if (!is_idle_task(next)) + info->curr = next; + arch_spin_unlock(lock); + } else { + u64 now; + + if (unlikely(READ_ONCE(info->cpu) != cpu || + READ_ONCE(info->pid) == INVALID_PID)) + return; + + arch_spin_lock(lock); + /* We could race with grabbing lock */ + if (unlikely(info->cpu != cpu || info->pid == INVALID_PID)) { + arch_spin_unlock(lock); + return; + } + + /* update curr for migrate task probe using*/ + if (!is_idle_task(next)) + info->curr = next; + + now = cpu_clock(cpu); + if (info->pid == next->pid) { + if (info->run_start) + record_task(info, prev, now - info->run_start); + record_task_commit(info, now - info->rq_start); + } else if (info->pid == prev->pid) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0) + if (prev->__state == TASK_RUNNING) { +#elif LINUX_VERSION_CODE == KERNEL_VERSION(4, 18, 0) + if (prev->__state == TASK_RUNNING) { +#else + if (prev->state == TASK_RUNNING) { +#endif + info->rq_start = now; + info->run_start = now; + } else { + runqlat_info_reset(info); + } + } else { + if (info->run_start) + record_task(info, prev, now - info->run_start); + info->run_start = now; + } + arch_spin_unlock(lock); + } +} + +static void probe_sched_migrate_task(void *priv, struct task_struct *p, int cpu) +{ + u64 now; + struct runqlat_info *info = priv; + struct task_struct *curr; + + if (p->pid != info->pid || info->cpu == INVALID_CPU) + return; + + /* interrupts should be off from set_task_cpu() */ + arch_spin_lock(&info->lock); + if (unlikely(p->pid != info->pid || info->cpu == INVALID_CPU)) + goto unlock; + + now = local_clock(); + curr = info->curr; + if (curr) { + get_task_struct(curr); + if (info->run_start) + record_task(info, curr, now - info->run_start); + put_task_struct(curr); + } + + info->cpu = cpu; + info->run_start = now; +unlock: + arch_spin_unlock(&info->lock); +} + +static struct tracepoints_probe tps_probe = { + .tp_names = { + "sched_wakeup", + "sched_wakeup_new", + "sched_switch", + "sched_migrate_task", + }, + .tp_probes = { + probe_sched_wakeup, + probe_sched_wakeup, + probe_sched_switch, + probe_sched_migrate_task, + }, + .priv = &runqlat_info, +}; + +static inline bool is_tracepoint_lookup_success(struct tracepoints_probe *tps) +{ + return tps->num_initalized == PROBE_TRACEPOINTS; +} + +static void tracepoint_lookup(struct tracepoint *tp, void *priv) +{ + int i; + struct tracepoints_probe *tps = priv; + + if (is_tracepoint_lookup_success(tps)) + return; + + for (i = 0; i < ARRAY_SIZE(tps->tp_names); i++) { + if (tps->tps[i] || strcmp(tp->name, tps->tp_names[i])) + continue; + tps->tps[i] = tp; + tps->num_initalized++; + } +} + +static int trace_pid_show(struct seq_file *m, void *ptr) +{ + struct runqlat_info *info = m->private; + + seq_printf(m, "%d\n", info->pid); + + return 0; +} + +static struct task_struct *loc_find_get_task_by_vpid(int nr) +{ + struct pid * pid_obj; + struct task_struct *task; + + rcu_read_lock(); + pid_obj = find_vpid(nr); + if (!pid_obj) + goto fail; + + task = pid_task(pid_obj, PIDTYPE_PID); + if (!task) + goto fail; + + get_task_struct(task); + rcu_read_unlock(); + return task; +fail: + rcu_read_unlock(); + return NULL; +} +static ssize_t trace_pid_store(void *priv, const char __user *buf, size_t count) +{ + int pid; + struct task_struct *task = NULL; + struct runqlat_info *info = priv; + + if (kstrtoint_from_user(buf, count, 0, &pid)) + return -EINVAL; + + if (info->pid != INVALID_PID && pid != INVALID_PID) + return -EPERM; + + local_irq_disable(); + arch_spin_lock(&info->lock); + if (info->pid == pid) { + if (pid == 
INVALID_PID) + sysak_module_put(&runqlat_ref); + goto unlock; + } + + if (pid != INVALID_PID) { + + info->nr_trace = 0; + info->nr_task = 0; + memset(info->trace_entries, 0, + MAX_TRACE_ENTRIES * sizeof(struct trace_entry) + + MAX_TRACE_ENTRY_TASKS * sizeof(struct task_entry)); + sysak_module_get(&runqlat_ref); + } else { + sysak_module_put(&runqlat_ref); + } + runqlat_info_reset(info); + smp_wmb(); + info->pid = pid; + task = loc_find_get_task_by_vpid(pid); + if (task) { + strncpy(info->comm, task->comm, TASK_COMM_LEN); + put_task_struct(task); + } else { + strncpy(info->comm, "NULL", 5); + } +unlock: + arch_spin_unlock(&info->lock); + local_irq_enable(); + + return count; +} + +DEFINE_PROC_ATTRIBUTE_RW(trace_pid); + +static int threshold_show(struct seq_file *m, void *ptr) +{ + struct runqlat_info *info = m->private; + + seq_printf(m, "%llu ms\n", info->threshold/(1000*1000)); + + return 0; +} + +static ssize_t threshold_store(void *priv, const char __user *buf, size_t count) +{ + unsigned long threshold; + struct runqlat_info *info = priv; + + if (kstrtoul_from_user(buf, count, 0, &threshold)) + return -EINVAL; + + info->threshold = threshold; + + return count; +} + +DEFINE_PROC_ATTRIBUTE_RW(threshold); + +static int runqlat_show(struct seq_file *m, void *ptr) +{ + int i, j; + struct runqlat_info *info = m->private; + + if (info->pid != INVALID_CPU) + return -EPERM; + + local_irq_disable(); + arch_spin_lock(&info->lock); + for (i = 0; i < info->nr_trace; i++) { + struct trace_entry *entry = info->trace_entries + i; + + seq_printf(m, "%*ccpu:%d\tcommand:%s\tpid:%d\tlatency:%llums\tSTAMP:%llu\trunqlen:%d\n", + 5, ' ', entry->cpu, + entry->comm, entry->pid, + entry->latency/(1000*1000), + entry->rq_start, + entry->nr_tasks); + } + arch_spin_unlock(&info->lock); + local_irq_enable(); + + return 0; +} + +static ssize_t runqlat_store(void *priv, const char __user *buf, size_t count) +{ + int clear; + struct runqlat_info *info = priv; + + if (kstrtoint_from_user(buf, count, 10, &clear) || clear != 0) + return -EINVAL; + + local_irq_disable(); + arch_spin_lock(&info->lock); + info->nr_trace = 0; + info->nr_task = 0; + memset(info->trace_entries, 0, + MAX_TRACE_ENTRIES * sizeof(struct trace_entry) + + MAX_TRACE_ENTRY_TASKS * sizeof(struct task_entry)); + + runqlat_info_reset(info); + smp_wmb(); + arch_spin_unlock(&info->lock); + local_irq_enable(); + + return count; +} + +DEFINE_PROC_ATTRIBUTE_RW(runqlat); + +int trace_runqlat_init(struct proc_dir_entry *root_dir) +{ + int i; + void *buf; + int ret = -ENOMEM; + struct tracepoints_probe *tps = &tps_probe; + struct proc_dir_entry *parent_dir; + struct runqlat_info *info = &runqlat_info; + + if((ret=init_symbol())) + return ret; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + if (runq_init_local_tracepoints()) + return -ENODEV; +#endif + + buf = vzalloc(MAX_TRACE_ENTRIES * sizeof(struct trace_entry) + + MAX_TRACE_ENTRY_TASKS * sizeof(struct task_entry)); + if (!buf) + return -ENOMEM; + info->trace_entries = buf; + info->task_entries = (void *)(info->trace_entries + MAX_TRACE_ENTRIES); + + parent_dir = proc_mkdir("runqlat", root_dir); + if (!parent_dir) + goto free_buf; + + if (!proc_create_data("pid", 0644, parent_dir, &trace_pid_fops, info)) + goto remove_proc; + + if (!proc_create_data("threshold", 0644, parent_dir, &threshold_fops, + info)) + goto remove_proc; + + if (!proc_create_data("runqlat", 0, parent_dir, &runqlat_fops, info)) + goto remove_proc; + + /* Lookup for the tracepoint that we needed */ +#if LINUX_VERSION_CODE < 
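/*
 * Layout of the single vzalloc() buffer carved up in trace_runqlat_init()
 * above, restated as a hypothetical helper; record_task() consumes
 * task_entry slots linearly, so a trace_entry's @entries pointer is simply
 * its first slot inside the shared pool:
 *
 *   buf -> [ struct trace_entry x MAX_TRACE_ENTRIES     ]
 *          [ struct task_entry  x MAX_TRACE_ENTRY_TASKS ]
 */
static struct task_entry *runqlat_next_task_slot(struct runqlat_info *info)
{
	if (info->nr_task >= MAX_TRACE_ENTRY_TASKS)
		return NULL;	/* record_task() treats this as overflow */

	return info->task_entries + info->nr_task;
}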
KERNEL_VERSION(4, 4, 0) + runq_for_each_kernel_tracepoint(tracepoint_lookup, tps); +#else + for_each_kernel_tracepoint(tracepoint_lookup, tps); +#endif + + if (!is_tracepoint_lookup_success(tps)) + goto remove_proc; + + for (i = 0; i < PROBE_TRACEPOINTS; i++) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + ret = tracepoint_probe_register(tps->tps[i]->name, tps->tp_probes[i], + tps->priv); +#else + ret = tracepoint_probe_register(tps->tps[i], tps->tp_probes[i], + tps->priv); +#endif + if (ret) { + pr_err("sched trace: can not activate tracepoint " + "probe to %s\n", tps->tp_names[i]); + while (i--) +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + tracepoint_probe_unregister(tps->tps[i]->name, + tps->tp_probes[i], + tps->priv); +#else + tracepoint_probe_unregister(tps->tps[i], + tps->tp_probes[i], + tps->priv); +#endif + goto remove_proc; + } + } + + return 0; +remove_proc: + remove_proc_subtree("runqlat", root_dir); +free_buf: + vfree(buf); + + return ret; +} + +void trace_runqlat_exit(void) +{ + int i; + struct tracepoints_probe *tps = &tps_probe; + struct runqlat_info *info = &runqlat_info; + + for (i = 0; i < PROBE_TRACEPOINTS; i++) +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + tracepoint_probe_unregister(tps->tps[i]->name, tps->tp_probes[i], + tps->priv); +#else + tracepoint_probe_unregister(tps->tps[i], tps->tp_probes[i], + tps->priv); +#endif + + tracepoint_synchronize_unregister(); + vfree(info->trace_entries); +} diff --git a/source/lib/internal/kernel_module/modules/schedtrace/schedtrace.c b/source/lib/internal/kernel_module/modules/schedtrace/schedtrace.c new file mode 100644 index 00000000..88accb64 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/schedtrace/schedtrace.c @@ -0,0 +1,259 @@ +#include +#include +#include /* regs_get_kernel_argument */ +#include /* PID_MAX_LIMIT */ +#include +#include +#include +#include +#include +#include "sysak_mods.h" +#include "proc.h" + +/* ARRAY_LEN is to define a trace buffer */ +#define ARRAY_LEN 1 +#define BUF_LEN 1024 +#define MAX_STACK_TRACE_DEPTH 8 + +struct tracepoints_probe { + struct tracepoint *tp; + char *name; +}; + +struct traceinfo { + int idx; + struct stack_trace trace[ARRAY_LEN]; + unsigned long entries[ARRAY_LEN][MAX_STACK_TRACE_DEPTH]; +}; + +static int trace_in_fly; +static int target_pid; +char buff[BUF_LEN] = {0}; +struct traceinfo traceinfos; + +struct tracepoints_probe mytp = { + .tp = NULL, + .name = "sched_switch", +}; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +static struct tracepoint **swtc__start___tracepoints_ptrs; +static struct tracepoint **swtc__stop___tracepoints_ptrs; + +static int swtc_init_local_tracepoints(void) +{ + swtc__start___tracepoints_ptrs = (void *)kallsyms_lookup_name("__start___tracepoints_ptrs"); + swtc__stop___tracepoints_ptrs = (void *)kallsyms_lookup_name("__stop___tracepoints_ptrs"); + if (swtc__start___tracepoints_ptrs == NULL || swtc__stop___tracepoints_ptrs == NULL) { + return -1; + } + return 0; +} + +static void swtc_for_each_tracepoint_range(struct tracepoint * const *begin, + struct tracepoint * const *end, + void (*fct)(struct tracepoint *tp, void *priv), + void *priv) +{ + struct tracepoint * const *iter; + + if (!begin) + return; + for (iter = begin; iter < end; iter++) + fct(*iter, priv); +} + +/** + * for_each_kernel_tracepoint - iteration on all kernel tracepoints + * @fct: callback + * @priv: private data + */ +void swtc_for_each_kernel_tracepoint(void (*fct)(struct tracepoint *tp, void *priv), + void *priv) +{ + 
swtc_for_each_tracepoint_range(swtc__start___tracepoints_ptrs, + swtc__stop___tracepoints_ptrs, fct, priv); +} +#endif +static void tracepoint_lookup(struct tracepoint *tp, void *priv) +{ + struct tracepoints_probe *tps = priv; + + if (!strcmp(tp->name, tps->name)) + tps->tp = tp; +} + +static void +(*stack_save_regs)(struct pt_regs *regs, struct stack_trace *trace); +static void +(*stack_save_tsk)(struct task_struct *tsk, struct stack_trace *trace); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) +static void trace_sched_switch(void *priv, + struct task_struct *prev, + struct task_struct *next) + +#else +static void trace_sched_switch(void *priv, bool preempt, + struct task_struct *prev, + struct task_struct *next) +#endif +{ + struct task_struct *p; + int i, size = 0; + + p = prev; + if (((pid_t)target_pid == p->pid) && (p->state)) { + struct traceinfo *tf = &traceinfos; + struct stack_trace *trace = tf->trace; + int idx = tf->idx; + + tf->idx = (idx + 1)%ARRAY_LEN; + trace->nr_entries = 0; + trace->entries = tf->entries[idx]; + trace->max_entries = MAX_STACK_TRACE_DEPTH; + trace->skip = 1; + stack_save_tsk(prev, trace); + + idx = 0; + for (i = 0; i < trace->nr_entries - 1; i++) { + if ((void *)trace->entries[i]) { + size = sprintf(&buff[idx], "<%px>", (void *)(trace->entries[i])); + idx += size; + if (idx > BUF_LEN) + break; + size = sprint_symbol(&buff[idx], trace->entries[i]); + idx += size; + if (idx > BUF_LEN) + break; + size = sprintf(&buff[idx], ","); + idx += size; + if (idx > BUF_LEN) + break; + } + } + trace_printk("%s\n", buff); + memset(trace, 0, sizeof(struct stack_trace)); + } +} + +static int pid_show(struct seq_file *m, void *v) +{ + seq_printf(m, "pid=%d\n", target_pid); + return 0; +} + +static int pid_open(struct inode *inode, struct file *file) +{ + return single_open(file, pid_show, inode->i_private); +} + +static ssize_t pid_write(struct file *f, const char __user *buf, + size_t count, loff_t *ppos) +{ + if (count <= 0 || count > PID_MAX_LIMIT) + return -EINVAL; + + if (kstrtoint_from_user(buf, count, 0, &target_pid)) { + pr_warn("copy_from_user fail\n"); + return -EFAULT; + } + + if (target_pid < 0 && target_pid != -1) + return -EINVAL; + + if (target_pid == -1 && trace_in_fly) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + tracepoint_probe_unregister(mytp.name, trace_sched_switch, NULL); +#else + tracepoint_probe_unregister(mytp.tp, trace_sched_switch, NULL); +#endif + trace_in_fly = 0; + } else if (target_pid > 0 && !trace_in_fly) { + int ret; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + ret = tracepoint_probe_register(mytp.name, trace_sched_switch, NULL); +#else + ret = tracepoint_probe_register(mytp.tp, trace_sched_switch, NULL); +#endif + if (ret) + trace_in_fly = 1; + else + return ret; + } + return count; +} + +static struct file_operations pid_fops = { + .owner = THIS_MODULE, + .read = seq_read, + .open = pid_open, + .write = pid_write, + .release = seq_release, +}; + +static int proc_init(void) +{ + struct proc_dir_entry *parent; + + parent = sysak_proc_mkdir("schedtrace"); + if (!parent) + return -ENOMEM; + + if(!proc_create("pid", + S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP, + parent, + &pid_fops)) + goto proc_fail; + pr_info("proc_init schedtrace success\n"); + return 0; + +proc_fail: + sysak_remove_proc_entry("schedtrace"); + return -ENOMEM; +} + +int schedtrace_init(void) +{ + int ret = 0; + + mytp.tp = NULL; + trace_in_fly = 0; + target_pid = -1; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + if (swtc_init_local_tracepoints()) + return 
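/*
 * trace_sched_switch() above serialises the saved stack into buff[] with
 * sprintf()/sprint_symbol() and manual index checks. For reference, a
 * hypothetical bounds-safe variant of that formatting loop can lean on
 * scnprintf() and the %pS specifier (not part of the patch):
 */
static size_t schedtrace_format_stack(char *buf, size_t len,
				      const unsigned long *entries,
				      unsigned int nr_entries)
{
	unsigned int i;
	size_t pos = 0;

	for (i = 0; i < nr_entries && pos < len; i++) {
		if (!entries[i])
			continue;
		pos += scnprintf(buf + pos, len - pos, "<%px>%pS,",
				 (void *)entries[i], (void *)entries[i]);
	}

	return pos;	/* bytes written, always less than len */
}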
-ENODEV; +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + swtc_for_each_kernel_tracepoint(tracepoint_lookup, &mytp); +#else + for_each_kernel_tracepoint(tracepoint_lookup, &mytp); +#endif + stack_save_tsk = (void *)kallsyms_lookup_name("save_stack_trace_tsk"); + stack_save_regs = (void *)kallsyms_lookup_name("save_stack_trace_regs"); + + if (!stack_save_tsk || !stack_save_regs) { + ret = -EINVAL; + pr_warn("stack_save not found\n"); + goto fail; + } + + ret = proc_init(); + if (ret < 0) { + pr_warn("proc_init fail\n"); + } + +fail: + return ret; +} + +void schedtrace_exit(void) +{ + if (trace_in_fly) +#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) + tracepoint_probe_unregister(mytp.name, trace_sched_switch, NULL); +#else + tracepoint_probe_unregister(mytp.tp, trace_sched_switch, NULL); +#endif +} diff --git a/source/lib/internal/kernel_module/modules/signal/trace_sig.c b/source/lib/internal/kernel_module/modules/signal/trace_sig.c new file mode 100755 index 00000000..b2153a5d --- /dev/null +++ b/source/lib/internal/kernel_module/modules/signal/trace_sig.c @@ -0,0 +1,220 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sysak_mods.h" +#include "hook.h" +#include "proc.h" +#include "blackbox.h" + +struct trace_sig_info { + char comm[TASK_COMM_LEN]; + int pid; + int sig; +}trace_info; + +static int sig_ref; +#define BUFFER_LEN 256 +static char process_info_buf[BUFFER_LEN]; +static int tracesig_bid = -1; + +static void save_process_info(struct task_struct *task, void *buf, int size) +{ + int ret; + + ret = snprintf(buf, size, "%s(%d)", task->comm, task->pid); + if (ret <= 0) + return; + + while (ret > 0 && task->parent && task->parent->pid > 1) { + size = size - ret; + if (size <= 1) + break; + task = task->parent; + buf += ret; + ret = snprintf(buf, size, "< %s(%d)", task->comm, task->pid); + } +} + +static void print_signal_info(struct task_struct *task, int sig) +{ + struct bbox_data_info data_info; + int ret, len; + + memset(process_info_buf, 0, BUFFER_LEN); + ret = snprintf(process_info_buf, BUFFER_LEN,"send sig %d to task %s[%d], generated by:", + sig, task->comm, task->pid); + if (ret <= 0 || ret >= (BUFFER_LEN - 1)) { + printk("ret %d\n", ret); + return; + } + + save_process_info(current, process_info_buf + ret, BUFFER_LEN - ret); + len = strlen(process_info_buf); + process_info_buf[len] = '\n'; + data_info.data = process_info_buf; + data_info.size = len + 1; + bbox_write(tracesig_bid, &data_info); +} + +#if KERNEL_VERSION(4, 19, 0) <= LINUX_VERSION_CODE +static void signal_generate_trace(void *ignore, int sig, + struct siginfo *info, struct task_struct *task, + int type, int result) +#elif KERNEL_VERSION(3, 10, 0) <= LINUX_VERSION_CODE +static void signal_generate_trace(void *ignore, int sig, + struct siginfo *info, struct task_struct *task, + int group, int result) +#else +static void signal_generate_trace(int sig, + struct siginfo *info, struct task_struct *task, + int group) +#endif +{ + if (trace_info.sig && trace_info.sig != sig) + return; + + if (trace_info.pid && trace_info.pid != task->pid) + return; + + if (strlen(trace_info.comm) && strcmp(trace_info.comm, task->comm)) + return; + print_signal_info(task, sig); +} + +static bool trace_enabled; +static void trace_sig_enable(void) +{ + if (trace_enabled) + return; + + tracesig_bid = bbox_alloc("tracesig", BBOX_TYPE_RING); + if (tracesig_bid < 0) { + printk("bbox alloc failed,cannot enable\n"); + return; + } + + 
hook_tracepoint("signal_generate", signal_generate_trace, NULL); + trace_enabled = true; + sysak_module_get(&sig_ref); +} + +static void trace_sig_disable(void) +{ + if (!trace_enabled) + return; + + unhook_tracepoint("signal_generate", signal_generate_trace, NULL); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) + synchronize_rcu(); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) || LINUX_VERSION_CODE <= KERNEL_VERSION(4, 17, 0) + synchronize_sched(); +#endif + bbox_free(tracesig_bid); + trace_enabled = false; + sysak_module_put(&sig_ref); +} + +static ssize_t signal_trace_write(struct file *file, + const char __user *buf, size_t count, loff_t *offs) +{ + int ret; + char cmd[256]; + char chr[256]; + int pid, sig; + + if (count < 1 || *offs) + return -EINVAL; + + if (copy_from_user(chr, buf, 256)) + return -EFAULT; + + ret = sscanf(chr, "%255s", cmd); + if (ret <= 0) + return -EINVAL; + + if (strcmp(cmd, "comm") == 0) { + ret = sscanf(chr, "comm %s", cmd); + if (ret <= 0) + return -EINVAL; + strncpy(trace_info.comm, cmd, TASK_COMM_LEN); + trace_info.comm[TASK_COMM_LEN - 1] = '\0'; + } else if (strcmp(cmd, "pid") == 0) { + ret = sscanf(chr, "pid %d", &pid); + if (ret <= 0) + return -EINVAL; + trace_info.pid = pid; + } else if (strcmp(cmd, "sig") == 0) { + ret = sscanf(chr, "sig %d", &sig); + if (ret <= 0) + return -EINVAL; + trace_info.sig = sig; + } else if (strcmp(cmd, "enable") == 0) { + trace_sig_enable(); + } else if (strcmp(cmd, "disable") == 0) { + trace_sig_disable(); + } else { + return -EINVAL; + } + + return count; +} + +static int signal_trace_show(struct seq_file *m, void *v) +{ + seq_printf(m, "comm: %s\n", trace_info.comm); + seq_printf(m, "pid: %d\n", trace_info.pid); + seq_printf(m, "sig: %d\n", trace_info.sig); + if (trace_enabled) + bbox_ring_show(m, tracesig_bid); + return 0; +} + +static int signal_trace_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, signal_trace_show, NULL); +} + +static struct proc_dir_entry *signal_trace_proc; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) +static const struct proc_ops signal_trace_fops = { + .proc_open = signal_trace_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_write = signal_trace_write, + .proc_release = single_release, +}; +#else +const struct file_operations signal_trace_fops = { + .open = signal_trace_open, + .read = seq_read, + .llseek = seq_lseek, + .write = signal_trace_write, + .release = single_release, +}; +#endif + +int trace_sig_init(void) +{ + signal_trace_proc = sysak_proc_create("sig_trace", &signal_trace_fops); + + return 0; +} + +int trace_sig_exit(void) +{ + trace_sig_disable(); + return 0; +} + diff --git a/source/lib/internal/kernel_module/modules/task_ctl/task_ctrl.c b/source/lib/internal/kernel_module/modules/task_ctl/task_ctrl.c new file mode 100755 index 00000000..7be37639 --- /dev/null +++ b/source/lib/internal/kernel_module/modules/task_ctl/task_ctrl.c @@ -0,0 +1,169 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sysak_mods.h" +#include "hook.h" +#include "proc.h" + +enum TASK_CTL_TYPE{ + TASK_LOOP, + TASK_SLEEP, + MAX_CTL_TYPE +}; + +#define TASK_CTL_VALID(x) ((unsigned)(x) < MAX_CTL_TYPE) + +struct task_ctl_info { + int pid; + enum TASK_CTL_TYPE type; +}ctl_info; + +static int taskctl_ref; +static bool ctl_enabled; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33) +static void syscall_enter_trace(struct pt_regs *regs, long id) +#else 
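/*
 * signal_trace_write() above accepts a tiny command language: "comm <name>",
 * "pid <n>", "sig <n>", "enable", "disable". A hypothetical user-space
 * sequence that traces SIGKILLs sent to one task and reads the result back;
 * the /proc/sysak prefix is an assumption (sysak_proc_create() decides it),
 * and "nginx" is only an example filter.
 */
#include <stdio.h>

static void sig_trace_cmd(const char *cmd)
{
	FILE *f = fopen("/proc/sysak/sig_trace", "w");

	if (!f)
		return;
	fputs(cmd, f);
	fclose(f);
}

int main(void)
{
	char line[512];
	FILE *f;

	sig_trace_cmd("comm nginx");	/* filter by task name */
	sig_trace_cmd("sig 9");		/* only SIGKILL */
	sig_trace_cmd("enable");

	/* ... reproduce the problem, then read the ring buffer ... */
	f = fopen("/proc/sysak/sig_trace", "r");
	if (f) {
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
	}

	sig_trace_cmd("disable");
	return 0;
}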
+static void syscall_enter_trace(void *__data, struct pt_regs *regs, long id) +#endif +{ + while(ctl_enabled && ctl_info.pid == current->pid) { + if (!TASK_CTL_VALID(ctl_info.type)) + break; + else if (ctl_info.type == TASK_SLEEP) + msleep_interruptible(100); + else + cond_resched(); + rmb(); + } +} + +static void task_ctl_enable(void) +{ + if (ctl_enabled) + return; + hook_tracepoint("sys_enter", syscall_enter_trace, NULL); + ctl_enabled = true; + sysak_module_get(&taskctl_ref); +} + +static void task_ctl_disable(void) +{ + if (!ctl_enabled) + return; + + unhook_tracepoint("sys_enter", syscall_enter_trace, NULL); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) + synchronize_rcu(); +#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) || LINUX_VERSION_CODE <= KERNEL_VERSION(4, 17, 0) + synchronize_sched(); +#endif + ctl_enabled = false; + sysak_module_put(&taskctl_ref); +} + +static ssize_t task_ctl_write(struct file *file, + const char __user *buf, size_t count, loff_t *offs) +{ + int ret; + char cmd[256]; + char chr[256]; + int pid; + + if (count < 1 || *offs) + return -EINVAL; + + if (copy_from_user(chr, buf, 256)) + return -EFAULT; + + ret = sscanf(chr, "%255s", cmd); + if (ret <= 0) + return -EINVAL; + + if (strcmp(cmd, "pid") == 0) { + ret = sscanf(chr, "pid %d", &pid); + if (ret <= 0) + return -EINVAL; + ctl_info.pid = pid; + } else if (strcmp(cmd, "type") == 0) { + ret = sscanf(chr, "type %s", cmd); + if (ret <= 0) + return -EINVAL; + if (strcmp(cmd, "loop") == 0) + ctl_info.type = TASK_LOOP; + else if (strcmp(cmd, "sleep") == 0) + ctl_info.type = TASK_SLEEP; + else + ctl_info.type = MAX_CTL_TYPE; + } else if (strcmp(cmd, "enable") == 0) { + task_ctl_enable(); + } else if (strcmp(cmd, "disable") == 0) { + task_ctl_disable(); + } else { + return -EINVAL; + } + + return count; +} + +static int task_ctl_show(struct seq_file *m, void *v) +{ + seq_printf(m, "pid: %d\n", ctl_info.pid); + if (ctl_info.type == TASK_LOOP) + seq_printf(m, "type: loop"); + else if (ctl_info.type == TASK_SLEEP) + seq_printf(m, "type: sleep"); + else + seq_printf(m, "type: invalid"); + + return 0; +} + +static int task_ctl_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, task_ctl_show, NULL); +} + +static struct proc_dir_entry *task_ctl_proc; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) +static const struct proc_ops task_ctl_fops = { + .proc_open = task_ctl_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_write = task_ctl_write, + .proc_release = single_release, +}; +#else +const struct file_operations task_ctl_fops = { + .open = task_ctl_open, + .read = seq_read, + .llseek = seq_lseek, + .write = task_ctl_write, + .release = single_release, +}; +#endif + +int task_ctl_init(void) +{ + task_ctl_proc = sysak_proc_create("task_ctl", &task_ctl_fops); + + return 0; +} + +int task_ctl_exit(void) +{ + task_ctl_disable(); + return 0; +} + diff --git a/source/lib/internal/kernel_module/modules/test_module/test.c b/source/lib/internal/kernel_module/modules/test_module/test.c new file mode 100755 index 00000000..5c04c24b --- /dev/null +++ b/source/lib/internal/kernel_module/modules/test_module/test.c @@ -0,0 +1,26 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int test_init(void) +{ + printk("test_module enter.\n"); + return 0; +} + +int test_exit(void) +{ + printk("test_module exit.\n"); + return 0; +} + diff --git 
a/source/lib/internal/kernel_module/modules/ulockcheck/ulockcheck.c b/source/lib/internal/kernel_module/modules/ulockcheck/ulockcheck.c new file mode 100644 index 00000000..29ff4cac --- /dev/null +++ b/source/lib/internal/kernel_module/modules/ulockcheck/ulockcheck.c @@ -0,0 +1,696 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "include/blackbox.h" +#include "proc.h" + +#ifdef CONFIG_X86 +#define MAX_SYMBOL_LEN 64 +#define PATH_LEN 256 +#define STACK_DEPTH 100 +#define STACK_DETAIL_DEPTH 20 +#define PROC_NUMBUF 256 +#define SHOW_BUF_LEN 64 + +#define WAIT_TIMEOUT HZ +#define LOCK_TIMEOUT HZ + +#define REGISTER_FAILED 1 + +LIST_HEAD(monitor_list); +LIST_HEAD(vma_list); +rwlock_t thdlist_lock; +extern struct mm_struct *get_task_mm(struct task_struct *task); + +static struct kprobe kp_wake = { + .symbol_name = "futex_wake", +}; + +static struct kretprobe krp_wait = { + .kp.symbol_name = "futex_wait", + .maxactive = 10000, +}; + +pid_t monitor_pid = 0; +pid_t lock_owner= 0; +bool enable_print_ustack = false; +bool enbale_ulockcheck = false; +unsigned long max_wait_time; +unsigned long max_lock_time; +int wait_delay_thresold = WAIT_TIMEOUT; +int lock_delay_thresold = LOCK_TIMEOUT; + +static int ulock_bid = -1; + +struct stack_info { + unsigned long bp; + char path[PATH_LEN]; +}; + +struct vma_info{ + struct list_head list; + unsigned long start; + unsigned long end; + int exectue; + char path[PATH_LEN]; +}; + +struct task_info{ + pid_t pid; + pid_t tgid; + struct list_head task_list; + char comm[TASK_COMM_LEN]; + + unsigned long fwait_count; + unsigned long fwait_delay; + + unsigned long fwake_count; + unsigned long fwake_time; + + unsigned long wait_time; + unsigned long outtime_count; + + unsigned long sch_total; + unsigned long total_delay; + + unsigned long uaddr; + + unsigned long lock_time; + unsigned long lock_delay; + bool lock; + unsigned long lock_count; + + //struct list_head vma_list; + struct stack_info stack[STACK_DETAIL_DEPTH]; +}; + +void save_mmapstack_trace_user(struct task_struct *task, struct task_info *tsk) +{ + struct list_head *vma_entry; + const struct pt_regs *regs = task_pt_regs(current); + const void __user *fp = (const void __user *)regs->sp; + int stack_len = 0 ; + int i; + + for (i = 0; i < STACK_DEPTH; i++){ + if (stack_len > STACK_DETAIL_DEPTH) + break; + list_for_each(vma_entry, &vma_list){ + //struct vma_info *vma = (struct vma_info *)vma_entry; + struct vma_info *vma = container_of(vma_entry, struct vma_info, list); + unsigned long tmp; + + if (!copy_from_user(&tmp, fp+i*__SIZEOF_LONG__, __SIZEOF_LONG__)) { + if ((tmp >= vma->start) && (tmp <= vma->end)) { + tsk->stack[stack_len].bp = tmp; + strcpy(tsk->stack[stack_len].path,vma->path); + stack_len++; + } + } + } + } +} + +static int save_calltrace(struct pt_regs *regs) +{ + struct list_head *tsk_entry; + struct task_info *new_tsk; + pid_t tgid = 0; + + list_for_each(tsk_entry, &monitor_list){ + struct task_info *tsk = container_of(tsk_entry, struct task_info, task_list); + tgid = tsk->tgid; + if (tsk->pid == current->pid){ + tsk->fwait_count++; + tsk->wait_time = jiffies; + tsk->uaddr = regs->di; + save_mmapstack_trace_user(current,tsk); + return 0; + } + } + if (tgid == current->tgid){ + new_tsk = kzalloc(sizeof(struct task_info),GFP_KERNEL); + if (!new_tsk) + return 0; + new_tsk->pid = current->pid; + new_tsk->tgid = tgid; + 
memcpy(new_tsk->comm,current->comm,sizeof(new_tsk->comm)); + new_tsk->fwait_count++; + new_tsk->wait_time = jiffies; + new_tsk->uaddr = regs->di; + + save_mmapstack_trace_user(current,new_tsk); + list_add_tail(&new_tsk->task_list,&monitor_list); + } + return 0; +} + +static void get_filename(char *buf, const struct path *path, size_t size) +{ + if (size) { + char *p = d_path(path, buf, size); + if (!IS_ERR(p)) { + strcpy(buf,p); + } + } +} + +/*static int before_futex_wait(struct kprobe *p, struct pt_regs *regs) +{ + int ret; + + if (!monitor_pid || (monitor_pid != current->pid && monitor_pid != current->tgid)) + return 0; + + write_lock(&thdlist_lock); + ret = save_calltrace(regs); + write_unlock(&thdlist_lock); + return 0; +} +*/ + +static int after_futex_wait(struct kretprobe_instance *ri, struct pt_regs *regs) +{ + struct list_head *pos; + unsigned long wait_time; + int i, len; + char task_show_buf[SHOW_BUF_LEN]; + struct bbox_data_info data_info; + + if (!monitor_pid || (monitor_pid != current->pid && monitor_pid != current->tgid)) + return 0 ; + + data_info.data = task_show_buf; + + read_lock(&thdlist_lock); + list_for_each(pos, &monitor_list){ + struct task_info *tsk_info = container_of(pos, struct task_info, task_list); + if (tsk_info->pid == current->pid){ + tsk_info->fwait_delay += jiffies - tsk_info->wait_time; + wait_time = jiffies - tsk_info->wait_time; + max_wait_time = wait_time > max_wait_time ? wait_time : max_wait_time; + if (wait_time > wait_delay_thresold){ + tsk_info->outtime_count++; + if (enable_print_ustack){ + len = snprintf(task_show_buf, SHOW_BUF_LEN, "task %d[%s], wait delay %ld ticks,", + tsk_info->pid, tsk_info->comm, wait_time); + data_info.size = len; + bbox_write(ulock_bid, &data_info); + len = sprintf(task_show_buf,"user stack:\n"); + data_info.size = len; + bbox_write(ulock_bid, &data_info); + for (i = 0; i < STACK_DETAIL_DEPTH; i++){ + if (tsk_info->stack[i].bp == 0) { + continue; + } + len = sprintf(task_show_buf, "#~ 0x%lx %s\n", + tsk_info->stack[i].bp, tsk_info->stack[i].path); + data_info.size = len; + bbox_write(ulock_bid, &data_info); + } + } + } + tsk_info->lock_time = jiffies; + lock_owner = tsk_info->pid; + //tsk_info->lock = TRUE; + tsk_info->lock_count++; + break; + } + + } + read_unlock(&thdlist_lock); + return 0; +} + +static int before_futex_wake(struct kprobe *p, struct pt_regs *regs) +{ + + struct list_head *pos; + char task_show_buf[SHOW_BUF_LEN]; + struct bbox_data_info data_info; + int len, i; + + if (!monitor_pid || (monitor_pid != current->pid && monitor_pid != current->tgid)) + return 0; + data_info.data = task_show_buf; + read_lock(&thdlist_lock); + list_for_each(pos, &monitor_list){ + struct task_info *tsk_info = container_of(pos, struct task_info, task_list); + if (tsk_info->pid == current->pid){ + //pos->fw_cout++; + tsk_info->lock_delay = jiffies - tsk_info->lock_time; + max_lock_time = tsk_info->lock_delay > max_lock_time ? 
tsk_info->lock_delay : max_lock_time; + if (enable_print_ustack && tsk_info->lock && (tsk_info->lock_delay > lock_delay_thresold)){ + len = snprintf(task_show_buf, SHOW_BUF_LEN, "task %d[%s], lock over %ld ticks,", + current->pid,current->comm, tsk_info->lock_delay); + data_info.size = len; + bbox_write(ulock_bid, &data_info); + len = sprintf(task_show_buf,"user stack:\n"); + data_info.size = len; + bbox_write(ulock_bid, &data_info); + for (i = 0; i < STACK_DETAIL_DEPTH; i++){ + if (tsk_info->stack[i].bp == 0) { + continue; + } + len = sprintf(task_show_buf, "#~ 0x%lx %s\n", + tsk_info->stack[i].bp, tsk_info->stack[i].path); + data_info.size = len; + bbox_write(ulock_bid, &data_info); + } + } + + //tsk_info->lock = FALSE; + tsk_info->fwake_time = jiffies; + tsk_info->fwake_count++; + break; + } + + } + read_unlock(&thdlist_lock); + return 0; +} + +static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs) +{ + int ret; + + if (!monitor_pid || (monitor_pid != current->pid && monitor_pid != current->tgid)) + return 0; + + write_lock(&thdlist_lock); + ret = save_calltrace(regs); + write_unlock(&thdlist_lock); + return 0; +} + + +/*static int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr) +{ + pr_info("fault_handler: p->addr = 0x%p, trap #%dn", p->addr, trapnr); + return 0; +} +*/ + + +static int futexpid_show(struct seq_file *m, void *v) +{ + struct list_head *pos; + + if (!monitor_pid) { + seq_printf(m, "futex monitor list is empty\n"); + return 0; + } + + seq_printf(m, "max_wait_time %ld ticks, max_lock_time %ld ticks\n", + max_wait_time, max_lock_time); + read_lock(&thdlist_lock); + list_for_each(pos, &monitor_list){ + struct task_info *tsk = container_of(pos, struct task_info, task_list); + if (lock_owner && (tsk->pid == lock_owner)) + seq_puts(m,"current owner:\n"); + seq_printf(m, "pid[%d],name[%s],futex wait count[%lu],total futex_delay[%lu],", + tsk->pid, tsk->comm, tsk->fwait_count, tsk->fwait_delay); + seq_printf(m, "futex lock count[%lu],lock delay[%lu],wait over thresold count[%lu]\n", + tsk->lock_count, tsk->lock_delay, tsk->outtime_count); + //seq_printf(m,"schdule delay[none], ratio :futex[none]/schdule[none]\n", + //tsk->fwait_delay, tsk->fwait_delay, tsk->fwait_delay); + + } + read_unlock(&thdlist_lock); + bbox_ring_show(m, ulock_bid); + return 0; +} + +static ssize_t futexpid_store(void *priv, const char __user *buf, size_t count) +{ + char buffer[PROC_NUMBUF]; + struct task_struct *tsk; + struct task_info *new_tsk; + struct mm_struct *mm; + struct file *vma_file; + struct vm_area_struct *vma; + struct vma_info *new_vma; + struct pid *pid; + pid_t pid_i; + int err = -1; + + if (!enbale_ulockcheck){ + pr_warn("ulockcheck disabled!"); + return count; + } + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + return -EFAULT;; + } + err = kstrtoint(strstrip(buffer), 0, &pid_i); + if (err) + return -EINVAL; + read_lock(&thdlist_lock); + + if (!list_empty(&monitor_list)){ + read_unlock(&thdlist_lock); + return count; + } + read_unlock(&thdlist_lock); + + rcu_read_lock(); + + pid= find_get_pid(pid_i); + tsk = pid_task(pid, PIDTYPE_PID); + if (!tsk || !(tsk->mm)){ + rcu_read_unlock(); + return -EINVAL; + } + + monitor_pid = pid_i; + + if (monitor_pid != 0 ){ + + new_tsk = kzalloc(sizeof(struct task_info),GFP_KERNEL); + if (!new_tsk) + goto failed_tsk; + new_tsk->pid = monitor_pid; + new_tsk->tgid = tsk->tgid; + 
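/*
 * entry_handler()/after_futex_wait() above time futex_wait() by matching each
 * return against the per-task list built at entry. For reference, the same
 * measurement can be made self-contained by letting the kretprobe core carry
 * the timestamp in ri->data; hypothetical, simplified sketch with no pid
 * filtering or blackbox output:
 */
#include <linux/kprobes.h>
#include <linux/ktime.h>

struct wait_stamp {
	u64 entry_ns;
};

static int wait_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	((struct wait_stamp *)ri->data)->entry_ns = ktime_get_ns();
	return 0;
}

static int wait_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	u64 delta = ktime_get_ns() - ((struct wait_stamp *)ri->data)->entry_ns;

	if (delta > NSEC_PER_MSEC)	/* report waits longer than 1ms */
		pr_info("futex_wait blocked %llu ns\n", delta);
	return 0;
}

static struct kretprobe wait_krp = {
	.kp.symbol_name	= "futex_wait",
	.entry_handler	= wait_entry,
	.handler	= wait_ret,
	.data_size	= sizeof(struct wait_stamp),
	.maxactive	= 64,
};
/* register_kretprobe(&wait_krp) arms it; unregister_kretprobe() removes it. */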
memcpy(new_tsk->comm,tsk->comm,sizeof(tsk->comm)); + + mm = get_task_mm(tsk); + + if (IS_ERR_OR_NULL(mm)){ + rcu_read_unlock(); + goto failed; + } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) + if (!mmap_read_trylock(mm)){ +#else + if (!down_read_trylock(&mm->mmap_sem)){ +#endif + rcu_read_unlock(); + goto failed; + } + + for (vma = mm->mmap; vma; vma = vma->vm_next){ + if (vma->vm_file && vma->vm_flags & VM_EXEC){ + char buff[PATH_LEN]; + + new_vma = kzalloc(sizeof(struct vma_info),GFP_KERNEL); + if (!new_vma){ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) + mmap_read_unlock(mm); +#else + up_read(&mm->mmap_sem); +#endif + goto failed; + } + new_vma->start = vma->vm_start; + new_vma->end = vma->vm_end; + vma_file = vma->vm_file; + + if (vma_file){ + get_filename(buff, &vma_file->f_path, PATH_LEN); + strcpy(new_vma->path, buff); + list_add_tail(&new_vma->list,&vma_list); + } + } + } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) + mmap_read_unlock(mm); +#else + up_read(&mm->mmap_sem); +#endif + write_lock(&thdlist_lock); + list_add_tail(&new_tsk->task_list, &monitor_list); + write_unlock(&thdlist_lock); + } + rcu_read_unlock(); + return count; + +failed: + kfree(new_tsk); +failed_tsk: + rcu_read_unlock(); + monitor_pid = 0; + return -ENOMEM; +} + +DEFINE_PROC_ATTRIBUTE_RW(futexpid); + +static int futexprint_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%d\n", (int)enable_print_ustack); + return 0; +} + +static ssize_t futexprint_store(void *priv, const char __user *buf, size_t count) +{ + char buffer[PROC_NUMBUF]; + int val; + int err = -1; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + return -EFAULT; + } + err = kstrtoint(strstrip(buffer), 0, &val); + + if (val == 1) + enable_print_ustack = true; + else if (val == 0) + enable_print_ustack = false; + return count; +} + +DEFINE_PROC_ATTRIBUTE_RW(futexprint); + +static int ulockcheck_enable(void) +{ + int ret_wake, ret_wait; + + kp_wake.pre_handler = before_futex_wake; + + krp_wait.handler = after_futex_wait; + krp_wait.entry_handler = entry_handler; + + ret_wake = register_kprobe(&kp_wake); + if (ret_wake < 0) { + pr_err("register_kprobe failed, returned %d\n", ret_wake); + return -REGISTER_FAILED; + } + + ret_wait = register_kretprobe(&krp_wait); + if (ret_wait < 0) { + pr_err("register_kretprobe failed, returned %d\n", ret_wait); + unregister_kprobe(&kp_wake); + return -REGISTER_FAILED; + } + pr_info("Planted return probe at %s: %p\n", + krp_wait.kp.symbol_name, krp_wait.kp.addr); + pr_info("Planted kprobe futex_wake at %p\n", kp_wake.addr); + + ulock_bid = bbox_alloc("ulockcheck", BBOX_TYPE_RING); + if (ulock_bid < 0) { + printk("bbox alloc failed,cannot enable\n"); + unregister_kprobe(&kp_wake); + unregister_kretprobe(&krp_wait); + return -ENOMEM; + } + + return 0; +} + +void ulockcheck_disable(void) +{ + unregister_kprobe(&kp_wake); + unregister_kretprobe(&krp_wait); + + pr_info("kprobe futex_wake at %p unregistered\n", kp_wake.addr); + pr_info("kretprobe futex_wait at %p unregistered\n", krp_wait.kp.addr); + /* nmissed > 0 suggests that maxactive was set too low. 
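/*
 * The VMA walk in futexpid_store() above repeats the same #if for the
 * mmap-lock API at every call site. A hypothetical pair of compat helpers,
 * keyed on the 5.10 cut-off the patch already uses, would keep that in one
 * place:
 */
static inline bool sysak_mmap_read_trylock(struct mm_struct *mm)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
	return mmap_read_trylock(mm);
#else
	return down_read_trylock(&mm->mmap_sem);
#endif
}

static inline void sysak_mmap_read_unlock(struct mm_struct *mm)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0)
	mmap_read_unlock(mm);
#else
	up_read(&mm->mmap_sem);
#endif
}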
*/ + pr_info("Missed probing %d instances of %s\n", + krp_wait.nmissed, krp_wait.kp.symbol_name); + + bbox_free(ulock_bid); +} + +static int futexenable_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%d\n", (int)enbale_ulockcheck); + return 0; +} + +static ssize_t futexenable_store(void *priv, const char __user *buf, size_t count) +{ + char buffer[PROC_NUMBUF]; + int val; + int err = -1; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + return -EFAULT; + } + err = kstrtoint(strstrip(buffer), 0, &val); + + if (val == 1){ + if (!ulockcheck_enable()) + enbale_ulockcheck = true; + }else if (val == 0){ + ulockcheck_disable(); + enbale_ulockcheck = false; + } + return count; +} + +DEFINE_PROC_ATTRIBUTE_RW(futexenable); + +static int wait_delaythresold_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%d\n", wait_delay_thresold); + return 0; +} + +static ssize_t wait_delaythresold_store(void *priv, const char __user *buf, size_t count) +{ + char buffer[PROC_NUMBUF]; + int val; + int err = -1; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + return -EFAULT; + } + err = kstrtoint(strstrip(buffer), 0, &val); + if (err) + return -EINVAL; + + wait_delay_thresold = val; + return count; +} + +DEFINE_PROC_ATTRIBUTE_RW(wait_delaythresold); + +static int lock_delaythresold_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%d\n", lock_delay_thresold); + return 0; +} + +static ssize_t lock_delaythresold_store(void *priv, const char __user *buf, size_t count) +{ + char buffer[PROC_NUMBUF]; + int val; + int err = -1; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + return -EFAULT; + } + err = kstrtoint(strstrip(buffer), 0, &val); + if (err) + return -EINVAL; + + lock_delay_thresold = val; + return count; +} + +DEFINE_PROC_ATTRIBUTE_RW(lock_delaythresold); + + +int ulockcheck_init(void) +{ + struct proc_dir_entry *parent_dir; + struct proc_dir_entry *entry_enable; + struct proc_dir_entry *entry_print; + struct proc_dir_entry *entry_pid; + + parent_dir = sysak_proc_mkdir("ulockcheck"); + if (!parent_dir) { + goto failed_root; + } + + entry_enable = proc_create("enable", 0644, parent_dir, &futexenable_fops); + if(!entry_enable) { + goto failed; + } + + entry_print = proc_create("enable_print_ustack", 0644, parent_dir, &futexprint_fops); + if(!entry_print) { + goto failed; + } + + entry_pid = proc_create("ulockcheck_pid", 0644, parent_dir, &futexpid_fops); + if(!entry_pid) { + goto failed; + } + + if(!proc_create("wait_delaythresold", 0644, parent_dir, &wait_delaythresold_fops)) + goto failed; + + if(!proc_create("lock_delaythresold", 0644, parent_dir, &lock_delaythresold_fops)) + goto failed; + + return 0; + +failed: + sysak_remove_proc_entry("ulockcheck"); +failed_root: + return -1; +} + +int ulockcheck_exit(void) +{ + struct list_head *tsk_entry; + struct list_head *vma_entry; + struct list_head *tsk_prev; + struct list_head *vma_prev; + + if (!monitor_pid) + return 0; + + if (enbale_ulockcheck) + ulockcheck_disable(); + + list_for_each(tsk_entry, &monitor_list){ + struct task_info *tsk = container_of(tsk_entry, struct task_info, task_list); + tsk_prev = tsk_entry->prev; + + list_del(tsk_entry); + kfree(tsk); + tsk_entry = tsk_prev; + } + + list_for_each(vma_entry, &vma_list){ + struct vma_info *vma = 
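/*
 * The list teardown in ulockcheck_exit() frees entries while walking
 * monitor_list and vma_list with list_for_each() and hand-maintained prev
 * pointers. The usual idiom for delete-while-iterating is list_for_each_safe(),
 * which caches the next node before the entry is freed; a hypothetical
 * equivalent for monitor_list:
 */
static void free_monitor_list(void)
{
	struct list_head *pos, *next;

	list_for_each_safe(pos, next, &monitor_list) {
		struct task_info *tsk = container_of(pos, struct task_info, task_list);

		list_del(pos);
		kfree(tsk);
	}
}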
container_of(vma_entry, struct vma_info, list); + vma_prev = vma_entry->prev; + + list_del(vma_entry); + kfree(vma); + vma_entry = vma_prev; + } + return 0; +} +#endif diff --git a/source/lib/internal/kernel_module/sysak_mods.c b/source/lib/internal/kernel_module/sysak_mods.c new file mode 100644 index 00000000..5804e961 --- /dev/null +++ b/source/lib/internal/kernel_module/sysak_mods.c @@ -0,0 +1,100 @@ +#include "sysak_mods.h" + +int __attribute__((weak)) trace_sig_init(void) +{ + return 0; +} + +int __attribute__((weak)) trace_sig_exit(void) +{ + return 0; +} + +int __attribute__((weak)) memleak_init(void) +{ + return 0; +} + +int __attribute__((weak)) memleak_uninit(void) +{ + return 0; +} + +int __attribute__((weak)) memhunter_init(void) +{ + return 0; +} + +int __attribute__((weak)) memhunter_uninit(void) +{ + return 0; +} + +int __attribute__((weak)) trace_irqoff_init(void) +{ + return 0; +} + +int __attribute__((weak)) trace_irqoff_exit(void) +{ + return 0; +} + +int __attribute__((weak)) task_ctl_init(void) +{ + return 0; +} + +int __attribute__((weak)) task_ctl_exit(void) +{ + return 0; +} + +int __attribute__((weak)) schedtrace_init(void) +{ + return 0; +} + +int __attribute__((weak)) schedtrace_exit(void) +{ + return 0; +} +int __attribute__((weak)) mmaptrace_init(void) +{ + return 0; +} +int __attribute__((weak)) mmaptrace_exit(void) +{ + return 0; +} +int __attribute__((weak)) disk_hang_init(void) +{ + return 0; +} +int __attribute__((weak)) disk_hang_exit(void) +{ + return 0; +} +int __attribute__((weak)) ulockcheck_init(void) +{ + return 0; +} + +int __attribute__((weak)) ulockcheck_exit(void) +{ + return 0; +} + +struct sysak_module sysak_modules[] = { + { "trace_sig", trace_sig_init, trace_sig_exit}, + { "memleak", memleak_init, memleak_uninit}, + { "trace_irqoff", trace_irqoff_init, trace_irqoff_exit}, + { "task_ctl", task_ctl_init, task_ctl_exit}, + { "schedtrace", schedtrace_init, schedtrace_exit}, + { "mmap_trace", mmaptrace_init, mmaptrace_exit}, + { "iosdiag", disk_hang_init, disk_hang_exit}, + {"ulockcheck", ulockcheck_init, ulockcheck_exit}, + {"memhunter", memhunter_init, memhunter_uninit}, +}; + +const int sysk_module_num = sizeof(sysak_modules) / sizeof(struct sysak_module); diff --git a/source/lib/internal/kernel_module/sysak_mods.h b/source/lib/internal/kernel_module/sysak_mods.h new file mode 100644 index 00000000..bf82d82a --- /dev/null +++ b/source/lib/internal/kernel_module/sysak_mods.h @@ -0,0 +1,21 @@ +#ifndef SYSAK_MOD_H +#define SYSAK_MOD_H + + +typedef int(*sysak_module_func)(void); + +struct sysak_module { + char name[16]; + sysak_module_func init; + sysak_module_func exit; +}; + +extern struct sysak_module sysak_modules[]; +extern const int sysk_module_num; +extern void sysak_module_get(int *mod_ref); +extern void sysak_module_put(int *mod_ref); +extern int sysak_dev_init(void); +extern void sysak_dev_uninit(void); +extern int sysak_bbox_init(void); +extern void sysak_bbox_exit(void); +#endif diff --git a/source/sysak b/source/sysak new file mode 100755 index 0000000000000000000000000000000000000000..ae205066530d74b3d549ff91b370e945cc8b4219 GIT binary patch literal 38920 zcmeHw4SZY0mG9V&i3!1WNFd=O&E+G(d{}l867osqfIxweFC4yutjMx0B3Y_*mBdhr zTN4&z3@KYE{Uq(a@wd^ zdHa5E-|KIE6YJdh&pBt#oH;Xh?&#jp&AwF^78ex>eH4rH1f>pDdKr?RLfp24lL}iT zjuc+;IWbcl0b~OHyqrT+o@qLwRBKwM_)0)YZc(uW78QH>MAMTbEHu>?5-nAEl6X~0 z(nEDJsi_bK10+Ybiq)J1ZICCLk}-4~FNu~T^GL2z$yF+TO%IV`9GdF-kvjUTSM{qe z_98`TFA;G8f99wze(h^!Bc1z~Ne2HiFP~_7k}6MAEw>JGTt6xH?|xP)j`gZMSHCFx 
zg`%o>Q4@`9T6j)Pw6QuGX-#jd-nM9A^}=&Jskmnz*PHYc??tOOa7onOS{2zm4S$4D z{u9gJ-}(JF?mczgAD%sL*TU%^G&l7AEAe{pM>@D(bW&7VOjr&6&X{?}NsHg^(%}Kb zd8`5dF;0k6&G;;YU+sdQ<${~gZy+!KI0`@^{c#uk$1eCp7yGHJ7Sg}ZMNgLtKIDQQ z@1o!Dg1_UU=Rz0$`7Zp13;#d5@PF3@KgUJSb{GC?7yi3l_}g9Z9nfPSFaLNNK%w@! z(FN~w!I!$=Q(Wwy*fHNWVc>WsB2{-a^fpIWI!`?!XGLxZ1n+R`sZ=P~+GI;}lG&lJDW2Rc5@|Ek5=@HN=Eg`;Bv{|( zNHi*%!g1(t4x1^`8>M0*&EZ(WY)ger5evtNAP4_J8slkGG(lNPG=-w^R9M7<(P%tG z9HF2olHp)95j0z1OUP_XgaexbGPg0DgnV1d497%5N;YjxM$E8if=nY78Vs3{c&peN zjF^cC5NJdt)2+c+SOi0%aEdJ?2LuA-v_Q%XCe1)B7=g{L+EdAJSX{Jf<%;tI^E~HT zXY;MId7cF#u(JNr04xeOM_^F0{?hZK@z!vCa8oo4-OaIhtI{2itOXg-p2g~4k#(jK zcUpw27UfQ`WO8R(kK~}}q3c%({xE;wln~P+k)vs5P!Eu>shlnm*Qs>+gvgO(Sqll_ z7+BN5l&*oLd?r6KHvBjnK5WBJwBaK*{0tken#yv&B5V#6zK_^CF0h7CW>h8s3~whgbc;d5+w ztqnijhA*<=RW{sf!_Tzgej9$44PR@+&$i)L+VE-{{!Yc`KJ<4CmHE3$e_dw?|Mot! zc=V9J#?c?-* zO4Cq~-OcHTDNRE_b|=Q1($tl+g40)1n!0dy_!9uL z*HgNZ(gU2nn9|f`vxhjng3`xOx{uRKC_Rp`cO3$El1E(iZnz~|kEvJhqOyIFl%}qjH8}k` zrKt;MD>?l;N>kU%3Qqrq($wX$!yi-qe?@77(gU3S1*K1>^dU|^MrrC=**;G1r!;k` z>~2m!Olj&$*`1vJA*HDcWp{A;`;?}xlWphpHz`eBCY#{&-IT7PbOX{9$8+CU+4;+%3c@_%& z-KA7-|KbtzSagX!;H55cxAIN;(Uv$v_ z%qQkFDCkxS%0>s8DrQ}flc)0AmwyMcB0Y73zw@=s4RGqg(k>uH8z0~CcvH?e>Tkq9 zcsnsd>*uM+&I9`&EE1vzf1=laj#p%Mp`CVYe4Lp&hHCwtHy!qO`40O#eIx#U-!Pfq z?;9dAxGM8gF#EeI!7x&>NBApx`us)sf2iUoqT(lgvxOl9D$e-_S&qsXtnhc-G%V5> zE}*m1H|+25jT8YJ5#~`!)h&JL?=zP<6UeZCsqe5k)!(&Y7%ABDoWHX_^T}Ue>CdI5 zUmn^4)&B176xNja4yo2ZI{L-($jQw@^@OK{U?Q3cfnXiJu z%ANpSR<>~!vXjWRiZU<&bv`2H`7s zNO55v$fZ0_HQU~oKHw)u!mQ%}bg=otysG1-kAyiH6`<0x-@=E#>)8Lc9A-cbe2LCb zx0gJ$uVG_@*nb*|?h#~5XTLPCiquP9QvT}?M@JDVgY~L}^d($^4_a5dYMnPm>#v~3 zrnN(Ag(Iv+sHEz39XmxSg)4eKFa09z>Qi=6hIaTD38O%!SW8z?dyqIf!AYpN>;&~t zlKDF@vYPCj>=BC7$9l-4y$wNx1K9(2=Fs*=pfL+Y{CzMwDtRA9ll5}`E}ZQpdGwWA z`pn6D3FY(jY}oSjGHJ^U%a$*YEz`9vgQa_^m2)1zb&Y(48t$jY>ABref8{|({Ry+` z-=>6}0k;T-bJYiLqg@JG8lM9c9Risxz(8WlK_t%)O)7% zmYfS`O@ak`sV@$$k(TvmCV~&Wa7ynCV#vO)tLIU}Nygv#%zkQGZhtc6HSlQZ#iS5k z&$+!8(*b|yqlf=YOxuan?K7@u5GyHCO! 
zMO#^y@1yMBplE;h_5o@}($ljMqE`3%0OhM(tLBdoX4U*=u5Z?>)S#fAu+)&5EM1gA zgJpzy66%!kcdwnO^r>O=C-1>qO9&&;m=b+PmZ3yXRHCKCMirMM@E0MvLyb8sts65$ z9^BqnvHf};7Uj6}U2t1P_h-II#>sliAeq*iLgATG1hf*33Td`KhY$%hG0ZwZB^yDLqoRm&&6U;l}Bc+_F`# zB@xL+ar0YvMi&#MrhN!;qpH~RFRGYrs+dbYwV1>9V*acg$F13)*N{k50Mo_Ge0Q?u>DRhL+ApGbmAA;xgOO`O!Tm$);IP z(blz{7<%4RHTx3OP@6~L+{~320CMc7%HX7-Y9C!p8R$ zjrwab>Obl4O!xUaH|)(#vPjiI-^&I~M7g&8z6?iSzi%J`bGk~mEaKW?Lc=qn>`x(q z(eFuNo(|F{OvB%G=>T^J8Vqsa>Vcf>&F8`H!3N2Vv2p|0B+D-?mJ^9(xzwat`0|%y z8Tk`fPEg`|sf1kKi^%hL`(7sW8B6JVP+GCa_wuqfvyACg>?A~{;JMQ}X%d`8=JaQ- z#56))#8DqYc~-Z=Lib7(W%p;!mMG14+A47seTy!QXaYX5Pp4{Sk3URfjG7JRP50qz z@_)bYRU&9P;(Kr{(hbhNzdiQs!d5=1|2>leS_yekt9sjO=HRbv{ikasq~(PG4rQ zN&0}UtTNxyKEiF58N5JhA&z0C;b1Rm$~>nyPU*O5h_CD17Cc2ow&bB{C(1$0$X?Y| zx_SZkjpMpXSIM)G%HnGa#n;MHBF9*HjKr7o3u6)1cF?lT!rAJuO z33$H!rn*!Qp4e6T{yDPi^-|Mken3h~x%I4@$x8HUOqtW{lO7K=kV7s)Oww@G2kri@ zQvX~dcL()9IFDw>6yyi?&x4GdhWFvC6xN>k?=T|!mK2_97jDfHURFT(dJ=wE3cplq zD|xOI##B|7d{F`6Y6uT*>s^nm%vayQc@*3uJrSg-Vy(!A2R~KC>m;^-#lE1#&Xr<@ zF1e7|{|GUC>x(&Z=S?56TRS&ox=JrvLGAD*G~9BWy??m#FQRTh|3os&J@{UCj z&S^gx)s{?FwBoWaGPe$~AN0VV&r?D?n?9X~R?ITzIeBPoOi%9GPBwOyuB$U-JXqHd zgYFzMAHYsMCsVVbYgn=t{>{|wg<>?;f&Fdpe$N;2(R(vK7|#3%veGvlI3q7&jz{YY z-ItCarF%8dcVHUJsM5bYv=ifL=c7>?PCFlI08xXgx|n4maO(EHHZ`64KDt&{=>bG9 zT4oY!%H9bKqb$kN5=-~?-RM#@M$(|SpN6s?p7ZN5^8P`xjJd3#WHY@8(e(II6PMot=GyWltOn+Ol-zoXfrdv+6pG)&z$ zOoLe+xI%YrCkHL{eRT78D)yAVS#8j5@o^UK!Co(Vd8P;QgL5pjKNEhPN_@;hu!KW= zPiMKD1XN`GIK8WM4#om%3fcS< zsN+~`4;@2^&YQm7anrYjIdLyVuNvn%eM7il>|`JH`*tZx9SL2;$1VZwQ|TuaJ*4Pi zqAEJFAK+X%{lLHkf9aF!8W0M!h*)vUubo+aDhu@}NBs*3;btJ)4Iw{9vWh+4GPrmh zo#Rfh43m+an2h)_IP^gpHAbjVmY=isdr3}Sw-On6i}H8?IN1AjxRvXaIfDGoQaf%_ z<$%dglbr8bOs{BhR72mMRBUfS-|eO4^-K}+2a9%aRQNtx>Km%K^DpRX)WSgL@NK?VmDBXBMa`1D;O_LjJ2(>!=MN!PF?qtmXXsJJ!ZFOl;-al3%_C-EExeUh9W2w}lMgw~z;f+hd{9bd+@M0A^AH!aP|D>L?xP zS#Gy`7CDoGr3?LnYhEdn8{mYsWgWhEi!iO^rctG+K_!!hy}a7-GPPT-`F;e8x_s~E zu1K9X4XI$|CX{#V{ajn~1PK`%Fp~XPLTc44zzaW;c|R4U`wy<+8-`%xuw0EOQGv}-%%T7mj|cp`UY>yuYD=Fm&CuTbmhidAgioMWMJB)1;^jaSu`HhTlH=*tS4 zt-HIHEa|={!9-pN9UXJIj1bDV`eT+bm_{9yGjuO7P!Koji&U>J-QBqi>F+ziK@(T1 zr81=FVikCI%j(FLW^)c2X{2s1UCs6hy5VjJHHe;d_UQW_6{oJ=*DEELI+Vye1=7*8 z)h^j=le}F?PIX95>MyDf99Cq!;J(CF5H2p#qBD6Z?Ky@qoewMu&@KZ zPE#Hl-y!##-{ntqcpbj;(GhZb*F8+Sx6_w>xo-|~Qw+h8qoxOMLKg}pUMo=cqfoR4 z?hKu(?6V$+k_QVYNlPWT3E$54!0zMtj;&4gsAX2p!Cvktr^0#+nzL*;ucjR(aV6$h zf9GlU5T@M&@D2WW=**xl&~vtedhS&15rUPd*0kRH0A#|iz=_}-&DL_5v$(~3F7u{O z?Jxb&>9W!KOYgGIy46{h?!tTMR;!nSDRZ)HEwiG(^m@&5fLMNp5+%zghD*oCij;P%#nrT zYO2!IY^$47DO{9kHPje2FtKBTMr(q>3q3Ie3%ZNG-loieJek zPtN>=Vwg%EZ0Ii)N@0KL+oz2+K-+$u)Nqtl?BKLqnMdLj-vRpu;5A!=e-*b$xd!L& zf^cc>(S!6)UqVRJRfDhBn7Wrj1-?tB@0WPQ;xCF{0~g}{RX`&LX)xDoBK|KA8500t zrJL?iuo%e%qxn6w@T%@so})s#c`25Lz&I+6GEsp15(`eK+xvTV@-ykcn@gj$LOMbnzJeceFhATWJHcy~l+e$Q9O0BdnZ#eZ$W9$rwiZo=`F-?2HqZ zdEo`SMjHMk<3%|}UerDryjViU>UiB=ZOIH^a$SaoQz=(La#%oCwWF_VYQSc*y5Snl z`}-FiQ+v#lY#rO*ReItqBRA{47m8^x1ztk9Uc>+7h0)Q>n+P&A@Bp>c@t@GSEt(!j zqhc`t{rTs31VcU#QFq14&gbhcU)lLy-G;i(kL&7JbY;dz@b6nJH2_|cz@n(sLnnW@ls)@xL)6sBB)TC3C z845;qHUXO=tu=`@vjvaSGD9vsFCUn%3gzMqGaiqo3_LMxq*LLfkqSqfs$0U*gb{2F zM%z*mJc3JR+4$r$y+&i$3`U|xq_ruoSnw2W#DpT~*%Aqb#|Ul;@rxh2&Kpv}=I|0j zs=p=_YuspDlS*&Gi9xAgvN^SJ<0L~K0G8-;1x4{FuW?OvaO2qkQZ4bVhLld_@m06f zP=$!gFt}=Db21h*BSyTHtDZ`PLy@L7r66CvS&FiJFm1+-#`xCOXgt_xY%-hRr$!?b zk0tOtajOYWCgU++aad=>BFSVtImVuNDrIT>Z2BOPZ&ahD!ayyeZJ%b8#|Uo=#u8D> z+tpBLR5xm!(r;*raM&9zORk%9UQJ_oOHFGU59g~Kfd}0o zz8sEhiqq$oU=$@m33o)U*Qe`;-m%&}H$)P6z8;@s7_m?rO%fgyM@1UjnsmnboDr1m zseGCeEG01-s5l8d>SRkyzS#vp*T1^nSYT9>tE<0YRIhNf;TLBa7-ba5{dYpE1 
zGyQMb3QbhB-6OiaLKty3C7No+uoP%8@?%RdWl;Cq6h==&$1?H$MHt<`F>bWQ)5g|d zD+Pt&97$+YQoP3J4Ig>dxTVNPAaf}3b8pm7g1!r*qcfsvBz!3EJcoomVC zpivz>-6LjSbKR1SOZZbDJaL^33n;>{qDYO*Qx`*_FM3Pp%5$TT1S~ggyh$ucB*R;P zF_GBPL_B5QXyx-2y}B_>lF}`lQ2B@=QO#ow#R1ZPl+)WX@&;t{j zf*9KI3KA04K|w}|>K0MW*L+dkEa=i8)UX=J=@T__@Zz^PYjnyn`pOtWOE?GR@U}!e zX&RSZby?jdfh*Rmza+4>uHKKKx#+}_rSwkfDX=~B%;;zX&To5mbaWBuUeHybt^YbY zdK>89g6;xc{@m#3lc2jmGoaOnM@P#rjjH+e=x7z_wBKO52KoSK3+U~bHQoj~33pJt zK);9^m?uFSaKD%V9RMvW!8;^aI;jG^gO*T0S7Qy0o;ZIB^fu53v81>Qv>IO`JPGSA=u42DO+a0F<*mi%AALjv z3RL-bfc_r&KeXo;|9~0DmWS{cLz``Ox_?w9~Ew<+u ze{9K520n`X*T84V7Y|tZXCwcK=SN5HcI0-Duf2g#1IuztSOpnI%6N zc9r3_zuqCg#*#l9`OA?1m?QsDD}NR8Uqb$u9Qkclew5_V7k=v~|FBj59mxLxOOCfV z@^7~CA42{{^p&d}`ZrkmpF;lEk$)9v&VTime}{k{LLb}dkpGq?KN)>(ExvK8vge05 zo8#wfCC%(zMn<#|e^%vegZIM(YD~==?u_GD^$6;=W=p5o?bV??jnC zf>9y*ie;LfG^AsnGEnc;F+;-QSVc9#t(ZL4HZY}IN;(d!481NhQNm)nqP3H}e5Uc8 zN6NIedxzqqjRxq@<>UKGu1|yF$2ZNK*8aq8A*Vkq^RnFk>kqw;UA`R`s`kEI(I!Q= zD0-`+_bU2;qI(oQsOWQw{z1|A6g{Fs*8ey~=P0^R(F+y5T+t>)w8aQdz>B~$k+ZW`u#wMjV5luIPm3E|Bg6~aOkksihM3#T% zMQg4QXZkQu=&?uPJakkz%0CO=--I>?LeU_;zYtR$d1r>20{E_)?Ty2~!}fosj`%aEUE&qNx+thFDm`vYB4(f| ziCPB|asFCvtga)D0X7AHd`MlgZnPy>q`8%FxAxR~g=(7LM&s={-kyc}@_7HGaq+D2 z{y9G0A`VU-A8+N=M_t?h>s&vDy7wBN5B+t`hiE=W^V=exFP-U_&*?TTRc%XivK(*W z`9QwR_R{jQ0gYZc_vok_tWlQG%yv^zRNi`7t|OTS)(O z(Xg%%j;CVQ7s9P?{_N`UOG$WIsClOY!_o$Jr2f;W14M$~1};9@dX_-vUrHwh@Swu= zTvYR?UGQ!f`~esIaTojr7yNw}d@@xS_UGFa?z7^!S8Xw z_qgCMxZr)sevVOc zo zh_ml0oVJvuqnx z{?7_e&yo1m!00%v@PAe~zq<(hU4>Vj;UzY)lRxpS6~0^`KR>DPDuo}T@L`4XJBr9V zMd2Tx2e)=U&{XDK_j8r*3zsen<0MI-X&ChM2sefz#+#e7vUcX?gfNtY0zNNGTrRJ;s*SrzZgC-}6*@ z(Aa#OrSU5oB%ttx8ox^7UYRS-bJ2g93r@dWW++)Zxkc&M^Cj(v%kY4)0srLBukLfv z|Bws*J18vVhh7){=UnhCaPk}dE(#yYKkqRJsQ8ZtUP(_|Yg{>F znv0!hBVbrhy)4(O_^Vj|ba7u^xj&}fCUgV;G73)F0u95n6JY1{rvb=a}cF}XA z3;qKa{4N*#?_BVGF8I?f_$w~>yDoSc;*I*_&OCok1zsqA=DFZL)&qZTboz50aBBDb zc)rd>PtpbNa>2g?+)%M$Cl3LiQd}X{s_{qX?Q_xdzla~-O6dDCJ^sAt!v7}oqrQO# zsanPP2MYJ9IB!=4Pr!=|G#=`CT3F%J7*`1!sdB;P@5tgltGU6(EyVf6k9nP*KkM=K z3bsFYe^H~Pt^q&y>!8x3?590fD(UTpyncE!+jF#_-_oNa%Aa^IUh4@pFw)MSLL57p z^iLPo5yj{lj! zsb06`)oYFm|56wHQWrc(`isYolQF_k?ykIY+khLY?soFG%s)d^=8a~3@kjSIvH+k37XZsKUU46 z8jmOUAHQt9@SK`Z(wysw3y*rqAP~V`dY({zIzW4i?`XkDBM1%4Rt;AZ|uNfn*q48rtjY#GUna<~i%>%p2H zI2l;MSid-##`?#Z&q@F}edy%L^OrM|6*&DCXpBmyVXHd&?Q(KrA`+IS-QYPSfA(e3i~^kpXcjbMj=1TU!D)8 z($X#5j5!ymAEvjm9saRpHo?(j+9s!(vq=`iK*~(x7hFTwt4se1eZY+2g&}M(nZl3N zH^u|a(fFodG|-66HdBFMdK+y-hCR#7aHHqkd5iID_l1}OR7N0}Oa|LfO*7dhpT$Tv6gz@fFJZbUTHYkH%Xdkrz zHcKN%qQrpQF-;?uH&c=3Knpg=i-waT))tVvzKQTQympliqfor?6bJ`HExcbN$_yq0 zt?8K9l#WCj19GoS7|;}l%i=M(H5wk9ClnuVw8mbe4IcxEU=kjZUW~^#V@OqeGJ`+i z_-27UF2QWJpkXdtUw5f5;9Gq`Ab=EgSgUCoMc{&~R@Ys+@_c8G4AMXVFX;t*ex<>G z!Fmz6Xw{k(b*lnvF1+wEUwxpyZpA7eX~i`t6rdocAfcW8un(TK=_Bnd7@)?qd7HKz z<+h=HP!UPtu-D5^$^_G{+}>Vuo0IAu0sQ$Hs^G>=gn+uRt*H!{*3|nn=b(q>EhAr1%&U3Fy$_Si)=@V?t9no>#{}pcy+eN?qKa8dGsK z7zlcAk@{S&8uLY9MT1r%(kQkDBPOWR zhF~ZZPUX3tcA%tshy?BJ>*Q}53+1YjUkhrgU|vvQS8Lg*sX`G*Q=Gg4ws$#fZ0B?WM0Y)ga#n*xq-m`?*%IGLwJV0+D&8QcVFCM9jrXZ%twCgj+5HSDQd zv9cQ59jjDxYudxtDr{FQJe0x?vMs{X*wzYtlA1}GqfKNsl?WgY*YY31JA9|faBXeRs;$d|tWS=cT#uI<0C*vriKDWHl|xpi~v z{#4}MT+*#U?SR8fmjx0KWTZa?C^0pK1=YY%h%6Yc&So2^nM#MC3Wa? 
zqSxV%_PWsW`gx2GRY48JghQ`GX+BLiBaimZ&}seLMg!KX=!gk_}?AK=MbyL5RAJq<15_w6nD_?$@mpT2*qyX(LY(MhL--Aqh`TF^o z7P)P_Xvq`EFTW4+}YxYTu!Bu zKcwn!#TR(#&~fkz&WT0K>*sd{))1Ev27l&0wB4Hi0ap7pbQl^12ZHJcA^eyDs z<@Nf0!#XLn(|-cclMiu1<=1LJQzd`3PAjUbqiG3F z)KDimns+k2R>|+3sRR|t3nwmH$G)9&Mna~_rb 0) {
+		} else {
+			printf("Only the integer bigger than 0 is valid.\n");
+			rc = 0;
+		}
+	}
+	return rc;
+}
+
+int check_sys_kmalloc_list ( char * results[], char * kmalloc_name ) {
+	int rc = 0;
+	int count = 0;
+	char slabinfo_line[SLABINFO_MAX_LINE_LENGTH];
+
+	FILE* file = fopen(SLABINFO_FILE, "r");
+	if (file == NULL) {
+		printf("Fail to open \"/proc/slabinfo\".\n");
+		return rc;
+	}
+	while (fgets(slabinfo_line, sizeof(slabinfo_line), file) != NULL) {
+		char * token = strtok(slabinfo_line, " ");
+		if (token != NULL && strncmp(token, "kmalloc", 7) == 0) {
+			if (strcmp(token, kmalloc_name) == 0) {
+				rc = 1;
+				break;
+			}
+			results[count++] = strdup(token);
+		}
+	}
+	fclose(file);
+
+	if (!rc) {
+		printf("You've probably entered the wrong name of kmalloc.\n");
+		printf("The list of system-supported kmallocs: \n");
+		for (int i = 0; i < count; i++) {
+			printf("%s\n", results[i]);
+			free(results[i]);
+		}
+	}
+
+	return rc;
+}
+
+int validate_slab_name ( char * optarg ) {
+	int rc;
+	char * kmalloc_list[SLABINFO_MAX_LINES];
+	char * kmalloc_name = "";
+
+	if (optarg == NULL) {
+		printf("Arguments needed in \"-n\".\n");
+	} else {
+		kmalloc_name = optarg;
+	}
+	rc = check_sys_kmalloc_list(kmalloc_list, kmalloc_name);
+
+	return rc;
+}
+
+
 int get_arg(struct memleak_settings *set, int argc, char * argv[])
 {
 	int ch;
@@ -83,7 +157,10 @@ int get_arg(struct memleak_settings *set, int argc, char * argv[])
 			set->type = MEMLEAK_TYPE_VMALLOC;
 			break;
 		case 'i':
-			set->monitor_time = atoi(optarg);
+			if (validate_monitor_time(optarg))
+				set->monitor_time = atoi(optarg);
+			else
+				error = 1;
 			break;
 		case 'r':
 			set->rate = atoi(optarg);
@@ -93,7 +170,10 @@ int get_arg(struct memleak_settings *set, int argc, char * argv[])
 			error = 1;
 			break;
 		case 'n':
-			strncpy(set->name, optarg, NAME_LEN - 1);
+			if (validate_slab_name(optarg))
+				strncpy(set->name, optarg, NAME_LEN - 1);
+			else
+				error = 1;
 			break;
 		case 'h':
 			show_usage();
diff --git a/sysak-module.zip b/sysak-module.zip
new file mode 100644
index 0000000000000000000000000000000000000000..ddc5fa139de3ad3f7a9729b4d9920d3f64c09faa
GIT binary patch
literal 298046
zcmagFW2`8^k}bS#+dA8}ZQHhO+dA8}ZR2d)wryMQ%)2x9yYs!=nNGUX>7?sVtyHbl
zs*;xi27vMMfZP;3vZ|rE&AlsAPgU6Fb8Xe8UW22eR#_8&}+XXrN4qR?u73&t0
z_lAnevn5R4?-4nk@A=K7*bcqlWPt&xS887!IyGQv1zp436X-aP0
zK2rB!N%^i5cdo15Kh#7Z?T*~=6BvaTH~NOGgs+2D4ZiSV&-s-;BP}zG!PnaxFP8Ud z^2weN7oINnl*;$^d(YFIsq1_Jyy4>aNpuHXKDCMp8Xz1Az-cvkPsIa)HlzhmJ;@+_ z8Py@g)PYIx=kcV* z{c|Ih&-mi*UMcWYYJ;-f$^0-OyC1KB%w&YkUVj?%9zIrPXAkA7j%7ITwHpe z^TlDvtE(sU52RQB*j9?4r;qWceWU=6V2nD*WL!;+_Bbyf z1Nwwlw!~;TZ9cinA2PtFZ9Jki5xG4)I&e2=K830SHGd@RQN2Tu_0%qsv9v-^u8pRB z0Ly3PzfsmJw_j}?wyz~?0o$F0$j&t2ZmoT<4BVU-Sg8kskx^i4JcEFX=GYt`zp$SJ zJ>-euV4yuO*Z9Ls(Fp$0Ip3`Y)L(p-L6Gb;``DV!YA_0m0Int6T^}1lreJc5Ga3UCXLT05;p)92Eu2 zmFqs|VK2rWxpn}X`--|e6D}wh5WLpcXRGb??6_-yNaEd-X z90;bQp1tt;A22E2{1W*j{#RC3B6^1e)1vV|X86O2jii}rWX*}%#Qf7H84}*vK|_;z zS&7uTOLAxqhmLF0KHKBo=yJqT?mbO;hceCbIZ)w-En-lWl=XzvI1aGXib@N?={UUA z51|0ldQ9$Al?WeiOl0+O(=7+sD*X)*J)^7L>WY#}Kt{LN5w?j4R9%ir5Jn%;6C#Z; zXY&kgyLSO6eV>zN00JO*aEG%D!^OQr&=f!RDFPyOjQQlCHwPR5WH2Y##p(U>it67; zNK_P;p71hgf|lerVp5Ew(oQ51lp04sb8W=<{q3ZuQWQW*#Mu0ligqg1+;o=gxRk<{ zMW)ym?Sf9T$fON{zK-0#IZ2eGmCH`%H04p6A&ZH-9F`Y_+G0m|2_)f!zj!7zEE@)E z04<7|HI9LCs?FF#XeX^KcU-fIsTX$p7W!J-O~{^^hIUoeYqO?O{`8)K0-hVgMuy_N z1c`=26amDyzHTGWEZwp|}#s7B0>O81`ioEyq$YM#MX`*xIExUtqc$h?1SD z8ATiegu?ADdVCi{c~=2A3PiA{$abIBbj(hajON`tR+UxNL?_YCpQO*yD|H;R-&Wfv zb7x6+u7l95{Z<=@tzznsM+AbJjD@+{AvrPsr$E)*R$Z|WR6O7;`5h>ylOAC?@*&2Q zZaT`o$*v_Scpd$;=v{az{*qv#a5Kts*y(W26;F}D4T0viFN}kPq`WD8`S3d-72PmJ zPve1v1G@frR&i^Zme#0s4Br>LHfwGs!h@zE0}Dz@GD}A#X>@#XjV%nNX_LJ~<1E`y zIG0mN)qdPAcCfd{8-Mo(&*%jtD_FAWiT~^X6SU6o8J}|P^`2{xy_V`(0Aw2u28jGn-i>c7r%_Ll$CS|sg6)o^NXY4IaD6Di9sU(s?6his(|MZfw5 z+xk73brJ~ihZ&~8-0XP>K00a)QfoZ2#ZA7=vfqDa~Ru9=R9~miAzN;yrCDMOwvd$2fP6^Ta6<{BR-LMJIs@g8(-j?~ z|GaJH3^h6D`uu^@k#1ENPwWB_poU@UodU*AvVs&A3=)g*4~GIyY9QWm57N+QT0t?# zoj{jZ(hFnG((06>qJP~25|^WGAftAY6k}XsRPKYl*v6!yg}P%kBiXJbEsDmD?%kjo zF+1fvP{1HY%2z$hLp(NjCJ1QJBVHFpv7s^P(_~m|Is_qqmTHgfnmbl`6C}rG8A^SX zkbQAj8%NpFo~t2vjbrX}Xktv7efblp8Nr59Tkm&6xgGSCLg-Arl#GY!dJ&g6OyjzQ zWFouTq>`RRkN~kUm}~lRIf@&F!~*)DQ0kdz#uhZ37jG}3PdVBy27!VF??eKI9&=c~ zfL%U7?Vu-6puZIML_sUw*Yk5M3O0Nz4n>i9@$b&4&03hHLjS;n7g>t0RK+4%Sl%gi zhcj1kZL=7Ss>C09S#_aK%&DAl!idvad5cWQV28uqna4POty`*u=Z@-1|A#h55xDj# z;PLw2bxt$?0p+l6|4`5iGRr#cDci=l_+CeN6=7@yyr|0nO;JsMqiXO7{SEKwjSFGO zU+2OfhgE3WuxdD$dQ8E0dyVPy*{IJbb4JdhwOv?$$~d^}5eH))i;j58B?Qwtd^JRL z`IbWeped|i6Kl`uOE-XW^VY!jCe&O|7U@meQE#b&htBcF|rJh6aM`d3b7`;?O2Cluhf@Qjp_Sd z*^gYRf;k*`+~RKi)~6#wgBs22M?14!g|iLnh&P?UYoU?a-Q`d$PdSBr!BzFb2Zws& zbK4UYrWInPZ7M|}B#0d5WcHWRP_?U95#NOv&!S0jIF=@=&T_HyxaI*vMjy-5hZ|{8 z<+;-mNnLCZE|qOfwp99`dMey9MD~zNj2}KS4;HlY?5a_j35DnT_8$(mfns4l+L>Nc zA+kUqko%vX#VB-IB&d`KB<-`%1@nc3{rD$28kM>~dIF#XPLwgXE)|7|>ylw#%gfnM z7z6If#-Edy#>oUUoJ#??oawWmX`{WM9IZPX_j_i%DGR+sRVDME1ef(jj67#g5e`n| z`SX-lID#j4sV0oVWUa4XZGQ6Pfo{y^ATmy89n<=)s-DX8^a9=<3JB{U!KPUW6E#z# z`62S{v-sVmr}}b9(aYTlcnl)hQmeV8l7VppO>zomK<~y#%Sl6MV?)n8@I1o9cmj|; zu6#S_tcQDaJO*BK* z>`?bcOcP*$YC4MI(4eF#Pj0fid%BC3>y?gsxm1~m&ney+C4f#0iWC0wK}@v*%N(_h zAhhf>Cr|73eCVxgQVqlH8}47`4QLnkV+(HfpGpeI#%3sLr24Qi^~4nw$0Qd$^3k_WhVUP{6$NuUh=)b$}Fj3bn6?n zNK+xF;fOJR#!kunPEYBIYc$T}EfPywgK5eI>6Gp7ST%q=&;tOm)3NVb>h{k0>Zj7Y zQc7uVy_tj-uexRjAz^g5<84up-q|uJ$lIYMnIgS}6}DEX_&%6qOx131o(2j=MPQ(VOF(~*-^ZmnP%YyL#sUFqiiGjkl-4=O zOO`|r2Q$Fk;IJr02I-=f6OEq5q7F$;2>A$8+Xz>K;7xii5IceCuBtVMrEmQ|y#nqnV;i~pt`HBXO*17DBJxH&nvg50`)kxNM9l!YgwMTYW~ zo7M|q5qA=ktU7v=_yCLpY4ZZjWuTxbtxUZK(H`6V{3r~RIx~w&*6Vw?BnB_@;tFl^ z2L~0JS0X&+R_?Ti^2uozPyNL2kfdjpBfN(29MTrOBi6`+bfER~(cSJ{5W#L^omXtJ ztl77Hk<}D+u>c^&`;WXGoB4#&2cNz1c#;b$M~Pr_5b0tEsII!Y*=Fgko=K5AQUeTS zEGl^k++$<@nv3|!GJmNh=>(D@orA+ex-Vpp?wqv-OgC6a*3__+tfw39uGD~V=onD| z7L43FtVN=|l3Q1}d=4%)#peal!I&_q$Z1{k>ikma7<`lBs}W-o37ba@Pv86oscYP0_~_o1q8eIH znBt(lTrcrI>t}A&mCnZ)xjgZNfI&R(F)zzce@!JC8{Aw&n^3C_0K(l3wZv2$29jY2 
z;JZ)F)O-nXE;rN(Ud*-S8EYE?;uZTP?S3M{E#2}pfURE${!q+^_ae4nrt%y9os7<%83aNW^y2=z2oXPAFSu&raQk}lWO7JjyWjzb1xsf zZ!Vmu(gQoj4t%K+wcDHZ`|scC-F_Zy^lo2tyN|msm3;?`-tD;29ogXPyAuc~G9^Bq zM(m7w!vkmbj}dA`ga>!G3s;Mqyqh}k!`u;{z%3`}Q$~K=xNxLfaO7{?UADOwMSOrq zWXBWso*%c&u#XFG?}cU$wR@)>W?(|P7PlV|zVMJ}dh^bN6;5y7lOg#)BhhXA-U1#9w zlinc@Rt2=|Ja^PAb`Xb zxi;sG-0Vh)Lg(bm!yo_opg$%8gZ46cIy{&}6$G z58QIAer#Iv7DLt$xW*$ba$7ZBXOm|B{)01naL@o4d_WKSJ$-{ zPO@+{o4(c-c3@?vGimL#fWOTX^U?&X#pfT>L~5lICn?`Lrtw>)55}%;ZK-;Ewki5` zZBr006_|Uu=bM{YjTqYibCyH-5BDk3_~ZdEg4i)AoVM<&&xtB&&wJK~WV742JrF)` z;sPEZpGBMVeFNAUXuq?nHNpj|AhWUJjib;&^Lg;r3ogphaq)$j&1(Hn^i#ydM zotHG_5xji}GHojLo4c@!yU$hb@tw$Z;2hgJU;&`hqvok4@|G|=NbA9M#FU7l+ zXq5+1#PYt#W~3E+H2POwATcBvpl%j{!DzF!DJ$rS2DjRvQ>6E@<~pG_Tar8wJF4;( zx*=8V?g%asZ5J>^5p4!FuaIvevp+&av=~Vh95?b$CH3;3=5OdJh0jPDcUVD0m?GaN zEhjXbttLx}Zz64Yn=!3A7pH956XD8%pz1JMTQ;S$8c>N$i@}52%64U<*5@x;H<=09 z6b_r;VYycW4-ZD#;hiHHch%;S<%+&u^x#ZE`d*w*frDcQK#m%C*xJF+6zj`}mQKS$ z@xHFfaopAQ*n52ke2BYb%aXiuQr=-3j#9a`!b+ki{cq*F_!k~bF!LF1Z#e`^vV9)A z>gzSIVszvIq1QhW!CRkbpr3Cye({0YtN8}hl(K^dw93=S;8P2Tj%vvW?Zp~h4WkaU z?XnRlz_JWe-h?eI?(-R=R~{#4bQ{Ejvy3o|W@l=ZBCdE(gF%CpU(Q(_a|VmYw-s30#g-vz?{! zV-Po$ydP1MS5$1(T~DVh-C-nbeoZNVv|+f!vbCNP z$N}9zuu;{})bGAL?Oas(@-5ZMtoNURV1iU5@yY04dtMvw9ar8Urk5zHdzLydCiL;e;0>S7tySV8 zRmR!klT@fUON9VV2_QvN>{v`Ot`(xkZRHb1xd zqo#3%e<>@rVyhXv-f4#8j~lI^nhq0j?G=qy3N&DYFRlaN zmUMF7%jC;X`EcXNef^uD4o%`)Gv})K+-S~p zl-ZLZzrp`IRNahUaI*mc0PO$575CpjRZ&DhSVn}_#`vFDbxD|z9N%s3Q z>nDz+NVlSX@Z8~im%4m3ftsd1s7|ZMRwJdKbaJ~+39>La-Lt2^{LT@3K)tUh!LybRN$uJ_jMG z5b`?gyS+>_Tbi0gC*5ZVBsb^cVt9Gt^hBvE>Xal*6jxYFLsNY;()d|Kf-n)1gJn}D zq1(+YR%B#{Y^Kuf`6}(b0VZyNuPsQe+%6}5IV9kWLe0W{+8lQP50NNbC!N4@&^b(} zPwdRKzJ0Rl;v?tJWhB}1vx$WhW%EtY$3kZxirt)Fg8T6t;s5ZZ^ZE|QLs`d~*4IZ6dI zYxF=@IqKwmKM|olj^BmZ`0ffwKZ=$&;v!TVr|208#diPY;QzN1;NQvlMs_wfcDDbn zAwcat@=+QA0N_CY0D$D*K;%Cy7+M<`SsB{7(;EE~D$UqdP8+QEjgM3aZV8etRpjZ~ zHRu{SO4e&9<-feFOV4yN_T-6;?}XziCDv4H-fmhu0mS1#DAr_{s-^eqJJm0-cPF+; zH%!F#9C1Bb8-tr;*DqaemgKBYy^y^O8yCp6X&feJUoc}LGzN|Ewj5fcu5yX!B;0#R zt)3&uq@HuraW@-+yxgySb$%9J0+)nV&}KVi+t`C8*l$dahpaI}TI0L*Z`otm%^cWe zuCKyh);P5pM&JNaFNP!zWmF-}bo(!ReK19>5j(b>W9;*6P}{f-$D$j9#iG(elsq|b z{ftYwcjK=bCzb|Za9poBxWhRb0a;oGnXZA3Ai&MC2w+zdz+91l^ttZ(^Jdz>zYseQ z6ku*HoBIg?UDYuN<5IE-ju4?!*=u1BYtrq=)|3y|CQKi|>Fhs~C)NNNPLHz00AD&A z2NY)prO=+|7lW)JZDV-;@Gvy*&{*%l<0_$F1{=v3DXomV;^1L-8h&@OJbBxmX9Cx_ zchgTtA^k#(3UR2r@`V3@yo7x=-Qp5=a%(AUmiFFAm+KGnu0-zz18MnPJ%z8$`vf!{ zD@Hc(`-E-z;|0A(bH(fjvwnNv0`JRe7}B~E*b&vM6;K=kKpz>vip0S1L-o?<$XY&h zRt-7-#G1RIhHhr~1YL};)tt6|JMib^%6V7kwB0(j<7itQnd|rq3Dg%tk_0S}Iq5?Q z3jI0gsGS8^9_t`@c+tRNr4LM+zF*fJ&af(vjSWUrSfa;+g`4+dsiPe$Mzh^^a~qC} zr@d(yJvY>h?JAm^?$%*o9L1g18baZ`9iB$Xuk)?P^w<6vC6qGq#tJ5=UG4jnU2_{T z&>)Gyp@TqfEV%BPF5E~CB6!)&xIz-xLfqB%eK5T;9mxd%G4K*-m8HAVvZpFik!xfA zD3%{LpBk{kbAW6CYYhnSXeneTHR>ZP;Jm~vn;_!mOdKwpB;&yMYY@+Z$P|C2HwiF5 z63{vFwA#cnbkOB4E5>Tp*|q1GmPyHhk%IIyeCZz_>_&9MF}9ySI2!n&D}30!iD1w} zKvOmTI|0S0Nzn)h+wNze%xm`rJ4PR|QA=0&hFW4VRU+L`cp+W>T9cxeyQ_)xlnC;_ z_0*%nxeS5@>#7duC4>B_heveC#GwjOpVkx}+nKSRo}|>)6Tn`KbhF^_Hd@?wRl@^) z%)I&o?S#q^uq;i30!U%i%nlZij-Z;E8Ti>>PsS|QyTdRdv?hod=c}CL^43=Xp~B*5 zNJaN)B-jk=AiSrpFL*!{olAckj`iZ4t-O$#%?iJEH@M;;;c6ND+(32PgHuBJA4{K{z6SRE!QkwbOI+ciGRtKMIpDKxrbsE8HFcq|(5W>3k70 zJJ1X5{{J*Mu445>v2GyXm1|xK@L$;TA{PL1SZYIO1dh&Cdvg>xeW% zGfIxmvCHoJBhS3!KLdv5^Qxcyd$@gN8(kj;O$w3Gm0+k7F~xhpQf4y-Y61K0pU+Q2%DXBYy-I=p6GgZNiA zJOW6PRfi;@Lnz?ER3L&SC!C)>oFO2&1+i|pNp1v0AG6e;pYho$G%6DhZGyb7sM9%+ z?r^3yKz!^O$yZbyFK@qzWZiDiAOxaNK2Ons{Wi@b;v8s;xZi3iD0AU8f?w z=Q41QQ9{w!tGriSBZ8h)u8y;o7PF-T7ixw!`2B;)9jhc(Iu$Qut 
znq+-qI0;*roYn|B$r2?9PRJfz(;douub~s6tXRm$p7IqX>xk5Km(PNzc;-~u8g@e7 zC+hTVgf09ARD*xH+KpHH0NDq(;i8eY?oe=omJs+n3J1>6D)ZV=y;40539b+@>>Y!X zsVm3^Fw=$G@txVPRC|!Do?V@(lBLp5$KZ+zxY^Fs%=%CJ5&8-RV2T zF9*u~ybHA&soO?{*xP#GmK5Jgg81fmq(Ff22e;K&aZ@X1j9}tq0ELevuds=ANy%B& zvtrhY^xi+-b^;dWu6jxzVigrRz(YnfY;@Bs?I38WpVUD6{8fjtbPHYJ_SKf6i%+r8_5b(X+&&+$*Zd zUn66leN$CS80WN;Cg&Ytso;IKhiN_?*mdJK%g*=bD7TsxD!*bb+V|8BTSaegT(B9? zZQC5dNhn)FtstczIB_-f=he6>Gmc>&lvYD?YoWR3v<3OCWDgvmDaIK!YcOP#c$-zbtyZCT^mi&zhUP@U`| z&eK?wAAxBuTi=~-3P2F8s+i{FOu{yu6Vx}Vi{K^>&tB9)pc|s4z0`s~3c0-g_)SK( z9JY71rcmJzA@n+bcRL{NR-m{Q&&6I=y)@QPj!Wt*g}}_4ZhEyl5$mDbc;}c3h(B1d zx)^2nSERy(Z+d89&9vw1Dy_tMF<>5FIa5Qt64BPmm(EEEy{%l(#e!NRR2wWU$rl4KYS!Zk&7SE~`H&d7n|^X3=L>KQ$(T#I4HdVxemlY2iS zRA0ifbPuY0o0Y;Xb0vv5SF62iNFjx!UCgMeeFaggB&PUV9b?i)TQ;a(@4S<0y&8SE z&E6-Tk&{)o%rHY2ikc6$aO%yx3*6p0B`8a?@lYi-_KmZ0215Ge5v>bBCl#%~e1GZ{ ze|NscqJ@|O4=a^xWUByB4Fh!e6}=CrAWZ`{Iqi!_zm;vCb-;J*v&s;~4$N37h_=}@ z^xMh&pCbn}`hCW-_xj?YR7^Ml190$5M|aMKnjaTe$HH_{51O)KHS4Gd608aMLU?SQ zur5C3BNpNpE|U7QvU`_r)3wkIQSrRc*+W4}23w%+Y`U?V_O<44pj1+8ydp#eMH1^k zWf45-Z>1s#i#eVw1@!SM@C1Xy7QCAEc=GauM~s>+)LQrW{Hz33$-q%&{l*$d$`=}q zQbCq#a{P^m;jhUDk84>S33f&;-Fk9^Atq1Y2m2l{Vda;9;7xMs=*UC9wxhH>@;iMO&Vs%hYmgT zVDkx|E-TFh=};i1tc?Pn9Pq2%uO*<9D>eY6C<~N|TuOlQLex-Il@z;~6XFtd1W^}P zf*6gb7A!2_aw@J0i#{a#K>~pV*(cqS}XW8y( zxCH1&VHk@-1q9!&&)4kDP|{yk-?)9%;*v&mI5vscnQ;p+)<`Y1NgS}AE|t%cp7$Ue zAQDHR(BAf(Ed`=mVMN>`;E##RwM|PTFd@si$DSoU5p2I=zdSnVlQwc#V^r4AS(o~g zrqGUl*$B2BBKpi@neh*hB)~->>?|>ZQTMRyu5`4Kb_4HlG5^v&CJUCb1jig}9`GuC ztF~K4%%hZ_xDT8SVVTQa%!zc_ul+7pzRhV$WSUzyFjHWcaosm z`>TiDZjb!J+S{?ul~~k)^in|keCT=bq{d!iw_F@af&mBND!6r#;{`ggh2WSm!DR3y z!a00pn+axUapYtzU(?siu_i89R|)bvuTr?ZB%O<7NwwCdLA7X#vg>Uo0P*Pu@V_b> z{}@z&LaNP!1_1y-f&~Eh^*`4ojLaR4OXg$Bq>E7IejP^FU-+6CpLmcPjJ510Kz5OEgVZp{2&%0OEQx{66vCPjMk@Ug z5Vr0D1VNNW3oLBZcWrf7=MVpdXxR24@Vz?t}7sj-*2vnV>V`wl0<73CKSgx_%%rMvl#MVO?xwLoA$r=#B}!>bptK@oKM*vGs`-u7TeC64gszo zn4IeuP|TQcnW#_$QpTm~uUM3{;4U5o5y7YLcvQCVmPtykS11-Wk>aCeu*hqjlIh)a zQRdPhR$rB4xOi2f*3|v-2Mi_QR9We;m<&2Frx4cIz4LxLg8Qnx`TkVh(UT>$-=Tbj z3k()fb%YJNJ$m5z1AE-4zaMj+yp{!s_`B#MNz}lO4K{gZKuM%dCaJQoSPRh6?X<8m zlOsn*78t+V&Rw-0Ch5;_IdgMW*-{GL0+xV+5b+Y1=!CKcB}#uDzax2kJj?e;3Yb5} zSKF3Is=6wnc~Hkrttb6V8d$8?sZNzsWqtYVs;p%|i?RR=g!&=^9KE@|mAgKaUcH?cI^hUR#saM5!-3-baJ^N85w51 zUGgnLtw&i+S@>k*-|=f_v=0&Hm|}&m9Ix>I6;%FFF3sjoK}7mHIQ1W!=zk6>Caxy7 z&i@iq=qRBGC<*@a_3xDq>c33?BVIYtA~RS30{~RP0RRyG&!)}o?5zI9@{;O~+y*^@ zuk1Gzx^4nx2-L8yzvRMlm}OOR8h@8ap930I1Tu##IQHia7grB9I+VG&khsjXx7*Ik z*4z3(eZ(-i^uj5?z(Rhz9A+$ZZeC+r!~QkB#1|1M_@4FdSV-8fKAx(lQ0>`Z`)n$r zta$Z{yM6W^Imhf9NCI}^Kg)<-t(=|0uBA@K+(Wi`G;)}z>sT)wXqt@LyEa2E&n$Z! zGU?MHje?mHmbt}APXSluwxM36X+X~P>bhHPD-RPlQA_h*UGI!yW~UdQjvPH@)ywfg zyqu@6tE_~nSYWMFHhW1&Kzp+X=968)?B>~u_%Q-i8^=R0!Dg8~GC{}+_Mz+UYS~U4 zZvsvNZ7L=5q92Ri%qmlX-PIM7fXQ4|kU9b$lQxI>|Bge|Ie-k2qj&Wd zD8a>u99JYRL4tM+&Kgw2Se?x`8>&+PdKXCu2Ea|IXBnrAk~StlHIf*fmKr1t6$7Nt zdvHzqGyYIqq56|<-JkCRDMKkQIH)CTe(1(X@S3aE=Y@6Mc_6c#adVR| zvRq*+^cFgR;D;>&;Vi}%M__wQ!$-b`;)h?glFNvyf!0l)Pv)AkKOg8Ss6YAW`lI

RWi!-N)RZ%&)tv+@`wNJ%d_d=pZ67J*1HW>?y`9}2+ zC(P;Gb;uLYVes7TJ?AbN1EH`hdiy3rNNs&R+p{Mx}YPj?*=i~n;<9Z%q>dOCzF7}sX{4c_X zh3(&oN?QYKTJwJjA2v!iQit>iU)Dd70U!IcBj0gC!L*JSnyCkulr>@dL?kjL8~T|^ z#3~niKfZ~!Q8k;TvfGng9v+FYVSq3O`IWOg<0^b{>v1>2FYc2vymxG9X_Q6n zV5tK7CkXOS(=?*4T_7DP`rM5&* zalhA(lSZSjh4pf(5PTpZNn=w?K4O{Y$K8Cl&kR*`9{!-OW-sKMT$<9XAAWL{$MY1| zTTIhzPp!Z;ldGaz#NkM@=Epi4voW39wTb5X=6>7r3Zr^Si`*F$S@vQpJkIkra02X z_-zlJC5`&kI6p+rhr=|_O99}4Kt>o!j^gmc%?D3%B;>WN*(rC@t3?GI*HYwKL;x$1 zODij*IY`vo{a2#5`$g-Ur9_Sz9o}Tg?RjGy=wQB4Gk|r9Cz(a_}QzAz=)`N#sygXdDzCFy6BY!qqOn zeKFzcJ5nggu{0LR7b-%c)2$`w(qUN-9qK z&!SNGZA~;%XTnTCk`saE^!8bdm`XKAaA20D2`bbfvtYQ{;$)aQt!GLnhkHkGD(=3bN8S4NGhXO)}r&XhA6sS)Qmh2J7J9ldIA6e3c zuQu;^y%!j~ff^{mVyhm{yJIiCZa4G-#4^k_?YHulAxmQ6F@XZ@~pqJTRoyi&^pkxG(9M`bb6kijjY)8+8qv! zwcX?^4D(5BPTUfkm={=%pg!t{>>wz-FwL50JQOsd${St>uzuRAYpAQ5SmdTA`Tomu z|DcD~hA=INf2rYr?Ei%xIywKBwtiGQcl|K*#G z9{*x8Sy9VsgC51V#doOBK%b4|;(4_xXg;(iuDKBXxel#x)$vz>xM)g4@Z8UzUzHA* z!gW`3I~*sS8o;zp2GC$;fl{5{ZH67klCj5nqxyErcIR183Sp*FD(ZcKYJt;%uXLIr zw_Oc=Pu|m!KQ1QBQyRr>%mT$~)z-5@#VR?;0^@;@9gwhSrZt#XTG6<3=6G%hO;YHX z%PL%1RVZ|MLv{U^$>|`l1Bkw68OaoHGDNz94ZsEBOh(N(iKC}7FSmCWQ$o*I*XI_T z5)K_a0Zy;Xq;`bu6C%fTV&Cyb}NGH8kyV3Z-q?pM2 zK!nXZ*;9+_bX>eAprxIQ-^BNwQ@NIIJgRjn(D7x;SV-HiU~p!#g|Kv2NPT;=X3mmZ zw&%H@CWjGOp2`-FUE=%r=r66MyKcKeu(qI*Ks(Qk z(Oc_YqlpfsBj0uttM7T@sm`g}_3zrnENqReU5risU5|ze zCDQivU#db64gf&eknfPau#{b7vs#W@I*ZC26HuM-2Y5j=|#WPfaL|h=^$eAO6 z6xMaChVTX|Ojovaz{QvRZm81`dZ){Ir>3X99^9E&$ym@WXXDQl@5UDSiK4@9ANkej#NJ(dNl(hYHd}Z23(Og}FK!Pkw1cobxey$r-o@ERQYg)nXrLPYf!*G(T$eks~|P#2i7z&OFq zCJIl&q!xgVp6T{MQ_7f`+j<@bj}Ro2l+zm}Q^l1JsYqNSA({Rk_Wm+1t99KQzX<{9 zE~Sy~?iQp&q`SLQK|s1Y1Oe&pZfT@Ty1Prd|2XHGd#~x5>$$hCx#zmq{S5u^f;ZQ3 z#W5J)agN_{URAO?Qvo}8NAI6^S#un6RmYt#c$uC152!C*qs0W!cN2M^^>jrkPh?|4 zsk9jxAFIK@9>CwY9rokTbIucGb){#g=sLe0eaaDH=Y+_6bu={BO!MVo{iR?@6Zzt3 zz3AW-w{i9PPQLKi+`#QhjJyfj_`#9ZB{AP8{K9GHAQ6?0&EW(4+!Pf>ZQ?qaOxK&RNz`4ctS*_R)K~bNb}w+{t1m_*lm?#}2xcb4 zz=fk+V_Fl%(u-D&s|fy?rTBB(qj*Z4g+31igC|s}Xw&@c1~8_c{njCYsI0@n8hc$u zC2G0Q!;!|9ej&SI^GlLbj~qbLmvvD#a}$-m#v9+@wsQ;Q$W<5_)t2rfE{UHy> z*ZjlJKabT)Wlmv?pX!n%c*S>Ci@9#f&dMh7`I5(EQKZaHATFM z`hWe^`1N&NC={-oJ-nufhr|A#or->YUB9d+{P*FW89r>8{kdrWmiSAr+(YyV)YG^Bv4f^?k=RP0Joz zDmEgC2oiNsVMDR%VFK=|*yW?9tp=AiYb8EtZpJrY5^$AvA^OG4jcEP|13wVPCcRLhVx)P~9sUYb zZy9Vb{TbO6>St{CttotDp0DR;!=h|uAHOb3x-jUYi=EG6>gj&qes+eh_tnga(y+0` zO*im*A?9tP^_~`;=n8gZY0L%K@7toCbm!jm;qCK$`1xlScfY-TzX|%k-$93N0T?fO zB#5E6(hT7tXD}KVsj}&^N=1-Tqztm3w23zN#=z~blSA>OxsS51SQYw<$KO{i6|v!d zifN)0yeo+0>4ljN*Y#k(qIrz=tb9S61J`c(NE@yk%~7C7KU*iW3w}-BWmnq>Lme0&P zdr`|AMkXD)r%Ab+lBD4Z$g6J$&rc7wlojjH&TzBs%J-;&g;+kt7L~?5JsJnP^n1RN->8H6zdX2A|bo!*1n!lb_+sG6yB>c)zE} zpcCj=lET_O?N&ZK@EFBGTmpeWdk*`ur0fy2dby;KXEO<@H#od)hjSK$?*c5igp*?2 zeZwW|DRa3N3Hh-_wPe@4NN?{~u|jtJmNRQ-#J)w+p;ivoI?LfZ(!Q_Q-%rLUHZ z8Io71+jN(#L=U&^@6o3lMm0Fpamt$noS&X~tT482oNrhN-dW!saT&jN!;*SgZ5O(J zJheI4!5!jU)hez{_iPji$~npR-AKG42ZIKqbMcFgC(hovW3?jX%~@xAGJ*}myZ9q) z3#9?ps24jGDow3LuNDQdyYx}jlY)A^U7uvxJ2UeInlG~By|1m_WA7g#+pH3!0!LMU zIIBw?zAQk;xa{>4(D!-z}UX;MZw$x@45Qn?Ko9gYskY|F3$S=zxA zUaaT!Dh5p1(s1g7V>1jR1f+fDk#9T4tgp;@=s?r-SqEZXT7&crk-5mQ;)lrkG?WI; zM@CgzGiyy(lyb4o6I#VSfnHszO(yi>aB20BGtyJEGdehA?U9wE3?tYV#(PqSUuZ4q z*zZh^vYMRvpgC27yU2-hl8fsnuIXA`kmrLA-69UrxX(sh+$S@eoX)>gt8lAe@&`#8 zITepA2!A|VHEORRk<5k+bAlV1auNbH2O?kB~mNYx$FfU~pQCaX)xFg&CBn7#S@LBoEVkGfs>Bc#gH zmhVpHK0d?IHQnz%?(N2FqQSSG?s+o;+XAyp{;Z%Kf}ON?PC=5dpF63_DECTIVp4jD zS;s6bsfB$sqB|9OuIF>6?O4sl?9agX*FDYb{t?Fi7=Qj}XaC-$cjgfAUb& z|HRneKh$q$kpEg0{O!-tKR&w`KlqSLv<&pL^fV0gtctGD)wbte!9+@f_R*%7qgt`-F#wv!<;o=Ej{( 
zN<2~N#G90J+|!tm7*)JHz@>xLhgHbL0~u`CcztXDS;6>3iJ-~#_D0TC9LhYrr7;QN zb57duV|*CK{$72rfhd-^%QoayDn=iL!R(SU=nGpIWsJda{^!DT+FplLQk>bH@D$z!7ENVM)u59QQ?WU= zTOR5b`T=31Guf{hyH!W_PVUa`_UBIASeridTN*AD5qv1opFl)zo*+tIzviBv?1{}} zrXDDvth|S%el&&B%M;T7nyzkI#3&6(JP)T)+!mu~yM@jH` z+8TaWLrbP5o62zwNyiuDn};pxX`^2knWt&YS6O81l6Ji@+bJDT&mci?nVovJ){DP< zbu#BuQ&*F_9`2dc{NB7J@BfxQb_s?EWVz35Ue$p1N3{+!9k}M{ zdy~5h;&94U1y|4y7)4fecT0JTzuKmBkBjhTe|gUAO}Dvc$0&gC zjL|%Arku)3>GB<(0Rs)DWjQ=@%92PuOCG`KxywS;PLGQs%aM1r;8NXNYE=ybVcrnx zCBnm5d7IQx52Sn$(M*ve*k*#`G-Nd|8qwkYJx6^>IqhB_&pkB1>OT8N(Vx{rJw2GL zLk(24pP2>2S+-<23cOvFQu~B=iE^0li6Bvsyb+HZ9x9d)<5Y?%Dpkrl&9LJtCJpKs zFPk^gSA^9hDhKg)FK2mE+I#K3r-jO7Q<{PLF#O^6iCJaYekR*fBbDVfU7F&)c`w!2 zL=mBQ@fQ?GVkkQ7{470vsQj2mi;mvSUv&1&&CBB%W8&C?ZIt>p;`?#896~#VaaMAs zjT=XqKVa&i*}`aclQ{F6=3*5MzqMEwHhUWG+wxQ`GY|Z8b^qJ5l0faqkcKsf?oh5% z{Qxcz`ll7L+?{7DX4B-7;;S;`C@yvtlw=Un~(ySo#oaX7wNo07MX z1xV%>i4WFDQ%T4qzF^}*Gw0Gi?x5gZR3}{KBDt&7lOe6D^vf+zkHdLCv0vX@*ZCDE zr`Wh~ztZSoGw`)rQjH9b3NPBmC_eRmSk%UE5KODcj7KnPhH8Hu`|i!`d^#l7+fL-L z&klL*C@QN7gEwkTja=I}hhO~jlZ$st49l#Tx8EtsIz+uuIZZm<>}M)fr>)zJ<>3>2 z=B=pDAOZPh9u?9Uq*1CZCrkeQ0^1u1#gI~*M`P-mR!k>6o#6FIH=d1FC+SNKF?R%y zH#&@!)|Y6qoC z5OT$(pza}E=a*#_Cr)1HBQx-5R@jyIRKWpYtpxmRB|-c18CuL?^Fgz=^x$O%aEFZC zOUC%2Ix97#35zW>&2+n!tj+HZi2A_rC3yC>s#$1S%9q5v>I+<5Ha|UPZE*>T`?6Q% zx^m*u0E>U~)w76COQUl3UE88PsHF0;mjdZc>7`c&e3+WLh0X-m*nrJO<(q&e-B}^& zVdnZUXWW8Y{19;p%y)ccwkF4A@=qpY&1Ni>%N|M9>KrO`l`4i(d7a^M7Gn^W-hm03 zs>*!~4GRt3*2k}$ySyB{%OXHbn0u9`plD-Xo7JDlv5!i$YB@9d>ZoN`Ko+Ju8dgc{$B~SKbvoie<|O7R}J7Bz_S549v8@s|N55 z;M;#H-+tG~0N((<0el1a2Jj8w+h5E#rXLKz{cEdlzpDoD4dB~6we_{p^l!e^d^@8-TZeAKrd;$G1P~WI%9$-~hn^ zf&&Bx2=1>H+{4#r{ou&Lza+Tt$^n7{1os~*xbHd{5F8*lKyZNI0Kox*`)dWq{`*}# z{s()8{7ZuSt{fmZKyd$|g8QzM0l@)+0|W;M4iFq5xW86#-zBjBw*~iIIY4lL;Qm7e z_gyCgf&&Bx2o4Y&AUHs9KPs|2tOVB5csZ9hAC`dt$PWCO?skPRRk zKsJDEKTNito;+oJ7&~S7>*f~zpd5fV0B`?3y#4g#=^u15AUHs9fZzbZ0fGYr_ty%J z<*%Dt_;%j=t{fmZKyd$|g8QzM0l@)+0|W;M4iFq5xE~hWPp94Z>6}C7}mBTie+P+m)Xr6r$vw(Ro3c90Rr_ zM5^!_YkycdcZ@)zlDs~Oa@5r&tXIO%&!wGn=InSGFa3;TzO9;;1uN+3wty*>egk#} zqK*?gVj=?%C%gRu;3yl0MU*$!N&Q=RMme^KJFaRg=n}snhaU zh)Xr&pvyy6A(k!t8)T~*xb4+1PE(p}!h2dElbl!~qLzm&4x}p4JLE@oy&kDSMNqsU z+>4f3I)Lb<%-Jla#cX5~eS)_IC3x)vrw=wKq0BR>DGc#Nv+24eF}Chn^b0C?mAY2r z$WwGO_cZA`r$;W0J~Hb*Uct0PxV=~LcFZL^ za?ft)clJKW)ta*Wq;+ah=^asAdbZ6s>OBBX858r=2aK_hbu35pz0EnkSKZr@m!na~ z9}SKvjX@I)x#S!vzUHMw6lZ&he|~lYFDn852nGZMc(=ns5u+n!hH=wc5v9mX@rTg_aJ3139JAD%!BRUgHJ#zNt#tu536Vl536WDx@Px>b^btRqkmgJ zi&3iG7`K!DYhF8w(V||n1xR$Ac~XlX1y_EkEYD+1n5RMyXnW1(dF8J1|RE>c+>w1~=FWrh(}kl|F_##HBe%EQjwp`dDwA2G+U zbP*szFVA_485DRn-xbJ5?_*pF;c#iOjR^T;DtN)S7PD_B!XQ3Fjug)!GSEYR0VM$w z8HxQ5q6s z42SIs`AaD-FY$2XBJ!D5hix(98^V{SC)`A$`aVu9hf+alD7-VO`rtHUz?GLOD`Ss`h0uy~Ug`(mBFP%I&usHtQ)(We&i~ z`{ezStkbGyNKyCGTU&c4} zqo+QlzB`D%4JNxt_G?TU*w|~d7(c`@-BmuuOdfLJ53$*s_o_0S2o3b{Ik{)Zx$7nE zrDJg&kIQa>T`VM5U|RcJ@0l#bgUTFOcER@hj*&2RQx zf@sjvz+%q?mE7UOHH4toqZX8bg*mqvmNKBwyRD5Gs$#k&SxtCN$MpK>D5f>MHdn!3 zArZkE4^_zDLmhmg$CQpK2tjpF>CmUxo5?!BSld5b(8E|`8ePR@QE9*e z4r8aI(hzFU(ymJ0rT=Cj=Q6V})L%Y}DHfa16MS86w}Z-nhjOv|HFC0$+cFDXzAfYu z*O7e)Qx-)AbqYIEQoktb5ARmMgz?bS#5H*Y2Y#Sk&c{;EVgY zlm6_FZ?QhD=^lUaf0F%5An^lxSG9_POCX{QxgL8*e4^4&R@3uO#Bw7gK9U&Cg{gwZ zvS3$=8HJ=rqWs*m-7l@9B~dUi?|L1a$h^qF<|4)h+|bucbgNs3#ickyNDL zymt{nM4Fo+CE#kSR0*W|hUE^alvUgG%b`Ozop{ zZbxde1RJm+;rK3dl~=L1iy^2S1U>Eq2r7?9!9lq;(va3-X$MEyneTM^!J5OFYWS2X z(BbkD^q!RXjZPSQYL{kVt9P&&R%hd?JaI-2dsxW#qR}8vmRZKfL;BG0y zHeppf=tO$khyWpjfkQJN%&cxy!Dx8v^c4%8ebRtgo5|+9P`X9UOf(>kKGVFK0>i!Qxnai^ zxQo{0LI@>pu&`WN0qM0F9u4=xS7usDVsUk^NvMor%ic*64&v?2w5lOfJUM1H?1=dr 
zYp$&*a-aAE%PJnk#ajbHZYI9<6*E75dVI-8_w~V$H#S=Dj92lcLNMQ)k&$|nr^Q)) zyvlLJe!{g;uao~_aGVtJ`R&(LfpN23l2kPI7K_WA;N0l#m|VM2meq~m^l(x`8}|_->`j9ycoWN<0B_*X^U8xd71I$gTvAa2g<+5~j6=VSG^2@998xITl7n&Z&8XGAWW%FT zKkg$f?HfU|gJ_bRMf7rE^&#~`wpy@6t;_ZbhsD*yj4t79ibf_A8e|X9A$X-T2BY_f zz6--HUs_YOPK#L9+G^eu$XAh!SI>>CHoGKVO-eUX2P_Vlsa(h##-qhj`au(yUcd3f zgbwOdvx?ZMYWe^fAHd|ajAaP$Ra0}y`@He1rHK7as1WJ=|S;xXt!o|DOMhm)RL;7}y?0J${VL;(t;pK+S-f z{|q&Aiapt=cu+I^gPLXkq~82vI4l2CI1m2=oJoFzbF^f3t!oLvuW(jK{w$eEq<3?? zx?&Y&cDMc&^RDUqk=n}uueo{D+{iR>h-)Q61qb&sluq%&wB2^YHAgRb2Pr{kl>yU-0GH1tq7;@Q#X6UVCXLmln;GyNjHeyGnp~NsB2*tD`8H zQ!sBy)M}{?C2faA@5x-wxsT0_#PT!D8qY-=LL>1EbxOJG16>+!!7Y0$!zqpbDo_4Z%|Vd*Gy>E|lS;M(~^RqH*LUW#N|lNewe>cAy}F8% zNIHH%+0gqj!uuDcn8nejIUZyi!p;bn5fcr0Jrv#(%Udu`lJcS|{AvUm;Rf z<)wO*voJRQqNS@IvDgnT;YlL6Vuz~SwvH?%f5S<=#e3UE#fDDD=e7h;R?*jV#?E#4 zNI01Wl4A^9@Af+V*?EZX+}YfYw~X*Rt!gK^S<*Tw6p+qI2aYThr$U$8PNJ62jY%u?5JYN zg$o{6ENVYH=w@1&z9 z2$q?(3S-}{ZhrA{r=}^yPDHj*FlbpUAz**1KF?_d?;;y#f~9(Uqm&ybPv!ZPoF8(x&}N} z_-TVcF_3bGe)kj)b{kug2FerxnRgwo`|kukkl|`pf+Byoh2{$!(s(d*kSQy;45=v1 z*o1tM>`_GuB?rhmE4{r@IQK6?Iw8&Z8CYd^YqD=YMI2WitO;DGb_Q{#-qJoRYq6aS z;zmu!oe7QY+F-FXb*aA4RIB${oSVCU+9{dhG1-jdrMBnS3TZgycuQa_)B+cutLG83 zVi$OJgyrb_;WDKYZWF~Q$!=yS?j8TSo{1ZG>BlN+*lBKFs}P)LuIGLC&C?JEESeSv z9DA!9Pr-$)gKY$)+ESm3gA?1S}a%D*uh zQB8vcPkCCjt2aP@b^0mYq2knqGta5fOzM)pDEU0vBESK+nwjGeFPpc@j!uHe8BPu3 zE?R}uC$mTAM07WG{wNvm&FSvRNd-k$t_(O92w7ZIzKr|f;EA=C)=~AuLFeub*;((! z%GiZ%{hT|mzR^8S*=Ram<%PhVCil4tI9+=AG1e=L{gV`#9g{e38)f3t)kiBmY*0tD zT^_HI6b#~Fo=HcoRFplX#l6*^isl9>TycJmx6|Ebf#C^Bg1CK`0!r46%ae0ef`NlX zybU(&$Gp2E_iAQICk}~OBIF=t8GcVp;KQTqIeP&d26M;|45VrS8jx{L`b1S7MWQ*K z3M!)3mlJLuCC57*9q~*g>S*wmJo!?Je1-8zjh>bqxC9)>Ji245_G5@YN0Wkw&2E+`A84xodW$4v!c0$3Ppf13e@bWA)vE9^J)|>aAJQ3ee{RKp^mEY6Lr!DnlkQ~m@_VVW zdY_k~QEI7iXOj{Xtr4V0NGfGC98zju7r4rj5GN>n@Rp6|U5+!crHswcECpQezHWJ4 zV987pcwurShey=OJw9NGU$0@;-us;Jx(QFN=vjKF~9R+?-xtYO~A5 z{057mDL+rRLtxn)x4XaI#*WCF$9HL_B?1@dFL-he`B4Z9y(t-4V{Pn9G6*cjKs`E^ zSwV!t9b4_(TfOq1v(wPos1;TSM_?m#R!~P@!_qvO0t*TX?$@!O5;jUjVvPD0(_s9& zn8x};Od}%W%jWCn;XN=`ri&jigh^bh?838_!Z?~BA}AD`c#;YU389`p#5ATsnLc;89NgBLFSM7Rgb;|67hUki zIph1MUDS#fIlfHh8`5#7_If;7nKlBQPSD_@l%~$O4-~4Z^A95czcHC(z$6Cx0DpdYvH6y$THi(ufTLqV(ss>K}4C>0tqSR21co`aW>W*Ded~)eL2CkGQ7}Rrr`#{m)FJ6ppNU;WY!5Au77n)Xx+75_YL21Q61MiY z;y%AZ{K`GJLL+O!dJu zuDV)d^@R3qx3T5&_K@_n3*wLdOy`P@I1wY2Jn zo3xYojY3K)dOqfB!xw^s&yB~amuEDDO}*+9`FS@ZW^c*qT3_{zahID#`0f;ZR%ziL zmX5}&Pfxw4g~0I^nWdCt_xX4zA}M6XqJ6_vE>%vw=Acp-XP*5!t-$)pl9V9XIw785 z&eE)Cd;IIfAe^gaPSB1o(lq9#Ns8U41;<^`3Ip;3R=}?!&zgNlZ0e_VKz8iMs;f6Vd5lXy=TVEnshbzpo z+Nm?d(-o{&2t$g8tR|mW<^15}`$$G(yYNwI5+&KJE*O>^C?wi<9w?!v-(MAn!pZHJ~ z`(*886^_h5pCaYJ6Yy?~kxCjknq_o3^4pUmP`~;Li;}zx?&;->_tJjXH$@doUh1RV ztvk@mtDC#wouv&a}-=Qcd)?OV& z2CmhlqsT){UdPp$_j3pA;Q9~2)s45IYw&xVl)bz8JidW@$5E5G}xxB zFNRKXiJgkS#G>=!IKR>(NfsGz8rd|?_arFcJQ{tQ7$UjvA>L7Tk4$1JE|i)yIu<&? 
zLdV{)TaH=Opm1>#d9S2Cf+``=-Lv%0ewK2vT-Y8`CX!jWs3D&rz@2gMvX#o02=uYJ z=q(2LrAt$^a-4b2Jq^cT1Je>SxW(IvL%ny_K}dlZu=c!b31C)CF2ub?GE^qr5+{#K z33Z-IdlSo%;P#lX`jLJqXoS+Ql8ud;faXrn$bX5R21*F>G$;I1K_|bkz{fld#HUM) z_#Wn0P!56Q88x;YA`aPU=8PB-hU-rna?C%sQ_9Lx_P|6D`yDD*mboKqqMB0F3dV!A ziBgZW1}sdHe0fY@0y{sNTZX%}Uqk};Ug>M-atZaQp7$i=a17iySWR)(9HjDy!`B3-rtwejL<1b5rJ zcTYXrzz6A8p^1bXIpWAzsfwb8vmpB_I>5Qs4@41`5Pl6(R(rZ$sd;|QZC-l2Q z)T;6Q#s<&oHt4>5T+?0|W=v1J$v1;DWFx`Za&l$6<_|(f3lGB(ra!rbG%Mt15jD$o zHtnkwkVU8)V~zi;C$1eAsabL^bz?kjA;9)HWwfSr9Ys=CpqW0pdv9&VOGaz;`j+nN z=%+!_4J8srhFbz$2DNwKwO*#)2~SBpujIIR$l<|9iqqtd)~u9PXaj4@CO+6~4M1a& zV&pD6-Vb3fFsbpoQlxRaudJYKMz*~a$)@s@J~R>yTdB7}1jE1h*k~F)iA^a<#D?m| zMHwQcA`L~C99o}?5lst1Em8qxG87;c<%@P0AXMY&#HwQ7mA^;qyjxgU3_3Y9;dc%S zQzsU|W@s%L8FnB-_}DkIsaNDhZY+p2v%?Dw$=Pl0qmX7Io9y>|x#-;Mx@aOCSCHjk zoL|IoEq&k;9ovdz`l+A9TP#p{fkd1eUAG@^1qR}_RHA9o-t~BoMyExCKQ0@olws=< zIH23+*xVLr-dWf$s}_izwY@^@op@bC4jNvJnYRF;#GoE3i*P}l^7u*^47rbBGrx4k zR9W;Y%d&Tm$30SM3FTldqofgvg;wm#Sm?wSN`sP;Ib7?CUqx&k)@~KUEzNVjgswYQ zMj}vfycEHw3nP=g8SQt2Ye79ZokZnhPwnGUmV)ErK~x)%po}ni0ypkg#_&0!%NyTP z>C5BI7P(F%Lms`f#?5>}sU?GOP8Z6D++>5hd47v)DdC#v8g3Ofz>5dpKCT#sXx!PN zJTe#1=$lh64_BzBw#>@{ktgL;2@Y+ZG4pw_`!ILl8lRRF6ZN%o!@aM`3tcmCXhkb} zkEwHN!k3IfnOJj^v9S!G zhdh}x@zre&Xj{6`l0Q>Tos?V6Hhbm4+T1=w`+>s=F)UV{7v)5r4B^uir1#oEUIj;X z)6k`DP&|}N66PKSP-+;fFK$(Xp$mnKW& zFE>u`N8iv&q&S#|Z9R8?o5GwmgWy%iSI{D6@8u$g|GJeB9#sWqPlB!DQZ(O`be8>5 z_wtcOSxE{8dQCvdf(B2He(-i_6KK15E=2P-dRvjaW9BlvNIf$VwL{^#SO#WQ#4*2W zWaWD;`PapfV})Ov>?nOW4#e7^ZdjO@P2aLb->SOx1`yM-d2w0OdEIIKCd_|~_z3=_ zFd5i?@OH(Ies}sW(c_2fyx$l4$BV5nAQTUT;(s_4ulp2Medi(G)BF(ck^WN-fj_*q ziuvc^p6^oe0?;5N^}nRzLB6HpAHqGNzlM7Zehc?pb}bAPUwjDCy0~sZr4}POQ-qSq z5D(-;-_`PYbvie!YD`RZO3nAS#31%1x0Uu*RoNR?KeOC* zttn~BnC@ceW`_fY$Vt^ySQB%nnP(z+!}!V3lzY2x>8PUqNi1H^AxZh~V)3SOu)oFP zV}}tHBe};VJ2Gob;+vA-n&?B_#S&M+ULbMPuBrz_OLR!pCNd{e=y9WyE_HS)h95hS z$g_&;J(Wo#=X_G?zmUF-m_sjKc%WdE?@=Y zJM!$;EhN#t>B;TS%d_5bt;hRPWKPS>^ex`g)`Nw}&`NLEHzLkc*++zz7)nTgRKUPG zIwqK-z4q3Cn(5S5#l-CW?7Fd{5w<#u_)JpObXhW}Pc1#QW>=ZEE={xnW24!Z*%8GT z?G*Wpu_oVw@v^(^W(GJ9!FbMJg7M6BI}gG55a+s1>`}CrkV6=sS%uw;zXjuIYoq=< z7!P44@=GwD{cpke8RLK~d2?J$^6bK!07RK`qT~-rb!w}YAs@J<^ZS3z_pDcX4DI68 z6x#=`I2H=#nH|5aBqYu&W2`qCLx3NgoVvuv&!SU#8Do$+WbO`1_=RS@K8wCBLWvve ztnyso@Mynnhr!DbVgNLMS>GGr;cBNWL;-*#M z-C#9iul!;D7VyD&2>4Y0E#TwhQa(4=y%@mnq(47ZEQrqv*M<|pBPjQ7EurfvQLh*x z>Hz~1hJ0x(^cW1{()-US5}7LWbYd^gn4e7w8j}P{W7!2_w-R4HrEahh%Y4NTPq0ru z$8w@nO}rLk4T?w9r5Pw7!!x77wg@)gmn$Qh1UmhI%nszvcy==j)mjN zvP9!f7=BCxymAA%r)OSF(yJci_wU&C3GERd^(6)gMyu6v%d8At_$5Pl5a!#sm%OCJ zdX@Z3Fy4h(fNt^~PQ>IJ1IL+mX6KK)6Xfct#MRYGO4FmB``_|CN^wRTeAU?>cB&^f zxT5wo{XB`Gy`Y@3w2*uY#xwqtVEn*CFg`E$?}PDt znj^2l6LOt3~a&VUPay`FO>A>b$9sL^slB zHgGDzBoe7n2Dl;IP&$S3IkCoi=rV;l-_k!|+x%>SNTd4h z{qMpzp1PAPUyY`@V&Jnj@B7DhPSEf<>qU^MXm8gT`);2OujqgJEVr6}Q&>(K-_K9Ru@K@s zXJ5_wx&}`(nVUQ?PW7b~NK#EYe#~&?OABn6W+&)(kEf<8`!p5nX=i6g6PpCos5G_a zUpzC8&N9~*m9!+gR`5@g^JnX;_VRyc)QZyfD6M6gz`ZJh^_;0lByND-L;G2FDQX`o znPQQTJcRuwB;y6Ob#rP*I$m)M;s|mYhy- zEN<~+Zu0Cjx(^dvhHpD$k>}f|B6GBJ78*pYj-nM zNhUNb?1L>%J+lXBiJgtxJznmM7w;pRIQ%zgA%jzMD0^QK*gR1-s;u1@ z8HpQt`?nPRFe3Mv8-+2i-QT6?8yEetN-2L!(QBH2OVM*Tc?B|kiST5FM!4ScWY?XA zBcdU&^il;Y;HBSvk~;A%{X_SV{#m#Rmwcng#T8aTl7taw-GVQx)Ru-ARSrLS$|HOs z+QfLifTdNuR$?>My*zo;--0)vq}4i~Q1dL`(RD*BV6O&;k3d4|V;?BPJ@`K-_D`kg zzsZ;5uSwDWK`DUQKqlzV$OQH7Jq8(jV7B+qrs)5mH~(B{bGV294caju|CXZvUxzl7 zjChRWSyNn$m;s#C0-|I5&5f1YmLy$z6sFg^(mH8svxei-z*{Gz%5kO29Ehid zN~qL+_O`v7un+U6X8%9tPvw7`KNYJfEk$2-)##268JX;iA`c7|*~P_EQ8bBd7iI{O zlV;^K^_-2@CeD4CJ9q4jHn^q0@2gm$J}n;hEk?he6hL?Vju~oS0&Gp2-%*RKDil^R 
zux#YK!2ckBx(>vG7XI4+>X17mxRqW9tLK==B9&%|NEEXI$`>)I zA5~NvXG)>R^zGPD#*i%VDU-}{3Q16#-u7oVQ0Qc&d2Zc|6PV;&7%T+AtwPpn_gN-! z-9Tf2paEHCf?Y2`um@xsY$|K>Kw(5rXpF?EtmPJ>o2a|uLtwpj|Kn88uidalVNq~h z6w_~>(hS=N7OsG;RmAWeEtx2`+m*{Z1B9T{CfR?aFN8nx8;W~k4$o@4Ds;%ja_c8DM zTW21_p@L=(Z3S*BaUM8v2>&?CN_crq3$*jeP*H@{#bDcWPv7ig&=xt>&R4jNsFSpk z{-aFG_fDg1AKe^vZeoM3vsIlFUqaYeF5^rXR&G)7a;F9_p&8j~3Vpajxea|T){nuc z0#cm-BliwvsG7)vB|r#lc}X$WGhIKjq{iB+s-6p*8+kLdkLy}tv&~oMJ#pC!a>^3* zV`m|KjxC*K9>`W*%5lb2)g?k4R2AJ^X4rL-tIdqA3kV4wb&LF#M*=ke>NBPF6DJ{ocLqDhhT zlz_NWDO(ZmFMB>|FOU5_r+WNP*8OJ0K0c>7adpz#f8A_RM(w?F{Vio5{9DT2!*t&i zNqks|wrzI7ACK%>IFv|h%_@nd+Z>X!`_i&~C#$})yY2MwVM0}|;Y;Kj@XdDI+nnH{ z7}H)^kS2-8&p!_o8Jv|(J1ngyub;qAs~%l_sf*s|adHSG_nnD~z`EU^+$qO)Q;LwG}Kkeo_3B+2d z7l*Z)Vbi@8^m{96kUr*&R0|8TH~no`^|a>b%<^!*aE|}*?Cc(1k5-nSTHCQTw=T6L zLbFnX&tF}hUN7q{T#2o-_%JVC+kiDwtaG17X6XbMZKwwJV}07#b+)J1CKCJFJ8h%m z8A7eqgR}pC18us@>qw}SQ*)Wm>G;mOw=aiW*ZFyHSQekvdO<1RyF84m>M9Uih5Hua z30F2wIzkJSJ3ox88vXsaD)#fgjjO)Zo@5a$7OrbE)_U%p*>p-6fAVY0o|A_5?_>5m zzsBtMzlXNa*ge$$3aNjWFa96wU5slT%2cclPQ1El`Hh}6I9O@Pz@sR2?0qy|V0 zkQyNMpSgDZ|30Z7wy*x3CH@Ji|2}~Kv!wpM)IX9MNZtd<`=3wVYantDMDBseJrKDEBKQAqNABGSS`WR<+l?V0GY|ri#Vw;Bo4(lFO`(Pp zZ44CYi2I}wQcu3#qiL0u$wj6etDF&7e=B6YkT_bY(fLXYUy%c6v0}h-9XU_dBRoN<#;`c*%83M$tQcqqsZ@{;} z%uhWV#x1T2Tgc=03=Ze&31jUcrAQx# za>t)UPnClmHmqVhuIy{VtYZRZb}odQu6;%E{wYe;Yg=yg`Z_OK#_dhhg}FFpw^WC5 zykwyVFXwU_$D_#CUh5BQH><=XMu@t-M@!t}IBJn!J1;u%HZIxsYFDXOGGOQVRc4t)&CRWUb0yVgN3|z}`?( zqc{D1mTh_)PH+-^@l90R!(5Hqrm59Sj9utfLqcR$xTEPrv*RDu6IVt$ zBnHoBBWk{*@~zL|_;XLwJ{Gq({m!Zp)?b+Uq`Ael6Us3r>w^$T1fGP(1f5ReDs*>p zm@k4x9v+tKA=T%c{S>p6{TUBy`>I8JeQJp|5P8$gcPQ-Z`9x0W)||r-cg_yt$X|bc z!(*KHP(}D!g!;%CKmZQ?t<}LFzwiq@` z&&zH!xkbnFo?9>S;9u5+nwskE@2gS9)S#9{kd+ykm34^s3Ia5xGNyE;4;yHFW-EB} zQ6G+dCX?y7;XXfpf+U}SKM{Sm{XB?8+K956er0@@q_imI%@)$jus)Z8PG1eBb+S%R zmoFb;8H)es<^n z$3g(JfPnMw8F2nvX8r8${f`|AAPdMf1G#1(*9_#E|9_Zk29O0H3qTfttl#79f?xiD z&-72FF8-ETKRwU-RVe^k0JQ!nX#Mm&>sLJrPz#_IKrMh;0JQ*W0rM z2Hd|>>)*CI_^VO?wE$}U390qNuLD3WfLZ{x0BQl$0;mO03!oN2Er41-O)X|RmhaD` zaL~DOu<5d~(EpJstzVS_s0C2#Pe-j^^(a6sfLZ{x0BQl$0;mO03!oN2Er41-`!WjC zFYi+RmiH)sI;HhPA%Ix`v;HV%{d7v}hYkgh1t1GR7Jw`OSpc#CWC6$mkOd&?r{`JU zpNIU7%=+n+)~`wd&;p?KM?veSQ(C|3QGi+iwE$`X)B>mlPz#_IKrMh;0JZ)FYH|F# zRtH(=ekcV{3!v7YkXk?VC_pWMS^%{GY5~*&s0C09pcX(afLcGjI`}=K_3v37{GkxQ zEPz?RC$pSOWyI&czoX@a1p@NZtAjsu=s!Z1?GgvdBh3@c35JAqm)n8C1o!mZ)nvE1 zU;e99mX=irP$H@Lo(PeJ-0kV6eXllCiGf&L(&G&Nc@$3O^dwWAeN!fdtYFM1d;8nl z?esR@hxWCb_xi7>Dk>J;$&{nfvhpi86uHkI{b-q!s0MtS%37-Xd+N#} z%MhkBaiEIrI2oyIob0?&^2K) z>KY$=sMvFB5_a|%$1qNX@Qjsy=N|{}Vo5qjJTF%yXK}|v){FWU{X-Ty?Vek{xU|@? 
zDmC@yT|RM5hqf#s!Ii{`VDt;mPK6kgw>>Cc3Fr;V3{iRdd3|gwj%#_|!nzgi9WW$k zTWb{`%WJ^H*VJmw6G@#jR1Zm)-0)#X!bMyS*FH`6`604~V^uIHjyWU>6R7olNS^-~ zAmijru0*->&cxm%^pYeif5m@pm8;aJ1$#A41BhT~6!7Y(4ebKm69jTWY22%Wj9GAoD%`F6^kHaMk z$HaA(%iW&ID^KBY_Q^_CD$qzWs_pOcg_D;OVTx*pZx(o;%d6AnpbyQO&FlA(>+;L$ zR1iu{NfRwRf(ub-4%0j7eG~5E*mudh`M|Tazts`!!z?@^sN7RUJSY4?$=lUJHH~3s zp=A?c34bDsLoI8$m2hK#k8mDVF*7@74t%CF+2XCpmnXA}>u(v#TMjfTV>NF$@}N6y zia6OuV5X2g()}3{Rd9#YL2U-5wz+XhS)1ZI@ZJJ(wdz;0 zcEWn%=b*TQPSdzbNj%pMX#VI^0S2D_Hu~zX@DrRO6l%~OK-;6zbrr&vbOQ}u+3+^; z@vabA3~e;76@o@KjDp2urYrdB8xwzJ9IHteqVz^EHapp?a4JJLFyO$iR~|3Vj zXw^>IT@et&!|y0Gh9_?Q&0(6yuJGtH@n{Q3$tQ?J?)EBnB@DO%}ttd4BF^GmKX7wH|@WDh|C;}w5cAYl`dL`z> zq*q@+V}SelLuJ2hQz+v0_BMK=B!ti4m$=4H35*U2BxuS9P~%^$Y}mAW*Q8!8-%9`z9{=}SqM$g~`B zcT`SwL6##rd=v|$5_o=z`OJ8VsYip>)kd*TKueV9NL8?kSc&_i(1SYMEIutWRexJb zub#7NXN+fG+5(hD_y=zf2Kwf#Lv}Swh$2A^sUAhc(J#&Reh%c@eoHa4gAsU+%hL1oE`Dxp{rPrG9cfOYnNU>Dm4~3onA{86AOH+U`9q$S5=b zZyvevU^V>Ub&OZ0lr&-7@fHfNkxyM6B zsIi($fF`knQ@fgGh~Vg06X+MLE`G!If`6y&N|5y-49$(i*KpIL84F?z=ldI4*#Gs0 z7CbAXGyUcK7to0RdP8e@mYBbPf{ljygK)<5x`LU{)XChJk11jVQ^%Hhdqfu}R!J^& zn)08X)hBLRTDHkOvSd;0fPj@d9#8eyjo*c7Afz@LoPE2Ixn}F%bWeNex?ugXM+$N= zqjlA^J{TB=T;%h>K;i3*EbMQrcQrF_{e^nlL$R*sx|Mt2;~T#ld%5}5AZ+%RpPl;- z9G|T&Xn}&3BODYmjX1SyNKSTgdN!Cvlk9b z9#of)1&0G){0TDgo2#lxYHI@hm2DZk7Z7QP6qtlxCNG!J(i)D^kF%HY!Ob7e&D`y5 zuNSgKJ%mSe8*nz0!@bN7^vPI6fxmEv|8~M6%vcd(*mc{MekMWz5$1EK7&EormX z&Ws)DPhx&9y0+*@a64iEyQ053QrSSjM=BJtW^UKvX;k9M+V=it-`AkbL3rlpP1 z`(Q2cRah(rEs!|qP*{)cwpP=FB8dL_`ug*mBi5qJgMo!Fs*eGIr9CEIH!CUV!NwF)oE%razm9+>vG3@|rl~Q%$-! zQ&>q4bm|#YPvMC(Vum86m0}W$N4IWFw)w#T^tI(JEqSC>52h7iZS$9h7jjb1z+pgs z`BD7UN%x;-73*)DU;T6<>xV)Bs{mI0o~-&^-p%@XZ~&>KQg2{0mDmygr&G!Z!q{Uq*U_GdQdYo`bJ1FySQCGv= z7G&FdIZ>IBvOC3FE*I9P$cfeRE-LO()ghC_Dw_l!*?Q(y5-Md)O~@CT>UJ;|bJZQE zYS`)RztmV7rmk9zC24b0TRUX%Hj|XH&8;m&GBHoBk+e@_n26}Vr%tm-s;kr9zM^|K zU}m#9yIRiWP=R2#Sl$vr&hHgXes@ZK;cAb<^KSL9VHrc7?(@`+1?A&sh=?!7Sumu5 z#XFcodECZy`%u8_3Yc91vnybB1-U7# z-_EaoI+69O9{oqu0w!3%1Php80TV1>f(1;lfC&~b!2%{&|H%`qe*sr)zi~D2r_Wum z(fv>gz!iY2KOwGu=urSyzw&;}pp`MLwZ4O+u|2)D zt&x+Ju>-xit%H%dzA3%AjiHs3kum-En`xYMoQ$+gKYUC66E9=^^m^bAg#cIqu==B5 z_0y@VA379Z6~HQhRRF61RspO6SOu^OU=_fs-^Qxn^x*Kn!m9r%1h5KV)$hrw-z9bR zomKzSq5p_gKAv$>=#YT?(EF2J`R zv;06o->Eb_!&7A$6L?tCuin8kAsdh1l#}&D2Z8G^woE0@5s-N%CM|5qs&~&_B~wMS z>vmk3{|aAR4bJH;(M4EI|*7iiD4EUlGA6^^kar7d$3tSSvg z%1Fj|LR5Z_RbY-qzD2=6Ek(Y7es(&vYL;w(24x&9o>%FD>qj7U1*EQk)D@7r0#a8% z>Iz6*0jaB>K5fDN8`lDV8d*OS0+0nD>-R*~-zKhpdUo|ghyEjE0eLGRZw2J7fV>ru zw*vB3K;8<-TLF2i|Kz-tD9CdV2R8?OOWMCK0sg;M0$J%88R!@pnP?dq|8mWOgU*$M zO_z;@{vT`wM&`YxHwFU%;er4Gk@~%A{Tf$F12#(xNW4X=!e#3E>1IaW8=Ac*CW~*?`iyuwI+PZKq zU{R*w^QJ7oc3}z04wql}GrpN|!L@s`D(>jjJ0>jQ{;P*heg*lvvmZ-&Z46>bFNY zqIQZZFnBUJC^|V~)Pv#1LM+9bvnrYEyxF|mdFhk!YC=N8%ZB8c-jQkzodqw$JQDje z%()VMo1}|I6;hl^5MzV|rPQF}Hm*3UHHcjkpM}4=bZ$FeGdHcV&3cXM$IkM)Xz(DcT5*sdEwzrPnb4O=TgG9OHU)-tzso` zQVxO2*?6&gVuF3G??EkyF254|-=mw*e>u9T&;d0p_{$AWJP;81->a?vuESpe${H_d>~wNd9U}1+`_WvQiNPk%U|-IdG?i^`zM8z z3G+$-sXG^={J2A?T0fSQ&q2^18fSjF z;-1j6xIJQxiuq1}fGL?=-|*BlBUSEh{Mw z(uv|*R_2}X*d@ZZP4Fm#R1@N}E!WMifNZbvb7%07R#~Bp^UG+=zAHtaKozEz{wXWM zE2XaksiMp9r~IQ}Z*xM-sXz3i2uyylAB4= z!>?YcC+silKNgNIew4-q_0XJ|v+_zV_Lh}Az(Mz${`X_m<8#pgZIt5Jf-KYO;a5OT|Un2~FK zq{8V&g}}7_)qn;jrxTmjQ?k}Q3-4HyADKOr?QCbPyL*f&B38~Y+ey8QyY`aD7cpF{ zNTe0nfdH{Jnc5JNSURqtp}&f?LwI;f`dGD-Or6V*x-|ZVvePWIhj_%E#PNCMbN6{A z=^fb&Yk6s;Gl>pHYTZ7eQl(f}yj2yeW~B=XezXH6Y396iU#@PHoO0B!D#DO4{m@s! 
zyFApps7Pyt1=OM+KoSH~-Vm}JzBG&R(TbKnoX$j{h48bIuaixcr!qItLW%H4?^bFP&2Z zsFvyuHH+FS^CYwit;&n!YT2q68Z^7_W(LJH)WJb42vf2j35r|Gc$td233r>Q4h-C=;+!(goZ~FO+vOKXW zrH&txuWevm$Z2}9J>za+k#j|T5p*=V2CM}ImL{!L?`gh4-KU$C9^pcyl_-C)5r}fL z?x0WUO`oM;a9bF+QZHIpdm#*N|7=T~H1vygic8ce`n9vA#$%q9U9wf~kl{cB=o8|C zall+$ajp4^VTThx^0S^Y6|AUZE@=o(!i-XrTCeAbS6GJLo!i_W zZ|##Jab^~T5X4O&!yE4w6xll6C05L>q`-VDHXb|heg*}WZr=Wgy?iP<*JJ!PF5a*c zl{XKkYhZsQX^qRKNP~YQ{vP)8ZM^z-uO2JRys`DD<19+89>Y2|H0Ra;RcZd_cb zV>*r&t(7jI4H~twD(|pMn=kBzi_QWP2rrmpzB+tqA-lDhZ$L%y@*xm`c>zOeCua|) z-#+}J=A%tc%CrI(>;<(2qi*8atCUdJVh)ksNk@Ol2D0uj1yY0@uXnB-GYeC?R$CxhHD9P>6Lx***)X)u4}SID^sTQj zXb|Qyh9|z=2esf3B$4S5a8uDUWo;6x4kF1OnwU@42SSiM7HKf*NQK!f2-e({H$9Q6 zZ7ync(Ru?>U1FcsCDY2GG4XN7&AuVlq&$~;^`VOPbx6t}3Oc(D+nhf((#^rtE7z`1 zWDLI6imQ0fZrxks)sn0VADq))%%;4-{?=1^Tyw1u%678#p$E)`&iFH!?W7giN>yG$ zHMk;mun`|AOf&RrgHJ5U0#lrkLi>)S)Y^sNsEg1%WZ&PuNQg?oL!Q?Qz%}6}&*qbd z!eH;rWdl3dM#L_AmI*c_Ob2O@Y8V$U+HGzRucMV0J!_o%hQcOF31_A{{qafEJHiZR z3|;aCCZqxK3o#zk%<&_*S8eU@xagK_a-5SbpFi8pYqr~OYH%I9))pWqFAr{zC1j`| zaaBIj8n?oNnJJBX#%0k_oVQ>#MHsopJ#POcYs}qce7Yr$;#) zK=V&G*ggn`Q9&X>gmoS+nWI_4Pg&+BC}Cq2D;&6$sU$8L5w9gQTTvba-9A1Z@7Z2# zowMKGuD!^4tvZeFr6ho}e#Vn-r>4n_To&}c&nIz0Cd@YJ+W6bBYvqTPom(2tt<43xbh|?}FWzzC+sAsl zl6M}gsln$?!clM*($!j@v->`MyW&_Di`Y}2=c%2GnWtppi%nz49Kl>1ftb++!4f_h zGVOB4W@*Wl70$L6LSEIoA#19=pHb>?xfm>iNv<(mY+n%;EFbD0rIe1m*r@g5Ek7M? zD&fqWB5p4+*p@Eky=NDL9rHV-O_RY*vnkXE)e^wpwJ$_zyvmy0gASmXaXetvDZeCF z)@NxHY3FuXItsPGC(_A zzaSY8wUDIQxQo|5R*U)f`!+NQUzeTmPl(AJEA$A6zUa_7gwSudBQv@jC`#%$lbUlP zsE%vNSxgf?*El*kQk86rt1o_+c%d+!Q7Ah~&$#%}J>fLv*6G0LC0hiiJu-$inZFh` zm8OT_ZuC=o_CbE6JF1KsKO97SOwJ*XT;*J%Sj1%KU^U|vU7rp$szWAI=kDvPqlCAJ zLCy)v1>4ayb*kyz)BTIJmgz)1{p8#RlEm%(@>@^BuM0G=z`(@v+gTc}v9%;tOoM`g zlqnDvAo?xdt}yW3UC4OiX?tR4W)9Ttk(1b6NqS&vzmJd|+BqqIc7AmF%x?Zr4&V0n zq^JCd@d|FbxnG>o`T9tS!M%aR!0P%d5@s$k9;1v^$nJ!@LUPSCr*`U`!vV-tCGU|O z+k|MM;OepJeD?9G>^Nj1(wgJ59?*1B89pxQ1$K5eA~zZyDGbjv?E)}%Wap)@=cG>` zuFG;Eb1#%&y+_`|HHjHhquVgdI=>VWUV57(qM?t&S%Z>{ZcQ3(YK|F|NHCPawYTU& z;OpX{jGtMRJn9~Gmw&(bv=Qt~4FO+((5=F#58jjn_5|^NKV1L!wfKL<8EnupeMQz!s_7naWh8Dj~ey@I?N`U(S z_x*X?_xkr~YCwH}`T+F->I2jVs1Hydpgur-fcpMO_5Iw$kC}y*k?G&G?fG|V0rCUn z_vew{&ux4Dowf$-2iOm=A7DSget`V|`vLX?><8HI&uYIPR@MHnq~BjZl7FZL*blJZ ze}?^jXluZJfc*gb0rmsz2iOm=A7DSget`Y{Nc;WVw&!0x_V3vC{9ly-_W|zv^SJNl zwmtt>Qv>P))CZ^!P#>T^Kz)Gv0QCXt1Jw6Ns_*BwJ(>TF+n&Fw1;`JO-=9Z*Kfmqy ztF{L02iOm=A7DSget`V|`vLX?><8HIkF=l2PaUZEWn{tpm&=~|`u2uq|J<_7@2AFnf2S1SKEQo{9{2s!xbN?DH2^;VegOOc_yO<(;0M4DfFA%q0DgZ``2BE^ z@Siy9`7gllhf)Ci0Qmh!!0(5y2H*$44}c#4KLCCJ`~dg?@B`on!0%5AzaKW${#f|^ zPzrz_0KfkT`2En;0Q>;>0q_Ih2fz=29{@i9egOOc`29)Y$MWx6_57g}06zeJ{}J%} zp{oJ-0q_Ih2fz=29{@i9egOOc_yO?yBjG3XGZzW}8d3UfZ}>_4E`1lv`0Qms&0ptV72apdSA3#2Ud;t0WNb(8(_ODiUItJSBCtYlqnCbq%PkA~tsPi#{ zfq=+CfPjeq9(8^VK1G?ZB?ctkr8a~4_RVCO%BBDXi8#%yr0aPp%FY9S`pK=MUXF$DsHrYUgqn$e)ygL1gML1~b?KDfPMPslK zdaG&T?_g=c7r?^8!bkKS7er07keK2)?YAhHWVrn#qmj!fmO7nwUy= z*jlWoVTh7?)H+1xZ$xpnLc~xiyYOa|5)na(T<(5Z1Z7V1ZfS<#GdQ@6j-tKh{BTu! 
zqW%IU?u0gjlRglMdz)uQvjeH9khcYPkZBCpS;f3YY-TQ@Xe56juwPqURU(o|g(yM^ zl#8|t<$Ohwa7#bPy26Q60x#~|xu!{xH=|FEq-#`ILv<>do!%chqO-`U$mGy+?`GCd z)%Q|-Ft8@V40d3qR5!_7N|Xmo{Sj$3Rt7)it#4nDhP<)27EM7HQ?$YWaCJazML(wc^V-6^Mff5; zPO;eA1}Z1@3>*do1OysH(WzX8a=tL^{5c4S6fOve%Q3{pb(U51|Y2 z;}X92i*%F37PuT0lbCi^wmK5Xn9o9SQ>(Hi%md>~>F(FuPw-k_PRKS=d`86JbkKD; zZHU@H#mmbI^}n~j-k4g*^Kg2UZ9V4gh?Sk0wKxrt^~;4$_$r<~wRxhK7e1rjNI-CE zwGbd1N0aPyJQAk0;$$ryx3KE5P*XKYHn-5lJ-L6i8*@x3Me=|Ns^o%Ca(1beBC_P+ zumq_>FwT!PJoU!#)HGv3?r!|rq1@C#5xhYy<{gs~TQu8^1L-mGM8nsdy;3H;Q2qVA z$N9@xH z1flY$9zp2ZAtCh8QTgL(%VN@fEL4^4idrOPvE*dkO86-nsX^t@_kn(q@9dq(V2V32 zXmv>4t+J#R=RB!w(5=_|GDAMvhKtFVF-|m=%u3Y`@u-NYMQDY*qu%Ai_h3|=jTA}F zd+iXD#=0Rq%$#$Lqpuh@f&@aoV;$JPWKhvBZ4*!?$3~kCU4)%)Q7}?Vk^jcJWIZ{d z95qXYY9s*R6q`+_$TglIlm;b6zDQ}@zn5B}*jC}IiZEnMKjxL;ZZpNMBGOt>0kx-&_OFU=>FoU8?CP)K6aL_ zYHQ0B&r-}Gk2cuHL<&p!Wft*GWz|vR)`>+2$fsjMQSAxNKjEuRLrc?Sb&^gn*+swb z^c5f}u~cgwg)t=%j*mH1a>tqJc_5$BIgZfGkxx>rxIk^k%Xh%NiRA2)#M;KtNo=OF zJ_=f}CJlNFJCz_2*ssoQrbLq{u-rB=5i-7B`Iboa$%+z1b?}I_W-QNTj?-nS>ytW` zi(-=ko7H>EQsrYI6`}-a=coK^Fqg51bmDzDvjhPI?-Go3A%+?5`)tKmbf2oD;Fs+lJqn$cD^%AAI=o9eE;e*Y}=(k=1Zb3CqmxADj6tI)@BMuczN zm;G;QaO1hw+mRcQP9TrUC%SZK_MlQ$@@Kp+M8qA{IWwBS-H+*Ta+YRd@9cscHezaX z*Tkf-i%*nuVffXa z8>|ISXBOMi4$N(>wnTr0y(d(uAEJ0JJn-Bflv}rYcR+*rV;&F2wkn!_YrzJ89>r#9 zEYe3w2P{dJP(}PZ&cM1F1_+ZGXwhKxSNM2cA1ce0yu1Y=E*C;iocpK+QiL4OZ|^-a z3sbtZ@5D;0=%g_SyFTP~_brMAzlszzg30j~4iAUrkK&Z0>F$Llh7p0$ME01O4X$br zmSmKAX~o?b4LZh*1Pi8|PMUQnd{VS{xu}Z2hk-(KYuAiSQ$M*w4Hg)LV#cyz*qHiU z{M84C7TJ&#rjL?rbjs}h*hq*6UX&Z?U8(KRwT)#-aNDQ13I^I2f-ANIZaeAE3$o;w z8^{h%y7t%j;Ak=GPbil#T+>ogXfp9ib4hV)e1gv@<<~`BQAkRAj?zZ-VX~@d)E9)X zWX-70lJPMUCk7(L%R_zil8bw!86U$l^u646C|GI%n%xOf+@(gQL_zIJTOva6H*?T`H-X2 zpt1NiP0YFZR3K}3kLYy8mMlXm_SlF%MJ{%8T+TR_@x|Td>GD=+B7(bL@Km!hZ*^_z zDaQPR^T=Q^OuVUmjmEY9ytm+n;Y2@8c0$a4s|hi8lHcMzimjlhspjt3pxVT#D^A)6 zQ9&?#U(pTt6_Jn{`0{EAoR2Kmtjc4TgO#e*Q^>cSJ1Kd&Nw&7SUf-zJGCdM9ADvil zU1c6*JjyX_uAs;KBDQ8zBkcOFczV6VZMj*j@CjS>ssqH;G&GF6{u{A?So6S(xpgwW~ z!FuwxC_a{wF;^sxQuJn778h1A$+GLkw2efcZD!ywwCnd&ku zIB_)fBd5qtl1P?~BwEYX`cC?XN{*bx6TG_C(U2qq7t=QRZxP+vr&aNLzWGmVoNH4} zAE6>nQ*MoSjB@E&9UGv-_)vxDB3|RQe%OkBYR^7k3dI2%Azp)rh>yux;*qPIOOU{U z@J}$eJa7nT63DwIGWH+I++QfZ!jD{`Djw}8B6I2Tu+TKpUZNdE!Z#XaCdZdo%i32*7k1B$N93*vsZpGwB6NLH3WP6?ZS~bQa9Ko z4)I0YrA5Qjp#<)O^-BaOUde6RHQu`f-oR1sq8E1*4Lfem7RgP$NQ`VyR^Vb{BH|(+ zsk?cMWMP{Uxe5+9-qK06J6c7#$ICbx8%Ey*!PlC(e?qN}A?c=HsnHW~i;m(y`H&vB_enc)^587p!6FBZnV!({OM!+bV zc0i=53@-99cMSJTiPeHI^y<094%eu1H^gDd#Y<`<8t-lySi+@tLrCT zKmYqp`VZt~{as$jkKg6h0{e@+Vh@REvwM9|sb7#p9y_G7i0aW$*iqVu#GoX_mts7g zzB6&YB@S`656XVji)pygo9dgMaSDOr3)Hvpu7R@y<}dWBu@2w*gztK`uu5aR($zaV*cj z7=p{KeD3WYb4ch}+)i6EF}MPKh>LSWu{wGvQA9N(SoAzmin+gU;^BW}h&9b|I6rbL zFuW*9-p-z-QKc|kvn@>r7E@yP3l%wSDXoUiAm21lkTVo4SZ z6OA-;cuS)AAEhxPK1X0*%_2vkg`}F5K-kA2(CtXs$jH=7ABgy zBesuXyhRL`Rb)ctl4=!Z5I!yj!@&D!X3#%!ka;7G zsl=-bsk?~`b;mWI846&vYj=p76u*lZE<3g~ZL}I&-l`V};OOUg> zdncgt<*I?l)3xe1hkKHi3mFsa<1~ez9%ZaiH{RcI8joX-n_Z-XsMDzh=oLACg%D>C zXdl0Bk>My_?hwjxeH7Qz8_m#ZcjmKSy?Av;5mCJ}fyfj=bOB{PBl+4&;iT@&t<#`n z3uoqY^G0K(UY7MiEwv{?4()Ys%6vCwQUkA;RKB9kJF`wVH?_rC?X5D(Kxm_YIf5c+ zob<|e&w2{0C$r)? 
z-OQZ7lNykm)C#hUQHkqrb#?!?)3p!ri6gV%`R*GIyor7b&!V(1lY zfs7{Cz18qedmChOb|^i3WfZt<;;#v!npIc$!7l$?J_N2`Yb*i0lQrN?ukG#b$)`&Z zeE}GDvfOUDRk1vc z4#xZb)HBEW_8nHAvhVgLVEVIy)4jO3x29GR5ItD&SSG9bC!d3iY{3^8S_}CT_I2v3<5S4%o7d~ixnVoT;BP$1t~!dqLgTfJqdR$B)vlvZ4~P94d0udR+v z9MlwEZ`yDiV{`6?<9&WT;skaOlHqrlZ9l6~4^@P_FCfnAS;I2JCe!6gz_qEwL^wR> zVq4bMkJzWX{iTggoWR!E;Kb(k-R_D|o#@szIY^(sLYQwjm8{rucW9f#DV=m_mo%~l zHuzvrugU}RvnE|zHMd)R;@3MCHAkJFbT~6|>hg(7;Y9A>5sg0`DepFQ%U*C#F?c0L zwo%@2FGGTw1;oQ9$Y2D_)#V_}vSm*amgY_JD0so1^V~4zE|a~uE!#4ddPPsm+3;fR zPN;I?jt7wq0mqW8^ULTZCJ9sHJC4IqpZ!FSvv0@{W=W02;@1ayb$a%K=CJ9=_j3h( z8_xB-W4(9cZD29;40O^ji;M5_FP$9(dl10Kopu*KWkBMwnNLQW!750iUC0yFZbnWS zHJ19)=hWyriQ$-dKJ4u3!+ety@eF*!IT*Bb{-wFKDydC^rNjU0B5fE|nCMmD^W|KM z3;9A6D!QvN+>OM%Lb`XkpPi6erL!E!z>w?LOjM_(pdp!b>6KC@KlRg{lNWsi)eP3EtKt*c=mcp`WI-CidFKFKCNrzX&Y6yq}Z6bqKc zLTed)pVlZXeth&|h5cf8ILiY4C8cV?q(9 z@41_px!o_(I(g(m8hd0z3Ti=^xluWK|1^l)<>O<5dAr{|nwc?cQs7M7YZ4XF-XC}r z%uw-=64Y#`KdZ3kh@&x<_e^@|`jhaC2s;|2Tb-_9SgkwvD;zbU`^gzFNRHQRxBbZ% z<0T$K=W4ErH23gzuOMuQtpo?X`4u=;U88E+U zD=|Ct;&JK}P5)f6Y;J$@@>KXtR%CG8t~9m<5u9@S^NHQo~gSaxy;9%e_V{wwCIJhN*K{Q64IiUyi!mzq*@ly z(?!zpVNn-O`gTg4%8--bQv*qDPXL||6KnMHkS#0b1_xN4UDDqLqM)0xCOe6SvQ}PB@SA1l6q;O+y*dk8*D%R8pW9X>Gf{{0 z%%uz`w}A_zM@6n0-I+Ya2r^RYn_!Y|vckUQYj)bbq_8U?D?wv2M}uwhhif3~5RQtR zGOpH(Ayw;N7^7gzQmygN9{)xw%-ltu<_Qz$S+@ldAc+umN4#5QOl6bt4p+)mkezX? zm9Tl*U)|?*d*gw*Ze5>21saF}xz17D$<2%fCy@p@^h2;ynd`ZBl&BP~qg!30DHd6W zGKJmKAytfz0AfV^3XBQe!)dj zO#iTXQWxB_NGv1kVL6Xl24{j#8+UPW53K4P^+zYV=GyVl9=@g7?S{@!^WJ&)YI=Gv zzv4BOA&7_R66hK=%$KG3Q<~L7t9CLkaum!K`ujb<%+nVtmCrdZWzdT~Nzm)3(3Z_P zXAOcO!Sr)MrK#K`r0dvWP~&7{%Z2lNM(9LXh1H+`UO(51l5#tGc21#CB^5pnW(b`e zJ@vUf!N*)1dl|orx=>NdZ!eX>{Hk&0(UuqrIcd*~y_W;`qFTm|*1fGuYv*^#AU{Mk zRd(b+Vqe@d7a0uA2!Gf>XSuJ~;Xo=GEDPMhd}6@Vh8z8Q7*~gc#~@fWXg}bheR4l? z`=Ndu(u@{&Y*(UeXmP`WY;nC#W-5q4_HN;75hNVAi9Q^$?|+hb$>%~u@95NPi0+72&>w1? zduGLhY^Z*6i3H0Q-DF3j*Q-6&C190A=)hpAPHHDNeixJ%NF(;5gjAxQUz-fIYE^=t zt6L8G{qmXJ+aWI0`*$BSkTI+_l3WCW!5YES0(~;q*N%m~Vz%MC@mI z=eL7WODZvQgp-Mra9?t$3GSeXD((kVnv?QaJ1yFfjkAX2<3mqZSRZ}n;6U#Flt}L- zl#s>f6AfXU-y!WSX3g5KIj9|u@{U9&9{x*5?F00T1~+%Tw0vnhq=lZa{|PC_CvpWw z*-IN1{jPZ1^(|`mX*iG##RzCJzCfdx3w{yYg*3)O+{QsEBZwe4dlqg88KLD@$MR3? 
zg~VgBpanS-qEGch!TxDtReEsJO{yUwiFs&XMVG$lPfTa|o!5M#Q3S=SR}D(4xN1Aw;*gfh36ARI86#PTMsGYjyp9R*woahf8I5A}>6k^Mp20<{Fd+&R zJ$n1o9Gt^*2{PVtcN2&SA4EW%6WghFjzT$->V871GJCco&iLZQwNo^?*M-OKo!~Vj zuJ1F8&HAy=OmE4=>c$k!|HO1Pz$DeLx(DfUO0~#DUc&==t%J#v_4wgYk;=2bencU|Pns0+(7HHDCFNNF-H;MS;ckO+^d7U^})V z?XeS;E{h~{bBcR<6zD6ZiRnrFFNMpduMRJ|(I!0wu)w~dm)=>}1x0)GA&s<}UDQlg za0sgi-Uf1OjkaQZ`6!-CwVmp=NIyL|b6igcv!ih|#t(mu(A2M!C9rye7)TJAIz|wV zrD-AwJJ)@&>_=lf#7YUJ=$7D=8U+%XV~i*2F_i*_g^AXy|JJ%r6j%Hl-@DenK+b%~ z{94iW^y8ifjJ~k@+pC+Z7-Fb$KmA9^!0es-o&ARa2=%^aB6*F^eeQR2Qrgfg6yr_r z&L-m)g$Ls66FirXH>hx~eL+UXv(4GEVmWotlPhp+CU!LnT+JzvTn_anFdS(0z_Cug zu^OB?pywg`^-DAEwt?wk4A4js#Xd_RxIzk#hW>yVzWd5#mH~eZs{vN?4K&_*?@Vbr zTrT>H;n(~xBQ)aG9N{CI*1(jm@%MNyjV(<3%u=S3$67B0CtaE10{LeguzCn|gbEAu z%XrmI^KXjHXV#^>^L;7Lzbrw$xn(W!9Wy`gg7Q`zYhycD?2^V}NGo;GSU zrO>8B6DBU9Ks?#~Y|azH&*yc1QT0|Fs?+c>syynB;0!E26Y+83DO^>x<*uYGqv0vk zfFY1De6!?ZJ>}`gvsXiYpPgm&72s3W288jpS0Qb-dmx^@$L&LM(z}_~H{c0TbKlWE z;q^M-T0N0s2+%1XLBLwkI9=XEJUr?rJ>p!{gL%c0v$kqkOu>L!b$&mpBIJSv?~S$e zHFpaiMS@W1>-Bw*HlaX__XNM(p-EsL87P<+E9RLNXZR3vwSbYQNDy5CniY{RG;Rx_ zvg?5|SQ5;RjYmzopj$hiHLnbxbxa=7C7Nwfq2c%du@EJ%2W$m4EurWO9;GkyTqsH> zo+TF@6`hM8-$xrCU@L*49$kdsaD20Vo$9o`6v#~&0Af9K74#=hkZ};oQ+PqxG*|O48Z~`FM}M_!KW9Y-NEV- zK6V$fZS?ye^Xf=z1LX`m>*N>mdH6vFy*5F}g$PIEh6+wciZ~t(GOf!Z%StP(FDHsb zdT`3hC6#=qUdqQz3X>Kg+r}5ZSxR`Gt%?2B_7LA_f?J8adY6S$^QIb;W}D+ka;Y;5qb8oJNv)VMeAUc3XES0eQqs-YY$MK% zw3e`LT=&TIqB4OQX5H+I)B-Aj;ifuqFSc<<=aTdPL)kZki4tr{w{6?DZQHhO8@FxS zefzd;+qP}n{O`h&XYg%KSuU17O(^8T*wAo8jPz*X#oebz+U7 z7V&?^4iJ5pGFKLcZn`=NQo&g(ljMWNd~57b=~!KjT*ra-gk)Tqc3_LKS*LfN#7hP} zCd%nZdH^~?(L0_Qjnh_VUsH=yGcbe+6QO#Q)D|&pBT%8*SldmHL(NNk$8vN+ITAsR zMx?Hsc)?J->Ae*wlV1yDn>&Y>Kr#G8(#C2-w{q^0K(DnUtr{>I&DE7gak)|S=cF8p z=t!y2wHbs_%^Kr8Jqn~GQX*1CNzP%)4)3rft8$hT_-k2RheD5{gA`nFQ;Q z))%oP7fxSw1Udv06fZhXMx&TYfjJfW)N+G!Znh>h7xvai(Bu3u&3RZb9ZZk;0{Q28 z5pz9A{t^BF27u5L8F?O>X?QG5VDwVk9@PeON8j#Ro}}9<-@f^rR7bS$=r~5v6A!*y z$(%1o)8#pYPwqJ9B1<2;Kkj+7FsW~~kl1>@t*xu72>v;FJt|vE^W%{>q)^Lxn0gVq+ zJ-NT%pPA*!x)S7f4qUB6V9$CpWNVeelN(tlU-QI@C>{eOW%d}MJw<+p6{rECsy-hs zFz|yJVr#pEozWD}0PcrA8#`FnO=h}`+ZeqS1>`~r7EWhCh38TAj5QPz;` zNZ;M^WV1+Ss?`=C>kTXBoX>)S4tG(9{Pk}Bx+8D81*4eHxXQ+hssNKX(tMqy0Y9{} zabA=^ynE?P-O}v7^A}~UCqp9k&Kc+nu(kH~ zS_0Wef6zk;sOx0-^DEXd%Uh8yMr}fzbbyLd*bpo0cryz$?}LSEVrOU-3iZ}6Q|^;3 z9wS8)7hrFXAg6A8HAM<6FxaEjiOOxV3KSCcgNXz8^{`kCa&lj@@Qt~BEjnYe5Gr?# z_yX1dSG=Q@tu-Tk5FO4!eH@uqQ(?;$bR?{NrPNO@i|Ii5`TNucW;>oQAMuN-mDnX8 z+GeW>lE*nrBUXho^cNFlUHa$j9ACPn@PYpLIQoY5&eg7su&V7y=q!0``Rbth zh6-!C=@j!M*a=vVZkU?dWPh|j!V6z$s&Cc()W=Kum3SnIM9gn2IBUS=A0!<{fy8p! zxW4#32eyWQ@1uVS>~aUoSL2PG=;|wB94J_g0+@Ar6#yx4do$C#i5Prh!aO zZ7REiSGD(+5G#ECK>ix%hbr1BRflreQ46RL1B~4D;FnN$eq7-RDYepO9Dt==*4<1m za-{cBW464jpwhm=-@@;|GVDFDd~;i)E}mlO2C^QQT@*oBWY2t;MCk|MwePsv8o>2I zcHtuV!R>m?cc)kiD)?+HyD<0!|5trvXZ&e*6c7L)?yo_Y>Oa&+{<|Ce4{@YQdBX0m z2bV`$zpoH|En@x{i~JfMr0q!s1%#Ff5iybqUkL~0?d3YM5sxt>--7-Z2lEHUHPa7` z>mM$<(W=CDZa+^nHE37Wn+N7l$|F7OwEfIfcd4TESk)AVv3xxZa_yW#xbuuCzYt37 zBqNud&Wl}~vp-Muf8s&zHa(9`MYCIUk08g(&P2#ZpCvuGa?bWrw3J11Pyx8)o0o0w zcRbMyC8V&0FVMB#h}V{Hw~jo?@FnjcQ#8#&ZYs&(cW!PH1VP)lhYUqU^3jri1?WbL z-qO6g2-R;oQ|~cRn&&3iLtc`Jb;YFqc?O|Y3x%4B8id{LX(OS|W$Z?a7XAsU-4rGR znZ>Y&lnA{RS8BzV_~|TPFE-3nhOBch>J+p7okb}4ET(5w20}?k7mC$Vf)(m?H?fj6 z=I`0HKaN%E@JVj0|1%#2I^R(8V^nHSc+wiI#NP}}4FN)~Qmv)=z-MRb03w&P0jJ5f zux+SN3a&e`wz*fErPwzc4c9A!gicz==?rOOYC}_Pfurq#!EA`}SQHZ^Kb}*NE6JJ? 
z+pAPRddfFhn*NZCFJRvnLPd5ECRzboveyS66S_b|8T|$SuNy;uN$C6RZ{?UF_|L0{ zlcT{2fS^Bx>Fhh9x0)Y897m zIT;QY9IKVVkPUY}`X_k)j983j#A$5qI?NEbLXfpo3w2aPfJe*&*=sz5}(`4o) zpHFLZ>9Fu?9e0l|K9zmX&}q0htu{NBj1!-KAaYIji0+F>MNont(AIcZDtOqfxZ#~{ zl01X*cF4pAg+wOuLr+5;(So;)?ANLkc7iSOhH;_;26JfS+5M)K9H$O~#kaM5&NK{@ z8#pjsX#CIqRDwegV7pv{<%nloqFdGpOb$rYp1s2RJ?G}d$_=yS9@Nr9Jsv_<>x%VU z_XbzUEH=xI{3f>k(azSg8AaB-J}Q`5_o@&1Zf`XSgITP;A0ozWf%|ag;>2eUw2$SV z+-3_DPg=W}TbvA-*+phBkgb}vaeDu_(@45oJLEUM*gXOAWqPlOZLl4HdrrpXs2uTg z;?lC@kFC#^BpPP$OW-%R{In=)ZQpL&1%KEX3!oJH9%QP>M$q~SYJXj|pkhfZ*onToD>4Jcd!f}_Gtj4>2O=J4=H{RA=F zBxg=FpHvLkoTv`>ti!TEMIj}l+|U1>JL8EyuJc6>3sgQ2mo98?OZ6aa)~d(zYfZUa zg$Pg7^SXl3qG95Pww8ecxEPxOXNt62E*vfesLlP{>@N+sj4(Ce_Onud8_WS)a^-Q)}gHa3_HxS{*MW0o`wd^+@}2WBJx7N||WqH_=U_4f?L- zfHkROFu=TrEEuQj->g>D3E-hdTnd!?*`4UIJ$4+w1Z40SyQKNd z3qNsF{Db=(&1#7LVIZIH|GwA1d)VsWlRaZ zccMEo?;F7aVhjFbZt30v5hcQw4bG-YNP6Uk(`aJoRY697CDeKth$XI?(xFetv}|ay z5lIe!V?|J1rBTmkb;dD|lDg@sNCaQ2|M|M}h5y&pV5D}`d4W;H%rTh4T|tz!Nv0a# z2=kqh(x#7oWW7ckUA4FK|KN@(J_PgSUYNohsah12`qt49D`{0EXlnUjam zwJCR)3z!dj7*Fa^ykv+8rG%ZWDz>Cn6QrPRmhXZQDs@6~SoDE_AhrUT70>%)rq9rr zYmds+c_S#)Zw>@jPY5B1#372Kd@5qRs@Yw&bkf2A?~2)pO9eNmM9qqoM6I&5RLmBz z-K24+DBChi6P6@%qn=nC3kvCGT?^HrnO4(3PD)PK-Bb3D?pBbwyAQ{J06Tt zC;m8gMS`sLAn-${`UVM@)!fH0DWWZ@+1lcSB;T)bH<{n2L*FEe`GNsAT4E)L zO2^XY_8wH70X0T{ne_q?HUROM0H)YgyoQh%29afGkhvYOsy~G;TCa`3MAHkVdgI9r z={nA6uK?>IaWxjIv{n&qB;bnOkJLGp=-@z3f4!RJhEYK3)(P^d4{K>{`P`~xpE%YO z)#R|7`w@7_>~DGSFc=uYG3}ADKKCtvc)((4Mmx!XN^L9@yaII0N;lt7^i3J{gQ-<%x`z6yRnhR1x0otrNeqU|fnhH-{dLR`V?LFuN z4D{TdLrJUy?!=sAW3=L2tcLBLy_@Ns`7b>Akm200IWd@K%p!%dM5RQ_4inPqXfRH< z0yCzoy&#-sG;167(wr!Vw}NAlCN*KG_k3R)UouuCSBVk;l!6w@GLa1}cig1*r_AW` z39z8`j4T;fk^H-=^myzl1$iLu+A{vZC}W*6v#%g2`ZAU#DQG2m-tku}ig0D;o<`CP4tK6>+NkknQ{7}t1J(LI~}+zD*{XCr4mLQKVJ7m9j)pn zV1nnfMZ0`8_wiT^!h`}>5SV4jJJNQnLR_gdEQrA#Gph3r6Ccp-NI-4gE!c} zm(&`~Mh*3}@kc7vCChHUaYRnFa)w5wAlUUA|nmMCX&-w655x_wAKW;+7HsHAifG4D?R$($!p?x%xw> z!+~O;kcfUu{37yROeyn_I_27VT3qljPK1AOVEqfgHL3M4!h)Ilu(bvG{BciT%Dmks zMoUh>W(CeL$&c`S^h^2mQGB4R!0}El)k=$G79E1ZI@Yz=zqkAXW6RA8)7CMZy2W#18n6}eP5sL!3`4^j!aK4U-F{V=d;u%N%Q5tA2uc7Tu)YruwHCAAq@=T_r zYMT_s04cOlAcbkLA@+z5Wmj;H$c56wrCRclAyxz|f(1U=`(6F}c;Ozi^OuXHlFzMa zO{DxAb0i_j^U`9KG?0xVu*8v9~FWdY;FW^3}MpG&@w!Vw&Ig{sD|7@8Va`VrY z19vWrNBs209=mGRuzh$^TecE>`Qc2#H+8}(AVPQpCU4#>3$mk~QWoUkzfTR=z=wYW zj|knLKbn78#na!>zyFj~I2$`#{!_~}B|%ocp8;cJ=Ay(S7@D~As&|MG&jp+)P~`XB za`ZDMiwmtadm*8ZJ2x1G<6tX`^uy<~|Cv;#5Tm9;Z~{vC>uJp#MRM0t`X^Cc@-uUj zE6q}8yoPvWI%8u~AuSelW4SPN+i`1`P`jm@Buzu1es^_r*#i@CMju3O;fW>-udMa}9L(l@MA-bkyP2uJTRpZ3Q7WNj;XV;( zbC)ANM}j#6eKgch5;{yp??PRxnuL1YzQyJisgNP&)u- zrki{>z`w%$Z=1loq`xAOI4l4F!+*&C{>fcC1CiOhpY^Hw<>(+88_sx1n4yH zv4gS55IpCPi8{RnjcRP(s^4T=>+VQttd>X<&6#RbJRQ`i*A)*~ZVEq(iISMS&%GnKo0@!=r5TxFWkgd!#HUcs$fVzAN+GD_EQ7PAS__YUExqyWeYzXlZE3u}+70Euozb2z8R9}-zd5nVJ1 z{c)(9O(Y(1;~+9KjU9l|M4wL6Rl^6yof9aL`P^R7c+{V3pXjE&D6UJZ6@_lGG|=sv(SD`uI$i7uyhoE+m@vw#y?v4g$q0y=PM~OOxbF27E6` zqi6PpW@LtU4yTf?*Nlj8Qr#3@UMxgmKY_H17?1p3B%pi^L?TooYuWAj`mIiWR*mf* zPv}j*g{)xLIg8uwtWr_jU*xElk?1gVrTQjSLBb*}JHk#k;h3&0qNoo+LMU|uK}DYo z>}jrrGuMx#cJMz=nEyS5K=yx}bX!wf3s*ZAQz!cW(SiT<+F*vTk#GP502o360HFJi zKW1!iYin=!PnY~3jlb?}HiYkM{XP;20#2uMo1Jqqfmx5Nx?@dHNky70uvJ9bH$@)C8+;qqX8n`&@!T_L?eAv@e) z+vi*KxT2_j=ssldf@FYZH_2d0CBBkB(q!un#~N|>ol!Cp#I)A3X4Mh>;|nYfFXAytCpTV;TmLnLC$Sn`O!5v zByZUdv1diifHZNs!Qy(!j$|lhe{jV$Gs}k#`T)*@4?$_R4P#EfeIzg zN%x2d6v?C{S-@3-i>aCfp1^m&K=?v4qN}aOK~@e>@2u9bnVuaLeWB|m7u!Vfxly5KI{8@lK! 
ziBQvn_$MZXBHyAk#DLwfp^1%P0AVH)qUZ=RCqw_$y-83LhvNqoL^kbHI$_rBkwsB@ zC6)_R#?E3iU5fvRtz?37^s@$EvocwcK%5$RIZNN1*q7){ zY`4w|SP6950WQ13V;#`S=kfwW->nKhNM_B&X2_{LX__+;TRaTP>0h-Zv`n5_zjQz` zp8rZqZ18gca^lXRNB5uA_HzGucIV3T(~z@=Ws#TKMsvgE=pwDuDouJYLPdEb-2np360fKB3{`4NV%1Wo9%bR7Hum^yVc_sjaBY=ZR)RVf8GfATWomZ6+X4m_r}7^LLm>*?RfK0`O~y zTI9941Gbad#M!1@&WmMvu*?31Fnu&V;QCP)x9$t_Gw%)9>n5FOJ0TWV-udnF$+&4F zzEK-idxlnJmYUvnHutqPfGfd32cO@SyQS8-Wgb$_`$FbliiMgIJPm*#slx{FhZV2h24h!|UZm z?pEq-9{baf=)syXFSMx#A(ZO@OI2=}!i{}m>qbVSw!JOpsO~im(LuZIv$&$VbJJ)g zzdYN3vbj=%>-S}`HPB=}8e`|x*8~&lhY|e5oDLtI< z3$^n^fpVekfDT$0bxMdqJ}U?m<@0hw!Le+;jrP%wwV-@e38Js`<}Ta$NA88yt6*1Z zdf&x4cbP+NHa!^mt%~xak6`CSU5_z@P0OKvbYE_v7HWzJua!2+B2;^(6%^&+_@bHGhiX{hBj4(D)9$rD;yJT$hnwthb=S+ku@uD< zc?fq9007Ou*!Tati2b8OI;8evy}^OXEFqtfYZK+zGuj8flW@HJ$>jq$p(^ z)@i_aOj1%#fatpG`7P{#-YY8OT&On9s+OK#G4}XP{f#tzoRme0x1$q$`U&%eI9M=iwWoKT=}fh0WJh3A<&}uwEedY-%bw925F5J{@n-r8MQ{Aaq8o; zb@A^D1pN@@!yK5M+-L9dWK|WS1>^J-7IA)Wh}=l(=0g^-j9ux*LV3X(PK!p_isi`n zvLVZ1l?Yer{OmLiQYzSH*Gnb+1JH!zG;SbxTY`>&1#*Xk9F&gj%`7A z!7$hl>-NZj>$AanQ!z1AIm!rGkz3?T?6_MR%H!|G}fkMgj=SF?9Zr-+l47H-V> zvIwYjbkGz<#ig;x$b35g-!nG1&91+2@7y7elC5k#HuSHqxtNlqP%S=J*)fnHBb8Ez z6p8?ec%(vt-6YD2M)|*4voy^|S+MsQwS-{PN@WbAXp~=-$h4=>-!RlkJp_OU%qO-- z5me-9W2aSdgUEtx*sI1}MB8J!Hg2_uPdQ^6&^9&^V0@6!X5WsHE_5rkBxRVj=3iSD z)QXf=vttaB=!m=sv`M5QMEv@h%E=vPVa1?hW>!^BPw+|v1^1eNU-%PTM2E$3B^bkmY23UVBl|N2AQkd(0BeuTc<6`Q|5XbU2i zuog;^bJ0*Z5JR6OrziuPI-5Ok_58c@wnLbC@M8GGUavQ7!}(>agFRp=+z?^Gyfcx;QAz5^7-S-htAGGFq1xp_%zsU`kK&N@2L6E~~cqyJs; zG1~{D$7kPG7G?R&Y6g|1lmX=FddC8UH_+kO*6iW{UH|5;^eI<}8pHd$@C^48whI%^ zI|F+k_@?(LeomxDUweX>t`!yY_i$tpXf{x84()fL6cQEq@ZWj6Ger9d1J+*nsPk=5 zl<-NAD2NfaLaW}3X^q3InS3(dvBV>qGOxer0-HcR?qGl#V)#@v~!);WH23yxWH z&kUwNh#>d}alBnw9f@C`u>T6kxy#x2H~#i;$NvIy#{U?Qn_1eJ8XFp0nEn$c@51FciYJN}$MGRx59mRikr+iyMqE3Ukzb+j+_5Mitq*S>J3xUoGF#92U^NC-fIUurJ-yr)#26gRn*U1ji$k1Q=VTg}0i<2TH9nawj}N9b@v?q_Z5p2au?;W@CtOaa8!j z0{=@&tvE$NRy8>P6bcV$^RIj+1tZYVNF~jaWos*$LXn%OCee*VLo7wD=Gj**fY)2ir>jTQYN`BZMwz4#Hp#ZYqVEsU_RE1BjtKYIdVR5R zro}Jxg2oh>qAc6arx~=sBSYnC4awv3N_3N4XgBbEF$&<&TSEkev_`AFp5`UoJ3sDJ zBbclM^|o0e{TCP5hUiI2yg#a4#h%J3>%T0}LFg;h(M0a)E60gfhaD>^qYYcnL+gtp z%q)q*OBKevWaAO%s#nKi-Rk)VZ3}?6>oDFV;8cE;YQ%-NX^qaVgZ3Gf?qG?!=*2*CN<|i>fm`N zczPRfTz_F9hC>p$3-A!cO~Rxf@D$b3!CDaB97<;w^1YRa%=7a*vRzGjHc&|`_i&Sq zhSPy~m2@fcz(=cm6S=rjxP=a$OT5cr!6X9>?8ru^AxzTv^+#jJjn};G5HnH1s?yqW zWr6f)2KLArH<5Fj4#p*?WM&EuD{-tcoD2A8!1b(qp<&!pja-ZHPb3*vtfgH#uI^1p zb?!jCAVCzrZ!v!-7sDefdMUMPDeY3bLe(dvqcOo{MOO2&vI+|`ML>l!vZDF2`e+oE z{%bZs`xMle7)OT_WZlIr4t)#_Q!anv_ptdXQ#e8#vJSiZ_>f+qvGltDJ>RF>_cFpw zo)eTZTa+(N6`@|`oD{Z=Xq%>)pdatYYiu|FM|6RuvX47+TaH2y6Myjc-TCg*-seb= zp%+ZZ!TWUPA~go=KKb<|>(ba6jv8hfY}N)n{4>hIJ^<(9uai-t?Estg^9}2xjB96* zD|57S&hLAvsGe(fIRW>;KWIz$)V3$g0s#%9SHfnPTH0RtPIoAl>WL#w9^uPrhcQ~0 z%N~~)YUV92_avHPs{K(GD-Ji>d&aiThrhPVUYb+omyy>a?aP4BF6y`(_&uOcFGW<3 zw2Tn;fhnD4VjKgP$UMWJC5_^24({Z7PpCa;kLPQrwk*m{G~sLFF8!hz!y{QEjEd#i}P1$A0c1msrf@M52}gKBjN<%-Qm&bIG`H#IQre<86^L*Y|nQU z!_0E&l%F&`5V*kOLRUw}CuHaMDY}`S@r&rDOa#lks9v?6z81@re`NK2KsgUSv-^6o z(Gr?VSl@+P#^kHF=0TNf#+C29u9e5-*8ZC-e1ONdtJ&k4BGt%&KtcDt# z+y#u|2Q`?=4VW?BMf6BL2&|@Q8)SI@I=}ql?BeNpUofNAp$(ErDNez0Aui^;E8K=H z`&!sW3F^U`uk@5&km0zY-{I*;b-PIROxh#F$xlgo*`K-8R?Blfm0ZU1@2ypU>ffJ z=0oZ+9iglrzZv5lEzVGZ8x?#U%a_Zwh7(^m_Lp%geY@fz_=);;A1g{gWE56@=z znJW!;(tSV`egD>J!w_B@IQrv;Ag@@3XU*&|_C!_$X|&l)wvOVEPVVuMvdG`E0YO2eXi{Is|ITQ|GrfYIIRl+68W{y0Z>f^FqbxSdr0I!p!oC zGnN#{t0oKg%3QRmqUuF!PM9&RAGgtGR_9nnMR#|>R${kQMtUxliy-_wdCg4HYr1%z z!$dJA^Txyu2tSY9G~Z4q#a0uZsxm|M)yG8OAymbiLCFt=RiVYF0(2vr@%m;l7{n*^ 
z0S^WyMYezQ!_e{qwO#SPz7Ln|&mPGAN%mLt{p9mHz3x6g$tg>m$O=2GBr2Priq}|D zGR5!c6OU`azTxuZ%iH&#H9pHmr~yr9KX&^>T4pY)(JVVCRyN&i!{d>xl#9qn#Uwht z@#deAL`rJWCYn!WqbLw|^*Di@QD>&<^?nk*FXk8lr3wnBR&(WOuXBej00tbE8nZFq zQwDH!sHum`guFf6G+S~=LC@jg%?X+%n?p$lQEsyZrNeP@A~9<+!fgfc}tK1_P&e@E7L($7do0lDTrl8fdHdFJWywDP^28GKf?Ti&bCaH_? z5Rmuo7p~PS0pi8oI-DFOkS%smUhUCf#o)Ty3DMvOeq61ubwNg~i27$W-3R1cR@k@79f}~lMA&$$IFPSD#X4IV+UQt&xTeNRA z!E4Pu#2KIEY3d;6v(#&`2e_$;e?(_Lo*m|+iY_cpZCLRm_6hu!C}pcq$>5dz9E=p! zqT(^@I{RQgxU1Y1+=J@csb9Md7r1mF0tfJ;abgU`r*}zDPYic#NyO3w*y5lNb=;Mv zpqfm@4Bf2dc4p!c5WobMPUrut&`s@SoDoPTbTyL$AOf`f_B}Q78T204x9B4WzmNy| zX-wrQ756=KmSzd9s&0r|T*N8|4TGSg)h{xA$YcA^5Fv(i_7Iy4tPWn=^*B|)lcG#X zX6`OO(!Of+o0bPxlh0Xu(c+iyvV8(U)a!`l;sOMvFe&Ro`x7&Y)@V2jvsk0dNTjzb z{sA`J$()Q-IjpIo>zvZO79B%^m|q9kN7A1ag;SMfSSP8mY?|tiyJ(agUV}QAD*Fcm z*i&7dy~7HO2&RrR;D$_3|78m>>xV5YV7O;Wr%K5fHUwY$=KP_^e!hOHhFt;3y@<~; z4cl0MeaTW)4)O3(MS$U;(;4dR8y72KMHSqYi39ju$5*VYHW~BltJir) zX+c?z->@NtZ51v_)v)nh21l%=G`hfi7*C!fPK~GU5FH}DwOoE3Y^s@W-it=Z3OB*H zMwK(B=htDpSnVYu=#g;X6d))0$pz{W^!mU-KS5!cMIQr0oaS@89d4I`#seok3TbM~ zm9QQDs2Ar^<_BdrjNcoXZLe%Rpa@reJw^RB6HzSZnC@ka$$*k^pkF?$I8S`U+_$3l z65iB0v6wfA%gWSJcW_vZDr_wLa9PA5$9)MrUfJjtigiGQ7zhVYfC!AiGDfJ`GR4|e zkp4)U+@N4qzJMHD@##?nMpmi8 z6J_eDzy=Y99Dxc>`5^0PP0L6x&Xi)NP{PxW^k{OgGmsTG({eg=m;)d|kzRAB zi?mG!y5~VVtMy*qzD+h)`;Qy(Bvo^$+ZNN(aiFjZ5%0LUzSpyx0h4TN+Z6^Y(N6rt)?B zzE^$USRm)GC|@iW8UT2*3Ee28A{)bMy3DRmJy!-01YK3ak(xTdqgPdqCIJAWgZ%s{RzD%)3gN3&)`0;P5-=jDDIY4C@m3%2J7;Vd zm!SAvCVza-EaP>qGk4VPAdev%|D)o4xN)qFXqLV5c?$ti?L;G56mvuZ7xxmT_UjAF$w0%}|c^I4jONmd39EUg+HC8hCKI_@8mJGUL#L3&sMCl|* z&B2C)Q^W3X8@F@!o4;#Tj6u0#L z36)C1pXiMiIAy~sDirJ@-!A--z`+U{h@TgtX!}>Mygypo%thsFkm5vRuS${%u+Qv3 zu72n18>Pkh!-Kj(^Yt!%*9|iEx?JjS_nLOO0iDX& zMBgrO^%+`QWnO$rTY~P4+Z8wKuB1A~sjE9pQ?tp}ora+dQIgx0%xf zNa9Oo!hYJhr}Yf;FSDkdMg4r2jX3bH<}`!+o$uWbTOXaFH?JSsKTg`bYjp43tFYoK z$AdeqP_($27ab?$bj^w%Gy@y9YFG z5BCVL9My95x2nT|Wle!{C-&cGa{JlIq}yXU`*^9O&vzR7zWCbWer96mFpaWc;Um68 zoy6qmO?(p#kZ^!nBazouLcPZrvznF=DxgsYr3way%CuKzBNX;mlLOBl(&XP$Fb`lS zUr);yvuG|05zP)eh~T<8=ubTqa6U;Gy`2gy9gnN$$WTyv0#ANWZUJ5=kR3qs;%bO9JwUC)itFu?SGe34}vI$D;U zeOM|UqiEjANFQpp#<<^m#bU2KQ~g0SkAXvlhF-; zml#-|6tW{hYrDI&n(l>AZuYLd9nO;@{-)`%`v|K8Kw#2|<2KK^oKs2l=<)j;qFnB| zUP;jj+mmWBS*5!arkQVrSP^OM5a5!i2)7Pqd<+-lP{+8sKR?}!!X++(yIMkTO2Q#v zraPE7ucZNlk2MDB0Of2f1J2_-ayQJjO`FId-&|kSyH?EvEuvcV=Icn!6j&NqCLPb3 z?ZQ|Zb^bbH+|S)Q$Sl1YmOcPZqg|v)baCNyw}6=ho7H7wEty|Symd=O3nI_H($p1_eBt>lr3 zNpauwN!ANRfs;o3;SMcXc=h;zTw%+zI=P}g=z=+E1I%b-d#$tq9l#M*`l9Fxh&hhQ-uk6i}fa$M%`qnvWF_jwiO(c?pX ziM#f`w!Q%H#JjpE{vGQT9H~+d2mzu+P(erZyk(uwibuZzdkFjzsleLHRAQ}6z-oI7|MOI z3k(QdC)8pa((GEzIJ-iKNWXqj*-nID3#G2$ai{J(nsQsDq5i}kfFw4KNo9}rMQ-!V zbh004Vu~W6GPtec^7)wMJ-c%cN2>P2?19=yEXIgWZk%r+bRkV^ ziMK4N-mx!yzaAaLZe78D)YSTY-M2%`u@n6grDkC+!r?)U=MzR89dJ_yVtvNrG7HST8A+4X+fUE8uO8w{!C;QJIg3__Ef{Mvb=k_{E#sRl5xRFg;W>Fm~->v}`=+pP zA5W?vVq*~BwZwVLEsT}$>D#inQu4s*cYAJ5>o5ZWl65r2PA68r%{4rOEO~3GLaa*@ znU>*B><#?c*k$Za!(h}~3@Y6yJvhPVjHQXbZpVy{?seblau5B0rneM^r@q?V0Op#0 zN(^rmeOiOf%mt)!Q%C>BJEmJjPf*Zqqey3lcNL)%zP%IpyKCm*#B#<_=<))iLk+oz zc&m)|I#MxX{a7%_Ht>4G~V-DQe!3BedruJG|`H($!ho>BuPdIuRw?b{4DH4lOmRuwA^ckst7RzW2HNxiQ3ef%P0g#_}n_ope!AeZ*-IdK;Ju|r# z>zQ6xo!)p_r7LZuk}C*&0E29yQ6aun1;%-sVioN2fki(%tjWmy>{J5AhC*GWKE7JS zt=XbB-0AlhB4`O4Gf_6B`nODM0|Z;y&!20g=25$89KRgL&B}4;Z_n=F#oDi#Bfp(X z=zm?>%792hBxnGDkiYXVEdQ~<{$ERLZ|CAz0I0HmLP>Q zjSpw;mtBCDMMa&d6V-!SY-N|XygoL`ma_8H_c(NY0N1u?YDTTUQ5csgYqc@y{Wjh` zXURO*jq#LlY*i?|L_LwxH85DmPq#Suq$p06m0?w%caf!6e>XoRtGa5lEMC)KmP=jp zVG%+ZbE8^rf71Ni^=;AFeWl8B-*l~d0JU^M*|NG%7QNwFqx({&vU55f_n_BI8-@8o 
zJ*R)Tiqd7>MyU!2{4VJ{m~VZqH(a00u%*%^5(&`83)bMdz;0t}Zxb82ma37F@z>!5 zuDU+Js~b08I&*ygT`Ac?__d|eK<%DWlQju_JM=VKXjgk*hkJcqPARJQ5nntXRnq~~ zlQd!NNs)&Smuf!)f7CFr%&m7AcLmsKFpmYUVMZI9;j``K%NG>Q_7)GeEY9HK_HD)L zqz5de}W}G_xjG?NIrY} z^j*$F*=nz39GVoZ>F)glKM&$yP-n$hJBV96Y@uDHvOoMQfEY29h!>&mV~x zHs3%{Pn|^kh||P_8n12wUr>!sC9zh4d25pWk@r_y9Nb_+sUa_BBZFul`pzOMbyWW{ zY+&N>+4_`fF?c0;J?B;0IpZ0;?gp?UEF-h7db@;DwxA9vIyZBf%H0=o0hBp-BaC<_ z?Gcd>ucZ_bOrm@exu-6;<16J4KF*+wA$4YCvcXXC{jWav)%6Rt&v(jYh9=Qi*&WL( zFK(RY{Rszm^RP!6PmSa21>U>yX5 zn1|J`emaoHQ~DUH{!jpQI0@{D!W1)EU2CcCln`!i1t-(*T?3#hwsqh4NgZx$J*RZ! z68t(r93RD4XqcBb`Z2ywoK2@0ofWO0kM-ok1T@B1;;IWm_3No1ka}LTJ-FQG;P60a zR`n*kwxl6^7cyP>1d3O6OWdS#k?f`>Hq|QfVL~d>5#aq7#gG!M=oCskSJ2$P!&-Zx z7^rE26IvKR92BiUq^oUz!Q8(I*qTm;;)@HdwcxmW76lvG`WKZ7U?V7hmR5q*xxEVs zv~`N9pn}Whn#vo_aO+|ftpEONq94HlC481qK9zG1k9@;U!@A6eZ7J5He(XC6l^1YT z%Eq<|eUD<(mh?8X%P3BD3j;jpK~4!BO<3Ifp$(;Me|(5Dk{uK5O>F0m>rzOt z!@5NHl*>O_U@to+$^v{tEuPi&9_psWgUK`o_wRS2>Ngv2zOgk#SDNXIO-H$#B33X;hYnZN+WOUZtvDYz!`)xH2@n&O9DuF#Iv z%gDbAH=UKZi~l4`H2Uji=jr)wCoY!7cd6S*puSHy!M8wYry;)SGCR3DWQli>F&!#) zn0?;(f8*8@qw z+rj%vHMrMB$hZ(fT&K3OmC6<%Y9S>uWIp#OmRX$cb$F?(c4F2mye-!U>h)k|!xS#R z7`2tv=7m$f3?zK7KdP{QT!Bv>sFTn-&^{81yXZYdQ}oZ1W7%|q5h+x{xn1JGg&!G0 z5%nyz5g9bljchR52KeE@<*4PMOTn|JiQNE2ia1PGVF>Qh*B9>S9F6(da>~)(VB#3! zWMpd}g$I!xTvvFGeryM^F`DmS@Eec56*0gGR~(2%biQPiVzQVhz5s$!mEUDV!EKCo zdqp>!jFCs(#Ka1nmnrts?GKMq5~I%F3W%~Rk85=V3fbg@$ensB&5+qmip8&1P?Ig| z{vA^gYSWYs)YphIy-i|_7a2lQ+%z{(HFvA%3_qYs%!aDw95zT8E;qD%`fHSZR5)gw zM;>rqb8v#2Ej?{2AC`gTR-Am5YJvhsgyZIxDvyCRb8!|wMyCqZ@93FTM z=7HVu+G-q7a=j2ri(p8iGbX=Rf|S_dH--W^O4dDr7u0_orKQ{MHI<+l)U(K?7BiVDR}W5EU$6p2 ze6X~vg*F?-)KF_5wn9rPMb&tRGKfx*k=h+41;`B>di#9hIqMXV)> zgl5e3;=J8qj@H655vEhg(^4v1`Skmd%tkT22O(?`EKDjTDa$sRH8(#90tj;2pbNBr zDPa+G!mT>W0IU^Bh<0Hfn4nz6AYI7)xQ^HIC2gfiI-)&9%JF7&>`XxbG3{6aYiGN< z`go|LvD!XhX01~)Y$P?Q0QFV{RC+)D|50|1L7s$LnlIb7ZQHhO+qP}nc9(6VyKK9< zjDN99+cUE}dv_)#?%hwBkssd3h|I`5&pFTYds&)q7Umm!QoK17Bc-RW9ivP`=p1^< zUhYR`_pMUKO(#7#&6H{IGU<1PKHexvAWQn6+BU)5jf5)78P1ea-%3Jf9%1Xr@r=&? z4uI3=f%6TCx=$zz4LWt94_@ro7T0&V;;r~4<=$_=6v|1!&t$(oYmB)9=+B+}Nc!I! zWchgo`vRIVv~2fK7cUpbUTCEb8fetlqwQZo(gf&+`kPx<4E6j}{pQ4?+=Q4r2)nQC zUnkTy4)sn~y2@qWj!&0qT)T~X8xKxnE}7MTaursWgdOkFTUT2-!l32f*Ec|GUh0M2 zwSHm_aiGoBxSi88IC_6)4|`xtQW!#Nlibs5*^Q0tAoS$$?jqjKGAbzi079Ab5=fSv z4zv?&y7TM69#t0&5@JLq75f{u=2E5I{oVMx%Ix}$X250A&An)&Fk&!)2so_SXP=irh0Ms z`v`BOAoXduyx9xiZUpZtPIu($&NvY|EMxq%G2&8xPYMg`wT)K1^?e-d#(7M4)(8xA^59n|G#w1f4J8FQOBt2J8m+e`Op36 zguOtc_`SNuLcyGaK_*2Bv6Mui-C&P`I8lw)1FHUh+X7%8R;fk=#Fo3x-E;T%-#AN$ z6kvE0*5}5NcV9Qz3qRD3%E6c_$%e@&iG@XJu3~za(m7G}M2OwU(QL*RX z|0(UBEsu~0hXMJ@rp6k%t06wo!7d4g$~(JjkY^lX7Rwzw)Khvsm0NJ?YxfKxNB5wl zhL8K{O@~c?sBHuUPr`x?s^Prn`#i`np(NKcc)<6EqX}}YQ9z&>SW2BFaoXg4u<~kX zlnHNm1@U}KBJB|$%rRz_s6K&&^nL$&&*2F+L3eeE%DP^_vEPBVzNP8eDnL;D@;Z~L zG5Ec_Qq7%MCx6f#t+F=))+V$wPo4dHj9r?j5r~g-7VHcz)-wh6m+8yY{)s26<=hk@fI(dxtsbp*>pW!UP|1OoSNN$239g7;`Shr{iD*yGt5sf_FNMEKPM;R|pTVd99)}?N z=X%c0+}QSi3Pbc+nTIt)0RbWYm4^Rs~sgEBM|&B zfxV25TVeCu?NAC|vG&=^hPQyspR4kh6BSCcwKAD(NC6k+*X<50kbyb1^Ce_55JqfY zKu^&?bP5rJD7C?avPytoK;O><2Mgc5tn<${J;I$UP1Kx*sF`rgX>ycX^l43HqJV;L z7Z2{2hEvDB!iE6P60vx364(=P>ujo*Q4|SirsR^3=kiIlo1NFOc+@iK;!%B^a1*f9 zo89jhu#Xd!bZ-)e=ax`g0Q(f1&c6Z4|c5;zT&=;SkWmW<#B zui=H6B4fJGbkEX{aIAydEU^(%T@yHGcb22iF+wTgh~a|`F#!FjSg(_yT?S>sAO+_l zq1+;8N+TVO1TE~! z>?$owgN({xx)`>XlKHwEc@}2h3z89o6zO-I*);FNi2}k2>-{2Ce(`>MSSSfE!|^Y7 zW3glT!HY6lXY%ZoSt&AC9E}Ao2^md1g|S#vQhYnTdRpf3KgaZR6r&{##L|}RtuPH4 zMws}+0a&N{wQFO$>JFzeA08-Z$HSoVaZ_R^F9eN2m4#LC2Y2DmcvhlzWVzPTlUN#Q ziOCnqW{p!qp*w8#i_j?{DvUBEk}mPHqw&a+=zXSqgSES2%pkut!A!yisN_JgjVx&! 
zwvd=v?p6k=fSL^hyB-2^2m0~~d%T`ixaM%`^nnktK^0Fk9T`qBg`lPpgfc}$gUBJx zz_`}0omeh5PBwgRKdyFsZv_EMh4}GaSY}qdB-#~SPU}2IWo!1&Ox|9TCeN&_ci8@1 zB4Hx@km4BR1Xwm>yXPv0IQd^j@$n+obvQ*LP}1;yG=jorIc*Q`w|b30EkH(Lq@vWU zNQZAwNwLpSXhts1iiryVd7N6cH)nZWn5cv^Y#Ex4n7Ltq)9b+77H1hRPjQ^<8ycW2_~+NlR>kz3hw((OJG~U-oaq^QfZ)%#yOCu0Lm=Ojd2D zA9k;jeqVG2O{;E%Rz9(qRltQBnNcx#)>-F3n-RMdT_y(wkG%0yJ*S)d=%NlWIoOok z!G9-m?r8JqJ&Q`PD@8x(t?8{R)1++B?{-An66PYHrErc)%)60Xn&I%dDXR z^Ylq`Ip8SIX54vAa{2z(Zo+PmT@wj+7;_G^z-7Fky?>7bzv^qr6 zdqOyxvyFO|2HlxN`sICK)YL&0pFe0^EIX8>8ZY~qv3N?%*~hT9+XIXk5us11W2Rz?k8^_KE_%W~Y=54eWhD*q#=67-k|N}} zh6!@}$iD1!r7tZk`std;QWfL`)dxfyWQw|UAZjoYfGNr;)#CemX^9>B8cuku$;jQF z7FJYqjWY-``)pRg^^FXmh8Ok&K?Dr^%jd}Z>=k%}m7jf_v7lH*VCarOA|vqlnSCFv zQ^-F|Vwh~?81{}GN=P8GlBNRE z`6pHNfTgxI4#5IF(IqE(f_MHshvr8W=wZ4sPzh5#{?=b^<8^LjpPvvUwAwSO+M0(? zW{_~sw{1cg-#p9ZPnI7{wU!R@pMe6ytZpLEtz7;+!f(!)bCr_yu=`}&W zhh$z9HH3!l#|t$>9m*ZLZQ1CUphU-C!<*^u2jQ~QENN_Seg>e^Wj78IW0D}m)TEm} zupyeeL&e+F0v{u)S(DR;=M&Q~0eb-DaOwY;$bm6y zoZJP@$_mVDo2`bO`CYUl>BYQ1e;gG$=u1yW4kxeVF{<@CBy$o#^!@s1OlQ#hF(fne z^YXI6qc9`neRqgnV5;Yb)A}dLGRMnmtm1oxSQbY%(eTP4HGO(ABneF9kH!o;uphd) ziydjd@MV%3X-MmT@B%ew309+mzH@ zy-|Wx9_@qY@lp1Ng1q4Ym8*Fa{AD&+ao^Uvz2u+&#ez;;^XE$bZ^7=538X^Tqu3E|!X;iedtFHZ6slw5GE{wedIoX{bJW-Tq43pWzKS(uv|U7wtku&!p}NKK zbjymdp1Nhd`{ooTY3Ui$45n%3%KbGVN&O62+M`f`m_BdyXy!pwpJV!J+ZwvM00|j}@2^kXtaUVmd(%h6n z2a51o6fr}bEh7%kI%i5#7hBX|=bUd1gRP;l7Q%!jCg-bNZ0B5br4%BnF^116k@%N# z>ZOxWw#oZNfj}5;fKa3SNiC?X2#z?T%2jZ6n;_e9(5!@pQD_~(OMu%+ytY3Cy?~y% ztZ^)UPetPXrS$w++PD|j92&PVT`Kpk8r2^m`eLFf%We`iszGM&5N1hUr3#A+BY-kg zoP@9PGfZodR{6EAhIW2oTPf3TsX*e z&byM&Ege}yWGd@^WmsM~=!8~2TA{(0k!718WGJYu@$x{Am8@5hLOil>j6Z>s)CMmX zg}ZA#MYcSby=734EOsxR`hs7chjBvQr3Z-=Ngk|P0veDx>0J)F)eJvmd*_z2XIE~O z(y!=thnGB^vNl5!_9l0o&Ng=k-YwNVsB_;h6Q_vn#}4CrJg-&~nJGi`C0+dUc_J6o z0wi0@osef1yT6=rFc`PFxaC5$k&Z3IW6QY1ARgSWi?C_%r*EOu0Afs1*T9Dp0-u~- z5w{svam!^^-!wZNdRZt!Ard(nh;_D&iNXS_Ftp&7ncl8iJ*qv$+5*ZO@@V%nbb^{m z>=ss=yEMo=&Lg*nByKmiXuqI`6s~&cT05H#Tp_%#UI75``0?>Zu8f1R!VX@8X=tkH z9sj#p&3a)zEmLC~d~(|dk9jdG#X{6EVS!2bWix&s5=Gs7_klESlJI_`7-pA;Qd8qy z@d}`uA+2C(OWW;hU@ePO6t`ZF6orH6)Q*qO;a)mcG&y+-&$(&n{xkjF$+86YvVqa} zdKuw!Yjpkxr5Ra|z`Yx3{2mojMtdV2tgRJ#(y>ULQE7oQSzJ3mCa7B~l4g5VcRU-&@=Q=*J2j za^b+MmeK*mEzu!Jl2h1|sW|iKnGs&FQKxdthQAtQP6Lz{0xy{anF(4UzPic~`Q8_# zdDA)#yb>cXyXWGT^N(??2Iw9ohiPc@C!7RtKUVYiDnhNi?3^=&P5X=r41-hJm|9Aq z(7+6#Tx!rk*>fI*l(0{r7GkI8FsnCVReE(BnOPV8njx0hHFjND zgQ~Z8O{nR$Ryt+p1L;bMFm4YOUJyZK+_y&py^{I7=u3cv?7Z_6)q`E-)dtZIqAhIm zk*^(9v*ES_!vD9U0;nD<3Pk_{O6UKVoC5!@QU89N|Lyn>67`p^?_ZoB%D3(xv_@7B zX-$voM`upuJnS)LJw@}WBgxxM14vQXv|p&=HDu}Q@}E17n6N=W(END3o|)NND1-j{ zk^Vricj9SqM{iVq_>xl}H@Vh4ITHuF*SV*#J*FAr(#+0oav@x^x0PL`m!erG$~js5 za#Pt~*%ZaKmKfV%Jx5%fQ~G`0$Z(Y67;yQmOVo@vmc^+qJZ(ChmBegztKg$UZK_Uu z1E-}ol z>_)QTtoWQpZ&a=nP=Q5tTXbVW~a`DLs}*-0F%K9}&EXE(l3LrcoMJ6oNsRH}T`s zsN-XlIL!5B5?ad)ea1+WbV;)WWo>_)Hz0Nu}(Ye*%r76;sBI_k&Qk|b*aiF zjW@8bNA%`LF_uCVka@x2a{G97*T?e(+cqh?rWEuOgkU*tKPZqBRGl z8FwWD5w+F*R3SVlkgpm)rruZ>SCy>_X0xQ%#k$tW$d!bo*^JkrO^3U{6L3YhP-U!K zK#<86`IqVbBX|Y0t2McOz?KQ*JAM0Y%UWatpY5+YxVWr&@O~CoR{@xqedGfxm8RsF8AK_c4#?~=7IT^Y?rOZL)V&! zkzV?acTi-p=@QUs^aynAbmGBx+TUG>;hgTa%{@Q?g3ngKhLzxrcW5qxYC>r75`ly7 z50bm4%TnYkMdg%)6tJkZxsHtr7?5uQ&5MuL4jui(P{(xF#DVC;13~PyT(;u-}E00nt|Z%;?l{N1zE%u zODyI#Q{7{aAO~O(uvSZj3e;0EuOxAJbb5ISbtpEt8XJ#PO~(!q+vrr>_!HdBwzXX% zB{N22tR>ZHFz2DMzKh56x&!R2hFk}2n1!M4so zEjF((M`zFgdQM&5tMe!aHu#NjtylHzH`)difNISGvNhQF2c(?gMwsclcq=XDu@PLN zF|5yfju_EXV#>Om8^2HXfKdN2%_~}ck!>R3XX+dxVq`Q|$|Bw5SyM!?c?UaP{K(8m zo93}rNWeZ)?ZVQ&LY^=~2e9v?7H`t@I{u(sJFmjAurfR>^|-N;sd927IYYt-TB}o;Amlv)I(I0TscBUD|d! 
zGuFm*o+RA??QpJAff6iRXMbWwF*p`Nh*OyGJ{e3RlH}9G?Hrf=YTGGB zwY_Hic`wv$C)X!aqHeg`0%aXbMA**>YAOU)>2iCP9-R2Bi{7!M5GN7z{f!;Yl6_!a z+c@-(Uf<=*yu63mg@yf^z-S%z2x}FwDLlkd;)JB=l}4hj=cG-QxFT@6-T@g#=2Ba2##f&-6($)$brPUkrPoISsdgqpMA6jdcnMK>~ zTQft%5`?z{H&s4)%##BC-(3o(E1l;)UM+7zFMdpzP9*cN>^w#l?39$zyU!?70{MnL zrIE-68I9|(5~Xtb7+z&|!sRAWs^C!MEGcwKD$d9Y#zUk}wtPK1sb6#X1@1nMIiToQ z7{%LKQJJ+}*ZT}2U8K>_Tl%Khr9KPO=Nd@by}a7kl942T4$P^f#hDR4tIa|H4zOBT z1~Mec_;48zsG_GX5@KE23~JYRU@B6Ri#{YSTG&JL$l$7vX$E97 zBz!>Mu_k#kd0UMd*^~{F_?As!PRE8^4H3Kb7KzyguD=yb-RoVK@j(Gh8jySNd`E?| ze7T0P_QPOys}3C4Q?Bu0*}9YJs%0s8wORJHM3?WgWT~-erK({C)AhaLEt&_`dsQHd zmvBNXi%IDeSIJWVi)fM0cB~Wd$jlPkdJh%$4d}AG12VK)8~+!zT#;l9VL1Lj57jrt zwdkcxc~h5CI_mU{la6CoQ09{uG`ZxmXKfx*?58Ogt5bVi_lP%d6KuIu&#(D2x4f?- zFna78EV3Eex%M1*>nfd{X zf?aNziqf3Nqfs34%Qapr6b2z{c5HMT=nJ6%y4$|QP;e9Z6@y@WjZs81S6DY;XBJ+P1XJ&way%@?|m(aza$T!QqCdF0Ndh0lhmiuI^oeJphquYA@w zSwnfdh^gMoN@3$Q<3Czp32_^SCE4k-JUwc37$i_M`DRq}+g=+G;jXO4f{4}-$Jk33 zSAoCGtZ+V#_yYU|4ffgz&x|i0*fA(%dn-1y?C8{GOTC@6v~udsv?Zn{V-9?zO|>%I zMyG8i;9LT*(Q6J6BY1+Kaq|E(sY?x^iBZuQ{uIP1Zf8MBp$QFiS#E06wY9~luG(Nv?%#SQi}$d}~SBb3I@LbeVul&?)K+4GHqK6b@o>fO(Tlx@-#tAwd} zsdGQFrZ|wdZ{0!-NfKn>%KNO$@C~6OPFmMQ%7H*v8a}}vIF8@fp1FCee%L3)nP@4( zSCS%b9klp((@i?4dVfV*ptplXG&eKK#P(%g%Jvvv*`gL}PAQ zSvcqYx;ZWZw6C@8E-c(y;iomIc`Ht@=?1t2@-kg}>>22PkfbNoDmHP{TPDLxPJ-(P zXU;#*uRizsI8A2 z@r%S8RV0%Mc#kh93%Ix_lMQ|g5?=n2R!{-U<2xzx`m&4#s?PPkg`~62!b+1ck`1=9fPhzqy z=+cHqD}nsvttAF2bLTuA1-e1byUeUDa2*aD_Rq2-V;QSu1NJ(d@cTtG^vaFx=v1HT zDim_@R^trXE%rHYcXYhp<=n35_X4T=73RR zTu5cS1RI5ILZDU_&(jtvWN@rSFI_CEF9{U6f)D+UwYT#3gl|))uhgqbIMuyy(q9)( ze{q@Jb;@au4ZnND4jZB_tgAYIJZ5rTyntj{bWNXxTzt@bN2}YL(HL)SR#)NK+%`>L zL{5jS2FX2LgT4J1Sq?#QP+}W!GT|IBaC3Yg>xK`Wug4;5W>{T+vVd*TchO2!TkA?g5JFy zSFe*ITuJSDdZY0!K764->4cd{Am2ciPvM?He-}Yq1Ogx=4OZE*px@$j!Stv10j4rh zNf}bMp_DU)0UzO7g(+HOt@4fdRQ=zf24@drS)csFZ;Iq%D2xCKr|I+0ofZ~tyB4u^ zF5=nQ(K`n&w7vdl8@|F9x*3)V3)WOJC>^pn|J38~+c;lD_ovXmF?WnbQ~cEH?&6YM zOW{H)z;8y4j4Kq=0Ahj0K&Vq)02``@7-;?Gs0U!{^s8EZ@#Mf@tn%GK2_n&6Z@EFtqZQP98Z-0Pmh)We#N z*z#c~NFE=o@W%pH&-Io33t%t{{qM8(j@Y^mBAq_zOm9P7(gE)#8{_ElLuH#**L&5; zU6xI|L0ek^k@6P~KVUnEOi!8{5raDzBm85Oc{+@vz8dMgxEs1I>W(H`jeu5#-R1|q zd9tW9k!QzwuHc(054A}TZES^5K(0B!Fmwyh}AO4*3;~o1bGn< z-;2$Y<9Qhecy>6duZ5^?)$a>OgA8Y`B)*sqp=Zczq%A1*^xInwY|-x}RbV%Fte>fgzQE)q z32AvHr|s"kV20~+tlRLovD#@>@kyORReF{N>2!b$3VA(f&K(-xwrii2{H4QDPp zn>8m8RhB0jQ3tGGwGpyG-r4E+7ne`{KBL&VC?I@(i7&Pn!=0gC3Quxn7KJMXy0u54 zxn7p7=5n#qeAI#zSrW+Px=)`rz8YH*+Ke%QNvPqWb{=O^cmW0#sYGzUC2W=td5vHS z;5%1?^hDT|-_mCYI`}gK<$x8{Vg z^iCWrKxbIsKrRw?r;4iEoQ0&-%Iz0R3OcMoSUDe4U76L|j7pE;C;)OYk1twwqX)!% z3gL1hcthE)>ILrjQeR=+V3nLEL{hjDcVY8Zid>&Htrd8n<~g(YNpKK(4f}h*b{M~~ zx3zMFc!aG;B-9>iN0(br@dQb1Q<2~opyw~78)!@2xH z3zxL{$g4wasb9INP=-M?B~`+n#0@7D9v4PyRYGf}a;xD^rTZ(ZN4MXU)hSV^V(Vx8 zU_}qx`ao&VVRwmtzwUQ`b4wkJz=`!WqzdrbLho#eg#+4o5?Nwc=wfG=lrSD=ERANr zJKV;eXtM0yQSsCKSw(K)f6yWB_fyRBu+0~3mV|3L5au6{uO z^RB|2;g$uw6_@tzfj~$ogF|WEUCi>xOuHn;R?tST(xvBQqA_(oqW|&$Y^=g1dPWN{UR6=`lGII8rFn9yRJ2za=s%y;W`F;IGBUt=O;DO4{G%8Si|Dp7 z@mw<+Wgn-or6?j5^G70z{(jxcv^I{jVb2JkbWY{v(abe6Q}$0eBAKF%FVtq)?w@4| zFH-x!Lz~%j{9m4%8Eu92e12Q{_%podZGH1d$H7kb8#fkTk|QC6j`8m-@{L|C>B_gh zzWA6=-9ZVAyi^KZ^_NQsugCwTTnHRgN&2_E+{|C%^*`g1|L^WMqr0oQ%RkI0{gn-E z|L%4a*dRD2j8#{W#O9A3#_u&2r@~7e&51<}CEjnQV@ECZ@EhaLj%PFeT6WKyaK??o zKp@Uk{oWJ&%?05)re%bJvh^90qAkwbMY(o@h^1&jjZG$H z>{I>@#Q#nkNz0Pvpu0x=tZY-aq^~<^AB1uF+VV<`f_b#mMjdk$rxRSaw|<-U7ad zKon?`E$Zb~5$<@yL>uUfjqZGfeyU{E7U`NwLik)qXFen^%LCBSp-vYcfzxq(TW(~7 z60J4{bo}`BhlLS_UMoRE==YlT-uWKtFMw}G>CZXD!Cwb<^T$#apK#=k_>U-6u>v&3 zGcNh!wUQnKQcE1Q4ZaB35dgXI>Ul`_3Cn%9ExG5g0}-6>lAQy5zUw;2f2;YD 
zN`#|mSukgy5NhSIlMAN$y)mW%kT&Os<|F7TE+oHMhe3*1U0KiFK(^@O?f#es6{ip(8R>Us zaTsTRmM>?1+4KdG~Je<-^K%DD<0!hM7H&|2&wY-Z8GjPfYN5 zJ;)7J+`DReIwQP%TG(e7LX_}ByurF^ezI5^m>pWx&Zum0o*hn_QKUT!n3d)*{yoV~ zP1BTU$n$rMB$f$j>VYw#dMbIdu^fr&AW|`G&=k%4>!Uv;M_k*9;Ef=6e1Th(OW=tV z$hZGKq|XnU&q@gnq6;WE(FfZuF~2<1iJAFp!^n4Sicw|ni6ub;f=r#WK}6G@-4l}c zeXA6h(BGSdO9|3B)iMAI=)|AP`ig&DZjC3#hMMhw_%2cm%r0=rjxW^81w)bgj@_VX zm7I$O)2?tLRl0Vom%fZI5>m`q@@TAcf>QN`50$InRh9RXqflEn&W9r4=gd9tb|AVy zUbK&$h6Bqt@}WPRKKrnGn9``#^b(r17m~aQpH}-zOWB@X9OHHxxkJFcf_UVMzce^; zmU#fpA@a+|Hhe{tYtE)&HRSY?NN*Qn>GR8X86=A2zDHp6Uegg}38qL;a{Qm?RAQV>mKO8gVj4aj!OJxV%S`l!(`rioVlvqSoQ9A7abE@)dXS|48DxX_Ce7v8vc#i3I ztJIrj10)CA@_;UNr93vo?@hup)=EORZB@;NZU{y?vH6(KLU0hX)Lx ztRI)K7L-%xT|Cy?DKG_bWyB*880?VDNC3L@IRl2k)IRLk7y4&d7~~u90=>XG=)My$j(&!`O`2VaO#@ z3`L(+j0nWSyT1SYYqrSq)HW|(63$Ha!sCi82i8Lz4^fa|r==fAo7pe_6IacSOwfj~ zE66KqO2z_>#)V7CIjc`OKiA_19dPkG6lMQhwaH~GZD3#UgH$~P)wPwY(q1o!w`u`S zAhT4jGBGNrL%v0fG?6gWIQ0iFhiBcrd8cr#p8y1rIFPaoh}o?CPkw_B;zPnkqL=(< zKYLAPHUDG|HErP%fx3lC8(WA78ov%HQ8Hg?%4kIjhGrBUDJMjySTC`?=qL~qf%hQ8 znrXv9nHNcAs=n1oeQ$NnN}PU|*Y;u>W{7?mF0rW6FwgA z{}$%Nw};nyN09utMn*ggOc*P3DQJq@*em2kgGgFQ?y zvz4zso`?cMD#eZQYoSn0lgr4mQ|poRFrm`iTuw;?dVkh!FM9%kp%O;@3>rW&ib67m z;P|2n?m@M3C!96Z#LfKkw@$N>k%FQD*1Q>0z>zdq=jT!jtIzvIp|8=PMkE6Yjn#&o zLp;jyA?oGJ|Cb;V7 z%-m_yfc;-a$OfFJ6Qn5=1GIhngX>a&C=l*8MF=S&iiS+XRH1v1c?N7WYLnoSUw4{c z`eRFcJM#Lz&m#v+KgQUL`eIj$LA)~2nL8(A#OIejuzYH%1=Frq(>BLaludN@dCXyf zd0PD^O{QAm2G#fH&7W_zVb4J9QrqUMbPu((rDn8W2EJd{68ApeCk8&A1oytLCvFFH zp9B6J6jBjc?9S8QXku5j`!UkEDvzZKELX?z%@hv_0Ls1fzZvr?y2E%m_A@p+!2#^D59xd4x}c;IQc$I zd<|!AZ{j!#E!rqv%HTwQW=4kkD*3Bt0OhaJ8v1 zI%Y|{8C#%msnvo-c5$fCe?O7qF&59qSsZ6twagyzS)fh^_fwJvm2c4$j4XTTNZ9Ib zIiMGkbs<`{`1EZ`%ybKt~_B&*yIXVA&SbH%BJ%S*fa zi@KUqqQ9xad!t3VOj=P%@?$d@+qI;k1X;bJ!7LaJC(68z7fgQZsKAI!_bFfaFn%Ch}d-_O%+Vef|zWyfjzH`{9S=n26)I z8aC7ADaWMqHS0G87VdQ*v5PjD*V$s-PnGV50+uXBjSRYjPvKT6uGKl(HI=eL@5dLv zlzM3{6d}2uYK}-RedCT^FfCQ=H8EncSTa^NZ>j_Ol&AlPY**7}@toBSN;A>}G|O$XwlA#LplfN*EV_074sQLjY6H%1{Cuf9z}~`|f!?hOwzU@vwtM3Koca(%_Q*j-z)lSpaoyzA^b*&sdbR#t z*b?6;cZ34eW&=p|I)tae*7|x7h?pq^YOMfL{%CZ~Z4d&UjqL~i+ooM~1`D=HnU<*7 zJ~1?-!M3o-{t7bC=D>^30{OBYV(yEyk<1bC){~apo^Rj02S2#Uw*a=(Uh@*Mepv=u z3p?`6JRYp8>?c$IL--1EbIb~MMVmI;+~sj^daH~5{X{rnLhg@^jX9AwPjz24JC4iH z41SBxX~ZAsx+Wg<3iS@u24Lm|g7iv@)XWb`jK6>LM~lT+7qs;3#@Ky@IC9XqM{(=( ztNJV%!A#`AM4HqnIc}u5EB@d3%*T?fRGgZT3H@T4~H59Lgv_K;jbrJlXai z3pqIc`_bL*ACcladj5ZHgNy%e8)T7|)^5MbZuK-B>*`KmxL?j-4Y2mj^xd`s0;3+A`n9b`*-i(dMThbJum!%3DNqQBY&kQU&By{9V8=0uvUa4a zoafWAy=Mvvk@)_JDSRB>HMfz=6!kE1Z=V7VTVS`>Z_l=KPj*<`GzLHNXq}w1iP2OT zxxL(c9NqCVo?iNj+m*B{?fc*-DC-d0tDplxTGuXq{oe9sw-HWLTqqT5+iAnu{W8}c zzv3cMLYzEolKR!TJljIn9)L=AqE*nRCzoMd_#ra&4%fe5_)l85C&DCF0b<%2m9XDp zWVvPV!~+$)1IbMbQ-VVlyIY>gTpm-x?L#?g7p#M>0bV&az+op0{(K5d-E1}?(IRtz zdq8e(Hc`QH_TapS+P7yeprGJa#l)ED<@>^(1h0oGZ&0>v6LU%~o2(0?>0i0YqY0mtAq=T`brzU# zaBAN!zYl(#IIWq4qu$oE3=b;_;1g-J=(-neDR{!+iL&pm?~q7pL+URL>TCkVX>N~E zxMaegx^4IJ`q+kv>o>De*pE#tOeUM_e9wbYw{iVpIjo%5R^r@I&|q^%_ps;5h>Diq zd_K3mxnFK?yIowoTzNb#OMnr?CC|^2nX8B6%JXd3*8`v&#tO^6-OK#u`=gEXV!+Ms zpV|0%a>lk0$8(SuM#9fW=L=Vdcs^Gd_LPtVQ`D6$u`Q+4tvYAl9v*=frV$OS1(?$| zngC$RO{_B@Fu%Ki&39izQon%Ue4FFI+n2iWIwoCO^&Z4w9E5kf$%NCZ;J@SLQWp7# zRD`MZH^5Ne(VGjIKwgw!++rjVz(F#0pTe2-W}m>n-Hz-O68fe24thDIOAd^Mf@CSm zyn{)xS4%`qdR-I<^{``ptbfh9AQ*rK`;?qEZ;dfGR8j+^Y#KwY$2^6x^eUpVw(SZ? 
zLj_Ht6tqyzWB%%lOO3;+;XYp8x&l?4Y}J28qV#k?0S%S96>!?%Q^33N=|~`|YP8y* zi_KgDTxj)MK6jl+iG!8wE~H71rg&3r&5sT2=-bO=UN~{cv$Ju(aw?$QVXDG80+G_ zy)gB`X8bn1+*bd&=4I#Ps8!GLsv5hZ$F9+UP~{oUFLWL{%&;5_qQWLQ>`CcbQ#ISdo!0TDQ~4CL4op#$kbgyFvG4OAi7Qp4Z1u+F0hisHh=aza+G z;C<5!J7{mAVOm%haMPuW)Za8NHJ_L}NFC?3Iz7eujh!Y&C}Yi@FVHWQXu(FVe~B57 z8c@eaH9(tkOlY?SfakVi{>|d75%XFtvcw`sCq#Y{tZ#e6F>QICA)TY%SyStR)NO2< zHwHP#S(cu9h5sNsYa8B;?v`|pP~BeV5LP=@bA z&0{S(2Pak$ahL$eI=g`PAsT7Ij`RAkQAb*Xn$~YQm>q&vgFzmBhqZEY@2l;PF}C_l z-ruhtUfrH|gR!JyOm2GHR-`i7Y?~u;dWh?l{S+!~S#=oSpj{zZG@vr^>zUls{K^fl zbmnpzniLKL2iY5#JeHRi$=z+ldK%6Pin*6-MwMKuo8+!&1njP@=}7sjEvTrZ-?Xhi zZPDSd9^#3CIU&)BrtsKhW%JE5g51`yaT()9aeU&?F%cP>vTo=bM){DSOk8KY5Xt7-YH7dX3N@4+qUhUwr$(CZSJ&f+dFOB zwryi4JFBX@`s=^O=~H)c72_T6MXVJw=6Yrwu~)<&4F!$$;(`R#o4MJXJdNXK$sB;| z&yq$v7xHv*+0Yf7{l2iqmL~X9g6Fti$7_@3a6>Q;pZIUE7gr8J@2kxDG|LEr0d1ue z*l_X=Z9W>jlwq1n;0}>$WYL{%eQPv7JKKE-{b?dYV$p?!GLqI>iE;%%fBq0+$Q-8E z&?Zz3H~D5r%1?-eONF~5ESSukU2899aw3qODQ&^8k}Hc8yl%}ul_>flzC=T|T_C5s zCK1xmJw>L@vamTuHO4MQ>6R3!g#8#{jh*2s8;YtJ>5>T4f#_5^K1tX0W2r0U33Dr< zU0Y`0W@g~uj?=MUJWb3B{8vs%1oN@4AiI&f;l$43%CJHqgrTGfMw7aAkWS{04Vqy9LOoU?_z zWi(xBU}(`#0tIkJSotmsl}1LBV^$pEKEXARnWi#T2Xy({R7r*05FtyTONotA2S^XeJO$TTB*-j)=U=Uhh>sDc4>yPJ(gN8CmQhbh1Rvk) zpOsF$cuh3`kD}M7efT?od~D-+o!RYjAlu^Az`_#{0s@EPzNXMlS2z=^$i?@DIAD&g z(oC~UrwJ`~qJaY>y_>YaJb>aiA7JH_sW60pj*0m-g`!d5;WENAsbyhR0F{iKreuWB zEg?4zMp&}C4ltFnFE(AqXW1!2ij0>B3UZI5i)>;2R;Wuf87@%OXxt7Ag7z?6ir5zV z8ycb7s1Bl9`@nKWQ*{2MLs1->DRLNc3y~A5=w^5w|0MydS$c!IVW1W}f9ZnMfxAI*$Q9 zdQW}nD(!(NuX2HkEpWa&f4VuE05~w*SvLyL7DwC(LV#1Fqxy?gF-0> zD?ToqQS{tn07zqxkoO9{)zWdYr=TMKB^@tCT#YimdbBnm@65r2lD9z#o zD-i6UvH~I*ttq@-zB@7G5m@f}A=fc<3<;+AW;y9AQu*Sx8cDXm3xr{fp}G{NULxN) zbceH2&Ua*Z&s^7|Jf-|Vv1p^sKk%>?W`O=zF+?!x_d~@6fp_OdP$;nfXzkGlT#&42QDc1GKQu{+tV)X9Hoet&oG~nZtUb#eaqChz)G+pl^5Ts%RhsAGjoxkqRx($ zI_%w10ewZf2a!mimo?C;~d zQ58@I%=5STEfx=jwDlos#;>FCoLTZKg;ch>42?{~FWz0f{2YJ-CM9+L91Ef`iYdGu zriaG%yz{)tTEkqEwR<_fr|5`&>$<)EkZ5&a2CiF- z1PYyJ!#MXv6*Z_{16Ng{QT@|>cyGMF>z=6!XM|oLcMO@kkQ48iwN}_%uTo@hCYmQY z^#-5Y-$OP;fXzEj=|RkBH=6R`QZQ~sGV|#g1P?w%OGq6{38-&qb|Y_n<4|hI!w+D3 zR&-6b74~Iq2Iq&6RlM(`glW^X(r7L#=^xgqdP$YPqP(lx6@Q3vs$z5ZT?dTe;N?sC7nne~8;(*P&yF415MTx-|g|PBAOhJ+A5?$Y2Awm46M02|xn%6b`om4A-9q7#hceXtk zr!tK^ja#?-2U_s~DduKrKU>*KJ_n6IZs+eFi?xafSXCD%ar(%F@|Z}b6(zaMD^d4} zb8)$kZoE_)c{1f0TaE%I2Z372RJrCT6S*!f)4ICSXSM_hDt&Du1bSm}LE;G;A7cEJ{Xf=|k;US-)cwniL5tp@8@ycE6)`T^y?bs6im(}i44r+oFz3Gi59%?8!Y<_?JODt)&ur_z%dr11mFhpO9C{Xx z4tA!d|FmBGWEpL=-ZeZ@>$~j7{rSi^uEFJc&dw}z%V{`TboI1Jgb5`h%w!Qce+@fm z{^2j_0FdwpCSCek9#11d>et_(=k0`eZ zh4V!x8Q2QL;e{^sj=BJJ#`QWzOQ_Bf@4cnX<}$|&GPau77w04v`l3PvE9F;9 zG>r!ujnsxX-e;s%z)=YPCUSK1oNR?8AB4Ltxn+7W+w)ZSJx%QVAkO^*DC5|Kdyquo z$FG+g9s>03*v6MS0e`#QrL64iY2L4?_lw8-^GCzut`2uT6T3C~g^9IC=`NRT>D908 z;Hx&9`yej7`5Nx&HXnDku!UV+X~}+bxOYhr4!*ycZayCOw&yZtE2*Umm3_T!Z=o&U z&>JSBch1*I5_nr=+{O+Wo9(tuj{}eM+UMB-&E$MC;M4d&laGZ~*N!gHO4wdLcyL*0 zP<1ZkmteCEi5F*=TRUA7XC09fFF}1@+x8G|^1^_T34xH=;BAxeOBb2U(U=yIBgo%m z$Dspz*Y4cLM$b7jpNw?s*Yqj9omyby7{!E38^~(eUx+EUABF@z#apax*S&W*xa%V1`}>G(>@0 zp#1@IB~M0IzC2AM#`EF{gJ>sJGwj)wxP@4~ywR8)i>!j7LYtR-zS+y02RpP&07Q zzvDLbhOeoy0ak=@9(taUlDBCINfidX!^rjOtX%(sE`OSrUE==4!I#ZHk&Bi%FF#uKq@&oeam}>D52AE+jsH>Le~ecre}iU zih?*QivKFcw=93bZR}nTEA(m_@21d{*6^v4#XwCq_pL`rkkTe0R1G|}YcRyDF{`Tz z-R_Wuryd=Tvai`9kbhV#KzS)#BR|*EU!LF#XKXk@gc_MYUbGt^&hF6PnfDmTo&n6z zwvFl5=e_)1o8`Fhtvg*pPVyTdqD`g4ot`m1I1WJqAHAOf;JEt>$7B~P6a?LFKmqA> zGB!9xta%@WU5gfz*l@yvp4J0;aF`$UOtS8MdjN*ZD($;8k0+oHOyX{i%{0)>T{9nn z1*ApXRpJ8L1}Oe&Kht(hxlJ_3vAdjUa{j%!^hUa){vZH8^SJ6MDNvLGc*cQxg$Romgh1Z5 
z43qQ=Rl9_JcYv|NfSGk3Xj|K&kkn3iLf7Nl*GT=sy)|+6nE}{&1_Td;P=nhVV>bCM#%<@eBAvb|tg|}+D6Fy*Ljc*Fb@IHH2D!9QP?FGid#TNJis-?CYgpj6b|a{ z9Q&>R*arkgp_`=Sd3h?XQIci$IHJ8mN+rI=3lPyUDq=&{_KEj17_r6X)uqWzv|9#J zi-#lP)X*V+$(VPcB52ZIfdI2vSRXgd#6vQIlvOMMjfK3scArJrq`5yZI%E^b5Xj^Y z+Czyp=gj2Ib%JktL2-YD2m=7X>LP8P9 zZ1Ct2KB4LK1FYyiQT!q=ifqH9xhg>2MSS5GOXZ@F-cR)TaqxD_9XRx$YQ{EEYl+=_ z>;3tiBW@mip=)O8G;6wQO`kxx$*)aNOI+DSmUKbTGyPjuqye;Kw+Y})xrx*mmRkzi zw|82q?hp1OtAjM)pH`vzI)?Fjdw5tj0!25H2C;F&6yv_&@IQ&?ICuO@FpwpcvRPFc zVKFP^e8XFqM0HAQQ}9W#@?G^bjAjiAy4LvEq_GolzY{ttmobmvG3|p`l}bpiz|P8w zNYyuZV)3(&&kqV~hGQ>O;lieTW+~d*R8pO38?4KhN4F+rk&Ky^yZEUb>b#w!Y=jZF z^Y$l#K}}Nn=nR3*YZYQTNn^ z0+0ou&1=w)IzTnng5&WjC=u=)CoG|5+^AvX*yWKU5;jw8^@Q$WStZUzTUYkmOCYtx zCJWfrX~SszG7e415*;Ub8N|XqFghg>i{n-&*J1kzkbie7sF@qCruzwLu!8- z1wm3_&Y(-l*%JpDZ_P|`mYGP?jsRJ&xv6ZaQhK9LZLQ1o`9jtudYngnJ>Hc9PfQZY z^eymMN12cOZj=8_pclY);2BWUq*Q52-Asw=TuK-V-E-y~QV2G(T0G ztO2_chNxqYJ-D*Z&iki1YpY^_z=(yF%2&Uo^LFu91h7LC(uPDe2kK_{V#a`N*P%q4 zJgyJ`gk3}bpa?AMInk00&ICOXcCW6*o4+r6i1lFM0m?`=1#E36sZW3fmKM`>XysAy zOSgieU{8$siVdh<>aq|T>W7CtIYc#5>XbPt$Efs^nm);+-_x^<%dckiwfbIz+d)ec z?XhZ6Db)g{Ox~Ep1I^5DpRo4i0OZ{i6foe%-R9wiL)m2Vq})D1iOM+OeCAUWOdB3V z-YFEE8ZRU`NlNa)dhl5!_%O#quGy$fYQ-D8+^pZFE-qZ}fA8NOy*(kPq2}8stQgx) zvJ8AzEbXv`)5$Ez_R<(TOlK@m{h!zR)v=mr*q2+bPLM4))(|?36l^e^;(LWc)^;(c zjguLB7SkU!osOh+^X7}nT&P#CLyA}@V#g2ogb@4?9My~R7b8uQ{5d%UPbe2Y%$Rc{ zahwCvS+N4VmzaT@rOXPQxg*F3QR=~Tg-pJYl@ok%M}?7r9mfC?MkhzBMCd^u+By9y zsAV@WlXdL_d3WbUx7MNve85pt+M>_vn|c?^e0QtcY<33#*_n=o9|wu#16>6{Z(5pz zUv&74-iQ#-2g;1b5@ptpcNc8Bn$B+`fu4F30T;Ozpl=mm!?Kv}LcA#&9X7dm}o1+*N z`_ZxJ5Y9r!$BM$8K}kG1&jud2L7!l8)o^Sx&inj=B=kF*$w=X*>xnSw$#}9Qw!TDM zUoOUuCV}Z&4VKq6nFdXHuKuJ)fnV_(aJT^^&Q&_xT6#QjV9H>CbgW!H1(z?|9t2aq z8RDE;B5JM4`KIy$EL<`}=jlxm{UJiH5=_{0;A;h@SbY^S>LVzMVBJGg+a~u=t*oh3 z3vDmGbcz0Xd_>FvuS~^sB~C%aTU_i-83f07R(F-y=mJOA4sPXCPSv@FtKJOhNg2lX z4^!fM{McPB#?=qpaeb1kX%U;zN+=&tkl7!WCX};2O5CW4atS} z_&(lM`D#-aCG1MfaKELZyJInKKEdB5q*Lfqk<&Zl0I+_*JInz-Xktf62I2fB6jiq5z7|(y`eEGo6TBE1LXbr=5((qj#-Rp zC=<;Fo!#E=&Nz*`!Mbg>QzF7PWJX;EhXG|C?$F zuaLBoNwLGY)|OL0uuCneh%@EhK7o5 z(l_C(wQqTnPLpa9;LNNVk&x=kx{B}kX;){{TsbqPn}MStuA9TRxBEvq{8~b( zx20bowOhzmYMVqN$dEN2^WCz}*Q4r9_M4J25_K-79YpIDQ3@cFmiet1^3mN~9qupC zMPUt*7m;gc9iq$XCQ(cGS4PFmmt`&0;-fX86#=IYmD*(kP6#*iWujgg#F`nKt^2g8 zP+(cQUq|PMGfE1#yglFriq>y{fOY%;s4ynqzg`IPd%{4l0bRm*LDTuj0t$PZkdPfN{8U7N zv?8>$qlFnuGsNt5`TIAeyrC(-6AuejnemV0>h@Z@K?G z=~5@|_@AcU6ak!Wz+CLh-n$H;PSoI5Mn>u&r}FBi_OVffnF2qC2@HmS?u((zx>YLc z_->kauRPhhI`Hz?=@?k+kcg<~A+vz?10xegC~gSe;(Grkc2c<@6N74r#NJFTE|ZTgHu$R`lf9nJZOWIRO716MtCnR*rC1ZB;kN4Mj8cuX z!Q~22X)ee~s)FXJT8}KV4GsG-Gf^MFWjWY^pXjchG#=xfgQ4OgBsI;QhX*pRG9mgGi!5uuOD(ZFLQ@edt<9y>>ezJ zd{^~5qKS8>;;39ZaNYQqty4!JrX_;I37Ir$AX@fVjrNmH*zKEtAay)%855}Nc1j%p^ zLmnVHI)`;espN6R^%^@m0$1sLnZ^9|N`vsmKyUupzwfvmsss%ThVkh6I-|XSm;dau z)5#PU<5ei|i(sh{U2H;@8xVfLbhvHRx+mRv4ed~oYTec07>Pw8%=CLyvVN(n4Wk1; zdzcIKSv`I}$G zi!0N3-qYFe$<||#c%uoCM7!sk>b?ut#)6g)g?$=S|{p`u;`S5mln*3t< zIPvv&PNVd|yJ5kvyMg#C3%_lN8*ZfM1|PTkUKU<20EpXXyNsQkhmBhH=K?Mkn7WWS z@RR*8rAt8kNq6hPZpM}cS~r=TA8+>{p0n-8_JvBH0(^Hc^kMn~((XwK!C z)U}=HCU5ZJve4h!Y35Q=d-r+K_8*m4vDGeD8L$8@=RH-OizN{A$$cyn`GZ@S#giE#>Y}*O(2v~6G{Op{{ z?ysnh)`g-9A|(~iuQIS_2y6`W?y>!KxZCt{{AhoPn~n2voE>{-2GVH7FY1tV@4qe0 z8Hyxl%i)v8B^}MN3WgS885*_iYuuaZzP;X$*PA6Y2p095R6+t^9#3LTi3sps+R#27 z=2t}2&iWfcbgqHF4$v>9RV+owmtVqs6}WQSgV zCYc0m0Q&)8#0Ntz;@xC^63p~_iHE%N34pLFp_U}hI68B8)WpeER>AoD~JH2-lk*8 z3xN~>^b;s2Snthg7@l~!w=MJw1#5-QYbHGsE}L+>X|9M|A6YrA*57w28L2?8s=xxE zl4;jIVqUfa%rK*Bp#k6nywR`CEHST=h8B}LS92KRk(@;#NOJw4ono+UVAtAPwR8cH 
z@d_Y?0^8ZD`*KzCpiogZlB~)PNSjHase?tbD~37gm^7)^%gP=Vk@DS}gUE;`RNV3z z7>4oR6YJdqzwx88Gz-NFP+L#1RMdW{q1xD#uR#I>b`*!^Qza=5am%$i0Z0t>-7h23 zBD})YWx6x?GZt&Y{N45c$pTzT!)draEN)8C$V@%yxn9@me};Q;V~h3d_s%>PNbz^c ztLY)3q*4sbKAgPR5g{M2u9XTb18s%rE#u zUtV%)rLrJ9k_3Eaky((yjh*4iuBh3rDD&&vxZ+e2SEsy)x)CM*26rG%jwW68*}3)_ zo7q7%QubHgSx|7tN@9ARk2a|PQots%ePLKD#C|R;O88?!jGjEN^oLnA4rNb^%p~98 z3t;JSZ~x+dJl{Xej19lFN|EyC5opQtLjEEk*tS2$5Ert&dq1pyPCHgh^01;ZJxvSW ztw>=%CkCdgBbQ540Oy<9PMrIqMw}UMuHhv?Kw37jigJgeEMx|4z~|#V{0QOs_9fuP zsR_sv)B;$|0?@ceOMJL{wf~?#3(CIN4Pqo)@N-8nnPu*>kQ$IB`+g021Cv-jfz_MsLD)?F2%gKmB6i7=% zY0EkvB(=AIssgAT_JIv4QkYQ4`&2h^C$t-TrHIABx-xnM<}*3En!*!z7Bp+krM_ww>R)01&{T^$p;t&Y40-vl=&V?Oa;)?u_jDxt z7UlFUI)wSwG~3#S0o8M3BEeJv(yC^wHyV3U;R|f9!JNZSfb?N`6gz$6c;qE_5;~zv z|*ksFpANz&{jBYJ}}kHcHWO zQJA)T_*pBS@|g81$Q=lZWch}yN=wb~yn;L-@vtw7fy^&R+sBRmZt=qq#mF5|Op%=E zS1Wqx>QFC*Diasz5V$T!m$+hyoN=zB@mI|wP2aZ)ADd^cS(EZ{>!6iewe4w|)+sQ- z`6{?zgAYeD#SX7p8P6_(=GmKbM9*?m75k{kqLLeDbhFpw>5xMkfVc9cUw$8TaeVBI z=AThrIpljofaRcNP^BG4{B&vm{&$WQTJ1bV3gZ>XSIXA72g*J0Sy%1Wtl#KlRx9hne!=v zdV%eD9-z$(in)!^!I8CZyKXQtQ5`{s(lTWCe0{(4z2&6(Z zIhjA!52{D8syn`&x33PMvzhzx3@ z(N!&UTvHv=KIy%upAg($#XNn>%=y zpdi2~M{%1wOaxrIB`j-e3hDqq&+sTRsI@J5VP{s$JubM^sd9-_AY@z_OMW!si7RB3 z)Kq=~xYBm`2RJfo@#S3cPH!JG2vH@9Np_UY=gs}iZ1_A-^7P6~9Ch!czC|96v;)7i zdVcap#BZc5Fnp4<73l5-@XW>$V}tL~EGBwn{Cs@6M&628`jE_iV}?Ky-jw4b9_*Dm zlnX11tjo%g$9B6cX+c!43$f5j>ShynV}+Q>o&tJUkAIW`Cl~{Ju3%+EwOg8F_90+~ zRx;_fNH~18wT21V4gi9=(2K;0jagY`=cP;pRXKR;mEsE_r)d+Di>_w3QR;6y0Fd%} zJ3+0%13mSM3dI*wZNbp8wakbY^@UK(S@ZAo-@5QSgC@HOg|adjjAmBY#G*N70UL5$ z(|*8Hn%HOsRoVh+!o*~MI#%GIvTDjZ)Yfl8SG(OWgFgXCOBz?b#-HF_0%JJZB;HoW zCPD4hr8~jfrgiq*9SK;ScB9u{qk@8f9H-sn)-VNvP_gJqqEE1rJt?IQOP%XB2}8o4 zLs&AnAv01*SSyDuboPz*uzp#Ke+2?MaKEAs`%EL1cTd?C-Z{!>nzX(}rdkQo{&;>%e2i_9_dd;yOq^+%B#vlHxhE8@pZ z&rfKPUIH8kKCP9}z-x&sRLZ^DgGJ8?%vV%Zh6!WyHhQxifp>JMRa>aYaUIDgUl@S& zqH)EKM#05R*wiMXa_ujb+mbP+{Xu;>mhqxAifhW|q*hA=FYHbftP><4H}1|&v6&iDDq zfJ#_dzrzEf0vd70FCjckm{UkqI*!fz{cgPM)KY0D=O&qjs(7}$Zq{|869{smK{mn* zja^$fWW*ZOJG-_U)J|N%JkwnF3{GuX8f?qhdd<;={ds9|nE>iX>5@#13bW1bM)mGg50{K;bwu zbCm?K>d=0F`XH`W-_YXgY8W?ZoH<+j3Z40|ZvD=mfq&DR=B>}Q(W>x9qfoUV<;cc( zH`Rjns;Xq1X%4i)KNSBczr6&nl^~YvM{z*p#M<&8Mg0S_QZaiYLit6?KZS%Djr_~d zAtMSC#~Tk)1Gvq6Y4;C8ubM({9C`e)LjS<)ww1NNSb&$w+-QRYVB<;1uNBe_lbvR~ zP7#&y_{S6sE6-X+5(6ze3wv)~j7R-I)<6p}uoc6>Bb%bY3)xs>-Dp8pc5dG9Aipbk zIe594zoCgiyc-6&?*j*4dFn3PPZ%FIZ6S2Dq4UWtRJ%9;jzb)FVWo~|Jm4%-n!hg4 zNMWw4^Pv3*R-DfFR%Omy7#N9sFsB?(ewO)56@TY1DcF8F` zk)G64h@rp^IY@f=wC`WpwCFUW z?>FU!`z<(l(Tc)@Z;~;@AlbFGM$s6EFbP2%9NP`@Hd!O6E-Vb%HSo4tNq^_8Uuop) zBT~i(&BT$aiIcS`<2!>dr3EqA%Qs&9%DCeHX^h-`aDVZ$hlV|2)RRitxOTACQC%59 ziO9sj5&=F-BZK+v0!afe5u;gr0KsfC-R`|YB_LH3+7sI8Qoz9&@iwJ_~!?=Rb-~#yvSsa&H8^I6M zj{u0m>u@_~C_E1mEx^EToSnZBeq#64t5MKqQ)V>KeLbs7HJ#0mJmHa-9TstN==E3N zxfvL7jH#90jbC=^;$)5Yy4RpBe6Jf%INp&+6I~}k3r46%pxc|OSzrz9LH?Rrj7=P1 z*PXJ%a^_xO@gFMmtFyMvzc4-&-5(|~9|?e(PS#3xnrzzr!9=XBV>#(ZWdtfI-0PH` z>YxIRHF+AetY@J344=jjzLLfu&OWl3rgP#v>hKH$1?RdnUv*P@p$ijx+q*(IY^|F= zjYnIeN}yiB&$d?q%6xWUd0Gi62c;xJ*tAqHRlc_ii49BQgH?;WB8ih*WYj}^xGUVl z=8X}JE*k6kP?cP=HnoM(!A6)5z>9ACepQS7_7w)nN; zHpVk4SO(jz_Si*7tEFZykmeql#qkKDX0f^3GkOL7D^saq#e>J-6zjAHPOE^|fz$3# z4{{YY9pS7zO)|gnFTu(wyQGlWT-#V4d=p8eZFB%>?ZO7qwsxGlnW11?GsL(SdGlQhVtbKaQi) z&=#j&c=K+?iw&Gl;KJbo6Az3IEPJj*r)7W%Wdr6WK{lzp_yIEv2U!)(PhN2j{#89L z1hzHYlXa~JOSLCq^Bf=dS34htt1f$&x_w(Z})(aw&4r znor26AYa5D!zi(&EB?z2xS8{Dh7qmLwf|@wn-w*6$804poek5Itu?ai)q2Q&S1S$H z&N2_g6M}aNzeFraqchxg?RG|qLB8XYbLEvr$uv(8N;BgGq(U(jknDvC;~}sRo%J-5 z*Lno?!|z#CV3@P6VKV#gU_Nq4gq=a!tGbwt#w~_CwoWwOeJ#Z(@qHi*5^9-Z}-W3%m;wS3y1k3HvWV 
z`u{YM{xoX3d}_?@#LqEuF>hj=&-(ax!14%47-YJmZRQ6`zUVv*k z7!`G=ugc$#`)y#yfi(jQ5(}=IMmZZZxAqfjsX)}N=ny1&mYHj<>B)oDaa&vitVcmz zTur`I)jL~u70^{GRjH`$Q;OoO2*9HwG$jo5i2x>wN)(5@5er-4Se$Ag)bbT8d-#V3 zg?J$QD|}VUva~l@vT&rNx?(=)j~>NT7W%94F8u)ipU8~=*(ebIyV7o9W@}*muf}Ly zZu2aDfdK&MqWmw$X#Z=c|8gigS(yE!KbTV2`soi)e4BlT42OjsK|&fE7ck7gG#3o^ z*>RB=&k#h3rKqdwt5n=b!!mDoyr!=umnHWTn&FQCCpu1i|4zrZ959PV#3dAx$VSfW zY{}CZrd-qNYHUPP*P{jGk`!Fn4)qh|XKRP^-Ej9jHPk#1P;-t2fx`AIRb;&bPN`=d zXJ^!KP;=UhvO{;4DH)3+`)#?@PoihN%@0oL;8#7Q;t7KXKUtkX52qYMo-@lGhvUIYLOngO6TB@SFk@+K5JvAOB_u{JER1nP5YfZ=(MS}tGABb37P~(XwQtS_ zu4l-X!;sD##6^u=_lcgs1`+S=$k)he<(iwzQ zNv$2_CFJ5FEuEn>xARU#3b^m>dwiS)XoTW(O)lmsm(KBT23}mw6uP6igAxJs(ILVM zVmkcWdg#}?Kz!d?>$aSo4nYY%LG_e4f|Oq=0;ab!nJ?KjJL990ytGgz87qZE71UNh zE#TClAxO98W7!nMj25YiOJXVr|LXSTzs;>$ot6;=LWn*FFS_hg|&8sfh zi|%!qxJkfjeNiRD~ADohy*n;2n;`hOOH$7w8ldNxYi)T>091kgiNEso_JJ~b~`@w9& zq2LN&+%yN3ifY#&we~5>@w9afEcQ@5>h?jfWbfkWasxS;iQk8U3cXz((^*)`FmjM&ePjP#gy7ocoLM3gTKL& z;Djb&M`FWT=)u`>75l;d3Rz7;-OjmPIhh-d42uU zPb*WUCNP>^K*%sTpU|ig6M8pu0W5WQR!_|c%?y&$Qh{dMgMvk?PFeAISk8wK;1+@= z&C3Hz2Qhny_!OqNxDjuGGW_7(@fh8{oP8-H<*28~86Q&ue_q4i>tu5OqVj-E_7ma-;p3BU}yAYKT&!s$Uq66|Sm zG&o8i@t^mBHVY#teLr0`e*aiNwErThAG4 zStR8{m|Ka>A)di_WFTWM=#w92%={E!wH5s+YyBFS&flQFJ!0=$D+TYduqQr$HaPIi zXV{MOBe6zCe0`Env;9nb98pNd*kEP=LNj(xHH z>v~4|Z|a$|fs>VoDFykigQLums@l=e*0ksOLt8a21f2W&LoWf; zYgzfw4d=21a@mcPa_Gmdj;6%EKd)AMJ|7lNGi}c9Ufq&+nppfkz}8RTrOy%C{GpcUPa53lWEqRm77<#xfYCAv>rX+fw_UIDME} zdK&ce`!oOn^u_VXi5fntaypM^!}E9=r>(yRjSMpY*fmjpYd?Ex?XXG1+6L<8L?bnvPeYyu4 zrXyPm#9^n`6sfFYn5cp7sG_X11}v_-g#d}h%0%!ILa?b4 z4G+u|>?3ARvt<&i9X(S1*4VJ>h_8siAG&kS(B^i>jN)MBOl6)R1xF$2>^Tzy=PDF; zow<z`1MvxexJpU=x?jKq?X6|6=gcD5N)a7M12@mewC(o39;Vn__g40v->#Y zk-2Dw$ARf?yt_W=y%-VdM9Kah`^Gp~55>QFvfl;Pxq}ev{k^@Wl8x4L4pG&95dr;s zy;^b*P`*?rg4wSCMUv23Mjwx4qCz)1-%1?6M_@TFmWp_fJ8tmBNrd!F?g2wlmp_>3 z1k+UyXP^w8uY!U+3@!=YBB*4K@$KUAa*A)soo>Q`)be)=1Vn^WpazpWDe~yV+!6?u zV*{Jc-+B}&+%(`kytzKW#tnw|&wE?%NK*!2N+o`DJan+GfrSPpP?A85_+yBujy&$5 ztr|kLJw}&aFa&~+oJd$c3Y4M&$ciZg5ZxGgv)`ho=f2H2KeK(6QIE9}r0mfI^3Km9 z`5|X@uB#`)^+%M5M6^5>W0vQ5<=3OXIavuBk*8fbOkW898e(JUaQ)?rPqL~$ zAd>*bg-oJ2J^}KJZkKRng>T6zU$IH~WE_BM*eow)9c!i2bi7iks3iONDS&<JDhn4x~Y?Vearp18=UbijmMn4Haoc^o&nDD~c0PXSH zQbZ5Lg_}nKSlT?#K1d=f{r0v`tMdr!&S?G@38Yxi-)W;uP64r73A5b}qiy7xZs{wq zXbiKpW0`phs)AE&RWqXCwB7N`WG(qM>{21m7Wa)?VSFy0C6FSEkN=)I`k$8rihp;Q znm9S@+1MHXKym)n4WU}RySoek0ATwEPWsGu(=n*BQRk1RY88npB?)PY&ix>-ZPG@FgHviG>u;$0hdh*zo@gD=*ffG~k$!{d7 z&XU6E08PZLQ*7k>+>|stifRHAT*r)42fpmUF;8t2Lox6 zL9xmw5nt4@E6)jJiFOYo+J_qc>BrxVkhlSx$@N&XDG(?MQ{QE|`2_X9F#m9_Mxp=f z!u0PHSr==7Rw6rSjvHJqF{9O|ge!KS2qATp?P zu+m9!K;t44{m_Pt9KkcQzc{vv!pgM&)KxVGzfp);W|I_vuw13F$s5xJV_png7Yi>XP_e+cC!PM^+Yh;ye2lMt}?<*2NFg1v6 zVC8abTGq zbht*^J`{sXbHXhvj@li|4FIP6_H;~d?g-+g)=yq^-+Fd84q3S|8zsHDebNt>Y{8!S zKtR{O7|baGqs|yvl#qYYfrY(+hwuMt?aJe7+P?UW(nO*tB$`yz)uh=_M5WT8 zfyUcB-)1V&NJ&MLh)_yYLN6rJj6$MONpngOr6)sv`?~yIJ6CniJ^9@C{#)PgTHm$S z-e>Q9&fZo1S)yl=h1U9I!`tT1d!uqy?ZT4Nw2sFt_%<7WGtY2c0=_{fmL|8SqV#r8UItx>Y-rxq>TsD8N+||(?>x_>gRhDA>1z*H z$GSX!T|+A^_a>@Og(l0-Z_$V4tX9sRv2*YCCB5nBeBf_`zo#_lP^oupj|NMnP?eg2 z*!m3ZO3SgjrzT>u%E{Hy0$$<&Mr(yytn&<<0J3 z`zY>&uW?CkyK+9Hh~C$-E0JeaS)!b}F4)=fg-hU9jSR`+Inbypr*b^&xtyk1h%31{d2!1)0UvvM|Qbzin!h z_I~epJ%!71eu8j8aNmsvx(pT(2mO3g{vH{cEp@!vs_Cx|ci1ah$mVy_R&gZ;Tp$(HPmEY@Ivr z`P=&AH@D^5EmbwIxwYiky_h-i>|RN8rL;RLC5dH&yO$;2ug$z%TXSsi*nOSh%Duje zL-r|~4(MoJmpi@dfI-dD_NUHqt;rV-6bB8j`nZkh#pkfj@ZvymJ|@;Swnzp6C5I>6 zDfA(ElIsm=RJSh;ee55wPTrZg_XDB4M90_Sie0YRJz{T>WCf!E4GqJbFfEZLn@{iW zOV&(GoOj*LDbJx9my;H@oM)ecjEaf-mE%0Y#t)XZE-wxWTz(f-2WJ+Hb-F 
z5)TkNM;KTtl-La<&JOG{H&vQ1Z0}+GS*?D{QM=H4OZ}eW##) zbdP2C&yzM(P|timXOWy%v4{JPx>Or=&*PacVPEs3kJL1{CFbnlc-!NDPS#R6_N|)< z-MQq%O)0Gk-dDFU7YT?}3E7TZ;g`rbBEHCIaZ1Rchk`=$_J3(*zL{<)#O|@~>Dg?Z zjamw?zZ>5fqq~e>+p4MQIRTWvC7m;TCO{zgY9x@M_NgA*Cr$JepVOJA*>7(@T~ zsbS6gb#V_?N$qnc&Xv~SvR7*;D7`7Zi0&0*scS(vEurNuTdmo>1criA#+xmJviRt+ z%<97ro|QCezN(EHeDwKKe}8beZDaehXlFv?J@?OU?@Z?x?z|;BdKfB<}KKR%YIBB@?y2lI6)MPG8;3=Z<$e1ioBWqMx)~@9{Y|0Bp}T^G^cI!& z=dXosG>IvWtv{1HEUR_&ro8Z3E57;Pw|?03@sPG)po=nBYFlydryC;{#u24Ti(*EV zw(#Tc#J^);jNDlnclXQrIZ+9QwW=!^26iyA?ONfxb=P^PyoT|MG;Lq^3V(^q%hRW= za!w!l=M=9b$DGrK0^xR=el%$gEAOn}rB&p3TkF3nSuy^6sO}r>t=Bs(1EdeVe<2Vl z#&W{9{MxbjqkQL1H7s))D|6&q<1gnD%zIWPrO4PTVxxNMm($O-yv1paYJ6C?OsrXt zr7k+M{Kc40R93#nfv^~EetL&Q57lnE&l;5zBkvC_s-FA1}06+0_&2 zd9}J0ln@85IcBQ2`33aGE?uTNL^yI|Cr{4yg1j=n{TC-I_R;Jty;P@shtFT#dBcs`wBChs|%;U$Fj}?tHvt1iKn-a_3_K<-|`|DyxrLR2a z5=>}Rj_-TBWrbSsJQ}B+_>LDsDFPOZUA2|#dqwwndZH?(I>! zxG-MjL+8S@U0koNiU}zxEMfM2`CRAK!Bk~QiD0~x_!1}MrMAtS#qDjIRlXSAacJJD z6ZGcm-jBm}_>2|L-x-&>G8&v^6WI4rw{>xb-z%#!oIJDg-O-@UYvuNM@)|nu2r+p7 zlk+O?LS0WGEmuhJYSw?P)!BOA-PLAEe_tB)dwq7%#Qn(hn9KwV|2y45HT#448d`TrD%~^cN?j=KRPnTP$;a!q zWzmMo%Pk!^lP}xORf-&Sk#WlGe_y>z{F7@pb92t!_A3fBCi$*R47)dlypnKx;xtf1 zr|%?5<2+P&`$SIN+ty(7;M}=uYt27M^BY%bNWIakmkV9a&Hg55gFzL0M|CAjOwuS% zSP6dfDj9CcYI&`QOO+=b!3*FXA>M7c1B}Ek!XG@wXrDh-G+h&u-kN;)Me?2`gDqQ> znLiRL8mdiireuX&Z7}$*lUFmyIJljsXQ``zr^}jcfyc8hH9ETAvbz5*UGiGy_Ux^k z*?tq>MoqVD_|n|*Xq|v%UR|ts(`cRXR*hp~!p+fTQcu>7`|SOsZ4h3U;^TBDVL|-m zq&GWj>phfxFG`u+J`<6kXvAc+dcUpP@WXUE<`8kUp?l0HWSt5Aap`I%i57JJZfAu1 zPM*A*t!`jSv-mqMt)F{GRDrHEtZ`|tG8_G99?`M=yI>;qZ<+ZgHH4K zAIa}bd=(r2@V3zRmZ*e6AIro=<0F!ud5_%A-OUdFaVqJ)h2@0i%AS`6+Fr({_fH;I z`+D_k{-U#G7lgS*#(Tm(ZGT@hZX(vK`X=LgreDT6tE{X0WY^8FuP8n@_C;>&PIgbj z(CtHBe6*g2LL8lP2!ANaKQzb3NJ%^CQfGV_XE4d%@%-kg9vb*(&#F=@t=QIeJ z2GMH^Hi_*Fy*X&+bi!ufaB(lELUFja$?&z#w$q$+`E|PF zam+K1bcl3Wh_lDOzgbWwdPJNpgu!p)5QzY`C2^T~*s7uQ~S;O}@TSW*lZ z5^E6QdhtvBk(4;qi>&YWDj0t*ytRI;YeV{)+OKAJS8jB0p(oO@v*fQ#8|z)DZ{79z z{_oWS2ArOEt^vMeN0&2LCg}njs;mF9^5W_1NpK=9xj?r(#%sEkt!52t>|uZSs%f!f z;Vr+|E6VI~oXv;9ccoQ|{MC-F)OeZ6V^&dnKv8j|_mz5ueobUouRDYWkoBSHoP^e@<&Ift$>Dg|5BQ!D8DDlh4j7hAqB==e+N*dc{{A9l6J?n3LM^FD5o*dMFqp`7?I z`Ob=TLAi*nyN+#8Hd)8K|2^mC4&UP41tq(hs)!e4dK^cW6q!$K&R5e<`~Q(UvSiPV ze0m>))vX>N+bkrr*WXQtqcN)^Sce_IM_SJc?CTYtWBSq+k3UOGuT*{^EdQ&jp9xBp8b^!!Z=yM|+$BCK@6p24 z?28J)+ANW>j%!7`;>E3;42{|ZKZns(Y|54E70BMvrCAlcFoeTN!6-11$?*N*2IZ&& z_S-g#Ic@GJS-NET&Yf(vzO@M}l_KxwN3LuNmtVXpa{2CWAsY*bgH{F6aDN~iYkLpz9raER{SyrLJj^Dz>ktC4YjQ*T6!w~5A=WJ$QBcX zZ~NH;y0;i`I4;QIL4ZTaq9(z~#@4~v=C3CAS^vB8EMVmZ%&vq?3UgC7sjH&2O;ZK$ zVoky3pE3OHI1WdOA=7HgHm!&*E=1SAhtlxN(NPUxU_=RrBW;qA{>Y)!nBJnLGr`Kq zlDL1;>fzL&GA+(M&kQ!r7B9fzNC!&(v)wH=En6#l4{IAAik8W7IJ+fr5%^3yj`Sma z@clo_a^OBPN^<>*gN=`k>*NLGsgynOY~l{!vb=1t&&LB9j-TGJJ(1`{kuW*G{JNK! 
zX_FEGgaZz9b7>rfTK2(ZseQFc}4w(WgbAYM!=gN;@knsVVYn#dHT9o5-0CY zPBqkv;YvU0EB-R2sxc^{KYWH2M=G07XK`{-<^S6ayd$rsXDk?IeS|}TOV-y(qFr{ z?I}nGF>sEWbP?Zw**iRg$^RGKe?!Jp$=1>Ae-Z^Owt*j<4a_Rj^Cg$aG>P_EKc{O9 zm^}fw!>V=7VESiu{GD_8nzuh3f#G2AgR6rVba+Z>_B+PEQ~2+;lj}WbMuFW_KpM8) zG=uG57h+BmkOzquOl?5b)qrjR2ZU2?dfq?lgA(bPlCDyNvSISb=}D>4|7r4fUXb&c zQFD5e4n$9D2ZG(~OpE-=XR_(tvo!r%&RGq7J0OTl2q7f#;$nC)QE@S83A{M?XX7k` z-!Cg|E-gjLL2_4-!J<~d>lNy9Ej$Y&Q27W{FzCx7Pf}eFp;2Ai#qJkQEPE_FJ=)ytc5C67Xkz- zn2<^XX<=wVNg0nJJOVX#LnO?sv5Ntfa`b`mjnY8&yKnz)vJ@MIEY)%tIAI5mT8`DO z*pWai2i&PHd!eTyg+dK@)Zp;O3>|?O@Tk$g4>Jx_YGHqn8Ya|^8V%T?qt@bF5M~je zQVYRwYM4kO9yH*(NWcyqwH9WPm_>j}EqYI&XCgy&kJ=rJ89Hh$RwiJ^fl4jXlhHGg zqgo5LGnk>H)}l29GY*Kg*b2>Tt<%vnK{}~bgXWV@u|YUHcmHfwXhv>Wj>j0nqn0DF2D=y_ zmIHq7Ue$n}3Yy}8pD&`YLq{M6{M>EOj9CN_YXQ%r+Fqk)Lft${wH*^=1RB8es0Ce^ z;3JMdJdZljkCKHnm_(u3x`0~~ZU8%Y1WLg3sH#EiNKh$<-Ut;`Hc&lqmtqKyT8?92 zdl%JcNlM8nF+eN_JW?$Aj-Cpd?F)FM2*C~=ff(>ep#cu!pcP|E1gO-aZXPvEkWXo0 zEtKgoLr1LzGZSVUsMI2k6+IJa6i<7w7Bq`6Lr1Me6&q$85NiR?pmdj`XF}Z!>Ml0O z2sD6aP(dp(@f>md;W2`a9~BE(($Nnoywt2#KJ+c0@F;?_A8Y?O`{9WlIszTwF@jGJ zvj|YBMV2TkCUP5O@Dk$q+3g(tU5jOS%+OJ5@kk6a4v4i-f>y|NWRNidJKA!12`N!I zJYg26p-!DOySWZjggB^(-}OiXPT1ja1z6zD5P#XkvZ_LGaB@BPwj4Y*>B?i^HR8x` zgCZZKjDmx#xTrX3!wzZi)uGLSL5!ehh`+qr^1gpS{1<@uYeNLWRj~1$N(u5bQPYs1 zQUY!*Y@ks~a7P;(0*EEp2~|Ol5egb|;-X^Wl0X3mDKRlIJi(RnxlE3^q*qGF=Q2GD z_H2>_ok$yWBArROI22^K2U7+V1ewgk_q3K`H#of+-Kgx+p`sSlgmw zBQA@$Fn%v}kq3q+m;h5yf&79IIU6ijFq^yuov9dfCU~jK&H)R*5eFY08XmZ#BthDR zpl)c80Pk=hSxNXg9{}|TW(5%R3<6XSG2rTf0HJ>j4NtwWAwZ=B%L7o;pmt~o!w4F+ z1U!M*_)et+F5&2CfMz7(ve<0oRJsNk%5ix zR7#L}4K)pFTM*lIY@ks~a4QcR0#r)CR)ms#@6H}1Xsa9jG$3Vz*~Zi z?}#OUM}dRYsA(W=L+~ijhY2u(@WZ1(#8WK%MjU*26zFO~NrLq53DWSx`LETC4KxA) z;8EaX3pNC(lt8N&H4X4IOjQdizz7<(1g8Di_>NctxCIrApr(Pe2H+ONJBkJH4B_|5 z(cIApo¥MIpEam5*ToPi62~!8ZxetO-bLK%D(>@O?1>o+0>?AHv|^ub{=kZ^Xfe z`@c5}N)n_Ue5AP#_y2Z`pl1+Za`wahKXef`zEdf|^A)IRNK+}nMlNiiQA^OljST_B z62Sd`y)bGTNNWIYK}nbZBM3j-g5*W8@SDouAC^W>0^#fj4*q*gfM*E)-~0awIV}99 zGWc(|q9h@Mxb?un-@OeB;2DBHx%b1FKeinUc*Mb1fp)=^b&!$R@bj%TaS73%hpq|E zGp{~Gr~9q90sE5jU|;h0&M0{!>?&5+Gf1FYLjPEav{AYe-WkQ=!ggciJ(VhKGDpvY zvDF@H2HB&9^f$@>A+RKXdnun}l5`IH7Y-+vGz@2ONlBmzATYI{)S)aLImyWiz>O#X8)O6mz>P?^923tG#~&^M#u^kXWWk(>^l=fSeF!cA zFN~lOC;*p$`g3f2r&5ABuTaxKJ|Ti7aKQ)~wFFz+vGJWs3BC@Xra?MIjqJ$)mcaHc zHqfXgP<)3C0V*XJ{fwFhIHp3C1m@$|K% z^EYpVNFU`l{@-@l!CXzb>Gd*DFu-0|#nWPjzC>*4Y7g zxSQJ(TA9AK=|B+$Yj2*N_vNHD`z6Y31oK4ZwtR(1$pn#Eu2v0uBhEUu$*% zlah*3tp5B+{_i_J$af;V(48IzTsvX!O}h2v@AnZ%GB~Tcfx7@Iz>79`$cNQ>vo-$f zwhu}kkkd)0Zx_QpaE;0y5GWX=XAeSt?eBA>G1cMF4lC zPow(Hrotsek|Xj{Q@7iCwm3i-tLaiVN&1%CU68t@T|cQ-;nhe5o<;$QZh*1EzG)Z`pBtt<`QL_U92|PuI$@WJsSDX)F z!Qm3X%}$fkcPOYfcGmVuaiXXJS?i6@$1GdG8-fQoVGJBD^TO;fh?b5vR$h~@Ih=X{ z(xGDAjt1g zE4Xe9t_Jb!*&28G4MS0hKdXTks)nLF)8lx8PHPR zX{s0xU(FuI!`s!}nJ~4q|CUjLkQgmPv%r}lr858J+YAEh+-vAjh?V{t6FtZV1eipk|pxa5cEho!$p;XQGu8XirvC z#_GxaDR7m!#$iy?JW5lmp>X!r{{y1v2f2Gpy|@>|?bp?s$!wJU_<0^>>NUPo$G54M z%l>CpYsrjeC(kVDKt17b