diff --git a/arch/arm64/configs/tencent.config b/arch/arm64/configs/tencent.config
index 1c5f5afc021285331c8571046abe178d374001d9..752d43eeb1eb8773ea6c57bc2fe18607d287e230 100644
--- a/arch/arm64/configs/tencent.config
+++ b/arch/arm64/configs/tencent.config
@@ -1498,3 +1498,5 @@ CONFIG_TEST_BPF=m
 CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_VIRT_PLAT_DEV=y
 CONFIG_VIRT_VTIMER_IRQ_BYPASS=y
+CONFIG_HISI_SOC_CACHE=m
+CONFIG_HISI_SOC_HHA=m
\ No newline at end of file
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index c24f49ee10d7378bcaffa1ff0449d3c16a9409ea..f3a31ec1d1c7d849398da29d68797a2a7deb6225 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -230,7 +230,7 @@ vma_create(struct drm_i915_gem_object *obj,
 }
 
 static struct i915_vma *
-vma_lookup(struct drm_i915_gem_object *obj,
+i915_vma_lookup(struct drm_i915_gem_object *obj,
            struct i915_address_space *vm,
            const struct i915_ggtt_view *view)
 {
@@ -280,7 +280,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj,
         GEM_BUG_ON(vm->closed);
 
         spin_lock(&obj->vma.lock);
-        vma = vma_lookup(obj, vm, view);
+        vma = i915_vma_lookup(obj, vm, view);
         spin_unlock(&obj->vma.lock);
 
         /* vma_create() will resolve the race if another creates the vma */
diff --git a/drivers/soc/hisilicon/Kconfig b/drivers/soc/hisilicon/Kconfig
index 0ab688af308fed625ec8c04723a87911b8da7373..49ccc25a0d13d366239bcf2c37df0a4a6c327c33 100644
--- a/drivers/soc/hisilicon/Kconfig
+++ b/drivers/soc/hisilicon/Kconfig
@@ -18,4 +18,26 @@ config KUNPENG_HCCS
           Say M here if you want to include support for querying the
           health status and port information of HCCS on Kunpeng SoC.
 
+config HISI_SOC_CACHE
+        tristate "HiSilicon Cache driver for Kunpeng SoC"
+        depends on ARCH_HISI
+        help
+          This driver provides the basic utilities shared by the drivers
+          of the different parts of the Kunpeng SoC cache, such as the L3
+          cache and the Hydra Home Agent (HHA).
+
+          Say yes if either the HiSilicon L3 cache driver or the HiSilicon
+          Hydra Home Agent driver is needed.
+
+config HISI_SOC_HHA
+        tristate "HiSilicon Hydra Home Agent (HHA) device driver"
+        depends on ARM64 && ACPI || COMPILE_TEST
+        depends on HISI_SOC_CACHE
+        help
+          The Hydra Home Agent (HHA) is responsible for cache coherency
+          on the SoC. This driver provides the cache maintenance functions
+          of the HHA.
+
+          This driver can be built as a module. If so, the module will be
+          called hisi_soc_hha.
+
 endmenu
diff --git a/drivers/soc/hisilicon/Makefile b/drivers/soc/hisilicon/Makefile
index 226e747e70d67511e1954010e143d39df6b73f55..0d3bcdbec6b8de27f04a9ce07f6282ca2bba703c 100644
--- a/drivers/soc/hisilicon/Makefile
+++ b/drivers/soc/hisilicon/Makefile
@@ -1,2 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_KUNPENG_HCCS) += kunpeng_hccs.o
+
+obj-$(CONFIG_HISI_SOC_CACHE) += hisi_soc_cache_framework.o
+obj-$(CONFIG_HISI_SOC_HHA) += hisi_soc_hha.o
diff --git a/drivers/soc/hisilicon/hisi_soc_cache_framework.c b/drivers/soc/hisilicon/hisi_soc_cache_framework.c
new file mode 100644
index 0000000000000000000000000000000000000000..1bdf2dea05f9a4588fdc37c5f4c1e822d8cd13e6
--- /dev/null
+++ b/drivers/soc/hisilicon/hisi_soc_cache_framework.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Framework for HiSilicon SoC cache, which manages HiSilicon SoC cache drivers.
+ *
+ * Copyright (c) 2024 HiSilicon Technologies Co., Ltd.
+ * Author: Jie Wang
+ * Author: Yicong Yang
+ * Author: Yushan Wang
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/pagewalk.h>
+#include <linux/pfn.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include <uapi/misc/hisi_soc_cache/hisi_soc_cache.h>
+
+#include "hisi_soc_cache_framework.h"
+
+struct hisi_soc_comp_inst {
+        struct list_head node;
+        struct hisi_soc_comp *comp;
+};
+
+struct hisi_soc_comp_list {
+        struct list_head node;
+        /* protects list of HiSilicon SoC cache components */
+        spinlock_t lock;
+        u32 inst_num;
+};
+
+static struct hisi_soc_comp_list soc_cache_devs[SOC_COMP_TYPE_MAX];
+
+int hisi_soc_cache_maintain(phys_addr_t addr, size_t size,
+                            enum hisi_soc_cache_maint_type mnt_type)
+{
+        struct hisi_soc_comp_inst *inst;
+        struct list_head *head;
+        int ret = -EOPNOTSUPP;
+
+        if (mnt_type >= HISI_CACHE_MAINT_MAX)
+                return -EINVAL;
+
+        spin_lock(&soc_cache_devs[HISI_SOC_HHA].lock);
+
+        head = &soc_cache_devs[HISI_SOC_HHA].node;
+        list_for_each_entry(inst, head, node) {
+                ret = inst->comp->ops->do_maintain(inst->comp, addr, size,
+                                                   mnt_type);
+                if (ret)
+                        goto out_unlock;
+        }
+
+        list_for_each_entry(inst, head, node) {
+                ret = inst->comp->ops->poll_maintain_done(inst->comp, addr,
+                                                          size, mnt_type);
+                if (ret)
+                        goto out_unlock;
+        }
+
+out_unlock:
+        spin_unlock(&soc_cache_devs[HISI_SOC_HHA].lock);
+        return ret;
+}
+EXPORT_SYMBOL_GPL(hisi_soc_cache_maintain);
+
+static int hisi_soc_cache_maint_pte_entry(pte_t *pte, unsigned long addr,
+                                          unsigned long next,
+                                          struct mm_walk *walk)
+{
+#ifdef HISI_SOC_CACHE_LLT
+        unsigned int mnt_type = *((unsigned int *)walk->priv);
+#else
+        unsigned int mnt_type = *((unsigned int *)walk->private);
+#endif
+        size_t size = next - addr;
+        phys_addr_t paddr;
+
+        if (!pte_present(*pte))
+                return -EINVAL;
+
+        paddr = PFN_PHYS(pte_pfn(*pte)) + offset_in_page(addr);
+
+        return hisi_soc_cache_maintain(paddr, size, mnt_type);
+}
+
+static const struct mm_walk_ops hisi_soc_cache_maint_walk = {
+        .pte_entry = hisi_soc_cache_maint_pte_entry,
+};
+
+static int hisi_soc_cache_inst_check(const struct hisi_soc_comp *comp,
+                                     enum hisi_soc_comp_type comp_type)
+{
+        /* Different types of component could have different ops. */
+        switch (comp_type) {
+        case HISI_SOC_HHA:
+                if (!comp->ops->do_maintain || !comp->ops->poll_maintain_done)
+                        return -EINVAL;
+                break;
+        default:
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+static int hisi_soc_cache_inst_add(struct hisi_soc_comp *comp,
+                                   enum hisi_soc_comp_type comp_type)
+{
+        struct hisi_soc_comp_inst *comp_inst;
+        int ret;
+
+        ret = hisi_soc_cache_inst_check(comp, comp_type);
+        if (ret)
+                return ret;
+
+        comp_inst = kzalloc(sizeof(*comp_inst), GFP_KERNEL);
+        if (!comp_inst)
+                return -ENOMEM;
+
+        comp_inst->comp = comp;
+
+        spin_lock(&soc_cache_devs[comp_type].lock);
+        list_add_tail(&comp_inst->node,
+                      &soc_cache_devs[comp_type].node);
+        soc_cache_devs[comp_type].inst_num++;
+        spin_unlock(&soc_cache_devs[comp_type].lock);
+
+        return 0;
+}
+
+/*
+ * When @comp is NULL, delete all instances of @comp_type.
+ */
+static void hisi_soc_cache_inst_del(struct hisi_soc_comp *comp,
+                                    enum hisi_soc_comp_type comp_type)
+{
+        struct hisi_soc_comp_inst *inst, *tmp;
+
+        spin_lock(&soc_cache_devs[comp_type].lock);
+        list_for_each_entry_safe(inst, tmp, &soc_cache_devs[comp_type].node,
+                                 node) {
+                if (comp && comp != inst->comp)
+                        continue;
+
+                if (soc_cache_devs[comp_type].inst_num > 0)
+                        soc_cache_devs[comp_type].inst_num--;
+
+                list_del(&inst->node);
+                kfree(inst);
+
+                /* Stop the loop if we have already deleted @comp. */
+                if (comp)
+                        break;
+        }
+        spin_unlock(&soc_cache_devs[comp_type].lock);
+}
+
+int hisi_soc_comp_inst_add(struct hisi_soc_comp *comp)
+{
+        int ret, i = 0;
+
+        if (!comp || !comp->ops || comp->comp_type == 0)
+                return -EINVAL;
+
+        for_each_set_bit_from(i, &comp->comp_type, SOC_COMP_TYPE_MAX) {
+                ret = hisi_soc_cache_inst_add(comp, i);
+                if (ret)
+                        return ret;
+        }
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_soc_comp_inst_add);
+
+int hisi_soc_comp_inst_del(struct hisi_soc_comp *comp)
+{
+        int i;
+
+        if (!comp)
+                return -EINVAL;
+
+        for_each_set_bit(i, &comp->comp_type, SOC_COMP_TYPE_MAX)
+                hisi_soc_cache_inst_del(comp, i);
+
+        return 0;
+}
+EXPORT_SYMBOL_GPL(hisi_soc_comp_inst_del);
+
+static int __hisi_soc_cache_maintain(unsigned long __user vaddr, size_t size,
+                                     enum hisi_soc_cache_maint_type mnt_type)
+{
+        unsigned long start = untagged_addr(vaddr);
+        struct vm_area_struct *vma;
+        int ret = 0;
+
+        if (mmap_read_lock_killable(current->mm))
+                return -EINTR;
+
+        vma = vma_lookup(current->mm, start);
+        if (!vma || start + size > vma->vm_end || !size) {
+                ret = -EINVAL;
+                goto out;
+        }
+
+        /* The user must have write permission on the target memory. */
+        if (!(vma->vm_flags & VM_WRITE)) {
+                ret = -EINVAL;
+                goto out;
+        }
+
+        ret = walk_page_range(current->mm, start, start + size,
+                              &hisi_soc_cache_maint_walk, &mnt_type);
+
+out:
+        mmap_read_unlock(current->mm);
+        return ret;
+}
+
+static long hisi_soc_cache_mgmt_ioctl(struct file *file, u32 cmd,
+                                      unsigned long arg)
+{
+        struct hisi_soc_cache_ioctl_param *param;
+        long ret;
+
+        param = kzalloc(sizeof(*param), GFP_KERNEL);
+        if (!param)
+                return -ENOMEM;
+
+        if (copy_from_user(param, (void __user *)arg, sizeof(*param))) {
+                ret = -EFAULT;
+                goto out;
+        }
+
+        switch (cmd) {
+        case HISI_CACHE_MAINTAIN:
+                ret = __hisi_soc_cache_maintain(param->addr, param->size,
+                                                param->op_type);
+                break;
+        default:
+                ret = -EINVAL;
+                break;
+        }
+out:
+        kfree(param);
+        return ret;
+}
+
+static const struct file_operations soc_cache_dev_fops = {
+        .owner = THIS_MODULE,
+        .unlocked_ioctl = hisi_soc_cache_mgmt_ioctl,
+};
+
+static struct miscdevice soc_cache_miscdev = {
+        .minor = MISC_DYNAMIC_MINOR,
+        .name = "hisi_soc_cache_mgmt",
+        .fops = &soc_cache_dev_fops,
+        .mode = 0600,
+};
+
+static void hisi_soc_cache_inst_uninit(void)
+{
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(soc_cache_devs); ++i)
+                hisi_soc_cache_inst_del(NULL, i);
+}
+
+static void hisi_soc_cache_framework_data_init(void)
+{
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(soc_cache_devs); ++i) {
+                spin_lock_init(&soc_cache_devs[i].lock);
+                INIT_LIST_HEAD(&soc_cache_devs[i].node);
+        }
+}
+
+static const char *const hisi_soc_cache_item_str[SOC_COMP_TYPE_MAX] = {
+        "hha"
+};
+
+/*
+ * Print the number of registered cache instances of each component type
+ * through debugfs.
+ */
+ */ +static ssize_t hisi_soc_cache_dbg_get_inst_num(struct file *file, + char __user *buff, + size_t cnt, + loff_t *ppos) +{ +#define HISI_SOC_CACHE_DBGFS_REG_LEN 100 + char *read_buff; + int len, i, pos = 0; + int ret = 0; + + if (!access_ok(buff, cnt)) + return -EFAULT; + if (*ppos < 0) + return -EINVAL; + if (cnt == 0) + return 0; + + read_buff = kzalloc(HISI_SOC_CACHE_DBGFS_REG_LEN, GFP_KERNEL); + if (!read_buff) + return -ENOMEM; + + len = HISI_SOC_CACHE_DBGFS_REG_LEN; + + for (i = 0; i < ARRAY_SIZE(soc_cache_devs); i++) { + spin_lock(&soc_cache_devs[i].lock); + pos += scnprintf(read_buff + pos, len - pos, + "%s inst num: %u\n", + hisi_soc_cache_item_str[i], + soc_cache_devs[i].inst_num); + spin_unlock(&soc_cache_devs[i].lock); + } + + ret = simple_read_from_buffer(buff, cnt, ppos, read_buff, + strlen(read_buff)); + kfree(read_buff); + return ret; +} + +static struct dentry *hisi_cache_dbgfs_root; +static const struct file_operations hisi_cache_dbgfs_ops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = hisi_soc_cache_dbg_get_inst_num, +}; + +static void hisi_soc_cache_dbgfs_init(void) +{ + hisi_cache_dbgfs_root = debugfs_create_dir("hisi_soc_cache_frm", NULL); + debugfs_create_file("instance", 0400, hisi_cache_dbgfs_root, NULL, + &hisi_cache_dbgfs_ops); +} + +static void hisi_soc_cache_dbgfs_uninit(void) +{ + debugfs_remove_recursive(hisi_cache_dbgfs_root); + hisi_cache_dbgfs_root = NULL; +} + +static int __init hisi_soc_cache_framework_init(void) +{ + int ret; + + hisi_soc_cache_framework_data_init(); + + ret = misc_register(&soc_cache_miscdev); + if (ret) { + hisi_soc_cache_inst_uninit(); + return ret; + } + + hisi_soc_cache_dbgfs_init(); + + return 0; +} +module_init(hisi_soc_cache_framework_init); + +static void __exit hisi_soc_cache_framework_exit(void) +{ + hisi_soc_cache_dbgfs_uninit(); + misc_deregister(&soc_cache_miscdev); + hisi_soc_cache_inst_uninit(); +} +module_exit(hisi_soc_cache_framework_exit); + +MODULE_DESCRIPTION("HiSilicon SoC Cache Framework Driver"); +MODULE_AUTHOR("Jie Wang "); +MODULE_AUTHOR("Yushan Wang "); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/hisilicon/hisi_soc_cache_framework.h b/drivers/soc/hisilicon/hisi_soc_cache_framework.h new file mode 100644 index 0000000000000000000000000000000000000000..9b3de4e5016129ce0ab7535153748bf95fe6dbdf --- /dev/null +++ b/drivers/soc/hisilicon/hisi_soc_cache_framework.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Header file of framework for HiSilicon SoC cache. + * + * Copyright (c) 2024 HiSilicon Technologies Co., Ltd. + * Author: Jie Wang + * Author: Yicong Yang + * Author: Yushan Wang + */ + +#ifndef HISI_CACHE_FRAMEWORK_H +#define HISI_CACHE_FRAMEWORK_H + +#include +#include + +#include + +enum hisi_soc_comp_type { + HISI_SOC_HHA, + SOC_COMP_TYPE_MAX +}; + +struct hisi_soc_comp; + +/** + * struct hisi_soc_comp_ops - Callbacks for SoC cache drivers to handle + * operation requests. + * @maintain_enable: perform certain cache maintain operation on HHA. + * @poll_maintain_done: check if the HHA maintain operation has succeeded. + * + * Operations are decoupled into two phases so that framework does not have + * to wait for one operation to finish before calling the next when multiple + * hardwares onboard. + * + * Implementers must implement the functions in pairs. Implementation should + * return -EBUSY when: + * - insufficient resources are available to perform the operation. + * - previously raised operation is not finished. + * - new operations (do_lock(), do_unlock() etc.) 
+ */
+struct hisi_soc_comp_ops {
+        int (*do_maintain)(struct hisi_soc_comp *comp,
+                           phys_addr_t addr, size_t size,
+                           enum hisi_soc_cache_maint_type mnt_type);
+        int (*poll_maintain_done)(struct hisi_soc_comp *comp,
+                                  phys_addr_t addr, size_t size,
+                                  enum hisi_soc_cache_maint_type mnt_type);
+};
+
+/**
+ * struct hisi_soc_comp - Struct of HiSilicon SoC cache components.
+ * @ops: possible operations a component may perform.
+ * @affinity_mask: CPUs associated with this component.
+ * @comp_type: bitmap declaring the type of the component.
+ *
+ * A component may have multiple types (e.g. a multi-function device).
+ * If so, set the bits of @comp_type according to the supported types in
+ * enum hisi_soc_comp_type.
+ */
+struct hisi_soc_comp {
+        struct hisi_soc_comp_ops *ops;
+        cpumask_t affinity_mask;
+        /*
+         * Setting bit x to 1 means this instance supports the feature of
+         * the x-th entry in enum hisi_soc_comp_type.
+         */
+        unsigned long comp_type;
+};
+
+int hisi_soc_comp_inst_add(struct hisi_soc_comp *comp);
+int hisi_soc_comp_inst_del(struct hisi_soc_comp *comp);
+int hisi_soc_cache_maintain(phys_addr_t addr, size_t size,
+                            enum hisi_soc_cache_maint_type mnt_type);
+
+#endif
diff --git a/drivers/soc/hisilicon/hisi_soc_hha.c b/drivers/soc/hisilicon/hisi_soc_hha.c
new file mode 100644
index 0000000000000000000000000000000000000000..c46668f0a9f58d67f2f9ad561044528eb7c7fa3a
--- /dev/null
+++ b/drivers/soc/hisilicon/hisi_soc_hha.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for HiSilicon Hydra Home Agent (HHA).
+ *
+ * Copyright (c) 2024 HiSilicon Technologies Co., Ltd.
+ * Author: Yicong Yang
+ *         Yushan Wang
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+
+#include "hisi_soc_cache_framework.h"
+
+#define HISI_HHA_CTRL           0x5004
+#define HISI_HHA_CTRL_EN        BIT(0)
+#define HISI_HHA_CTRL_RANGE     BIT(1)
+#define HISI_HHA_CTRL_TYPE      GENMASK(3, 2)
+#define HISI_HHA_START_L        0x5008
+#define HISI_HHA_START_H        0x500c
+#define HISI_HHA_LEN_L          0x5010
+#define HISI_HHA_LEN_H          0x5014
+
+/* Maintenance operations are performed at a 128-byte granularity. */
+#define HISI_HHA_MAINT_ALIGN    128
+
+#define HISI_HHA_POLL_GAP_US    10
+
+struct hisi_soc_hha {
+        struct hisi_soc_comp comp;
+        /* Serializes access to the HHA instance to forbid overlapping requests. */
+        spinlock_t lock;
+        struct device *dev;
+        void __iomem *base;
+};
+
+static bool hisi_hha_cache_maintain_wait_finished(struct hisi_soc_hha *soc_hha)
+{
+        u32 val;
+
+        return !readl_poll_timeout_atomic(soc_hha->base + HISI_HHA_CTRL, val,
+                                          !(val & HISI_HHA_CTRL_EN),
+                                          HISI_HHA_POLL_GAP_US,
+                                          jiffies_to_usecs(HZ));
+}
+
+static int hisi_hha_cache_do_maintain(struct hisi_soc_comp *comp,
+                                      phys_addr_t addr, size_t size,
+                                      enum hisi_soc_cache_maint_type mnt_type)
+{
+        struct hisi_soc_hha *soc_hha = container_of(comp, struct hisi_soc_hha,
+                                                    comp);
+        int ret = 0;
+        u32 reg;
+
+        if (!size)
+                return -EINVAL;
+
+        if (mnt_type >= HISI_CACHE_MAINT_MAX)
+                return -EOPNOTSUPP;
+
+        /*
+         * The hardware operates on the inclusive address range
+         * [addr, addr + size - 1] and performs maintenance, in 128-byte
+         * granules, on every cache line that overlaps this range.
+         */
+ */ + size -= 1; + + spin_lock(&soc_hha->lock); + + if (!hisi_hha_cache_maintain_wait_finished(soc_hha)) + return -EBUSY; + + writel(lower_32_bits(addr), soc_hha->base + HISI_HHA_START_L); + writel(upper_32_bits(addr), soc_hha->base + HISI_HHA_START_H); + writel(lower_32_bits(size), soc_hha->base + HISI_HHA_LEN_L); + writel(upper_32_bits(size), soc_hha->base + HISI_HHA_LEN_H); + + reg = FIELD_PREP(HISI_HHA_CTRL_TYPE, mnt_type); + reg |= HISI_HHA_CTRL_RANGE | HISI_HHA_CTRL_EN; + writel(reg, soc_hha->base + HISI_HHA_CTRL); + + spin_unlock(&soc_hha->lock); + return ret; +} + +static int hisi_hha_cache_poll_maintain_done(struct hisi_soc_comp *comp, + phys_addr_t addr, size_t size, + enum hisi_soc_cache_maint_type mnt_type) +{ + struct hisi_soc_hha *soc_hha = container_of(comp, struct hisi_soc_hha, + comp); + + spin_lock(&soc_hha->lock); + + if (!hisi_hha_cache_maintain_wait_finished(soc_hha)) + return -ETIMEDOUT; + + spin_unlock(&soc_hha->lock); + return 0; +} + +static struct hisi_soc_comp_ops hisi_soc_hha_comp_ops = { + .do_maintain = hisi_hha_cache_do_maintain, + .poll_maintain_done = hisi_hha_cache_poll_maintain_done, +}; + +static void hisi_hha_comp_inst_del(void *priv) +{ + struct hisi_soc_hha *soc_hha = priv; + + hisi_soc_comp_inst_del(&soc_hha->comp); +} + +static int hisi_soc_hha_probe(struct platform_device *pdev) +{ + struct hisi_soc_hha *soc_hha; + struct resource *mem; + int ret; + + soc_hha = devm_kzalloc(&pdev->dev, sizeof(*soc_hha), GFP_KERNEL); + if (!soc_hha) + return -ENOMEM; + + platform_set_drvdata(pdev, soc_hha); + soc_hha->dev = &pdev->dev; + + spin_lock_init(&soc_hha->lock); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) + return -ENODEV; + + /* + * HHA cache driver share the same register region with HHA uncore PMU + * driver in hardware's perspective, none of them should reserve the + * resource to itself only. Here exclusive access verification is + * avoided by calling devm_ioremap instead of devm_ioremap_resource to + * allow both drivers to exist at the same time. 
+ */ + soc_hha->base = devm_ioremap(&pdev->dev, mem->start, + resource_size(mem)); + if (IS_ERR_OR_NULL(soc_hha->base)) { + return dev_err_probe(&pdev->dev, PTR_ERR(soc_hha->base), + "failed to remap io memory"); + } + + soc_hha->comp.ops = &hisi_soc_hha_comp_ops; + soc_hha->comp.comp_type = BIT(HISI_SOC_HHA); + cpumask_copy(&soc_hha->comp.affinity_mask, cpu_possible_mask); + + ret = hisi_soc_comp_inst_add(&soc_hha->comp); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "failed to register maintain inst"); + + return devm_add_action_or_reset(&pdev->dev, hisi_hha_comp_inst_del, + soc_hha); +} + +static const struct acpi_device_id hisi_soc_hha_ids[] = { + { "HISI0511", }, + { } +}; +MODULE_DEVICE_TABLE(acpi, hisi_soc_hha_ids); + +static struct platform_driver hisi_soc_hha_driver = { + .driver = { + .name = "hisi_soc_hha", + .acpi_match_table = hisi_soc_hha_ids, + }, + .probe = hisi_soc_hha_probe, +}; + +module_platform_driver(hisi_soc_hha_driver); + +MODULE_DESCRIPTION("Hisilicon Hydra Home Agent driver supporting cache maintenance"); +MODULE_AUTHOR("Yicong Yang "); +MODULE_AUTHOR("Yushan Wang "); +MODULE_LICENSE("GPL"); diff --git a/include/linux/mm.h b/include/linux/mm.h index 8ff12971a0d232ed7e97c73427bad8db1af06bc9..162a1156f2029efe1ceeb86512de0af9dd6452ac 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2547,17 +2547,45 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, struct vm_area_struct **pprev); -/* Look up the first VMA which intersects the interval start_addr..end_addr-1, - NULL if none. Assume start_addr < end_addr. */ -static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) +/** + * find_vma_intersection() - Look up the first VMA which intersects the interval + * @mm: The process address space. + * @start_addr: The inclusive start user address. + * @end_addr: The exclusive end user address. + * + * Returns: The first VMA within the provided range, %NULL otherwise. Assumes + * start_addr < end_addr. + */ +static inline +struct vm_area_struct *find_vma_intersection(struct mm_struct *mm, + unsigned long start_addr, + unsigned long end_addr) { - struct vm_area_struct * vma = find_vma(mm,start_addr); + struct vm_area_struct *vma = find_vma(mm, start_addr); if (vma && end_addr <= vma->vm_start) vma = NULL; return vma; } +/** + * vma_lookup() - Find a VMA at a specific address + * @mm: The process address space. + * @addr: The user address. + * + * Return: The vm_area_struct at the given address, %NULL otherwise. + */ +static inline +struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr) +{ + struct vm_area_struct *vma = find_vma(mm, addr); + + if (vma && addr < vma->vm_start) + vma = NULL; + + return vma; +} + static inline unsigned long vm_start_gap(struct vm_area_struct *vma) { unsigned long vm_start = vma->vm_start; diff --git a/include/uapi/misc/hisi_soc_cache/hisi_soc_cache.h b/include/uapi/misc/hisi_soc_cache/hisi_soc_cache.h new file mode 100644 index 0000000000000000000000000000000000000000..5441f6f75b81946bc64128efa11735e7e29149ea --- /dev/null +++ b/include/uapi/misc/hisi_soc_cache/hisi_soc_cache.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */ +/* Copyright (c) 2024-2024 HiSilicon Limited. 
+#ifndef _UAPI_HISI_SOC_CACHE_H
+#define _UAPI_HISI_SOC_CACHE_H
+
+#include <linux/ioctl.h>
+
+/* HISI_CACHE_MAINTAIN: cache maintenance operation for HiSilicon SoC */
+#define HISI_CACHE_MAINTAIN     _IOW('C', 1, unsigned long)
+
+/*
+ * Further information on these operations can be found at:
+ * https://developer.arm.com/documentation/ihi0050/latest/
+ */
+enum hisi_soc_cache_maint_type {
+        HISI_CACHE_MAINT_CLEANSHARED,
+        HISI_CACHE_MAINT_CLEANINVALID,
+        HISI_CACHE_MAINT_MAKEINVALID,
+
+        HISI_CACHE_MAINT_MAX
+};
+
+/**
+ * struct hisi_soc_cache_ioctl_param - User parameters for HiSilicon cache
+ *                                     maintenance operations.
+ * @op_type: cache maintenance type
+ * @addr: start address of the region to maintain
+ * @size: size of the region to maintain
+ */
+struct hisi_soc_cache_ioctl_param {
+        enum hisi_soc_cache_maint_type op_type;
+        unsigned long addr;
+        unsigned long size;
+};
+
+#endif
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 0d4c6159a4eaf0d83a3e7707fe87f0e31ca085fb..45244fc439e6018d37f2dd6db17c125e864bc9ff 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -375,6 +375,7 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
         } while (start = next, start < end);
         return err;
 }
+EXPORT_SYMBOL_GPL(walk_page_range);
 
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
                 void *private)
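---

Usage note (illustrative, not part of the patch): a minimal userspace sketch of
driving the HISI_CACHE_MAINTAIN ioctl exposed by the framework's misc device.
The ioctl number, the struct hisi_soc_cache_ioctl_param layout and the
maintenance types come from the uapi header above; the device path
/dev/hisi_soc_cache_mgmt (derived from the miscdevice name) and the installed
header path are assumptions.

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	/* Assumed install path after "make headers_install". */
	#include <misc/hisi_soc_cache/hisi_soc_cache.h>

	int main(void)
	{
		size_t len = 4096;
		/* Writable, page-aligned buffer whose lines we want cleaned to memory. */
		void *buf = aligned_alloc(4096, len);
		struct hisi_soc_cache_ioctl_param param = {
			.op_type = HISI_CACHE_MAINT_CLEANSHARED,
			.addr = (unsigned long)buf,
			.size = len,
		};
		int fd, ret;

		if (!buf)
			return 1;
		memset(buf, 0x5a, len);

		/* Misc device registered by hisi_soc_cache_framework (mode 0600). */
		fd = open("/dev/hisi_soc_cache_mgmt", O_RDWR);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		/* Ask the SoC cache (HHA) to clean the buffer's cache lines. */
		ret = ioctl(fd, HISI_CACHE_MAINTAIN, &param);
		if (ret)
			perror("HISI_CACHE_MAINTAIN");

		close(fd);
		free(buf);
		return ret ? 1 : 0;
	}

The ioctl argument is a pointer to the parameter struct; the driver copies it
with copy_from_user(), resolves the VMA of @addr (which must be writable) and
walks the page range, issuing one physical-address maintenance request per page.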