1 From 92f0ef51270b2961f63b2e985831f5e9a6251a2f Mon Sep 17 00:00:00 2001
2 From: Biwen Li <biwen.li@nxp.com>
3 Date: Tue, 30 Oct 2018 18:29:03 +0800
4 Subject: [PATCH 25/40] vfio: support layerscape
5 This is an integrated patch of vfio for layerscape
7 Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
8 Signed-off-by: Biwen Li <biwen.li@nxp.com>
10 drivers/vfio/Kconfig | 1 +
11 drivers/vfio/Makefile | 1 +
12 drivers/vfio/fsl-mc/Kconfig | 9 +
13 drivers/vfio/fsl-mc/Makefile | 2 +
14 drivers/vfio/fsl-mc/vfio_fsl_mc.c | 751 ++++++++++++++++++++++
15 drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c | 199 ++++++
16 drivers/vfio/fsl-mc/vfio_fsl_mc_private.h | 55 ++
17 include/uapi/linux/vfio.h | 1 +
18 8 files changed, 1019 insertions(+)
19 create mode 100644 drivers/vfio/fsl-mc/Kconfig
20 create mode 100644 drivers/vfio/fsl-mc/Makefile
21 create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc.c
22 create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
23 create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
25 --- a/drivers/vfio/Kconfig
26 +++ b/drivers/vfio/Kconfig
27 @@ -47,4 +47,5 @@ menuconfig VFIO_NOIOMMU
28 source "drivers/vfio/pci/Kconfig"
29 source "drivers/vfio/platform/Kconfig"
30 source "drivers/vfio/mdev/Kconfig"
31 +source "drivers/vfio/fsl-mc/Kconfig"
32 source "virt/lib/Kconfig"
33 --- a/drivers/vfio/Makefile
34 +++ b/drivers/vfio/Makefile
35 @@ -9,3 +9,4 @@ obj-$(CONFIG_VFIO_SPAPR_EEH) += vfio_spa
36 obj-$(CONFIG_VFIO_PCI) += pci/
37 obj-$(CONFIG_VFIO_PLATFORM) += platform/
38 obj-$(CONFIG_VFIO_MDEV) += mdev/
39 +obj-$(CONFIG_VFIO_FSL_MC) += fsl-mc/
41 +++ b/drivers/vfio/fsl-mc/Kconfig
44 + tristate "VFIO support for QorIQ DPAA2 fsl-mc bus devices"
45 + depends on VFIO && FSL_MC_BUS && EVENTFD
47 + Driver to enable support for the VFIO QorIQ DPAA2 fsl-mc
48 + (Management Complex) devices. This is required to passthrough
49 + fsl-mc bus devices using the VFIO framework.
51 + If you don't know what to do here, say N.
53 +++ b/drivers/vfio/fsl-mc/Makefile
55 +vfio-fsl_mc-y := vfio_fsl_mc.o
56 +obj-$(CONFIG_VFIO_FSL_MC) += vfio_fsl_mc.o vfio_fsl_mc_intr.o
58 +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
61 + * Freescale Management Complex (MC) device passthrough using VFIO
63 + * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
64 + * Copyright 2016-2017 NXP
65 + * Author: Bharat Bhushan <bharat.bhushan@nxp.com>
67 + * This file is licensed under the terms of the GNU General Public
68 + * License version 2. This program is licensed "as is" without any
69 + * warranty of any kind, whether express or implied.
72 +#include <linux/device.h>
73 +#include <linux/iommu.h>
74 +#include <linux/module.h>
75 +#include <linux/mutex.h>
76 +#include <linux/slab.h>
77 +#include <linux/types.h>
78 +#include <linux/vfio.h>
79 +#include <linux/delay.h>
80 +#include <linux/fsl/mc.h>
82 +#include "vfio_fsl_mc_private.h"
84 +#define DRIVER_VERSION "0.10"
85 +#define DRIVER_AUTHOR "Bharat Bhushan <bharat.bhushan@nxp.com>"
86 +#define DRIVER_DESC "VFIO for FSL-MC devices - User Level meta-driver"
88 +static DEFINE_MUTEX(driver_lock);
90 +/* FSL-MC device regions (address and size) are aligned to 64K.
91 + * While MC firmware reports size less than 64K for some objects (it actually
92 + * reports size which does not include reserved space beyond valid bytes).
93 + * Align the size to PAGE_SIZE for userspace to mmap.
95 +static size_t aligned_region_size(struct fsl_mc_device *mc_dev, int index)
99 + size = resource_size(&mc_dev->regions[index]);
100 + return PAGE_ALIGN(size);
103 +static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
105 + struct fsl_mc_device *mc_dev = vdev->mc_dev;
106 + int count = mc_dev->obj_desc.region_count;
109 + vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),
111 + if (!vdev->regions)
114 + for (i = 0; i < mc_dev->obj_desc.region_count; i++) {
115 + vdev->regions[i].addr = mc_dev->regions[i].start;
116 + vdev->regions[i].size = aligned_region_size(mc_dev, i);
117 + vdev->regions[i].type = VFIO_FSL_MC_REGION_TYPE_MMIO;
118 + if (mc_dev->regions[i].flags & IORESOURCE_CACHEABLE)
119 + vdev->regions[i].type |=
120 + VFIO_FSL_MC_REGION_TYPE_CACHEABLE;
121 + vdev->regions[i].flags = VFIO_REGION_INFO_FLAG_MMAP;
122 + vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
123 + if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
124 + vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
127 + vdev->num_regions = mc_dev->obj_desc.region_count;
131 +static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
135 + for (i = 0; i < vdev->num_regions; i++)
136 + iounmap(vdev->regions[i].ioaddr);
138 + vdev->num_regions = 0;
139 + kfree(vdev->regions);
142 +static int vfio_fsl_mc_open(void *device_data)
144 + struct vfio_fsl_mc_device *vdev = device_data;
147 + if (!try_module_get(THIS_MODULE))
150 + mutex_lock(&driver_lock);
151 + if (!vdev->refcnt) {
152 + ret = vfio_fsl_mc_regions_init(vdev);
154 + goto error_region_init;
156 + ret = vfio_fsl_mc_irqs_init(vdev);
158 + goto error_irq_init;
162 + mutex_unlock(&driver_lock);
166 + vfio_fsl_mc_regions_cleanup(vdev);
168 + mutex_unlock(&driver_lock);
170 + module_put(THIS_MODULE);
175 +static void vfio_fsl_mc_release(void *device_data)
177 + struct vfio_fsl_mc_device *vdev = device_data;
178 + struct fsl_mc_device *mc_dev = vdev->mc_dev;
180 + mutex_lock(&driver_lock);
182 + if (!(--vdev->refcnt)) {
183 + vfio_fsl_mc_regions_cleanup(vdev);
184 + vfio_fsl_mc_irqs_cleanup(vdev);
187 + if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
188 + dprc_reset_container(mc_dev->mc_io, 0, mc_dev->mc_handle,
189 + mc_dev->obj_desc.id);
191 + mutex_unlock(&driver_lock);
193 + module_put(THIS_MODULE);
196 +static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd,
199 + struct vfio_fsl_mc_device *vdev = device_data;
200 + struct fsl_mc_device *mc_dev = vdev->mc_dev;
201 + unsigned long minsz;
203 + if (WARN_ON(!mc_dev))
207 + case VFIO_DEVICE_GET_INFO:
209 + struct vfio_device_info info;
211 + minsz = offsetofend(struct vfio_device_info, num_irqs);
213 + if (copy_from_user(&info, (void __user *)arg, minsz))
216 + if (info.argsz < minsz)
219 + info.flags = VFIO_DEVICE_FLAGS_FSL_MC;
220 + info.num_regions = mc_dev->obj_desc.region_count;
221 + info.num_irqs = mc_dev->obj_desc.irq_count;
223 + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
225 + case VFIO_DEVICE_GET_REGION_INFO:
227 + struct vfio_region_info info;
229 + minsz = offsetofend(struct vfio_region_info, offset);
231 + if (copy_from_user(&info, (void __user *)arg, minsz))
234 + if (info.argsz < minsz)
237 + if (info.index >= vdev->num_regions)
240 + /* map offset to the physical address */
241 + info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
242 + info.size = vdev->regions[info.index].size;
243 + info.flags = vdev->regions[info.index].flags;
245 + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
247 + case VFIO_DEVICE_GET_IRQ_INFO:
249 + struct vfio_irq_info info;
251 + minsz = offsetofend(struct vfio_irq_info, count);
252 + if (copy_from_user(&info, (void __user *)arg, minsz))
255 + if (info.argsz < minsz)
258 + if (info.index >= mc_dev->obj_desc.irq_count)
261 + if (vdev->mc_irqs != NULL) {
262 + info.flags = vdev->mc_irqs[info.index].flags;
263 + info.count = vdev->mc_irqs[info.index].count;
266 + * If IRQs are not initialized then these can not
267 + * be configured and used by user-space.
273 + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
275 + case VFIO_DEVICE_SET_IRQS:
277 + struct vfio_irq_set hdr;
281 + minsz = offsetofend(struct vfio_irq_set, count);
283 + if (copy_from_user(&hdr, (void __user *)arg, minsz))
286 + if (hdr.argsz < minsz)
289 + if (hdr.index >= mc_dev->obj_desc.irq_count)
292 + if (hdr.start != 0 || hdr.count > 1)
295 + if (hdr.count == 0 &&
296 + (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE) ||
297 + !(hdr.flags & VFIO_IRQ_SET_ACTION_TRIGGER)))
300 + if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
301 + VFIO_IRQ_SET_ACTION_TYPE_MASK))
304 + if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
307 + if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
308 + size = sizeof(uint8_t);
309 + else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
310 + size = sizeof(int32_t);
314 + if (hdr.argsz - minsz < hdr.count * size)
317 + data = memdup_user((void __user *)(arg + minsz),
320 + return PTR_ERR(data);
323 + ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
324 + hdr.index, hdr.start,
328 + case VFIO_DEVICE_RESET:
337 +static ssize_t vfio_fsl_mc_read(void *device_data, char __user *buf,
338 + size_t count, loff_t *ppos)
340 + struct vfio_fsl_mc_device *vdev = device_data;
341 + unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
342 + loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
343 + struct vfio_fsl_mc_region *region;
347 + /* Read supported only for DPRC and DPMCP device */
348 + if (strcmp(vdev->mc_dev->obj_desc.type, "dprc") &&
349 + strcmp(vdev->mc_dev->obj_desc.type, "dpmcp"))
352 + if (index >= vdev->num_regions)
355 + region = &vdev->regions[index];
357 + if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
360 + if (!(region->type & VFIO_FSL_MC_REGION_TYPE_MMIO))
363 + if (!region->ioaddr) {
364 + region->ioaddr = ioremap_nocache(region->addr, region->size);
365 + if (!region->ioaddr)
369 + if (count != 64 || off != 0)
372 + for (i = 7; i >= 0; i--)
373 + data[i] = readq(region->ioaddr + i * sizeof(uint64_t));
375 + if (copy_to_user(buf, data, 64))
381 +#define MC_CMD_COMPLETION_TIMEOUT_MS 5000
382 +#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
384 +static int vfio_fsl_mc_dprc_wait_for_response(void __iomem *ioaddr)
386 + enum mc_cmd_status status;
387 + unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
391 + struct mc_cmd_header *resp_hdr;
394 + header = readq(ioaddr);
397 + resp_hdr = (struct mc_cmd_header *)&header;
398 + status = (enum mc_cmd_status)resp_hdr->status;
399 + if (status != MC_CMD_STATUS_READY)
402 + udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
403 + timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
404 + if (timeout_usecs == 0)
411 +static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
415 + /* Write at command header in the end */
416 + for (i = 7; i >= 0; i--)
417 + writeq(cmd_data[i], ioaddr + i * sizeof(uint64_t));
419 + /* Wait for response before returning to user-space
420 + * This can be optimized in future to even prepare response
421 + * before returning to user-space and avoid read ioctl.
423 + return vfio_fsl_mc_dprc_wait_for_response(ioaddr);
426 +static int vfio_handle_dprc_commands(void __iomem *ioaddr, uint64_t *cmd_data)
428 + uint64_t cmd_hdr = cmd_data[0];
429 + int cmd = (cmd_hdr >> 52) & 0xfff;
432 + case DPRC_CMDID_OPEN:
434 + return vfio_fsl_mc_send_command(ioaddr, cmd_data);
440 +static ssize_t vfio_fsl_mc_write(void *device_data, const char __user *buf,
441 + size_t count, loff_t *ppos)
443 + struct vfio_fsl_mc_device *vdev = device_data;
444 + unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
445 + loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
446 + struct vfio_fsl_mc_region *region;
450 + /* Write supported only for DPRC and DPMCP device */
451 + if (strcmp(vdev->mc_dev->obj_desc.type, "dprc") &&
452 + strcmp(vdev->mc_dev->obj_desc.type, "dpmcp"))
455 + if (index >= vdev->num_regions)
458 + region = &vdev->regions[index];
460 + if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
463 + if (!(region->type & VFIO_FSL_MC_REGION_TYPE_MMIO))
466 + if (!region->ioaddr) {
467 + region->ioaddr = ioremap_nocache(region->addr, region->size);
468 + if (!region->ioaddr)
472 + if (count != 64 || off != 0)
475 + if (copy_from_user(&data, buf, 64))
478 + ret = vfio_handle_dprc_commands(region->ioaddr, data);
485 +static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
486 + struct vm_area_struct *vma)
488 + u64 size = vma->vm_end - vma->vm_start;
491 + pgoff = vma->vm_pgoff &
492 + ((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
493 + base = pgoff << PAGE_SHIFT;
495 + if (region.size < PAGE_SIZE || base + size > region.size)
498 + * Set the REGION_TYPE_CACHEABLE (QBman CENA regs) to be the
499 + * cache inhibited area of the portal to avoid coherency issues
500 + * if a user migrates to another core.
502 + if (region.type & VFIO_FSL_MC_REGION_TYPE_CACHEABLE)
503 + vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot);
505 + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
507 + vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
509 + return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
510 + size, vma->vm_page_prot);
513 +/* Allows mmaping fsl_mc device regions in assigned DPRC */
514 +static int vfio_fsl_mc_mmap(void *device_data, struct vm_area_struct *vma)
516 + struct vfio_fsl_mc_device *vdev = device_data;
517 + struct fsl_mc_device *mc_dev = vdev->mc_dev;
518 + unsigned long size, addr;
521 + index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);
523 + if (vma->vm_end < vma->vm_start)
525 + if (vma->vm_start & ~PAGE_MASK)
527 + if (vma->vm_end & ~PAGE_MASK)
529 + if (!(vma->vm_flags & VM_SHARED))
531 + if (index >= vdev->num_regions)
534 + if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
537 + if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
538 + && (vma->vm_flags & VM_READ))
541 + if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
542 + && (vma->vm_flags & VM_WRITE))
545 + addr = vdev->regions[index].addr;
546 + size = vdev->regions[index].size;
548 + vma->vm_private_data = mc_dev;
550 + if (vdev->regions[index].type & VFIO_FSL_MC_REGION_TYPE_MMIO)
551 + return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
556 +static const struct vfio_device_ops vfio_fsl_mc_ops = {
557 + .name = "vfio-fsl-mc",
558 + .open = vfio_fsl_mc_open,
559 + .release = vfio_fsl_mc_release,
560 + .ioctl = vfio_fsl_mc_ioctl,
561 + .read = vfio_fsl_mc_read,
562 + .write = vfio_fsl_mc_write,
563 + .mmap = vfio_fsl_mc_mmap,
566 +static int vfio_fsl_mc_initialize_dprc(struct vfio_fsl_mc_device *vdev)
568 + struct device *root_dprc_dev;
569 + struct fsl_mc_device *mc_dev = vdev->mc_dev;
570 + struct device *dev = &mc_dev->dev;
571 + struct fsl_mc_bus *mc_bus;
572 + struct irq_domain *mc_msi_domain;
573 + unsigned int irq_count;
576 + /* device must be DPRC */
577 + if (strcmp(mc_dev->obj_desc.type, "dprc"))
580 + /* mc_io must be un-initialized */
581 + WARN_ON(mc_dev->mc_io);
583 + /* allocate a portal from the root DPRC for vfio use */
584 + fsl_mc_get_root_dprc(dev, &root_dprc_dev);
585 + if (WARN_ON(!root_dprc_dev))
588 + ret = fsl_mc_portal_allocate(to_fsl_mc_device(root_dprc_dev),
589 + FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
592 + goto clean_msi_domain;
594 + /* Reset MCP before move on */
595 + ret = fsl_mc_portal_reset(mc_dev->mc_io);
597 + dev_err(dev, "dprc portal reset failed: error = %d\n", ret);
598 + goto free_mc_portal;
601 + /* MSI domain set up */
602 + ret = fsl_mc_find_msi_domain(root_dprc_dev->parent, &mc_msi_domain);
604 + goto free_mc_portal;
606 + dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
608 + ret = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
609 + &mc_dev->mc_handle);
611 + dev_err(dev, "dprc_open() failed: error = %d\n", ret);
612 + goto free_mc_portal;
615 + /* Initialize resource pool */
616 + fsl_mc_init_all_resource_pools(mc_dev);
618 + mc_bus = to_fsl_mc_bus(mc_dev);
620 + if (!mc_bus->irq_resources) {
621 + irq_count = FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS;
622 + ret = fsl_mc_populate_irq_pool(mc_bus, irq_count);
624 + dev_err(dev, "%s: Failed to init irq-pool\n", __func__);
625 + goto clean_resource_pool;
629 + mutex_init(&mc_bus->scan_mutex);
631 + mutex_lock(&mc_bus->scan_mutex);
632 + ret = dprc_scan_objects(mc_dev, mc_dev->driver_override,
634 + mutex_unlock(&mc_bus->scan_mutex);
636 + dev_err(dev, "dprc_scan_objects() fails (%d)\n", ret);
637 + goto clean_irq_pool;
640 + if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
641 + dev_warn(&mc_dev->dev,
642 + "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
643 + irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
649 + fsl_mc_cleanup_irq_pool(mc_bus);
651 +clean_resource_pool:
652 + fsl_mc_cleanup_all_resource_pools(mc_dev);
653 + dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
656 + fsl_mc_portal_free(mc_dev->mc_io);
659 + dev_set_msi_domain(&mc_dev->dev, NULL);
664 +static int vfio_fsl_mc_device_remove(struct device *dev, void *data)
666 + struct fsl_mc_device *mc_dev;
668 + WARN_ON(dev == NULL);
670 + mc_dev = to_fsl_mc_device(dev);
671 + if (WARN_ON(mc_dev == NULL))
674 + fsl_mc_device_remove(mc_dev);
678 +static void vfio_fsl_mc_cleanup_dprc(struct vfio_fsl_mc_device *vdev)
680 + struct fsl_mc_device *mc_dev = vdev->mc_dev;
681 + struct fsl_mc_bus *mc_bus;
683 + /* device must be DPRC */
684 + if (strcmp(mc_dev->obj_desc.type, "dprc"))
687 + device_for_each_child(&mc_dev->dev, NULL, vfio_fsl_mc_device_remove);
689 + mc_bus = to_fsl_mc_bus(mc_dev);
690 + if (dev_get_msi_domain(&mc_dev->dev))
691 + fsl_mc_cleanup_irq_pool(mc_bus);
693 + dev_set_msi_domain(&mc_dev->dev, NULL);
695 + fsl_mc_cleanup_all_resource_pools(mc_dev);
696 + dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
697 + fsl_mc_portal_free(mc_dev->mc_io);
700 +static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
702 + struct iommu_group *group;
703 + struct vfio_fsl_mc_device *vdev;
704 + struct device *dev = &mc_dev->dev;
707 + group = vfio_iommu_group_get(dev);
709 + dev_err(dev, "%s: VFIO: No IOMMU group\n", __func__);
713 + vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
715 + vfio_iommu_group_put(group, dev);
719 + vdev->mc_dev = mc_dev;
721 + ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
723 + dev_err(dev, "%s: Failed to add to vfio group\n", __func__);
724 + goto free_vfio_device;
727 + /* DPRC container scanned and its children bound with vfio driver */
728 + if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) {
729 + ret = vfio_fsl_mc_initialize_dprc(vdev);
731 + vfio_del_group_dev(dev);
732 + goto free_vfio_device;
735 + struct fsl_mc_device *mc_bus_dev;
737 + /* Non-dprc devices share mc_io from the parent dprc */
738 + mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
739 + if (mc_bus_dev == NULL) {
740 + vfio_del_group_dev(dev);
741 + goto free_vfio_device;
744 + mc_dev->mc_io = mc_bus_dev->mc_io;
746 + /* Inherit parent MSI domain */
747 + dev_set_msi_domain(&mc_dev->dev,
748 + dev_get_msi_domain(mc_dev->dev.parent));
754 + vfio_iommu_group_put(group, dev);
758 +static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
760 + struct vfio_fsl_mc_device *vdev;
761 + struct device *dev = &mc_dev->dev;
763 + vdev = vfio_del_group_dev(dev);
767 + if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
768 + vfio_fsl_mc_cleanup_dprc(vdev);
770 + dev_set_msi_domain(&mc_dev->dev, NULL);
772 + mc_dev->mc_io = NULL;
774 + vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
781 + * vfio-fsl_mc is a meta-driver, so use driver_override interface to
782 + * bind a fsl_mc container with this driver and match_id_table is NULL.
784 +static struct fsl_mc_driver vfio_fsl_mc_driver = {
785 + .probe = vfio_fsl_mc_probe,
786 + .remove = vfio_fsl_mc_remove,
787 + .match_id_table = NULL,
789 + .name = "vfio-fsl-mc",
790 + .owner = THIS_MODULE,
794 +static int __init vfio_fsl_mc_driver_init(void)
796 + return fsl_mc_driver_register(&vfio_fsl_mc_driver);
799 +static void __exit vfio_fsl_mc_driver_exit(void)
801 + fsl_mc_driver_unregister(&vfio_fsl_mc_driver);
804 +module_init(vfio_fsl_mc_driver_init);
805 +module_exit(vfio_fsl_mc_driver_exit);
807 +MODULE_VERSION(DRIVER_VERSION);
808 +MODULE_LICENSE("GPL v2");
809 +MODULE_AUTHOR(DRIVER_AUTHOR);
810 +MODULE_DESCRIPTION(DRIVER_DESC);
812 +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
815 + * Freescale Management Complex (MC) device passthrough using VFIO
817 + * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
818 + * Author: Bharat Bhushan <bharat.bhushan@nxp.com>
820 + * This file is licensed under the terms of the GNU General Public
821 + * License version 2. This program is licensed "as is" without any
822 + * warranty of any kind, whether express or implied.
825 +#include <linux/vfio.h>
826 +#include <linux/slab.h>
827 +#include <linux/types.h>
828 +#include <linux/eventfd.h>
829 +#include <linux/msi.h>
831 +#include <linux/fsl/mc.h>
832 +#include "vfio_fsl_mc_private.h"
834 +static irqreturn_t vfio_fsl_mc_irq_handler(int irq_num, void *arg)
836 + struct vfio_fsl_mc_irq *mc_irq = (struct vfio_fsl_mc_irq *)arg;
838 + eventfd_signal(mc_irq->trigger, 1);
839 + return IRQ_HANDLED;
842 +static int vfio_fsl_mc_irq_mask(struct vfio_fsl_mc_device *vdev,
843 + unsigned int index, unsigned int start,
844 + unsigned int count, uint32_t flags,
850 +static int vfio_fsl_mc_irq_unmask(struct vfio_fsl_mc_device *vdev,
851 + unsigned int index, unsigned int start,
852 + unsigned int count, uint32_t flags,
858 +static int vfio_set_trigger(struct vfio_fsl_mc_device *vdev,
861 + struct vfio_fsl_mc_irq *irq = &vdev->mc_irqs[index];
862 + struct eventfd_ctx *trigger;
866 + hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
867 + if (irq->trigger) {
868 + free_irq(hwirq, irq);
870 + eventfd_ctx_put(irq->trigger);
871 + irq->trigger = NULL;
874 + if (fd < 0) /* Disable only */
877 + irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
878 + hwirq, dev_name(&vdev->mc_dev->dev));
882 + trigger = eventfd_ctx_fdget(fd);
883 + if (IS_ERR(trigger)) {
885 + return PTR_ERR(trigger);
888 + irq->trigger = trigger;
890 + ret = request_irq(hwirq, vfio_fsl_mc_irq_handler, 0,
894 + eventfd_ctx_put(trigger);
895 + irq->trigger = NULL;
902 +int vfio_fsl_mc_irqs_init(struct vfio_fsl_mc_device *vdev)
904 + struct fsl_mc_device *mc_dev = vdev->mc_dev;
905 + struct vfio_fsl_mc_irq *mc_irq;
909 + /* Device does not support any interrupt */
910 + if (mc_dev->obj_desc.irq_count == 0)
913 + irq_count = mc_dev->obj_desc.irq_count;
915 + mc_irq = kcalloc(irq_count, sizeof(*mc_irq), GFP_KERNEL);
916 + if (mc_irq == NULL)
919 + /* Allocate IRQs */
920 + ret = fsl_mc_allocate_irqs(mc_dev);
926 + for (i = 0; i < irq_count; i++) {
927 + mc_irq[i].count = 1;
928 + mc_irq[i].flags = VFIO_IRQ_INFO_EVENTFD;
931 + vdev->mc_irqs = mc_irq;
936 +/* Free All IRQs for the given MC object */
937 +void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev)
939 + struct fsl_mc_device *mc_dev = vdev->mc_dev;
940 + int irq_count = mc_dev->obj_desc.irq_count;
943 + /* Device does not support any interrupt */
944 + if (mc_dev->obj_desc.irq_count == 0)
947 + for (i = 0; i < irq_count; i++)
948 + vfio_set_trigger(vdev, i, -1);
950 + fsl_mc_free_irqs(mc_dev);
951 + kfree(vdev->mc_irqs);
954 +static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
955 + unsigned int index, unsigned int start,
956 + unsigned int count, uint32_t flags,
959 + struct vfio_fsl_mc_irq *irq = &vdev->mc_irqs[index];
962 + if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
963 + return vfio_set_trigger(vdev, index, -1);
965 + if (start != 0 || count != 1)
968 + if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
969 + int32_t fd = *(int32_t *)data;
971 + return vfio_set_trigger(vdev, index, fd);
974 + hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
976 + if (flags & VFIO_IRQ_SET_DATA_NONE) {
977 + vfio_fsl_mc_irq_handler(hwirq, irq);
979 + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
980 + uint8_t trigger = *(uint8_t *)data;
983 + vfio_fsl_mc_irq_handler(hwirq, irq);
989 +int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
990 + uint32_t flags, unsigned int index,
991 + unsigned int start, unsigned int count,
996 + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
997 + case VFIO_IRQ_SET_ACTION_MASK:
998 + ret = vfio_fsl_mc_irq_mask(vdev, index, start, count,
1001 + case VFIO_IRQ_SET_ACTION_UNMASK:
1002 + ret = vfio_fsl_mc_irq_unmask(vdev, index, start, count,
1005 + case VFIO_IRQ_SET_ACTION_TRIGGER:
1006 + ret = vfio_fsl_mc_set_irq_trigger(vdev, index, start,
1007 + count, flags, data);
1014 +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
1017 + * Freescale Management Complex VFIO private declarations
1019 + * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
1020 + * Copyright 2016 NXP
1021 + * Author: Bharat Bhushan <bharat.bhushan@nxp.com>
1023 + * This file is licensed under the terms of the GNU General Public
1024 + * License version 2. This program is licensed "as is" without any
1025 + * warranty of any kind, whether express or implied.
1028 +#ifndef VFIO_FSL_MC_PRIVATE_H
1029 +#define VFIO_FSL_MC_PRIVATE_H
1031 +#define VFIO_FSL_MC_OFFSET_SHIFT 40
1032 +#define VFIO_FSL_MC_OFFSET_MASK (((u64)(1) << VFIO_FSL_MC_OFFSET_SHIFT) - 1)
1034 +#define VFIO_FSL_MC_OFFSET_TO_INDEX(off) (off >> VFIO_FSL_MC_OFFSET_SHIFT)
1036 +#define VFIO_FSL_MC_INDEX_TO_OFFSET(index) \
1037 + ((u64)(index) << VFIO_FSL_MC_OFFSET_SHIFT)
1039 +struct vfio_fsl_mc_irq {
1042 + struct eventfd_ctx *trigger;
1046 +struct vfio_fsl_mc_region {
1048 +#define VFIO_FSL_MC_REGION_TYPE_MMIO 1
1049 +#define VFIO_FSL_MC_REGION_TYPE_CACHEABLE 2
1052 + resource_size_t size;
1053 + void __iomem *ioaddr;
1056 +struct vfio_fsl_mc_device {
1057 + struct fsl_mc_device *mc_dev;
1060 + struct vfio_fsl_mc_region *regions;
1061 + struct vfio_fsl_mc_irq *mc_irqs;
1064 +int vfio_fsl_mc_irqs_init(struct vfio_fsl_mc_device *vdev);
1065 +void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev);
1066 +int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
1067 + uint32_t flags, unsigned int index,
1068 + unsigned int start, unsigned int count,
1070 +#endif /* VFIO_FSL_MC_PRIVATE_H */
1071 --- a/include/uapi/linux/vfio.h
1072 +++ b/include/uapi/linux/vfio.h
1073 @@ -200,6 +200,7 @@ struct vfio_device_info {
1074 #define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
1075 #define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
1076 #define VFIO_DEVICE_FLAGS_CCW (1 << 4) /* vfio-ccw device */
1077 +#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 5) /* vfio-fsl-mc device */
1078 __u32 num_regions; /* Max region index + 1 */
1079 __u32 num_irqs; /* Max IRQ index + 1 */