layerscape: add linux 4.9 support
openwrt/openwrt.git: target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch
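The hunks below replace the kernel's old direct-mapped region (dm_region) hooks with the generic reserved-region (resv_region) API and teach the ARM SMMU drivers to publish a software-managed MSI window, which is what the Layerscape fsl-mc bus relies on. As a rough sketch (not part of the patch itself), a driver wires the new callbacks up roughly as follows; the foo_* names are placeholders, and the MSI window values simply mirror the arm-smmu hunks in this patch:

    #include <linux/iommu.h>
    #include <linux/dma-iommu.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    /* Placeholder window; the arm-smmu hunks below use these same values. */
    #define FOO_MSI_IOVA_BASE	0x8000000
    #define FOO_MSI_IOVA_LENGTH	0x100000

    static void foo_get_resv_regions(struct device *dev, struct list_head *head)
    {
    	struct iommu_resv_region *region;
    	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

    	/* Publish a software-managed MSI window for iommu-dma to map doorbells into. */
    	region = iommu_alloc_resv_region(FOO_MSI_IOVA_BASE, FOO_MSI_IOVA_LENGTH,
    					 prot, IOMMU_RESV_SW_MSI);
    	if (!region)
    		return;
    	list_add_tail(&region->list, head);

    	/* Add generic reservations such as PCI host bridge windows. */
    	iommu_dma_get_resv_regions(dev, head);
    }

    static void foo_put_resv_regions(struct device *dev, struct list_head *head)
    {
    	struct iommu_resv_region *entry, *next;

    	list_for_each_entry_safe(entry, next, head, list)
    		kfree(entry);
    }

    static struct iommu_ops foo_iommu_ops = {
    	/* ... other callbacks ... */
    	.get_resv_regions	= foo_get_resv_regions,
    	.put_resv_regions	= foo_put_resv_regions,
    };

With the iommu.c hunk applied, the merged per-group result is visible to userspace via /sys/kernel/iommu_groups/<n>/reserved_regions.
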
1 From f1874c71c855bd8ca8478a622053276f2c61eeca Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Wed, 27 Sep 2017 10:33:26 +0800
4 Subject: [PATCH] iommu: support layerscape
5
6 This is an integrated patch for Layerscape SMMU support.
7
8 Signed-off-by: Eric Auger <eric.auger@redhat.com>
9 Signed-off-by: Robin Murphy <robin.murphy@arm.com>
10 Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
11 Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
12 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
13 ---
14 drivers/iommu/amd_iommu.c | 56 ++++++----
15 drivers/iommu/arm-smmu-v3.c | 35 ++++++-
16 drivers/iommu/arm-smmu.c | 74 ++++++++++---
17 drivers/iommu/dma-iommu.c | 242 ++++++++++++++++++++++++++++++++++++-------
18 drivers/iommu/intel-iommu.c | 92 ++++++++++++----
19 drivers/iommu/iommu.c | 191 ++++++++++++++++++++++++++++++++--
20 drivers/iommu/mtk_iommu.c | 2 +
21 drivers/iommu/mtk_iommu_v1.c | 2 +
22 include/linux/dma-iommu.h | 11 ++
23 include/linux/iommu.h | 55 +++++++---
24 10 files changed, 645 insertions(+), 115 deletions(-)
25
26 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
27 index c380b7e8..93199931 100644
28 --- a/drivers/iommu/amd_iommu.c
29 +++ b/drivers/iommu/amd_iommu.c
30 @@ -373,6 +373,8 @@ static struct iommu_group *acpihid_device_group(struct device *dev)
31
32 if (!entry->group)
33 entry->group = generic_device_group(dev);
34 + else
35 + iommu_group_ref_get(entry->group);
36
37 return entry->group;
38 }
39 @@ -3159,9 +3161,10 @@ static bool amd_iommu_capable(enum iommu_cap cap)
40 return false;
41 }
42
43 -static void amd_iommu_get_dm_regions(struct device *dev,
44 - struct list_head *head)
45 +static void amd_iommu_get_resv_regions(struct device *dev,
46 + struct list_head *head)
47 {
48 + struct iommu_resv_region *region;
49 struct unity_map_entry *entry;
50 int devid;
51
52 @@ -3170,41 +3173,56 @@ static void amd_iommu_get_dm_regions(struct device *dev,
53 return;
54
55 list_for_each_entry(entry, &amd_iommu_unity_map, list) {
56 - struct iommu_dm_region *region;
57 + size_t length;
58 + int prot = 0;
59
60 if (devid < entry->devid_start || devid > entry->devid_end)
61 continue;
62
63 - region = kzalloc(sizeof(*region), GFP_KERNEL);
64 + length = entry->address_end - entry->address_start;
65 + if (entry->prot & IOMMU_PROT_IR)
66 + prot |= IOMMU_READ;
67 + if (entry->prot & IOMMU_PROT_IW)
68 + prot |= IOMMU_WRITE;
69 +
70 + region = iommu_alloc_resv_region(entry->address_start,
71 + length, prot,
72 + IOMMU_RESV_DIRECT);
73 if (!region) {
74 pr_err("Out of memory allocating dm-regions for %s\n",
75 dev_name(dev));
76 return;
77 }
78 -
79 - region->start = entry->address_start;
80 - region->length = entry->address_end - entry->address_start;
81 - if (entry->prot & IOMMU_PROT_IR)
82 - region->prot |= IOMMU_READ;
83 - if (entry->prot & IOMMU_PROT_IW)
84 - region->prot |= IOMMU_WRITE;
85 -
86 list_add_tail(&region->list, head);
87 }
88 +
89 + region = iommu_alloc_resv_region(MSI_RANGE_START,
90 + MSI_RANGE_END - MSI_RANGE_START + 1,
91 + 0, IOMMU_RESV_MSI);
92 + if (!region)
93 + return;
94 + list_add_tail(&region->list, head);
95 +
96 + region = iommu_alloc_resv_region(HT_RANGE_START,
97 + HT_RANGE_END - HT_RANGE_START + 1,
98 + 0, IOMMU_RESV_RESERVED);
99 + if (!region)
100 + return;
101 + list_add_tail(&region->list, head);
102 }
103
104 -static void amd_iommu_put_dm_regions(struct device *dev,
105 +static void amd_iommu_put_resv_regions(struct device *dev,
106 struct list_head *head)
107 {
108 - struct iommu_dm_region *entry, *next;
109 + struct iommu_resv_region *entry, *next;
110
111 list_for_each_entry_safe(entry, next, head, list)
112 kfree(entry);
113 }
114
115 -static void amd_iommu_apply_dm_region(struct device *dev,
116 +static void amd_iommu_apply_resv_region(struct device *dev,
117 struct iommu_domain *domain,
118 - struct iommu_dm_region *region)
119 + struct iommu_resv_region *region)
120 {
121 struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
122 unsigned long start, end;
123 @@ -3228,9 +3246,9 @@ static const struct iommu_ops amd_iommu_ops = {
124 .add_device = amd_iommu_add_device,
125 .remove_device = amd_iommu_remove_device,
126 .device_group = amd_iommu_device_group,
127 - .get_dm_regions = amd_iommu_get_dm_regions,
128 - .put_dm_regions = amd_iommu_put_dm_regions,
129 - .apply_dm_region = amd_iommu_apply_dm_region,
130 + .get_resv_regions = amd_iommu_get_resv_regions,
131 + .put_resv_regions = amd_iommu_put_resv_regions,
132 + .apply_resv_region = amd_iommu_apply_resv_region,
133 .pgsize_bitmap = AMD_IOMMU_PGSIZES,
134 };
135
136 diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
137 index e6f9b2d7..e3ed8dc5 100644
138 --- a/drivers/iommu/arm-smmu-v3.c
139 +++ b/drivers/iommu/arm-smmu-v3.c
140 @@ -410,6 +410,9 @@
141 /* High-level queue structures */
142 #define ARM_SMMU_POLL_TIMEOUT_US 100
143
144 +#define MSI_IOVA_BASE 0x8000000
145 +#define MSI_IOVA_LENGTH 0x100000
146 +
147 static bool disable_bypass;
148 module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
149 MODULE_PARM_DESC(disable_bypass,
150 @@ -1370,8 +1373,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
151 switch (cap) {
152 case IOMMU_CAP_CACHE_COHERENCY:
153 return true;
154 - case IOMMU_CAP_INTR_REMAP:
155 - return true; /* MSIs are just memory writes */
156 case IOMMU_CAP_NOEXEC:
157 return true;
158 default:
159 @@ -1709,6 +1710,9 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
160 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
161 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
162
163 + if (domain->type == IOMMU_DOMAIN_IDENTITY)
164 + return iova;
165 +
166 if (!ops)
167 return 0;
168
169 @@ -1880,6 +1884,31 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
170 return iommu_fwspec_add_ids(dev, args->args, 1);
171 }
172
173 +static void arm_smmu_get_resv_regions(struct device *dev,
174 + struct list_head *head)
175 +{
176 + struct iommu_resv_region *region;
177 + int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
178 +
179 + region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
180 + prot, IOMMU_RESV_SW_MSI);
181 + if (!region)
182 + return;
183 +
184 + list_add_tail(&region->list, head);
185 +
186 + iommu_dma_get_resv_regions(dev, head);
187 +}
188 +
189 +static void arm_smmu_put_resv_regions(struct device *dev,
190 + struct list_head *head)
191 +{
192 + struct iommu_resv_region *entry, *next;
193 +
194 + list_for_each_entry_safe(entry, next, head, list)
195 + kfree(entry);
196 +}
197 +
198 static struct iommu_ops arm_smmu_ops = {
199 .capable = arm_smmu_capable,
200 .domain_alloc = arm_smmu_domain_alloc,
201 @@ -1895,6 +1924,8 @@ static struct iommu_ops arm_smmu_ops = {
202 .domain_get_attr = arm_smmu_domain_get_attr,
203 .domain_set_attr = arm_smmu_domain_set_attr,
204 .of_xlate = arm_smmu_of_xlate,
205 + .get_resv_regions = arm_smmu_get_resv_regions,
206 + .put_resv_regions = arm_smmu_put_resv_regions,
207 .pgsize_bitmap = -1UL, /* Restricted during device attach */
208 };
209
210 diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
211 index 8f728144..df18dac3 100644
212 --- a/drivers/iommu/arm-smmu.c
213 +++ b/drivers/iommu/arm-smmu.c
214 @@ -49,6 +49,7 @@
215 #include <linux/spinlock.h>
216
217 #include <linux/amba/bus.h>
218 +#include "../staging/fsl-mc/include/mc-bus.h"
219
220 #include "io-pgtable.h"
221
222 @@ -247,6 +248,7 @@ enum arm_smmu_s2cr_privcfg {
223 #define ARM_MMU500_ACTLR_CPRE (1 << 1)
224
225 #define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
226 +#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
227
228 #define CB_PAR_F (1 << 0)
229
230 @@ -278,6 +280,9 @@ enum arm_smmu_s2cr_privcfg {
231
232 #define FSYNR0_WNR (1 << 4)
233
234 +#define MSI_IOVA_BASE 0x8000000
235 +#define MSI_IOVA_LENGTH 0x100000
236 +
237 static int force_stage;
238 module_param(force_stage, int, S_IRUGO);
239 MODULE_PARM_DESC(force_stage,
240 @@ -1343,6 +1348,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
241 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
242 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
243
244 + if (domain->type == IOMMU_DOMAIN_IDENTITY)
245 + return iova;
246 +
247 if (!ops)
248 return 0;
249
250 @@ -1368,8 +1376,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
251 * requests.
252 */
253 return true;
254 - case IOMMU_CAP_INTR_REMAP:
255 - return true; /* MSIs are just memory writes */
256 case IOMMU_CAP_NOEXEC:
257 return true;
258 default:
259 @@ -1478,10 +1484,12 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
260 }
261
262 if (group)
263 - return group;
264 + return iommu_group_ref_get(group);
265
266 if (dev_is_pci(dev))
267 group = pci_device_group(dev);
268 + else if (dev_is_fsl_mc(dev))
269 + group = fsl_mc_device_group(dev);
270 else
271 group = generic_device_group(dev);
272
273 @@ -1534,17 +1542,44 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
274
275 static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
276 {
277 - u32 fwid = 0;
278 + u32 mask, fwid = 0;
279
280 if (args->args_count > 0)
281 fwid |= (u16)args->args[0];
282
283 if (args->args_count > 1)
284 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
285 + else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
286 + fwid |= (u16)mask << SMR_MASK_SHIFT;
287
288 return iommu_fwspec_add_ids(dev, &fwid, 1);
289 }
290
291 +static void arm_smmu_get_resv_regions(struct device *dev,
292 + struct list_head *head)
293 +{
294 + struct iommu_resv_region *region;
295 + int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
296 +
297 + region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
298 + prot, IOMMU_RESV_SW_MSI);
299 + if (!region)
300 + return;
301 +
302 + list_add_tail(&region->list, head);
303 +
304 + iommu_dma_get_resv_regions(dev, head);
305 +}
306 +
307 +static void arm_smmu_put_resv_regions(struct device *dev,
308 + struct list_head *head)
309 +{
310 + struct iommu_resv_region *entry, *next;
311 +
312 + list_for_each_entry_safe(entry, next, head, list)
313 + kfree(entry);
314 +}
315 +
316 static struct iommu_ops arm_smmu_ops = {
317 .capable = arm_smmu_capable,
318 .domain_alloc = arm_smmu_domain_alloc,
319 @@ -1560,6 +1595,8 @@ static struct iommu_ops arm_smmu_ops = {
320 .domain_get_attr = arm_smmu_domain_get_attr,
321 .domain_set_attr = arm_smmu_domain_set_attr,
322 .of_xlate = arm_smmu_of_xlate,
323 + .get_resv_regions = arm_smmu_get_resv_regions,
324 + .put_resv_regions = arm_smmu_put_resv_regions,
325 .pgsize_bitmap = -1UL, /* Restricted during device attach */
326 };
327
328 @@ -1581,16 +1618,22 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
329 for (i = 0; i < smmu->num_mapping_groups; ++i)
330 arm_smmu_write_sme(smmu, i);
331
332 - /*
333 - * Before clearing ARM_MMU500_ACTLR_CPRE, need to
334 - * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
335 - * bit is only present in MMU-500r2 onwards.
336 - */
337 - reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
338 - major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
339 - if ((smmu->model == ARM_MMU500) && (major >= 2)) {
340 + if (smmu->model == ARM_MMU500) {
341 + /*
342 + * Before clearing ARM_MMU500_ACTLR_CPRE, need to
343 + * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
344 + * bit is only present in MMU-500r2 onwards.
345 + */
346 + reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
347 + major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
348 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
349 - reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
350 + if (major >= 2)
351 + reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
352 + /*
353 + * Allow unmatched Stream IDs to allocate bypass
354 + * TLB entries for reduced latency.
355 + */
356 + reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
357 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
358 }
359
360 @@ -2024,6 +2067,11 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
361 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
362 }
363 #endif
364 +#ifdef CONFIG_FSL_MC_BUS
365 + if (!iommu_present(&fsl_mc_bus_type))
366 + bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
367 +#endif
368 +
369 return 0;
370 }
371
372 diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
373 index 1520e7f0..3ade4153 100644
374 --- a/drivers/iommu/dma-iommu.c
375 +++ b/drivers/iommu/dma-iommu.c
376 @@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
377 phys_addr_t phys;
378 };
379
380 +enum iommu_dma_cookie_type {
381 + IOMMU_DMA_IOVA_COOKIE,
382 + IOMMU_DMA_MSI_COOKIE,
383 +};
384 +
385 struct iommu_dma_cookie {
386 - struct iova_domain iovad;
387 - struct list_head msi_page_list;
388 - spinlock_t msi_lock;
389 + enum iommu_dma_cookie_type type;
390 + union {
391 + /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
392 + struct iova_domain iovad;
393 + /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
394 + dma_addr_t msi_iova;
395 + };
396 + struct list_head msi_page_list;
397 + spinlock_t msi_lock;
398 };
399
400 +static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
401 +{
402 + if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
403 + return cookie->iovad.granule;
404 + return PAGE_SIZE;
405 +}
406 +
407 static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
408 {
409 - return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
410 + struct iommu_dma_cookie *cookie = domain->iova_cookie;
411 +
412 + if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
413 + return &cookie->iovad;
414 + return NULL;
415 +}
416 +
417 +static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
418 +{
419 + struct iommu_dma_cookie *cookie;
420 +
421 + cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
422 + if (cookie) {
423 + spin_lock_init(&cookie->msi_lock);
424 + INIT_LIST_HEAD(&cookie->msi_page_list);
425 + cookie->type = type;
426 + }
427 + return cookie;
428 }
429
430 int iommu_dma_init(void)
431 @@ -61,26 +96,54 @@ int iommu_dma_init(void)
432 * callback when domain->type == IOMMU_DOMAIN_DMA.
433 */
434 int iommu_get_dma_cookie(struct iommu_domain *domain)
435 +{
436 + if (domain->iova_cookie)
437 + return -EEXIST;
438 +
439 + domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
440 + if (!domain->iova_cookie)
441 + return -ENOMEM;
442 +
443 + return 0;
444 +}
445 +EXPORT_SYMBOL(iommu_get_dma_cookie);
446 +
447 +/**
448 + * iommu_get_msi_cookie - Acquire just MSI remapping resources
449 + * @domain: IOMMU domain to prepare
450 + * @base: Start address of IOVA region for MSI mappings
451 + *
452 + * Users who manage their own IOVA allocation and do not want DMA API support,
453 + * but would still like to take advantage of automatic MSI remapping, can use
454 + * this to initialise their own domain appropriately. Users should reserve a
455 + * contiguous IOVA region, starting at @base, large enough to accommodate the
456 + * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
457 + * used by the devices attached to @domain.
458 + */
459 +int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
460 {
461 struct iommu_dma_cookie *cookie;
462
463 + if (domain->type != IOMMU_DOMAIN_UNMANAGED)
464 + return -EINVAL;
465 +
466 if (domain->iova_cookie)
467 return -EEXIST;
468
469 - cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
470 + cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
471 if (!cookie)
472 return -ENOMEM;
473
474 - spin_lock_init(&cookie->msi_lock);
475 - INIT_LIST_HEAD(&cookie->msi_page_list);
476 + cookie->msi_iova = base;
477 domain->iova_cookie = cookie;
478 return 0;
479 }
480 -EXPORT_SYMBOL(iommu_get_dma_cookie);
481 +EXPORT_SYMBOL(iommu_get_msi_cookie);
482
483 /**
484 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
485 - * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
486 + * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
487 + * iommu_get_msi_cookie()
488 *
489 * IOMMU drivers should normally call this from their domain_free callback.
490 */
491 @@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
492 if (!cookie)
493 return;
494
495 - if (cookie->iovad.granule)
496 + if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
497 put_iova_domain(&cookie->iovad);
498
499 list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
500 @@ -104,21 +167,99 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
501 }
502 EXPORT_SYMBOL(iommu_put_dma_cookie);
503
504 -static void iova_reserve_pci_windows(struct pci_dev *dev,
505 - struct iova_domain *iovad)
506 +/**
507 + * iommu_dma_get_resv_regions - Reserved region driver helper
508 + * @dev: Device from iommu_get_resv_regions()
509 + * @list: Reserved region list from iommu_get_resv_regions()
510 + *
511 + * IOMMU drivers can use this to implement their .get_resv_regions callback
512 + * for general non-IOMMU-specific reservations. Currently, this covers host
513 + * bridge windows for PCI devices.
514 + */
515 +void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
516 {
517 - struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
518 + struct pci_host_bridge *bridge;
519 struct resource_entry *window;
520 - unsigned long lo, hi;
521
522 + if (!dev_is_pci(dev))
523 + return;
524 +
525 + bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
526 resource_list_for_each_entry(window, &bridge->windows) {
527 + struct iommu_resv_region *region;
528 + phys_addr_t start;
529 + size_t length;
530 +
531 if (resource_type(window->res) != IORESOURCE_MEM)
532 continue;
533
534 - lo = iova_pfn(iovad, window->res->start - window->offset);
535 - hi = iova_pfn(iovad, window->res->end - window->offset);
536 + start = window->res->start - window->offset;
537 + length = window->res->end - window->res->start + 1;
538 + region = iommu_alloc_resv_region(start, length, 0,
539 + IOMMU_RESV_RESERVED);
540 + if (!region)
541 + return;
542 +
543 + list_add_tail(&region->list, list);
544 + }
545 +}
546 +EXPORT_SYMBOL(iommu_dma_get_resv_regions);
547 +
548 +static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
549 + phys_addr_t start, phys_addr_t end)
550 +{
551 + struct iova_domain *iovad = &cookie->iovad;
552 + struct iommu_dma_msi_page *msi_page;
553 + int i, num_pages;
554 +
555 + start -= iova_offset(iovad, start);
556 + num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
557 +
558 + msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
559 + if (!msi_page)
560 + return -ENOMEM;
561 +
562 + for (i = 0; i < num_pages; i++) {
563 + msi_page[i].phys = start;
564 + msi_page[i].iova = start;
565 + INIT_LIST_HEAD(&msi_page[i].list);
566 + list_add(&msi_page[i].list, &cookie->msi_page_list);
567 + start += iovad->granule;
568 + }
569 +
570 + return 0;
571 +}
572 +
573 +static int iova_reserve_iommu_regions(struct device *dev,
574 + struct iommu_domain *domain)
575 +{
576 + struct iommu_dma_cookie *cookie = domain->iova_cookie;
577 + struct iova_domain *iovad = &cookie->iovad;
578 + struct iommu_resv_region *region;
579 + LIST_HEAD(resv_regions);
580 + int ret = 0;
581 +
582 + iommu_get_resv_regions(dev, &resv_regions);
583 + list_for_each_entry(region, &resv_regions, list) {
584 + unsigned long lo, hi;
585 +
586 + /* We ARE the software that manages these! */
587 + if (region->type == IOMMU_RESV_SW_MSI)
588 + continue;
589 +
590 + lo = iova_pfn(iovad, region->start);
591 + hi = iova_pfn(iovad, region->start + region->length - 1);
592 reserve_iova(iovad, lo, hi);
593 +
594 + if (region->type == IOMMU_RESV_MSI)
595 + ret = cookie_init_hw_msi_region(cookie, region->start,
596 + region->start + region->length);
597 + if (ret)
598 + break;
599 }
600 + iommu_put_resv_regions(dev, &resv_regions);
601 +
602 + return ret;
603 }
604
605 /**
606 @@ -136,11 +277,12 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
607 int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
608 u64 size, struct device *dev)
609 {
610 - struct iova_domain *iovad = cookie_iovad(domain);
611 + struct iommu_dma_cookie *cookie = domain->iova_cookie;
612 + struct iova_domain *iovad = &cookie->iovad;
613 unsigned long order, base_pfn, end_pfn;
614
615 - if (!iovad)
616 - return -ENODEV;
617 + if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
618 + return -EINVAL;
619
620 /* Use the smallest supported page size for IOVA granularity */
621 order = __ffs(domain->pgsize_bitmap);
622 @@ -160,22 +302,37 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
623 end_pfn = min_t(unsigned long, end_pfn,
624 domain->geometry.aperture_end >> order);
625 }
626 + /*
627 + * PCI devices may have larger DMA masks, but still prefer allocating
628 + * within a 32-bit mask to avoid DAC addressing. Such limitations don't
629 + * apply to the typical platform device, so for those we may as well
630 + * leave the cache limit at the top of their range to save an rb_last()
631 + * traversal on every allocation.
632 + */
633 + if (dev && dev_is_pci(dev))
634 + end_pfn &= DMA_BIT_MASK(32) >> order;
635
636 - /* All we can safely do with an existing domain is enlarge it */
637 + /* start_pfn is always nonzero for an already-initialised domain */
638 if (iovad->start_pfn) {
639 if (1UL << order != iovad->granule ||
640 - base_pfn != iovad->start_pfn ||
641 - end_pfn < iovad->dma_32bit_pfn) {
642 + base_pfn != iovad->start_pfn) {
643 pr_warn("Incompatible range for DMA domain\n");
644 return -EFAULT;
645 }
646 - iovad->dma_32bit_pfn = end_pfn;
647 - } else {
648 - init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
649 - if (dev && dev_is_pci(dev))
650 - iova_reserve_pci_windows(to_pci_dev(dev), iovad);
651 + /*
652 + * If we have devices with different DMA masks, move the free
653 + * area cache limit down for the benefit of the smaller one.
654 + */
655 + iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
656 +
657 + return 0;
658 }
659 - return 0;
660 +
661 + init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
662 + if (!dev)
663 + return 0;
664 +
665 + return iova_reserve_iommu_regions(dev, domain);
666 }
667 EXPORT_SYMBOL(iommu_dma_init_domain);
668
669 @@ -643,11 +800,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
670 {
671 struct iommu_dma_cookie *cookie = domain->iova_cookie;
672 struct iommu_dma_msi_page *msi_page;
673 - struct iova_domain *iovad = &cookie->iovad;
674 + struct iova_domain *iovad = cookie_iovad(domain);
675 struct iova *iova;
676 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
677 + size_t size = cookie_msi_granule(cookie);
678
679 - msi_addr &= ~(phys_addr_t)iova_mask(iovad);
680 + msi_addr &= ~(phys_addr_t)(size - 1);
681 list_for_each_entry(msi_page, &cookie->msi_page_list, list)
682 if (msi_page->phys == msi_addr)
683 return msi_page;
684 @@ -656,13 +814,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
685 if (!msi_page)
686 return NULL;
687
688 - iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
689 - if (!iova)
690 - goto out_free_page;
691 -
692 msi_page->phys = msi_addr;
693 - msi_page->iova = iova_dma_addr(iovad, iova);
694 - if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
695 + if (iovad) {
696 + iova = __alloc_iova(domain, size, dma_get_mask(dev));
697 + if (!iova)
698 + goto out_free_page;
699 + msi_page->iova = iova_dma_addr(iovad, iova);
700 + } else {
701 + msi_page->iova = cookie->msi_iova;
702 + cookie->msi_iova += size;
703 + }
704 +
705 + if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
706 goto out_free_iova;
707
708 INIT_LIST_HEAD(&msi_page->list);
709 @@ -670,7 +833,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
710 return msi_page;
711
712 out_free_iova:
713 - __free_iova(iovad, iova);
714 + if (iovad)
715 + __free_iova(iovad, iova);
716 + else
717 + cookie->msi_iova -= size;
718 out_free_page:
719 kfree(msi_page);
720 return NULL;
721 @@ -711,7 +877,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
722 msg->data = ~0U;
723 } else {
724 msg->address_hi = upper_32_bits(msi_page->iova);
725 - msg->address_lo &= iova_mask(&cookie->iovad);
726 + msg->address_lo &= cookie_msi_granule(cookie) - 1;
727 msg->address_lo += lower_32_bits(msi_page->iova);
728 }
729 }
730 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
731 index 002f8a42..befbfd30 100644
732 --- a/drivers/iommu/intel-iommu.c
733 +++ b/drivers/iommu/intel-iommu.c
734 @@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
735 u64 end_address; /* reserved end address */
736 struct dmar_dev_scope *devices; /* target devices */
737 int devices_cnt; /* target device count */
738 + struct iommu_resv_region *resv; /* reserved region handle */
739 };
740
741 struct dmar_atsr_unit {
742 @@ -4250,27 +4251,40 @@ static inline void init_iommu_pm_ops(void) {}
743 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
744 {
745 struct acpi_dmar_reserved_memory *rmrr;
746 + int prot = DMA_PTE_READ|DMA_PTE_WRITE;
747 struct dmar_rmrr_unit *rmrru;
748 + size_t length;
749
750 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
751 if (!rmrru)
752 - return -ENOMEM;
753 + goto out;
754
755 rmrru->hdr = header;
756 rmrr = (struct acpi_dmar_reserved_memory *)header;
757 rmrru->base_address = rmrr->base_address;
758 rmrru->end_address = rmrr->end_address;
759 +
760 + length = rmrr->end_address - rmrr->base_address + 1;
761 + rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
762 + IOMMU_RESV_DIRECT);
763 + if (!rmrru->resv)
764 + goto free_rmrru;
765 +
766 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
767 ((void *)rmrr) + rmrr->header.length,
768 &rmrru->devices_cnt);
769 - if (rmrru->devices_cnt && rmrru->devices == NULL) {
770 - kfree(rmrru);
771 - return -ENOMEM;
772 - }
773 + if (rmrru->devices_cnt && rmrru->devices == NULL)
774 + goto free_all;
775
776 list_add(&rmrru->list, &dmar_rmrr_units);
777
778 return 0;
779 +free_all:
780 + kfree(rmrru->resv);
781 +free_rmrru:
782 + kfree(rmrru);
783 +out:
784 + return -ENOMEM;
785 }
786
787 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
788 @@ -4484,6 +4498,7 @@ static void intel_iommu_free_dmars(void)
789 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
790 list_del(&rmrru->list);
791 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
792 + kfree(rmrru->resv);
793 kfree(rmrru);
794 }
795
796 @@ -5219,6 +5234,45 @@ static void intel_iommu_remove_device(struct device *dev)
797 iommu_device_unlink(iommu->iommu_dev, dev);
798 }
799
800 +static void intel_iommu_get_resv_regions(struct device *device,
801 + struct list_head *head)
802 +{
803 + struct iommu_resv_region *reg;
804 + struct dmar_rmrr_unit *rmrr;
805 + struct device *i_dev;
806 + int i;
807 +
808 + rcu_read_lock();
809 + for_each_rmrr_units(rmrr) {
810 + for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
811 + i, i_dev) {
812 + if (i_dev != device)
813 + continue;
814 +
815 + list_add_tail(&rmrr->resv->list, head);
816 + }
817 + }
818 + rcu_read_unlock();
819 +
820 + reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
821 + IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
822 + 0, IOMMU_RESV_MSI);
823 + if (!reg)
824 + return;
825 + list_add_tail(&reg->list, head);
826 +}
827 +
828 +static void intel_iommu_put_resv_regions(struct device *dev,
829 + struct list_head *head)
830 +{
831 + struct iommu_resv_region *entry, *next;
832 +
833 + list_for_each_entry_safe(entry, next, head, list) {
834 + if (entry->type == IOMMU_RESV_RESERVED)
835 + kfree(entry);
836 + }
837 +}
838 +
839 #ifdef CONFIG_INTEL_IOMMU_SVM
840 #define MAX_NR_PASID_BITS (20)
841 static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
842 @@ -5349,19 +5403,21 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
843 #endif /* CONFIG_INTEL_IOMMU_SVM */
844
845 static const struct iommu_ops intel_iommu_ops = {
846 - .capable = intel_iommu_capable,
847 - .domain_alloc = intel_iommu_domain_alloc,
848 - .domain_free = intel_iommu_domain_free,
849 - .attach_dev = intel_iommu_attach_device,
850 - .detach_dev = intel_iommu_detach_device,
851 - .map = intel_iommu_map,
852 - .unmap = intel_iommu_unmap,
853 - .map_sg = default_iommu_map_sg,
854 - .iova_to_phys = intel_iommu_iova_to_phys,
855 - .add_device = intel_iommu_add_device,
856 - .remove_device = intel_iommu_remove_device,
857 - .device_group = pci_device_group,
858 - .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
859 + .capable = intel_iommu_capable,
860 + .domain_alloc = intel_iommu_domain_alloc,
861 + .domain_free = intel_iommu_domain_free,
862 + .attach_dev = intel_iommu_attach_device,
863 + .detach_dev = intel_iommu_detach_device,
864 + .map = intel_iommu_map,
865 + .unmap = intel_iommu_unmap,
866 + .map_sg = default_iommu_map_sg,
867 + .iova_to_phys = intel_iommu_iova_to_phys,
868 + .add_device = intel_iommu_add_device,
869 + .remove_device = intel_iommu_remove_device,
870 + .get_resv_regions = intel_iommu_get_resv_regions,
871 + .put_resv_regions = intel_iommu_put_resv_regions,
872 + .device_group = pci_device_group,
873 + .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
874 };
875
876 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
877 diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
878 index 87d3060f..372fc463 100644
879 --- a/drivers/iommu/iommu.c
880 +++ b/drivers/iommu/iommu.c
881 @@ -68,6 +68,13 @@ struct iommu_group_attribute {
882 const char *buf, size_t count);
883 };
884
885 +static const char * const iommu_group_resv_type_string[] = {
886 + [IOMMU_RESV_DIRECT] = "direct",
887 + [IOMMU_RESV_RESERVED] = "reserved",
888 + [IOMMU_RESV_MSI] = "msi",
889 + [IOMMU_RESV_SW_MSI] = "msi",
890 +};
891 +
892 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
893 struct iommu_group_attribute iommu_group_attr_##_name = \
894 __ATTR(_name, _mode, _show, _store)
895 @@ -133,8 +140,131 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
896 return sprintf(buf, "%s\n", group->name);
897 }
898
899 +/**
900 + * iommu_insert_resv_region - Insert a new region in the
901 + * list of reserved regions.
902 + * @new: new region to insert
903 + * @regions: list of regions
904 + *
905 + * The new element is sorted by address with respect to the other
906 + * regions of the same type. In case it overlaps with another
907 + * region of the same type, regions are merged. In case it
908 + * overlaps with another region of different type, regions are
909 + * not merged.
910 + */
911 +static int iommu_insert_resv_region(struct iommu_resv_region *new,
912 + struct list_head *regions)
913 +{
914 + struct iommu_resv_region *region;
915 + phys_addr_t start = new->start;
916 + phys_addr_t end = new->start + new->length - 1;
917 + struct list_head *pos = regions->next;
918 +
919 + while (pos != regions) {
920 + struct iommu_resv_region *entry =
921 + list_entry(pos, struct iommu_resv_region, list);
922 + phys_addr_t a = entry->start;
923 + phys_addr_t b = entry->start + entry->length - 1;
924 + int type = entry->type;
925 +
926 + if (end < a) {
927 + goto insert;
928 + } else if (start > b) {
929 + pos = pos->next;
930 + } else if ((start >= a) && (end <= b)) {
931 + if (new->type == type)
932 + goto done;
933 + else
934 + pos = pos->next;
935 + } else {
936 + if (new->type == type) {
937 + phys_addr_t new_start = min(a, start);
938 + phys_addr_t new_end = max(b, end);
939 +
940 + list_del(&entry->list);
941 + entry->start = new_start;
942 + entry->length = new_end - new_start + 1;
943 + iommu_insert_resv_region(entry, regions);
944 + } else {
945 + pos = pos->next;
946 + }
947 + }
948 + }
949 +insert:
950 + region = iommu_alloc_resv_region(new->start, new->length,
951 + new->prot, new->type);
952 + if (!region)
953 + return -ENOMEM;
954 +
955 + list_add_tail(&region->list, pos);
956 +done:
957 + return 0;
958 +}
959 +
960 +static int
961 +iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
962 + struct list_head *group_resv_regions)
963 +{
964 + struct iommu_resv_region *entry;
965 + int ret;
966 +
967 + list_for_each_entry(entry, dev_resv_regions, list) {
968 + ret = iommu_insert_resv_region(entry, group_resv_regions);
969 + if (ret)
970 + break;
971 + }
972 + return ret;
973 +}
974 +
975 +int iommu_get_group_resv_regions(struct iommu_group *group,
976 + struct list_head *head)
977 +{
978 + struct iommu_device *device;
979 + int ret = 0;
980 +
981 + mutex_lock(&group->mutex);
982 + list_for_each_entry(device, &group->devices, list) {
983 + struct list_head dev_resv_regions;
984 +
985 + INIT_LIST_HEAD(&dev_resv_regions);
986 + iommu_get_resv_regions(device->dev, &dev_resv_regions);
987 + ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
988 + iommu_put_resv_regions(device->dev, &dev_resv_regions);
989 + if (ret)
990 + break;
991 + }
992 + mutex_unlock(&group->mutex);
993 + return ret;
994 +}
995 +EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
996 +
997 +static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
998 + char *buf)
999 +{
1000 + struct iommu_resv_region *region, *next;
1001 + struct list_head group_resv_regions;
1002 + char *str = buf;
1003 +
1004 + INIT_LIST_HEAD(&group_resv_regions);
1005 + iommu_get_group_resv_regions(group, &group_resv_regions);
1006 +
1007 + list_for_each_entry_safe(region, next, &group_resv_regions, list) {
1008 + str += sprintf(str, "0x%016llx 0x%016llx %s\n",
1009 + (long long int)region->start,
1010 + (long long int)(region->start +
1011 + region->length - 1),
1012 + iommu_group_resv_type_string[region->type]);
1013 + kfree(region);
1014 + }
1015 +
1016 + return (str - buf);
1017 +}
1018 +
1019 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
1020
1021 +static IOMMU_GROUP_ATTR(reserved_regions, 0444,
1022 + iommu_group_show_resv_regions, NULL);
1023 +
1024 static void iommu_group_release(struct kobject *kobj)
1025 {
1026 struct iommu_group *group = to_iommu_group(kobj);
1027 @@ -212,6 +342,11 @@ struct iommu_group *iommu_group_alloc(void)
1028 */
1029 kobject_put(&group->kobj);
1030
1031 + ret = iommu_group_create_file(group,
1032 + &iommu_group_attr_reserved_regions);
1033 + if (ret)
1034 + return ERR_PTR(ret);
1035 +
1036 pr_debug("Allocated group %d\n", group->id);
1037
1038 return group;
1039 @@ -318,7 +453,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
1040 struct device *dev)
1041 {
1042 struct iommu_domain *domain = group->default_domain;
1043 - struct iommu_dm_region *entry;
1044 + struct iommu_resv_region *entry;
1045 struct list_head mappings;
1046 unsigned long pg_size;
1047 int ret = 0;
1048 @@ -331,18 +466,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
1049 pg_size = 1UL << __ffs(domain->pgsize_bitmap);
1050 INIT_LIST_HEAD(&mappings);
1051
1052 - iommu_get_dm_regions(dev, &mappings);
1053 + iommu_get_resv_regions(dev, &mappings);
1054
1055 /* We need to consider overlapping regions for different devices */
1056 list_for_each_entry(entry, &mappings, list) {
1057 dma_addr_t start, end, addr;
1058
1059 - if (domain->ops->apply_dm_region)
1060 - domain->ops->apply_dm_region(dev, domain, entry);
1061 + if (domain->ops->apply_resv_region)
1062 + domain->ops->apply_resv_region(dev, domain, entry);
1063
1064 start = ALIGN(entry->start, pg_size);
1065 end = ALIGN(entry->start + entry->length, pg_size);
1066
1067 + if (entry->type != IOMMU_RESV_DIRECT)
1068 + continue;
1069 +
1070 for (addr = start; addr < end; addr += pg_size) {
1071 phys_addr_t phys_addr;
1072
1073 @@ -358,7 +496,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
1074 }
1075
1076 out:
1077 - iommu_put_dm_regions(dev, &mappings);
1078 + iommu_put_resv_regions(dev, &mappings);
1079
1080 return ret;
1081 }
1082 @@ -562,6 +700,19 @@ struct iommu_group *iommu_group_get(struct device *dev)
1083 }
1084 EXPORT_SYMBOL_GPL(iommu_group_get);
1085
1086 +/**
1087 + * iommu_group_ref_get - Increment reference on a group
1088 + * @group: the group to use, must not be NULL
1089 + *
1090 + * This function is called by iommu drivers to take additional references on an
1091 + * existing group. Returns the given group for convenience.
1092 + */
1093 +struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
1094 +{
1095 + kobject_get(group->devices_kobj);
1096 + return group;
1097 +}
1098 +
1099 /**
1100 * iommu_group_put - Decrement group reference
1101 * @group: the group to use
1102 @@ -1557,20 +1708,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
1103 }
1104 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
1105
1106 -void iommu_get_dm_regions(struct device *dev, struct list_head *list)
1107 +void iommu_get_resv_regions(struct device *dev, struct list_head *list)
1108 {
1109 const struct iommu_ops *ops = dev->bus->iommu_ops;
1110
1111 - if (ops && ops->get_dm_regions)
1112 - ops->get_dm_regions(dev, list);
1113 + if (ops && ops->get_resv_regions)
1114 + ops->get_resv_regions(dev, list);
1115 }
1116
1117 -void iommu_put_dm_regions(struct device *dev, struct list_head *list)
1118 +void iommu_put_resv_regions(struct device *dev, struct list_head *list)
1119 {
1120 const struct iommu_ops *ops = dev->bus->iommu_ops;
1121
1122 - if (ops && ops->put_dm_regions)
1123 - ops->put_dm_regions(dev, list);
1124 + if (ops && ops->put_resv_regions)
1125 + ops->put_resv_regions(dev, list);
1126 +}
1127 +
1128 +struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
1129 + size_t length, int prot,
1130 + enum iommu_resv_type type)
1131 +{
1132 + struct iommu_resv_region *region;
1133 +
1134 + region = kzalloc(sizeof(*region), GFP_KERNEL);
1135 + if (!region)
1136 + return NULL;
1137 +
1138 + INIT_LIST_HEAD(&region->list);
1139 + region->start = start;
1140 + region->length = length;
1141 + region->prot = prot;
1142 + region->type = type;
1143 + return region;
1144 }
1145
1146 /* Request that a device is direct mapped by the IOMMU */
1147 diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
1148 index b12c12d7..9799daea 100644
1149 --- a/drivers/iommu/mtk_iommu.c
1150 +++ b/drivers/iommu/mtk_iommu.c
1151 @@ -410,6 +410,8 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
1152 data->m4u_group = iommu_group_alloc();
1153 if (IS_ERR(data->m4u_group))
1154 dev_err(dev, "Failed to allocate M4U IOMMU group\n");
1155 + } else {
1156 + iommu_group_ref_get(data->m4u_group);
1157 }
1158 return data->m4u_group;
1159 }
1160 diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
1161 index b8aeb076..c7063e9d 100644
1162 --- a/drivers/iommu/mtk_iommu_v1.c
1163 +++ b/drivers/iommu/mtk_iommu_v1.c
1164 @@ -502,6 +502,8 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
1165 data->m4u_group = iommu_group_alloc();
1166 if (IS_ERR(data->m4u_group))
1167 dev_err(dev, "Failed to allocate M4U IOMMU group\n");
1168 + } else {
1169 + iommu_group_ref_get(data->m4u_group);
1170 }
1171 return data->m4u_group;
1172 }
1173 diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
1174 index 32c58906..36d3206d 100644
1175 --- a/include/linux/dma-iommu.h
1176 +++ b/include/linux/dma-iommu.h
1177 @@ -27,6 +27,7 @@ int iommu_dma_init(void);
1178
1179 /* Domain management interface for IOMMU drivers */
1180 int iommu_get_dma_cookie(struct iommu_domain *domain);
1181 +int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
1182 void iommu_put_dma_cookie(struct iommu_domain *domain);
1183
1184 /* Setup call for arch DMA mapping code */
1185 @@ -66,6 +67,7 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
1186
1187 /* The DMA API isn't _quite_ the whole story, though... */
1188 void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
1189 +void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
1190
1191 #else
1192
1193 @@ -82,6 +84,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
1194 return -ENODEV;
1195 }
1196
1197 +static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
1198 +{
1199 + return -ENODEV;
1200 +}
1201 +
1202 static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
1203 {
1204 }
1205 @@ -90,6 +97,10 @@ static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
1206 {
1207 }
1208
1209 +static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
1210 +{
1211 +}
1212 +
1213 #endif /* CONFIG_IOMMU_DMA */
1214 #endif /* __KERNEL__ */
1215 #endif /* __DMA_IOMMU_H */
1216 diff --git a/include/linux/iommu.h b/include/linux/iommu.h
1217 index 436dc213..188599f5 100644
1218 --- a/include/linux/iommu.h
1219 +++ b/include/linux/iommu.h
1220 @@ -117,18 +117,32 @@ enum iommu_attr {
1221 DOMAIN_ATTR_MAX,
1222 };
1223
1224 +/* These are the possible reserved region types */
1225 +enum iommu_resv_type {
1226 + /* Memory regions which must be mapped 1:1 at all times */
1227 + IOMMU_RESV_DIRECT,
1228 + /* Arbitrary "never map this or give it to a device" address ranges */
1229 + IOMMU_RESV_RESERVED,
1230 + /* Hardware MSI region (untranslated) */
1231 + IOMMU_RESV_MSI,
1232 + /* Software-managed MSI translation window */
1233 + IOMMU_RESV_SW_MSI,
1234 +};
1235 +
1236 /**
1237 - * struct iommu_dm_region - descriptor for a direct mapped memory region
1238 + * struct iommu_resv_region - descriptor for a reserved memory region
1239 * @list: Linked list pointers
1240 * @start: System physical start address of the region
1241 * @length: Length of the region in bytes
1242 * @prot: IOMMU Protection flags (READ/WRITE/...)
1243 + * @type: Type of the reserved region
1244 */
1245 -struct iommu_dm_region {
1246 +struct iommu_resv_region {
1247 struct list_head list;
1248 phys_addr_t start;
1249 size_t length;
1250 int prot;
1251 + enum iommu_resv_type type;
1252 };
1253
1254 #ifdef CONFIG_IOMMU_API
1255 @@ -150,9 +164,9 @@ struct iommu_dm_region {
1256 * @device_group: find iommu group for a particular device
1257 * @domain_get_attr: Query domain attributes
1258 * @domain_set_attr: Change domain attributes
1259 - * @get_dm_regions: Request list of direct mapping requirements for a device
1260 - * @put_dm_regions: Free list of direct mapping requirements for a device
1261 - * @apply_dm_region: Temporary helper call-back for iova reserved ranges
1262 + * @get_resv_regions: Request list of reserved regions for a device
1263 + * @put_resv_regions: Free list of reserved regions for a device
1264 + * @apply_resv_region: Temporary helper call-back for iova reserved ranges
1265 * @domain_window_enable: Configure and enable a particular window for a domain
1266 * @domain_window_disable: Disable a particular window for a domain
1267 * @domain_set_windows: Set the number of windows for a domain
1268 @@ -184,11 +198,12 @@ struct iommu_ops {
1269 int (*domain_set_attr)(struct iommu_domain *domain,
1270 enum iommu_attr attr, void *data);
1271
1272 - /* Request/Free a list of direct mapping requirements for a device */
1273 - void (*get_dm_regions)(struct device *dev, struct list_head *list);
1274 - void (*put_dm_regions)(struct device *dev, struct list_head *list);
1275 - void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain,
1276 - struct iommu_dm_region *region);
1277 + /* Request/Free a list of reserved regions for a device */
1278 + void (*get_resv_regions)(struct device *dev, struct list_head *list);
1279 + void (*put_resv_regions)(struct device *dev, struct list_head *list);
1280 + void (*apply_resv_region)(struct device *dev,
1281 + struct iommu_domain *domain,
1282 + struct iommu_resv_region *region);
1283
1284 /* Window handling functions */
1285 int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
1286 @@ -233,9 +248,14 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
1287 extern void iommu_set_fault_handler(struct iommu_domain *domain,
1288 iommu_fault_handler_t handler, void *token);
1289
1290 -extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
1291 -extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
1292 +extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
1293 +extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
1294 extern int iommu_request_dm_for_dev(struct device *dev);
1295 +extern struct iommu_resv_region *
1296 +iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
1297 + enum iommu_resv_type type);
1298 +extern int iommu_get_group_resv_regions(struct iommu_group *group,
1299 + struct list_head *head);
1300
1301 extern int iommu_attach_group(struct iommu_domain *domain,
1302 struct iommu_group *group);
1303 @@ -253,6 +273,7 @@ extern void iommu_group_remove_device(struct device *dev);
1304 extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
1305 int (*fn)(struct device *, void *));
1306 extern struct iommu_group *iommu_group_get(struct device *dev);
1307 +extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
1308 extern void iommu_group_put(struct iommu_group *group);
1309 extern int iommu_group_register_notifier(struct iommu_group *group,
1310 struct notifier_block *nb);
1311 @@ -439,16 +460,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
1312 {
1313 }
1314
1315 -static inline void iommu_get_dm_regions(struct device *dev,
1316 +static inline void iommu_get_resv_regions(struct device *dev,
1317 struct list_head *list)
1318 {
1319 }
1320
1321 -static inline void iommu_put_dm_regions(struct device *dev,
1322 +static inline void iommu_put_resv_regions(struct device *dev,
1323 struct list_head *list)
1324 {
1325 }
1326
1327 +static inline int iommu_get_group_resv_regions(struct iommu_group *group,
1328 + struct list_head *head)
1329 +{
1330 + return -ENODEV;
1331 +}
1332 +
1333 static inline int iommu_request_dm_for_dev(struct device *dev)
1334 {
1335 return -ENODEV;
1336 --
1337 2.14.1
1338