layerscape: refresh patches
[openwrt/openwrt.git] / target/linux/layerscape/patches-4.9/810-iommu-support-layerscape.patch
1 From f1874c71c855bd8ca8478a622053276f2c61eeca Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Wed, 27 Sep 2017 10:33:26 +0800
4 Subject: [PATCH] iommu: support layerscape
5
6 This is an integrated patch for layerscape SMMU support.
7
8 Signed-off-by: Eric Auger <eric.auger@redhat.com>
9 Signed-off-by: Robin Murphy <robin.murphy@arm.com>
10 Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
11 Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
12 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
13 ---
14 drivers/iommu/amd_iommu.c | 56 ++++++----
15 drivers/iommu/arm-smmu-v3.c | 35 ++++++-
16 drivers/iommu/arm-smmu.c | 74 ++++++++++---
17 drivers/iommu/dma-iommu.c | 242 ++++++++++++++++++++++++++++++++++++-------
18 drivers/iommu/intel-iommu.c | 92 ++++++++++++----
19 drivers/iommu/iommu.c | 191 ++++++++++++++++++++++++++++++++--
20 drivers/iommu/mtk_iommu.c | 2 +
21 drivers/iommu/mtk_iommu_v1.c | 2 +
22 include/linux/dma-iommu.h | 11 ++
23 include/linux/iommu.h | 55 +++++++---
24 10 files changed, 645 insertions(+), 115 deletions(-)
25
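[Editor's sketch, not part of the upstream patch: the core of this change replaces the get_dm_regions/put_dm_regions hooks with typed reserved regions (direct, reserved, MSI, software MSI). The following minimal C sketch mirrors the arm-smmu implementation added in the hunks below; the example_* names and the hard-coded MSI window are placeholders only.]

    #include <linux/dma-iommu.h>
    #include <linux/iommu.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    static void example_get_resv_regions(struct device *dev, struct list_head *head)
    {
            int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
            struct iommu_resv_region *region;

            /* Advertise a software-managed IOVA window for MSI doorbells
             * (arm-smmu uses MSI_IOVA_BASE/MSI_IOVA_LENGTH for this). */
            region = iommu_alloc_resv_region(0x8000000, 0x100000, prot,
                                             IOMMU_RESV_SW_MSI);
            if (!region)
                    return;
            list_add_tail(&region->list, head);

            /* Pick up generic reservations, e.g. PCI host bridge windows */
            iommu_dma_get_resv_regions(dev, head);
    }

    static void example_put_resv_regions(struct device *dev, struct list_head *head)
    {
            struct iommu_resv_region *entry, *next;

            /* Free everything get_resv_regions allocated */
            list_for_each_entry_safe(entry, next, head, list)
                    kfree(entry);
    }

A driver then points .get_resv_regions/.put_resv_regions in its iommu_ops at such helpers, exactly as the arm-smmu and arm-smmu-v3 hunks below do.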
26 --- a/drivers/iommu/amd_iommu.c
27 +++ b/drivers/iommu/amd_iommu.c
28 @@ -373,6 +373,8 @@ static struct iommu_group *acpihid_devic
29
30 if (!entry->group)
31 entry->group = generic_device_group(dev);
32 + else
33 + iommu_group_ref_get(entry->group);
34
35 return entry->group;
36 }
37 @@ -3159,9 +3161,10 @@ static bool amd_iommu_capable(enum iommu
38 return false;
39 }
40
41 -static void amd_iommu_get_dm_regions(struct device *dev,
42 - struct list_head *head)
43 +static void amd_iommu_get_resv_regions(struct device *dev,
44 + struct list_head *head)
45 {
46 + struct iommu_resv_region *region;
47 struct unity_map_entry *entry;
48 int devid;
49
50 @@ -3170,41 +3173,56 @@ static void amd_iommu_get_dm_regions(str
51 return;
52
53 list_for_each_entry(entry, &amd_iommu_unity_map, list) {
54 - struct iommu_dm_region *region;
55 + size_t length;
56 + int prot = 0;
57
58 if (devid < entry->devid_start || devid > entry->devid_end)
59 continue;
60
61 - region = kzalloc(sizeof(*region), GFP_KERNEL);
62 + length = entry->address_end - entry->address_start;
63 + if (entry->prot & IOMMU_PROT_IR)
64 + prot |= IOMMU_READ;
65 + if (entry->prot & IOMMU_PROT_IW)
66 + prot |= IOMMU_WRITE;
67 +
68 + region = iommu_alloc_resv_region(entry->address_start,
69 + length, prot,
70 + IOMMU_RESV_DIRECT);
71 if (!region) {
72 pr_err("Out of memory allocating dm-regions for %s\n",
73 dev_name(dev));
74 return;
75 }
76 -
77 - region->start = entry->address_start;
78 - region->length = entry->address_end - entry->address_start;
79 - if (entry->prot & IOMMU_PROT_IR)
80 - region->prot |= IOMMU_READ;
81 - if (entry->prot & IOMMU_PROT_IW)
82 - region->prot |= IOMMU_WRITE;
83 -
84 list_add_tail(&region->list, head);
85 }
86 +
87 + region = iommu_alloc_resv_region(MSI_RANGE_START,
88 + MSI_RANGE_END - MSI_RANGE_START + 1,
89 + 0, IOMMU_RESV_MSI);
90 + if (!region)
91 + return;
92 + list_add_tail(&region->list, head);
93 +
94 + region = iommu_alloc_resv_region(HT_RANGE_START,
95 + HT_RANGE_END - HT_RANGE_START + 1,
96 + 0, IOMMU_RESV_RESERVED);
97 + if (!region)
98 + return;
99 + list_add_tail(&region->list, head);
100 }
101
102 -static void amd_iommu_put_dm_regions(struct device *dev,
103 +static void amd_iommu_put_resv_regions(struct device *dev,
104 struct list_head *head)
105 {
106 - struct iommu_dm_region *entry, *next;
107 + struct iommu_resv_region *entry, *next;
108
109 list_for_each_entry_safe(entry, next, head, list)
110 kfree(entry);
111 }
112
113 -static void amd_iommu_apply_dm_region(struct device *dev,
114 +static void amd_iommu_apply_resv_region(struct device *dev,
115 struct iommu_domain *domain,
116 - struct iommu_dm_region *region)
117 + struct iommu_resv_region *region)
118 {
119 struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
120 unsigned long start, end;
121 @@ -3228,9 +3246,9 @@ static const struct iommu_ops amd_iommu_
122 .add_device = amd_iommu_add_device,
123 .remove_device = amd_iommu_remove_device,
124 .device_group = amd_iommu_device_group,
125 - .get_dm_regions = amd_iommu_get_dm_regions,
126 - .put_dm_regions = amd_iommu_put_dm_regions,
127 - .apply_dm_region = amd_iommu_apply_dm_region,
128 + .get_resv_regions = amd_iommu_get_resv_regions,
129 + .put_resv_regions = amd_iommu_put_resv_regions,
130 + .apply_resv_region = amd_iommu_apply_resv_region,
131 .pgsize_bitmap = AMD_IOMMU_PGSIZES,
132 };
133
134 --- a/drivers/iommu/arm-smmu-v3.c
135 +++ b/drivers/iommu/arm-smmu-v3.c
136 @@ -410,6 +410,9 @@
137 /* High-level queue structures */
138 #define ARM_SMMU_POLL_TIMEOUT_US 100
139
140 +#define MSI_IOVA_BASE 0x8000000
141 +#define MSI_IOVA_LENGTH 0x100000
142 +
143 static bool disable_bypass;
144 module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
145 MODULE_PARM_DESC(disable_bypass,
146 @@ -1370,8 +1373,6 @@ static bool arm_smmu_capable(enum iommu_
147 switch (cap) {
148 case IOMMU_CAP_CACHE_COHERENCY:
149 return true;
150 - case IOMMU_CAP_INTR_REMAP:
151 - return true; /* MSIs are just memory writes */
152 case IOMMU_CAP_NOEXEC:
153 return true;
154 default:
155 @@ -1709,6 +1710,9 @@ arm_smmu_iova_to_phys(struct iommu_domai
156 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
157 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
158
159 + if (domain->type == IOMMU_DOMAIN_IDENTITY)
160 + return iova;
161 +
162 if (!ops)
163 return 0;
164
165 @@ -1880,6 +1884,31 @@ static int arm_smmu_of_xlate(struct devi
166 return iommu_fwspec_add_ids(dev, args->args, 1);
167 }
168
169 +static void arm_smmu_get_resv_regions(struct device *dev,
170 + struct list_head *head)
171 +{
172 + struct iommu_resv_region *region;
173 + int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
174 +
175 + region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
176 + prot, IOMMU_RESV_SW_MSI);
177 + if (!region)
178 + return;
179 +
180 + list_add_tail(&region->list, head);
181 +
182 + iommu_dma_get_resv_regions(dev, head);
183 +}
184 +
185 +static void arm_smmu_put_resv_regions(struct device *dev,
186 + struct list_head *head)
187 +{
188 + struct iommu_resv_region *entry, *next;
189 +
190 + list_for_each_entry_safe(entry, next, head, list)
191 + kfree(entry);
192 +}
193 +
194 static struct iommu_ops arm_smmu_ops = {
195 .capable = arm_smmu_capable,
196 .domain_alloc = arm_smmu_domain_alloc,
197 @@ -1895,6 +1924,8 @@ static struct iommu_ops arm_smmu_ops = {
198 .domain_get_attr = arm_smmu_domain_get_attr,
199 .domain_set_attr = arm_smmu_domain_set_attr,
200 .of_xlate = arm_smmu_of_xlate,
201 + .get_resv_regions = arm_smmu_get_resv_regions,
202 + .put_resv_regions = arm_smmu_put_resv_regions,
203 .pgsize_bitmap = -1UL, /* Restricted during device attach */
204 };
205
206 --- a/drivers/iommu/arm-smmu.c
207 +++ b/drivers/iommu/arm-smmu.c
208 @@ -49,6 +49,7 @@
209 #include <linux/spinlock.h>
210
211 #include <linux/amba/bus.h>
212 +#include "../staging/fsl-mc/include/mc-bus.h"
213
214 #include "io-pgtable.h"
215
216 @@ -247,6 +248,7 @@ enum arm_smmu_s2cr_privcfg {
217 #define ARM_MMU500_ACTLR_CPRE (1 << 1)
218
219 #define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
220 +#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
221
222 #define CB_PAR_F (1 << 0)
223
224 @@ -278,6 +280,9 @@ enum arm_smmu_s2cr_privcfg {
225
226 #define FSYNR0_WNR (1 << 4)
227
228 +#define MSI_IOVA_BASE 0x8000000
229 +#define MSI_IOVA_LENGTH 0x100000
230 +
231 static int force_stage;
232 module_param(force_stage, int, S_IRUGO);
233 MODULE_PARM_DESC(force_stage,
234 @@ -1343,6 +1348,9 @@ static phys_addr_t arm_smmu_iova_to_phys
235 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
236 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
237
238 + if (domain->type == IOMMU_DOMAIN_IDENTITY)
239 + return iova;
240 +
241 if (!ops)
242 return 0;
243
244 @@ -1368,8 +1376,6 @@ static bool arm_smmu_capable(enum iommu_
245 * requests.
246 */
247 return true;
248 - case IOMMU_CAP_INTR_REMAP:
249 - return true; /* MSIs are just memory writes */
250 case IOMMU_CAP_NOEXEC:
251 return true;
252 default:
253 @@ -1478,10 +1484,12 @@ static struct iommu_group *arm_smmu_devi
254 }
255
256 if (group)
257 - return group;
258 + return iommu_group_ref_get(group);
259
260 if (dev_is_pci(dev))
261 group = pci_device_group(dev);
262 + else if (dev_is_fsl_mc(dev))
263 + group = fsl_mc_device_group(dev);
264 else
265 group = generic_device_group(dev);
266
267 @@ -1534,17 +1542,44 @@ out_unlock:
268
269 static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
270 {
271 - u32 fwid = 0;
272 + u32 mask, fwid = 0;
273
274 if (args->args_count > 0)
275 fwid |= (u16)args->args[0];
276
277 if (args->args_count > 1)
278 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
279 + else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
280 + fwid |= (u16)mask << SMR_MASK_SHIFT;
281
282 return iommu_fwspec_add_ids(dev, &fwid, 1);
283 }
284
285 +static void arm_smmu_get_resv_regions(struct device *dev,
286 + struct list_head *head)
287 +{
288 + struct iommu_resv_region *region;
289 + int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
290 +
291 + region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
292 + prot, IOMMU_RESV_SW_MSI);
293 + if (!region)
294 + return;
295 +
296 + list_add_tail(&region->list, head);
297 +
298 + iommu_dma_get_resv_regions(dev, head);
299 +}
300 +
301 +static void arm_smmu_put_resv_regions(struct device *dev,
302 + struct list_head *head)
303 +{
304 + struct iommu_resv_region *entry, *next;
305 +
306 + list_for_each_entry_safe(entry, next, head, list)
307 + kfree(entry);
308 +}
309 +
310 static struct iommu_ops arm_smmu_ops = {
311 .capable = arm_smmu_capable,
312 .domain_alloc = arm_smmu_domain_alloc,
313 @@ -1560,6 +1595,8 @@ static struct iommu_ops arm_smmu_ops = {
314 .domain_get_attr = arm_smmu_domain_get_attr,
315 .domain_set_attr = arm_smmu_domain_set_attr,
316 .of_xlate = arm_smmu_of_xlate,
317 + .get_resv_regions = arm_smmu_get_resv_regions,
318 + .put_resv_regions = arm_smmu_put_resv_regions,
319 .pgsize_bitmap = -1UL, /* Restricted during device attach */
320 };
321
322 @@ -1581,16 +1618,22 @@ static void arm_smmu_device_reset(struct
323 for (i = 0; i < smmu->num_mapping_groups; ++i)
324 arm_smmu_write_sme(smmu, i);
325
326 - /*
327 - * Before clearing ARM_MMU500_ACTLR_CPRE, need to
328 - * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
329 - * bit is only present in MMU-500r2 onwards.
330 - */
331 - reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
332 - major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
333 - if ((smmu->model == ARM_MMU500) && (major >= 2)) {
334 + if (smmu->model == ARM_MMU500) {
335 + /*
336 + * Before clearing ARM_MMU500_ACTLR_CPRE, need to
337 + * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
338 + * bit is only present in MMU-500r2 onwards.
339 + */
340 + reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
341 + major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
342 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
343 - reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
344 + if (major >= 2)
345 + reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
346 + /*
347 + * Allow unmatched Stream IDs to allocate bypass
348 + * TLB entries for reduced latency.
349 + */
350 + reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
351 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
352 }
353
354 @@ -2024,6 +2067,11 @@ static int arm_smmu_device_dt_probe(stru
355 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
356 }
357 #endif
358 +#ifdef CONFIG_FSL_MC_BUS
359 + if (!iommu_present(&fsl_mc_bus_type))
360 + bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
361 +#endif
362 +
363 return 0;
364 }
365
366 --- a/drivers/iommu/dma-iommu.c
367 +++ b/drivers/iommu/dma-iommu.c
368 @@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
369 phys_addr_t phys;
370 };
371
372 +enum iommu_dma_cookie_type {
373 + IOMMU_DMA_IOVA_COOKIE,
374 + IOMMU_DMA_MSI_COOKIE,
375 +};
376 +
377 struct iommu_dma_cookie {
378 - struct iova_domain iovad;
379 - struct list_head msi_page_list;
380 - spinlock_t msi_lock;
381 + enum iommu_dma_cookie_type type;
382 + union {
383 + /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
384 + struct iova_domain iovad;
385 + /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
386 + dma_addr_t msi_iova;
387 + };
388 + struct list_head msi_page_list;
389 + spinlock_t msi_lock;
390 };
391
392 +static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
393 +{
394 + if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
395 + return cookie->iovad.granule;
396 + return PAGE_SIZE;
397 +}
398 +
399 static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
400 {
401 - return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
402 + struct iommu_dma_cookie *cookie = domain->iova_cookie;
403 +
404 + if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
405 + return &cookie->iovad;
406 + return NULL;
407 +}
408 +
409 +static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
410 +{
411 + struct iommu_dma_cookie *cookie;
412 +
413 + cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
414 + if (cookie) {
415 + spin_lock_init(&cookie->msi_lock);
416 + INIT_LIST_HEAD(&cookie->msi_page_list);
417 + cookie->type = type;
418 + }
419 + return cookie;
420 }
421
422 int iommu_dma_init(void)
423 @@ -62,25 +97,53 @@ int iommu_dma_init(void)
424 */
425 int iommu_get_dma_cookie(struct iommu_domain *domain)
426 {
427 + if (domain->iova_cookie)
428 + return -EEXIST;
429 +
430 + domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
431 + if (!domain->iova_cookie)
432 + return -ENOMEM;
433 +
434 + return 0;
435 +}
436 +EXPORT_SYMBOL(iommu_get_dma_cookie);
437 +
438 +/**
439 + * iommu_get_msi_cookie - Acquire just MSI remapping resources
440 + * @domain: IOMMU domain to prepare
441 + * @base: Start address of IOVA region for MSI mappings
442 + *
443 + * Users who manage their own IOVA allocation and do not want DMA API support,
444 + * but would still like to take advantage of automatic MSI remapping, can use
445 + * this to initialise their own domain appropriately. Users should reserve a
446 + * contiguous IOVA region, starting at @base, large enough to accommodate the
447 + * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
448 + * used by the devices attached to @domain.
449 + */
450 +int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
451 +{
452 struct iommu_dma_cookie *cookie;
453
454 + if (domain->type != IOMMU_DOMAIN_UNMANAGED)
455 + return -EINVAL;
456 +
457 if (domain->iova_cookie)
458 return -EEXIST;
459
460 - cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
461 + cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
462 if (!cookie)
463 return -ENOMEM;
464
465 - spin_lock_init(&cookie->msi_lock);
466 - INIT_LIST_HEAD(&cookie->msi_page_list);
467 + cookie->msi_iova = base;
468 domain->iova_cookie = cookie;
469 return 0;
470 }
471 -EXPORT_SYMBOL(iommu_get_dma_cookie);
472 +EXPORT_SYMBOL(iommu_get_msi_cookie);
473
474 /**
475 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
476 - * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
477 + * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
478 + * iommu_get_msi_cookie()
479 *
480 * IOMMU drivers should normally call this from their domain_free callback.
481 */
482 @@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_d
483 if (!cookie)
484 return;
485
486 - if (cookie->iovad.granule)
487 + if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
488 put_iova_domain(&cookie->iovad);
489
490 list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
491 @@ -104,21 +167,99 @@ void iommu_put_dma_cookie(struct iommu_d
492 }
493 EXPORT_SYMBOL(iommu_put_dma_cookie);
494
495 -static void iova_reserve_pci_windows(struct pci_dev *dev,
496 - struct iova_domain *iovad)
497 +/**
498 + * iommu_dma_get_resv_regions - Reserved region driver helper
499 + * @dev: Device from iommu_get_resv_regions()
500 + * @list: Reserved region list from iommu_get_resv_regions()
501 + *
502 + * IOMMU drivers can use this to implement their .get_resv_regions callback
503 + * for general non-IOMMU-specific reservations. Currently, this covers host
504 + * bridge windows for PCI devices.
505 + */
506 +void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
507 {
508 - struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
509 + struct pci_host_bridge *bridge;
510 struct resource_entry *window;
511 - unsigned long lo, hi;
512
513 + if (!dev_is_pci(dev))
514 + return;
515 +
516 + bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
517 resource_list_for_each_entry(window, &bridge->windows) {
518 + struct iommu_resv_region *region;
519 + phys_addr_t start;
520 + size_t length;
521 +
522 if (resource_type(window->res) != IORESOURCE_MEM)
523 continue;
524
525 - lo = iova_pfn(iovad, window->res->start - window->offset);
526 - hi = iova_pfn(iovad, window->res->end - window->offset);
527 + start = window->res->start - window->offset;
528 + length = window->res->end - window->res->start + 1;
529 + region = iommu_alloc_resv_region(start, length, 0,
530 + IOMMU_RESV_RESERVED);
531 + if (!region)
532 + return;
533 +
534 + list_add_tail(&region->list, list);
535 + }
536 +}
537 +EXPORT_SYMBOL(iommu_dma_get_resv_regions);
538 +
539 +static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
540 + phys_addr_t start, phys_addr_t end)
541 +{
542 + struct iova_domain *iovad = &cookie->iovad;
543 + struct iommu_dma_msi_page *msi_page;
544 + int i, num_pages;
545 +
546 + start -= iova_offset(iovad, start);
547 + num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
548 +
549 + msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
550 + if (!msi_page)
551 + return -ENOMEM;
552 +
553 + for (i = 0; i < num_pages; i++) {
554 + msi_page[i].phys = start;
555 + msi_page[i].iova = start;
556 + INIT_LIST_HEAD(&msi_page[i].list);
557 + list_add(&msi_page[i].list, &cookie->msi_page_list);
558 + start += iovad->granule;
559 + }
560 +
561 + return 0;
562 +}
563 +
564 +static int iova_reserve_iommu_regions(struct device *dev,
565 + struct iommu_domain *domain)
566 +{
567 + struct iommu_dma_cookie *cookie = domain->iova_cookie;
568 + struct iova_domain *iovad = &cookie->iovad;
569 + struct iommu_resv_region *region;
570 + LIST_HEAD(resv_regions);
571 + int ret = 0;
572 +
573 + iommu_get_resv_regions(dev, &resv_regions);
574 + list_for_each_entry(region, &resv_regions, list) {
575 + unsigned long lo, hi;
576 +
577 + /* We ARE the software that manages these! */
578 + if (region->type == IOMMU_RESV_SW_MSI)
579 + continue;
580 +
581 + lo = iova_pfn(iovad, region->start);
582 + hi = iova_pfn(iovad, region->start + region->length - 1);
583 reserve_iova(iovad, lo, hi);
584 +
585 + if (region->type == IOMMU_RESV_MSI)
586 + ret = cookie_init_hw_msi_region(cookie, region->start,
587 + region->start + region->length);
588 + if (ret)
589 + break;
590 }
591 + iommu_put_resv_regions(dev, &resv_regions);
592 +
593 + return ret;
594 }
595
596 /**
597 @@ -136,11 +277,12 @@ static void iova_reserve_pci_windows(str
598 int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
599 u64 size, struct device *dev)
600 {
601 - struct iova_domain *iovad = cookie_iovad(domain);
602 + struct iommu_dma_cookie *cookie = domain->iova_cookie;
603 + struct iova_domain *iovad = &cookie->iovad;
604 unsigned long order, base_pfn, end_pfn;
605
606 - if (!iovad)
607 - return -ENODEV;
608 + if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
609 + return -EINVAL;
610
611 /* Use the smallest supported page size for IOVA granularity */
612 order = __ffs(domain->pgsize_bitmap);
613 @@ -160,22 +302,37 @@ int iommu_dma_init_domain(struct iommu_d
614 end_pfn = min_t(unsigned long, end_pfn,
615 domain->geometry.aperture_end >> order);
616 }
617 + /*
618 + * PCI devices may have larger DMA masks, but still prefer allocating
619 + * within a 32-bit mask to avoid DAC addressing. Such limitations don't
620 + * apply to the typical platform device, so for those we may as well
621 + * leave the cache limit at the top of their range to save an rb_last()
622 + * traversal on every allocation.
623 + */
624 + if (dev && dev_is_pci(dev))
625 + end_pfn &= DMA_BIT_MASK(32) >> order;
626
627 - /* All we can safely do with an existing domain is enlarge it */
628 + /* start_pfn is always nonzero for an already-initialised domain */
629 if (iovad->start_pfn) {
630 if (1UL << order != iovad->granule ||
631 - base_pfn != iovad->start_pfn ||
632 - end_pfn < iovad->dma_32bit_pfn) {
633 + base_pfn != iovad->start_pfn) {
634 pr_warn("Incompatible range for DMA domain\n");
635 return -EFAULT;
636 }
637 - iovad->dma_32bit_pfn = end_pfn;
638 - } else {
639 - init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
640 - if (dev && dev_is_pci(dev))
641 - iova_reserve_pci_windows(to_pci_dev(dev), iovad);
642 + /*
643 + * If we have devices with different DMA masks, move the free
644 + * area cache limit down for the benefit of the smaller one.
645 + */
646 + iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
647 +
648 + return 0;
649 }
650 - return 0;
651 +
652 + init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
653 + if (!dev)
654 + return 0;
655 +
656 + return iova_reserve_iommu_regions(dev, domain);
657 }
658 EXPORT_SYMBOL(iommu_dma_init_domain);
659
660 @@ -643,11 +800,12 @@ static struct iommu_dma_msi_page *iommu_
661 {
662 struct iommu_dma_cookie *cookie = domain->iova_cookie;
663 struct iommu_dma_msi_page *msi_page;
664 - struct iova_domain *iovad = &cookie->iovad;
665 + struct iova_domain *iovad = cookie_iovad(domain);
666 struct iova *iova;
667 int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
668 + size_t size = cookie_msi_granule(cookie);
669
670 - msi_addr &= ~(phys_addr_t)iova_mask(iovad);
671 + msi_addr &= ~(phys_addr_t)(size - 1);
672 list_for_each_entry(msi_page, &cookie->msi_page_list, list)
673 if (msi_page->phys == msi_addr)
674 return msi_page;
675 @@ -656,13 +814,18 @@ static struct iommu_dma_msi_page *iommu_
676 if (!msi_page)
677 return NULL;
678
679 - iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
680 - if (!iova)
681 - goto out_free_page;
682 -
683 msi_page->phys = msi_addr;
684 - msi_page->iova = iova_dma_addr(iovad, iova);
685 - if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
686 + if (iovad) {
687 + iova = __alloc_iova(domain, size, dma_get_mask(dev));
688 + if (!iova)
689 + goto out_free_page;
690 + msi_page->iova = iova_dma_addr(iovad, iova);
691 + } else {
692 + msi_page->iova = cookie->msi_iova;
693 + cookie->msi_iova += size;
694 + }
695 +
696 + if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
697 goto out_free_iova;
698
699 INIT_LIST_HEAD(&msi_page->list);
700 @@ -670,7 +833,10 @@ static struct iommu_dma_msi_page *iommu_
701 return msi_page;
702
703 out_free_iova:
704 - __free_iova(iovad, iova);
705 + if (iovad)
706 + __free_iova(iovad, iova);
707 + else
708 + cookie->msi_iova -= size;
709 out_free_page:
710 kfree(msi_page);
711 return NULL;
712 @@ -711,7 +877,7 @@ void iommu_dma_map_msi_msg(int irq, stru
713 msg->data = ~0U;
714 } else {
715 msg->address_hi = upper_32_bits(msi_page->iova);
716 - msg->address_lo &= iova_mask(&cookie->iovad);
717 + msg->address_lo &= cookie_msi_granule(cookie) - 1;
718 msg->address_lo += lower_32_bits(msi_page->iova);
719 }
720 }
721 --- a/drivers/iommu/intel-iommu.c
722 +++ b/drivers/iommu/intel-iommu.c
723 @@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
724 u64 end_address; /* reserved end address */
725 struct dmar_dev_scope *devices; /* target devices */
726 int devices_cnt; /* target device count */
727 + struct iommu_resv_region *resv; /* reserved region handle */
728 };
729
730 struct dmar_atsr_unit {
731 @@ -4250,27 +4251,40 @@ static inline void init_iommu_pm_ops(voi
732 int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
733 {
734 struct acpi_dmar_reserved_memory *rmrr;
735 + int prot = DMA_PTE_READ|DMA_PTE_WRITE;
736 struct dmar_rmrr_unit *rmrru;
737 + size_t length;
738
739 rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
740 if (!rmrru)
741 - return -ENOMEM;
742 + goto out;
743
744 rmrru->hdr = header;
745 rmrr = (struct acpi_dmar_reserved_memory *)header;
746 rmrru->base_address = rmrr->base_address;
747 rmrru->end_address = rmrr->end_address;
748 +
749 + length = rmrr->end_address - rmrr->base_address + 1;
750 + rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
751 + IOMMU_RESV_DIRECT);
752 + if (!rmrru->resv)
753 + goto free_rmrru;
754 +
755 rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
756 ((void *)rmrr) + rmrr->header.length,
757 &rmrru->devices_cnt);
758 - if (rmrru->devices_cnt && rmrru->devices == NULL) {
759 - kfree(rmrru);
760 - return -ENOMEM;
761 - }
762 + if (rmrru->devices_cnt && rmrru->devices == NULL)
763 + goto free_all;
764
765 list_add(&rmrru->list, &dmar_rmrr_units);
766
767 return 0;
768 +free_all:
769 + kfree(rmrru->resv);
770 +free_rmrru:
771 + kfree(rmrru);
772 +out:
773 + return -ENOMEM;
774 }
775
776 static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
777 @@ -4484,6 +4498,7 @@ static void intel_iommu_free_dmars(void)
778 list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
779 list_del(&rmrru->list);
780 dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
781 + kfree(rmrru->resv);
782 kfree(rmrru);
783 }
784
785 @@ -5219,6 +5234,45 @@ static void intel_iommu_remove_device(st
786 iommu_device_unlink(iommu->iommu_dev, dev);
787 }
788
789 +static void intel_iommu_get_resv_regions(struct device *device,
790 + struct list_head *head)
791 +{
792 + struct iommu_resv_region *reg;
793 + struct dmar_rmrr_unit *rmrr;
794 + struct device *i_dev;
795 + int i;
796 +
797 + rcu_read_lock();
798 + for_each_rmrr_units(rmrr) {
799 + for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
800 + i, i_dev) {
801 + if (i_dev != device)
802 + continue;
803 +
804 + list_add_tail(&rmrr->resv->list, head);
805 + }
806 + }
807 + rcu_read_unlock();
808 +
809 + reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
810 + IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
811 + 0, IOMMU_RESV_MSI);
812 + if (!reg)
813 + return;
814 + list_add_tail(&reg->list, head);
815 +}
816 +
817 +static void intel_iommu_put_resv_regions(struct device *dev,
818 + struct list_head *head)
819 +{
820 + struct iommu_resv_region *entry, *next;
821 +
822 + list_for_each_entry_safe(entry, next, head, list) {
823 + if (entry->type == IOMMU_RESV_RESERVED)
824 + kfree(entry);
825 + }
826 +}
827 +
828 #ifdef CONFIG_INTEL_IOMMU_SVM
829 #define MAX_NR_PASID_BITS (20)
830 static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
831 @@ -5349,19 +5403,21 @@ struct intel_iommu *intel_svm_device_to_
832 #endif /* CONFIG_INTEL_IOMMU_SVM */
833
834 static const struct iommu_ops intel_iommu_ops = {
835 - .capable = intel_iommu_capable,
836 - .domain_alloc = intel_iommu_domain_alloc,
837 - .domain_free = intel_iommu_domain_free,
838 - .attach_dev = intel_iommu_attach_device,
839 - .detach_dev = intel_iommu_detach_device,
840 - .map = intel_iommu_map,
841 - .unmap = intel_iommu_unmap,
842 - .map_sg = default_iommu_map_sg,
843 - .iova_to_phys = intel_iommu_iova_to_phys,
844 - .add_device = intel_iommu_add_device,
845 - .remove_device = intel_iommu_remove_device,
846 - .device_group = pci_device_group,
847 - .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
848 + .capable = intel_iommu_capable,
849 + .domain_alloc = intel_iommu_domain_alloc,
850 + .domain_free = intel_iommu_domain_free,
851 + .attach_dev = intel_iommu_attach_device,
852 + .detach_dev = intel_iommu_detach_device,
853 + .map = intel_iommu_map,
854 + .unmap = intel_iommu_unmap,
855 + .map_sg = default_iommu_map_sg,
856 + .iova_to_phys = intel_iommu_iova_to_phys,
857 + .add_device = intel_iommu_add_device,
858 + .remove_device = intel_iommu_remove_device,
859 + .get_resv_regions = intel_iommu_get_resv_regions,
860 + .put_resv_regions = intel_iommu_put_resv_regions,
861 + .device_group = pci_device_group,
862 + .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
863 };
864
865 static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
866 --- a/drivers/iommu/iommu.c
867 +++ b/drivers/iommu/iommu.c
868 @@ -68,6 +68,13 @@ struct iommu_group_attribute {
869 const char *buf, size_t count);
870 };
871
872 +static const char * const iommu_group_resv_type_string[] = {
873 + [IOMMU_RESV_DIRECT] = "direct",
874 + [IOMMU_RESV_RESERVED] = "reserved",
875 + [IOMMU_RESV_MSI] = "msi",
876 + [IOMMU_RESV_SW_MSI] = "msi",
877 +};
878 +
879 #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
880 struct iommu_group_attribute iommu_group_attr_##_name = \
881 __ATTR(_name, _mode, _show, _store)
882 @@ -133,8 +140,131 @@ static ssize_t iommu_group_show_name(str
883 return sprintf(buf, "%s\n", group->name);
884 }
885
886 +/**
887 + * iommu_insert_resv_region - Insert a new region in the
888 + * list of reserved regions.
889 + * @new: new region to insert
890 + * @regions: list of regions
891 + *
892 + * The new element is sorted by address with respect to the other
893 + * regions of the same type. In case it overlaps with another
894 + * region of the same type, regions are merged. In case it
895 + * overlaps with another region of different type, regions are
896 + * not merged.
897 + */
898 +static int iommu_insert_resv_region(struct iommu_resv_region *new,
899 + struct list_head *regions)
900 +{
901 + struct iommu_resv_region *region;
902 + phys_addr_t start = new->start;
903 + phys_addr_t end = new->start + new->length - 1;
904 + struct list_head *pos = regions->next;
905 +
906 + while (pos != regions) {
907 + struct iommu_resv_region *entry =
908 + list_entry(pos, struct iommu_resv_region, list);
909 + phys_addr_t a = entry->start;
910 + phys_addr_t b = entry->start + entry->length - 1;
911 + int type = entry->type;
912 +
913 + if (end < a) {
914 + goto insert;
915 + } else if (start > b) {
916 + pos = pos->next;
917 + } else if ((start >= a) && (end <= b)) {
918 + if (new->type == type)
919 + goto done;
920 + else
921 + pos = pos->next;
922 + } else {
923 + if (new->type == type) {
924 + phys_addr_t new_start = min(a, start);
925 + phys_addr_t new_end = max(b, end);
926 +
927 + list_del(&entry->list);
928 + entry->start = new_start;
929 + entry->length = new_end - new_start + 1;
930 + iommu_insert_resv_region(entry, regions);
931 + } else {
932 + pos = pos->next;
933 + }
934 + }
935 + }
936 +insert:
937 + region = iommu_alloc_resv_region(new->start, new->length,
938 + new->prot, new->type);
939 + if (!region)
940 + return -ENOMEM;
941 +
942 + list_add_tail(&region->list, pos);
943 +done:
944 + return 0;
945 +}
946 +
947 +static int
948 +iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
949 + struct list_head *group_resv_regions)
950 +{
951 + struct iommu_resv_region *entry;
952 + int ret;
953 +
954 + list_for_each_entry(entry, dev_resv_regions, list) {
955 + ret = iommu_insert_resv_region(entry, group_resv_regions);
956 + if (ret)
957 + break;
958 + }
959 + return ret;
960 +}
961 +
962 +int iommu_get_group_resv_regions(struct iommu_group *group,
963 + struct list_head *head)
964 +{
965 + struct iommu_device *device;
966 + int ret = 0;
967 +
968 + mutex_lock(&group->mutex);
969 + list_for_each_entry(device, &group->devices, list) {
970 + struct list_head dev_resv_regions;
971 +
972 + INIT_LIST_HEAD(&dev_resv_regions);
973 + iommu_get_resv_regions(device->dev, &dev_resv_regions);
974 + ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
975 + iommu_put_resv_regions(device->dev, &dev_resv_regions);
976 + if (ret)
977 + break;
978 + }
979 + mutex_unlock(&group->mutex);
980 + return ret;
981 +}
982 +EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
983 +
984 +static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
985 + char *buf)
986 +{
987 + struct iommu_resv_region *region, *next;
988 + struct list_head group_resv_regions;
989 + char *str = buf;
990 +
991 + INIT_LIST_HEAD(&group_resv_regions);
992 + iommu_get_group_resv_regions(group, &group_resv_regions);
993 +
994 + list_for_each_entry_safe(region, next, &group_resv_regions, list) {
995 + str += sprintf(str, "0x%016llx 0x%016llx %s\n",
996 + (long long int)region->start,
997 + (long long int)(region->start +
998 + region->length - 1),
999 + iommu_group_resv_type_string[region->type]);
1000 + kfree(region);
1001 + }
1002 +
1003 + return (str - buf);
1004 +}
1005 +
1006 static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
1007
1008 +static IOMMU_GROUP_ATTR(reserved_regions, 0444,
1009 + iommu_group_show_resv_regions, NULL);
1010 +
1011 static void iommu_group_release(struct kobject *kobj)
1012 {
1013 struct iommu_group *group = to_iommu_group(kobj);
1014 @@ -212,6 +342,11 @@ struct iommu_group *iommu_group_alloc(vo
1015 */
1016 kobject_put(&group->kobj);
1017
1018 + ret = iommu_group_create_file(group,
1019 + &iommu_group_attr_reserved_regions);
1020 + if (ret)
1021 + return ERR_PTR(ret);
1022 +
1023 pr_debug("Allocated group %d\n", group->id);
1024
1025 return group;
1026 @@ -318,7 +453,7 @@ static int iommu_group_create_direct_map
1027 struct device *dev)
1028 {
1029 struct iommu_domain *domain = group->default_domain;
1030 - struct iommu_dm_region *entry;
1031 + struct iommu_resv_region *entry;
1032 struct list_head mappings;
1033 unsigned long pg_size;
1034 int ret = 0;
1035 @@ -331,18 +466,21 @@ static int iommu_group_create_direct_map
1036 pg_size = 1UL << __ffs(domain->pgsize_bitmap);
1037 INIT_LIST_HEAD(&mappings);
1038
1039 - iommu_get_dm_regions(dev, &mappings);
1040 + iommu_get_resv_regions(dev, &mappings);
1041
1042 /* We need to consider overlapping regions for different devices */
1043 list_for_each_entry(entry, &mappings, list) {
1044 dma_addr_t start, end, addr;
1045
1046 - if (domain->ops->apply_dm_region)
1047 - domain->ops->apply_dm_region(dev, domain, entry);
1048 + if (domain->ops->apply_resv_region)
1049 + domain->ops->apply_resv_region(dev, domain, entry);
1050
1051 start = ALIGN(entry->start, pg_size);
1052 end = ALIGN(entry->start + entry->length, pg_size);
1053
1054 + if (entry->type != IOMMU_RESV_DIRECT)
1055 + continue;
1056 +
1057 for (addr = start; addr < end; addr += pg_size) {
1058 phys_addr_t phys_addr;
1059
1060 @@ -358,7 +496,7 @@ static int iommu_group_create_direct_map
1061 }
1062
1063 out:
1064 - iommu_put_dm_regions(dev, &mappings);
1065 + iommu_put_resv_regions(dev, &mappings);
1066
1067 return ret;
1068 }
1069 @@ -563,6 +701,19 @@ struct iommu_group *iommu_group_get(stru
1070 EXPORT_SYMBOL_GPL(iommu_group_get);
1071
1072 /**
1073 + * iommu_group_ref_get - Increment reference on a group
1074 + * @group: the group to use, must not be NULL
1075 + *
1076 + * This function is called by iommu drivers to take additional references on an
1077 + * existing group. Returns the given group for convenience.
1078 + */
1079 +struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
1080 +{
1081 + kobject_get(group->devices_kobj);
1082 + return group;
1083 +}
1084 +
1085 +/**
1086 * iommu_group_put - Decrement group reference
1087 * @group: the group to use
1088 *
1089 @@ -1557,20 +1708,38 @@ int iommu_domain_set_attr(struct iommu_d
1090 }
1091 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
1092
1093 -void iommu_get_dm_regions(struct device *dev, struct list_head *list)
1094 +void iommu_get_resv_regions(struct device *dev, struct list_head *list)
1095 {
1096 const struct iommu_ops *ops = dev->bus->iommu_ops;
1097
1098 - if (ops && ops->get_dm_regions)
1099 - ops->get_dm_regions(dev, list);
1100 + if (ops && ops->get_resv_regions)
1101 + ops->get_resv_regions(dev, list);
1102 }
1103
1104 -void iommu_put_dm_regions(struct device *dev, struct list_head *list)
1105 +void iommu_put_resv_regions(struct device *dev, struct list_head *list)
1106 {
1107 const struct iommu_ops *ops = dev->bus->iommu_ops;
1108
1109 - if (ops && ops->put_dm_regions)
1110 - ops->put_dm_regions(dev, list);
1111 + if (ops && ops->put_resv_regions)
1112 + ops->put_resv_regions(dev, list);
1113 +}
1114 +
1115 +struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
1116 + size_t length, int prot,
1117 + enum iommu_resv_type type)
1118 +{
1119 + struct iommu_resv_region *region;
1120 +
1121 + region = kzalloc(sizeof(*region), GFP_KERNEL);
1122 + if (!region)
1123 + return NULL;
1124 +
1125 + INIT_LIST_HEAD(&region->list);
1126 + region->start = start;
1127 + region->length = length;
1128 + region->prot = prot;
1129 + region->type = type;
1130 + return region;
1131 }
1132
1133 /* Request that a device is direct mapped by the IOMMU */
1134 --- a/drivers/iommu/mtk_iommu.c
1135 +++ b/drivers/iommu/mtk_iommu.c
1136 @@ -410,6 +410,8 @@ static struct iommu_group *mtk_iommu_dev
1137 data->m4u_group = iommu_group_alloc();
1138 if (IS_ERR(data->m4u_group))
1139 dev_err(dev, "Failed to allocate M4U IOMMU group\n");
1140 + } else {
1141 + iommu_group_ref_get(data->m4u_group);
1142 }
1143 return data->m4u_group;
1144 }
1145 --- a/drivers/iommu/mtk_iommu_v1.c
1146 +++ b/drivers/iommu/mtk_iommu_v1.c
1147 @@ -502,6 +502,8 @@ static struct iommu_group *mtk_iommu_dev
1148 data->m4u_group = iommu_group_alloc();
1149 if (IS_ERR(data->m4u_group))
1150 dev_err(dev, "Failed to allocate M4U IOMMU group\n");
1151 + } else {
1152 + iommu_group_ref_get(data->m4u_group);
1153 }
1154 return data->m4u_group;
1155 }
1156 --- a/include/linux/dma-iommu.h
1157 +++ b/include/linux/dma-iommu.h
1158 @@ -27,6 +27,7 @@ int iommu_dma_init(void);
1159
1160 /* Domain management interface for IOMMU drivers */
1161 int iommu_get_dma_cookie(struct iommu_domain *domain);
1162 +int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
1163 void iommu_put_dma_cookie(struct iommu_domain *domain);
1164
1165 /* Setup call for arch DMA mapping code */
1166 @@ -66,6 +67,7 @@ int iommu_dma_mapping_error(struct devic
1167
1168 /* The DMA API isn't _quite_ the whole story, though... */
1169 void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
1170 +void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
1171
1172 #else
1173
1174 @@ -82,6 +84,11 @@ static inline int iommu_get_dma_cookie(s
1175 return -ENODEV;
1176 }
1177
1178 +static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
1179 +{
1180 + return -ENODEV;
1181 +}
1182 +
1183 static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
1184 {
1185 }
1186 @@ -90,6 +97,10 @@ static inline void iommu_dma_map_msi_msg
1187 {
1188 }
1189
1190 +static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
1191 +{
1192 +}
1193 +
1194 #endif /* CONFIG_IOMMU_DMA */
1195 #endif /* __KERNEL__ */
1196 #endif /* __DMA_IOMMU_H */
1197 --- a/include/linux/iommu.h
1198 +++ b/include/linux/iommu.h
1199 @@ -117,18 +117,32 @@ enum iommu_attr {
1200 DOMAIN_ATTR_MAX,
1201 };
1202
1203 +/* These are the possible reserved region types */
1204 +enum iommu_resv_type {
1205 + /* Memory regions which must be mapped 1:1 at all times */
1206 + IOMMU_RESV_DIRECT,
1207 + /* Arbitrary "never map this or give it to a device" address ranges */
1208 + IOMMU_RESV_RESERVED,
1209 + /* Hardware MSI region (untranslated) */
1210 + IOMMU_RESV_MSI,
1211 + /* Software-managed MSI translation window */
1212 + IOMMU_RESV_SW_MSI,
1213 +};
1214 +
1215 /**
1216 - * struct iommu_dm_region - descriptor for a direct mapped memory region
1217 + * struct iommu_resv_region - descriptor for a reserved memory region
1218 * @list: Linked list pointers
1219 * @start: System physical start address of the region
1220 * @length: Length of the region in bytes
1221 * @prot: IOMMU Protection flags (READ/WRITE/...)
1222 + * @type: Type of the reserved region
1223 */
1224 -struct iommu_dm_region {
1225 +struct iommu_resv_region {
1226 struct list_head list;
1227 phys_addr_t start;
1228 size_t length;
1229 int prot;
1230 + enum iommu_resv_type type;
1231 };
1232
1233 #ifdef CONFIG_IOMMU_API
1234 @@ -150,9 +164,9 @@ struct iommu_dm_region {
1235 * @device_group: find iommu group for a particular device
1236 * @domain_get_attr: Query domain attributes
1237 * @domain_set_attr: Change domain attributes
1238 - * @get_dm_regions: Request list of direct mapping requirements for a device
1239 - * @put_dm_regions: Free list of direct mapping requirements for a device
1240 - * @apply_dm_region: Temporary helper call-back for iova reserved ranges
1241 + * @get_resv_regions: Request list of reserved regions for a device
1242 + * @put_resv_regions: Free list of reserved regions for a device
1243 + * @apply_resv_region: Temporary helper call-back for iova reserved ranges
1244 * @domain_window_enable: Configure and enable a particular window for a domain
1245 * @domain_window_disable: Disable a particular window for a domain
1246 * @domain_set_windows: Set the number of windows for a domain
1247 @@ -184,11 +198,12 @@ struct iommu_ops {
1248 int (*domain_set_attr)(struct iommu_domain *domain,
1249 enum iommu_attr attr, void *data);
1250
1251 - /* Request/Free a list of direct mapping requirements for a device */
1252 - void (*get_dm_regions)(struct device *dev, struct list_head *list);
1253 - void (*put_dm_regions)(struct device *dev, struct list_head *list);
1254 - void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain,
1255 - struct iommu_dm_region *region);
1256 + /* Request/Free a list of reserved regions for a device */
1257 + void (*get_resv_regions)(struct device *dev, struct list_head *list);
1258 + void (*put_resv_regions)(struct device *dev, struct list_head *list);
1259 + void (*apply_resv_region)(struct device *dev,
1260 + struct iommu_domain *domain,
1261 + struct iommu_resv_region *region);
1262
1263 /* Window handling functions */
1264 int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
1265 @@ -233,9 +248,14 @@ extern phys_addr_t iommu_iova_to_phys(st
1266 extern void iommu_set_fault_handler(struct iommu_domain *domain,
1267 iommu_fault_handler_t handler, void *token);
1268
1269 -extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
1270 -extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
1271 +extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
1272 +extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
1273 extern int iommu_request_dm_for_dev(struct device *dev);
1274 +extern struct iommu_resv_region *
1275 +iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
1276 + enum iommu_resv_type type);
1277 +extern int iommu_get_group_resv_regions(struct iommu_group *group,
1278 + struct list_head *head);
1279
1280 extern int iommu_attach_group(struct iommu_domain *domain,
1281 struct iommu_group *group);
1282 @@ -253,6 +273,7 @@ extern void iommu_group_remove_device(st
1283 extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
1284 int (*fn)(struct device *, void *));
1285 extern struct iommu_group *iommu_group_get(struct device *dev);
1286 +extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
1287 extern void iommu_group_put(struct iommu_group *group);
1288 extern int iommu_group_register_notifier(struct iommu_group *group,
1289 struct notifier_block *nb);
1290 @@ -439,16 +460,22 @@ static inline void iommu_set_fault_handl
1291 {
1292 }
1293
1294 -static inline void iommu_get_dm_regions(struct device *dev,
1295 +static inline void iommu_get_resv_regions(struct device *dev,
1296 struct list_head *list)
1297 {
1298 }
1299
1300 -static inline void iommu_put_dm_regions(struct device *dev,
1301 +static inline void iommu_put_resv_regions(struct device *dev,
1302 struct list_head *list)
1303 {
1304 }
1305
1306 +static inline int iommu_get_group_resv_regions(struct iommu_group *group,
1307 + struct list_head *head)
1308 +{
1309 + return -ENODEV;
1310 +}
1311 +
1312 static inline int iommu_request_dm_for_dev(struct device *dev)
1313 {
1314 return -ENODEV;