From 152f316e7829f6aeb3a36009e7e5ec0f1d97d770 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
Date: Wed, 27 Sep 2017 10:33:26 +0800
Subject: [PATCH] iommu: support layerscape

This is an integrated patch for Layerscape SMMU support.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
 drivers/iommu/amd_iommu.c | 56 ++++++----
 drivers/iommu/arm-smmu-v3.c | 117 ++++++++++++++-------
 drivers/iommu/arm-smmu.c | 100 +++++++++++++++---
 drivers/iommu/dma-iommu.c | 242 ++++++++++++++++++++++++++++++++++++-------
 drivers/iommu/intel-iommu.c | 92 ++++++++++++----
 drivers/iommu/iommu.c | 219 ++++++++++++++++++++++++++++++++++++---
 drivers/iommu/mtk_iommu.c | 2 +
 drivers/iommu/mtk_iommu_v1.c | 2 +
 include/linux/dma-iommu.h | 11 ++
 include/linux/iommu.h | 55 +++++++---
 10 files changed, 739 insertions(+), 157 deletions(-)

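[Editor's note, not part of the patch] The hunks below rename the old "dm region" interface to a generic reserved-region API. As a minimal sketch of how a kernel-side consumer could walk the per-group regions this exposes, assuming it already holds a group reference; the function name and pr_info() format are illustrative assumptions, mirroring the sysfs handler added in drivers/iommu/iommu.c:

/*
 * Illustrative sketch only -- not part of the patch. Entries returned by
 * iommu_get_group_resv_regions() are caller-owned copies, so they are
 * freed here, exactly as iommu_group_show_resv_regions() does below.
 */
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/slab.h>

static void dump_group_resv_regions(struct iommu_group *group)
{
	struct iommu_resv_region *region, *next;
	LIST_HEAD(resv_regions);

	/* Collect the merged, sorted region list for every device in the group */
	iommu_get_group_resv_regions(group, &resv_regions);

	list_for_each_entry_safe(region, next, &resv_regions, list) {
		pr_info("resv region: start %pa, length %zu, type %d\n",
			&region->start, region->length, region->type);
		kfree(region);	/* the copies belong to the caller */
	}
}
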
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index c380b7e8..93199931 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -373,6 +373,8 @@ static struct iommu_group *acpihid_device_group(struct device *dev)

if (!entry->group)
entry->group = generic_device_group(dev);
+ else
+ iommu_group_ref_get(entry->group);

return entry->group;
}
@@ -3159,9 +3161,10 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return false;
}

-static void amd_iommu_get_dm_regions(struct device *dev,
- struct list_head *head)
+static void amd_iommu_get_resv_regions(struct device *dev,
+ struct list_head *head)
{
+ struct iommu_resv_region *region;
struct unity_map_entry *entry;
int devid;

@@ -3170,41 +3173,56 @@ static void amd_iommu_get_dm_regions(struct device *dev,
return;

list_for_each_entry(entry, &amd_iommu_unity_map, list) {
- struct iommu_dm_region *region;
+ size_t length;
+ int prot = 0;

if (devid < entry->devid_start || devid > entry->devid_end)
continue;

- region = kzalloc(sizeof(*region), GFP_KERNEL);
+ length = entry->address_end - entry->address_start;
+ if (entry->prot & IOMMU_PROT_IR)
+ prot |= IOMMU_READ;
+ if (entry->prot & IOMMU_PROT_IW)
+ prot |= IOMMU_WRITE;
+
+ region = iommu_alloc_resv_region(entry->address_start,
+ length, prot,
+ IOMMU_RESV_DIRECT);
if (!region) {
pr_err("Out of memory allocating dm-regions for %s\n",
dev_name(dev));
return;
}
-
- region->start = entry->address_start;
- region->length = entry->address_end - entry->address_start;
- if (entry->prot & IOMMU_PROT_IR)
- region->prot |= IOMMU_READ;
- if (entry->prot & IOMMU_PROT_IW)
- region->prot |= IOMMU_WRITE;
-
list_add_tail(&region->list, head);
}
+
+ region = iommu_alloc_resv_region(MSI_RANGE_START,
+ MSI_RANGE_END - MSI_RANGE_START + 1,
+ 0, IOMMU_RESV_MSI);
+ if (!region)
+ return;
+ list_add_tail(&region->list, head);
+
+ region = iommu_alloc_resv_region(HT_RANGE_START,
+ HT_RANGE_END - HT_RANGE_START + 1,
+ 0, IOMMU_RESV_RESERVED);
+ if (!region)
+ return;
+ list_add_tail(&region->list, head);
}

-static void amd_iommu_put_dm_regions(struct device *dev,
+static void amd_iommu_put_resv_regions(struct device *dev,
struct list_head *head)
{
- struct iommu_dm_region *entry, *next;
+ struct iommu_resv_region *entry, *next;

list_for_each_entry_safe(entry, next, head, list)
kfree(entry);
}

-static void amd_iommu_apply_dm_region(struct device *dev,
+static void amd_iommu_apply_resv_region(struct device *dev,
struct iommu_domain *domain,
- struct iommu_dm_region *region)
+ struct iommu_resv_region *region)
{
struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
unsigned long start, end;
@@ -3228,9 +3246,9 @@ static const struct iommu_ops amd_iommu_ops = {
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
.device_group = amd_iommu_device_group,
- .get_dm_regions = amd_iommu_get_dm_regions,
- .put_dm_regions = amd_iommu_put_dm_regions,
- .apply_dm_region = amd_iommu_apply_dm_region,
+ .get_resv_regions = amd_iommu_get_resv_regions,
+ .put_resv_regions = amd_iommu_put_resv_regions,
+ .apply_resv_region = amd_iommu_apply_resv_region,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
};

diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index e6f9b2d7..48e2a7c4 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -410,6 +410,9 @@
/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US 100

+#define MSI_IOVA_BASE 0x8000000
+#define MSI_IOVA_LENGTH 0x100000
+
static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
@@ -552,9 +555,14 @@ struct arm_smmu_s2_cfg {
};

struct arm_smmu_strtab_ent {
- bool valid;
-
- bool bypass; /* Overrides s1/s2 config */
+ /*
+ * An STE is "assigned" if the master emitting the corresponding SID
+ * is attached to a domain. The behaviour of an unassigned STE is
+ * determined by the disable_bypass parameter, whereas an assigned
+ * STE behaves according to s1_cfg/s2_cfg, which themselves are
+ * configured according to the domain type.
+ */
+ bool assigned;
struct arm_smmu_s1_cfg *s1_cfg;
struct arm_smmu_s2_cfg *s2_cfg;
};
@@ -627,6 +635,7 @@ enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
ARM_SMMU_DOMAIN_NESTED,
+ ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
@@ -1000,9 +1009,9 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
* This is hideously complicated, but we only really care about
* three cases at the moment:
*
- * 1. Invalid (all zero) -> bypass (init)
- * 2. Bypass -> translation (attach)
- * 3. Translation -> bypass (detach)
+ * 1. Invalid (all zero) -> bypass/fault (init)
+ * 2. Bypass/fault -> translation/bypass (attach)
+ * 3. Translation/bypass -> bypass/fault (detach)
*
* Given that we can't update the STE atomically and the SMMU
* doesn't read the thing in a defined order, that leaves us
@@ -1040,17 +1049,16 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
}
}

- /* Nuke the existing Config, as we're going to rewrite it */
- val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
+ /* Nuke the existing STE_0 value, as we're going to rewrite it */
+ val = STRTAB_STE_0_V;

- if (ste->valid)
- val |= STRTAB_STE_0_V;
- else
- val &= ~STRTAB_STE_0_V;
+ /* Bypass/fault */
+ if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
+ if (!ste->assigned && disable_bypass)
+ val |= STRTAB_STE_0_CFG_ABORT;
+ else
+ val |= STRTAB_STE_0_CFG_BYPASS;

- if (ste->bypass) {
- val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
- : STRTAB_STE_0_CFG_BYPASS;
dst[0] = cpu_to_le64(val);
dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
<< STRTAB_STE_1_SHCFG_SHIFT);
@@ -1081,7 +1089,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
STRTAB_STE_0_CFG_S1_TRANS;
-
}

if (ste->s2_cfg) {
@@ -1114,10 +1121,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
unsigned int i;
- struct arm_smmu_strtab_ent ste = {
- .valid = true,
- .bypass = true,
- };
+ struct arm_smmu_strtab_ent ste = { .assigned = false };

for (i = 0; i < nent; ++i) {
arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
@@ -1370,8 +1374,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return true;
- case IOMMU_CAP_INTR_REMAP:
- return true; /* MSIs are just memory writes */
case IOMMU_CAP_NOEXEC:
return true;
default:
@@ -1383,7 +1385,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;

- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
+ if (type != IOMMU_DOMAIN_UNMANAGED &&
+ type != IOMMU_DOMAIN_DMA &&
+ type != IOMMU_DOMAIN_IDENTITY)
return NULL;

/*
@@ -1514,6 +1518,11 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_device *smmu = smmu_domain->smmu;

+ if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
+ return 0;
+ }
+
/* Restrict the stage to what we can actually support */
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
@@ -1584,7 +1593,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
return step;
}

-static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
+static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
int i;
struct arm_smmu_master_data *master = fwspec->iommu_priv;
@@ -1596,17 +1605,14 @@ static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)

arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
}
-
- return 0;
}

static void arm_smmu_detach_dev(struct device *dev)
{
struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;

- master->ste.bypass = true;
- if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
- dev_warn(dev, "failed to install bypass STE\n");
+ master->ste.assigned = false;
+ arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -1625,7 +1631,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
ste = &master->ste;

/* Already attached to a different domain? */
- if (!ste->bypass)
+ if (ste->assigned)
arm_smmu_detach_dev(dev);

mutex_lock(&smmu_domain->init_mutex);
@@ -1646,10 +1652,12 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto out_unlock;
}

- ste->bypass = false;
- ste->valid = true;
+ ste->assigned = true;

- if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
+ ste->s1_cfg = NULL;
+ ste->s2_cfg = NULL;
+ } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
ste->s1_cfg = &smmu_domain->s1_cfg;
ste->s2_cfg = NULL;
arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
@@ -1658,10 +1666,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
ste->s2_cfg = &smmu_domain->s2_cfg;
}

- ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
- if (ret < 0)
- ste->valid = false;
-
+ arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
out_unlock:
mutex_unlock(&smmu_domain->init_mutex);
return ret;
@@ -1709,6 +1714,9 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

+ if (domain->type == IOMMU_DOMAIN_IDENTITY)
+ return iova;
+
if (!ops)
return 0;

@@ -1807,7 +1815,7 @@ static void arm_smmu_remove_device(struct device *dev)
return;

master = fwspec->iommu_priv;
- if (master && master->ste.valid)
+ if (master && master->ste.assigned)
arm_smmu_detach_dev(dev);
iommu_group_remove_device(dev);
kfree(master);
@@ -1836,6 +1844,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
switch (attr) {
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
@@ -1851,6 +1862,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
mutex_lock(&smmu_domain->init_mutex);

switch (attr) {
@@ -1880,6 +1894,31 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, args->args, 1);
}

+static void arm_smmu_get_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *region;
+ int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+ region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+ prot, IOMMU_RESV_SW_MSI);
+ if (!region)
+ return;
+
+ list_add_tail(&region->list, head);
+
+ iommu_dma_get_resv_regions(dev, head);
+}
+
+static void arm_smmu_put_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *entry, *next;
+
+ list_for_each_entry_safe(entry, next, head, list)
+ kfree(entry);
+}
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -1895,6 +1934,8 @@ static struct iommu_ops arm_smmu_ops = {
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
+ .get_resv_regions = arm_smmu_get_resv_regions,
+ .put_resv_regions = arm_smmu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};

diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 8f728144..3243a96d 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -49,6 +49,7 @@
#include <linux/spinlock.h>

#include <linux/amba/bus.h>
+#include "../staging/fsl-mc/include/mc-bus.h"

#include "io-pgtable.h"

@@ -247,6 +248,7 @@ enum arm_smmu_s2cr_privcfg {
#define ARM_MMU500_ACTLR_CPRE (1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
+#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)

#define CB_PAR_F (1 << 0)

@@ -278,6 +280,9 @@ enum arm_smmu_s2cr_privcfg {

#define FSYNR0_WNR (1 << 4)

+#define MSI_IOVA_BASE 0x8000000
+#define MSI_IOVA_LENGTH 0x100000
+
static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
@@ -401,6 +406,7 @@ enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
ARM_SMMU_DOMAIN_NESTED,
+ ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
@@ -821,6 +827,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu_domain->smmu)
goto out_unlock;

+ if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
+ smmu_domain->smmu = smmu;
+ goto out_unlock;
+ }
+
/*
* Mapping the requested stage onto what we support is surprisingly
* complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -981,7 +993,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
void __iomem *cb_base;
int irq;

- if (!smmu)
+ if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
return;

/*
@@ -1004,7 +1016,9 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
struct arm_smmu_domain *smmu_domain;

- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
+ if (type != IOMMU_DOMAIN_UNMANAGED &&
+ type != IOMMU_DOMAIN_DMA &&
+ type != IOMMU_DOMAIN_IDENTITY)
return NULL;
/*
* Allocate the domain and initialise some of its data structures.
@@ -1202,10 +1216,15 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
{
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_s2cr *s2cr = smmu->s2crs;
- enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
u8 cbndx = smmu_domain->cfg.cbndx;
+ enum arm_smmu_s2cr_type type;
int i, idx;

+ if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
+ type = S2CR_TYPE_BYPASS;
+ else
+ type = S2CR_TYPE_TRANS;
+
for_each_cfg_sme(fwspec, i, idx) {
if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
continue;
@@ -1343,6 +1362,9 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;

+ if (domain->type == IOMMU_DOMAIN_IDENTITY)
+ return iova;
+
if (!ops)
return 0;

@@ -1368,8 +1390,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
* requests.
*/
return true;
- case IOMMU_CAP_INTR_REMAP:
- return true; /* MSIs are just memory writes */
case IOMMU_CAP_NOEXEC:
return true;
default:
@@ -1478,10 +1498,12 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
}

if (group)
- return group;
+ return iommu_group_ref_get(group);

if (dev_is_pci(dev))
group = pci_device_group(dev);
+ else if (dev_is_fsl_mc(dev))
+ group = fsl_mc_device_group(dev);
else
group = generic_device_group(dev);

@@ -1493,6 +1515,9 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
switch (attr) {
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
@@ -1508,6 +1533,9 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
mutex_lock(&smmu_domain->init_mutex);

switch (attr) {
@@ -1534,17 +1562,44 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,

static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
- u32 fwid = 0;
+ u32 mask, fwid = 0;

if (args->args_count > 0)
fwid |= (u16)args->args[0];

if (args->args_count > 1)
fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
+ else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
+ fwid |= (u16)mask << SMR_MASK_SHIFT;

return iommu_fwspec_add_ids(dev, &fwid, 1);
}

+static void arm_smmu_get_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *region;
+ int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+ region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+ prot, IOMMU_RESV_SW_MSI);
+ if (!region)
+ return;
+
+ list_add_tail(&region->list, head);
+
+ iommu_dma_get_resv_regions(dev, head);
+}
+
+static void arm_smmu_put_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *entry, *next;
+
+ list_for_each_entry_safe(entry, next, head, list)
+ kfree(entry);
+}
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -1560,6 +1615,8 @@ static struct iommu_ops arm_smmu_ops = {
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
+ .get_resv_regions = arm_smmu_get_resv_regions,
+ .put_resv_regions = arm_smmu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};

@@ -1581,16 +1638,22 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
for (i = 0; i < smmu->num_mapping_groups; ++i)
arm_smmu_write_sme(smmu, i);

- /*
- * Before clearing ARM_MMU500_ACTLR_CPRE, need to
- * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
- * bit is only present in MMU-500r2 onwards.
- */
- reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
- major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
- if ((smmu->model == ARM_MMU500) && (major >= 2)) {
+ if (smmu->model == ARM_MMU500) {
+ /*
+ * Before clearing ARM_MMU500_ACTLR_CPRE, need to
+ * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
+ * bit is only present in MMU-500r2 onwards.
+ */
+ reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
+ major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
- reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
+ if (major >= 2)
+ reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
+ /*
+ * Allow unmatched Stream IDs to allocate bypass
+ * TLB entries for reduced latency.
+ */
+ reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
}

@@ -2024,6 +2087,11 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
}
#endif
+#ifdef CONFIG_FSL_MC_BUS
+ if (!iommu_present(&fsl_mc_bus_type))
+ bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
+#endif
+
return 0;
}

diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 1520e7f0..3ade4153 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
phys_addr_t phys;
};

+enum iommu_dma_cookie_type {
+ IOMMU_DMA_IOVA_COOKIE,
+ IOMMU_DMA_MSI_COOKIE,
+};
+
struct iommu_dma_cookie {
- struct iova_domain iovad;
- struct list_head msi_page_list;
- spinlock_t msi_lock;
+ enum iommu_dma_cookie_type type;
+ union {
+ /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
+ struct iova_domain iovad;
+ /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
+ dma_addr_t msi_iova;
+ };
+ struct list_head msi_page_list;
+ spinlock_t msi_lock;
};

+static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
+{
+ if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+ return cookie->iovad.granule;
+ return PAGE_SIZE;
+}
+
static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
{
- return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+
+ if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+ return &cookie->iovad;
+ return NULL;
+}
+
+static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
+{
+ struct iommu_dma_cookie *cookie;
+
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (cookie) {
+ spin_lock_init(&cookie->msi_lock);
+ INIT_LIST_HEAD(&cookie->msi_page_list);
+ cookie->type = type;
+ }
+ return cookie;
}

int iommu_dma_init(void)
@@ -61,26 +96,54 @@ int iommu_dma_init(void)
* callback when domain->type == IOMMU_DOMAIN_DMA.
*/
int iommu_get_dma_cookie(struct iommu_domain *domain)
+{
+ if (domain->iova_cookie)
+ return -EEXIST;
+
+ domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
+ if (!domain->iova_cookie)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(iommu_get_dma_cookie);
+
+/**
+ * iommu_get_msi_cookie - Acquire just MSI remapping resources
+ * @domain: IOMMU domain to prepare
+ * @base: Start address of IOVA region for MSI mappings
+ *
+ * Users who manage their own IOVA allocation and do not want DMA API support,
+ * but would still like to take advantage of automatic MSI remapping, can use
+ * this to initialise their own domain appropriately. Users should reserve a
+ * contiguous IOVA region, starting at @base, large enough to accommodate the
+ * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
+ * used by the devices attached to @domain.
+ */
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
struct iommu_dma_cookie *cookie;

+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
if (domain->iova_cookie)
return -EEXIST;

- cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
if (!cookie)
return -ENOMEM;

- spin_lock_init(&cookie->msi_lock);
- INIT_LIST_HEAD(&cookie->msi_page_list);
+ cookie->msi_iova = base;
domain->iova_cookie = cookie;
return 0;
}
-EXPORT_SYMBOL(iommu_get_dma_cookie);
+EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
* iommu_put_dma_cookie - Release a domain's DMA mapping resources
- * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
+ * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
+ * iommu_get_msi_cookie()
*
* IOMMU drivers should normally call this from their domain_free callback.
*/
@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
if (!cookie)
return;

- if (cookie->iovad.granule)
+ if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
put_iova_domain(&cookie->iovad);

list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
@@ -104,21 +167,99 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

-static void iova_reserve_pci_windows(struct pci_dev *dev,
- struct iova_domain *iovad)
+/**
+ * iommu_dma_get_resv_regions - Reserved region driver helper
+ * @dev: Device from iommu_get_resv_regions()
+ * @list: Reserved region list from iommu_get_resv_regions()
+ *
+ * IOMMU drivers can use this to implement their .get_resv_regions callback
+ * for general non-IOMMU-specific reservations. Currently, this covers host
+ * bridge windows for PCI devices.
+ */
+void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
- struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
+ struct pci_host_bridge *bridge;
struct resource_entry *window;
- unsigned long lo, hi;

+ if (!dev_is_pci(dev))
+ return;
+
+ bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
resource_list_for_each_entry(window, &bridge->windows) {
+ struct iommu_resv_region *region;
+ phys_addr_t start;
+ size_t length;
+
if (resource_type(window->res) != IORESOURCE_MEM)
continue;

- lo = iova_pfn(iovad, window->res->start - window->offset);
- hi = iova_pfn(iovad, window->res->end - window->offset);
+ start = window->res->start - window->offset;
+ length = window->res->end - window->res->start + 1;
+ region = iommu_alloc_resv_region(start, length, 0,
+ IOMMU_RESV_RESERVED);
+ if (!region)
+ return;
+
+ list_add_tail(&region->list, list);
+ }
+}
+EXPORT_SYMBOL(iommu_dma_get_resv_regions);
+
+static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
+ phys_addr_t start, phys_addr_t end)
+{
+ struct iova_domain *iovad = &cookie->iovad;
+ struct iommu_dma_msi_page *msi_page;
+ int i, num_pages;
+
+ start -= iova_offset(iovad, start);
+ num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
+
+ msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
+ if (!msi_page)
+ return -ENOMEM;
+
+ for (i = 0; i < num_pages; i++) {
+ msi_page[i].phys = start;
+ msi_page[i].iova = start;
+ INIT_LIST_HEAD(&msi_page[i].list);
+ list_add(&msi_page[i].list, &cookie->msi_page_list);
+ start += iovad->granule;
+ }
+
+ return 0;
+}
+
+static int iova_reserve_iommu_regions(struct device *dev,
+ struct iommu_domain *domain)
+{
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
+ struct iommu_resv_region *region;
+ LIST_HEAD(resv_regions);
+ int ret = 0;
+
+ iommu_get_resv_regions(dev, &resv_regions);
+ list_for_each_entry(region, &resv_regions, list) {
+ unsigned long lo, hi;
+
+ /* We ARE the software that manages these! */
+ if (region->type == IOMMU_RESV_SW_MSI)
+ continue;
+
+ lo = iova_pfn(iovad, region->start);
+ hi = iova_pfn(iovad, region->start + region->length - 1);
reserve_iova(iovad, lo, hi);
+
+ if (region->type == IOMMU_RESV_MSI)
+ ret = cookie_init_hw_msi_region(cookie, region->start,
+ region->start + region->length);
+ if (ret)
+ break;
}
+ iommu_put_resv_regions(dev, &resv_regions);
+
+ return ret;
}

/**
@@ -136,11 +277,12 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev)
{
- struct iova_domain *iovad = cookie_iovad(domain);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn, end_pfn;

- if (!iovad)
- return -ENODEV;
+ if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
+ return -EINVAL;

/* Use the smallest supported page size for IOVA granularity */
order = __ffs(domain->pgsize_bitmap);
@@ -160,22 +302,37 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
end_pfn = min_t(unsigned long, end_pfn,
domain->geometry.aperture_end >> order);
}
+ /*
+ * PCI devices may have larger DMA masks, but still prefer allocating
+ * within a 32-bit mask to avoid DAC addressing. Such limitations don't
+ * apply to the typical platform device, so for those we may as well
+ * leave the cache limit at the top of their range to save an rb_last()
+ * traversal on every allocation.
+ */
+ if (dev && dev_is_pci(dev))
+ end_pfn &= DMA_BIT_MASK(32) >> order;

- /* All we can safely do with an existing domain is enlarge it */
+ /* start_pfn is always nonzero for an already-initialised domain */
if (iovad->start_pfn) {
if (1UL << order != iovad->granule ||
- base_pfn != iovad->start_pfn ||
- end_pfn < iovad->dma_32bit_pfn) {
+ base_pfn != iovad->start_pfn) {
pr_warn("Incompatible range for DMA domain\n");
return -EFAULT;
}
- iovad->dma_32bit_pfn = end_pfn;
- } else {
- init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
- if (dev && dev_is_pci(dev))
- iova_reserve_pci_windows(to_pci_dev(dev), iovad);
+ /*
+ * If we have devices with different DMA masks, move the free
+ * area cache limit down for the benefit of the smaller one.
+ */
+ iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
+
+ return 0;
}
- return 0;
+
+ init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+ if (!dev)
+ return 0;
+
+ return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);

@@ -643,11 +800,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iommu_dma_msi_page *msi_page;
- struct iova_domain *iovad = &cookie->iovad;
+ struct iova_domain *iovad = cookie_iovad(domain);
struct iova *iova;
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+ size_t size = cookie_msi_granule(cookie);

- msi_addr &= ~(phys_addr_t)iova_mask(iovad);
+ msi_addr &= ~(phys_addr_t)(size - 1);
list_for_each_entry(msi_page, &cookie->msi_page_list, list)
if (msi_page->phys == msi_addr)
return msi_page;
@@ -656,13 +814,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page)
return NULL;

- iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
- if (!iova)
- goto out_free_page;
-
msi_page->phys = msi_addr;
- msi_page->iova = iova_dma_addr(iovad, iova);
- if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
+ if (iovad) {
+ iova = __alloc_iova(domain, size, dma_get_mask(dev));
+ if (!iova)
+ goto out_free_page;
+ msi_page->iova = iova_dma_addr(iovad, iova);
+ } else {
+ msi_page->iova = cookie->msi_iova;
+ cookie->msi_iova += size;
+ }
+
+ if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
goto out_free_iova;

INIT_LIST_HEAD(&msi_page->list);
@@ -670,7 +833,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
return msi_page;

out_free_iova:
- __free_iova(iovad, iova);
+ if (iovad)
+ __free_iova(iovad, iova);
+ else
+ cookie->msi_iova -= size;
out_free_page:
kfree(msi_page);
return NULL;
@@ -711,7 +877,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
msg->data = ~0U;
} else {
msg->address_hi = upper_32_bits(msi_page->iova);
- msg->address_lo &= iova_mask(&cookie->iovad);
+ msg->address_lo &= cookie_msi_granule(cookie) - 1;
msg->address_lo += lower_32_bits(msi_page->iova);
}
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 002f8a42..befbfd30 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
u64 end_address; /* reserved end address */
struct dmar_dev_scope *devices; /* target devices */
int devices_cnt; /* target device count */
+ struct iommu_resv_region *resv; /* reserved region handle */
};

struct dmar_atsr_unit {
@@ -4250,27 +4251,40 @@ static inline void init_iommu_pm_ops(void) {}
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
+ int prot = DMA_PTE_READ|DMA_PTE_WRITE;
struct dmar_rmrr_unit *rmrru;
+ size_t length;

rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
if (!rmrru)
- return -ENOMEM;
+ goto out;

rmrru->hdr = header;
rmrr = (struct acpi_dmar_reserved_memory *)header;
rmrru->base_address = rmrr->base_address;
rmrru->end_address = rmrr->end_address;
+
+ length = rmrr->end_address - rmrr->base_address + 1;
+ rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
+ IOMMU_RESV_DIRECT);
+ if (!rmrru->resv)
+ goto free_rmrru;
+
rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
((void *)rmrr) + rmrr->header.length,
&rmrru->devices_cnt);
- if (rmrru->devices_cnt && rmrru->devices == NULL) {
- kfree(rmrru);
- return -ENOMEM;
- }
+ if (rmrru->devices_cnt && rmrru->devices == NULL)
+ goto free_all;

list_add(&rmrru->list, &dmar_rmrr_units);

return 0;
+free_all:
+ kfree(rmrru->resv);
+free_rmrru:
+ kfree(rmrru);
+out:
+ return -ENOMEM;
}

static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
@@ -4484,6 +4498,7 @@ static void intel_iommu_free_dmars(void)
list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
list_del(&rmrru->list);
dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+ kfree(rmrru->resv);
kfree(rmrru);
}

@@ -5219,6 +5234,45 @@ static void intel_iommu_remove_device(struct device *dev)
iommu_device_unlink(iommu->iommu_dev, dev);
}

+static void intel_iommu_get_resv_regions(struct device *device,
+ struct list_head *head)
+{
+ struct iommu_resv_region *reg;
+ struct dmar_rmrr_unit *rmrr;
+ struct device *i_dev;
+ int i;
+
+ rcu_read_lock();
+ for_each_rmrr_units(rmrr) {
+ for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+ i, i_dev) {
+ if (i_dev != device)
+ continue;
+
+ list_add_tail(&rmrr->resv->list, head);
+ }
+ }
+ rcu_read_unlock();
+
+ reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
+ IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
+ 0, IOMMU_RESV_MSI);
+ if (!reg)
+ return;
+ list_add_tail(&reg->list, head);
+}
+
+static void intel_iommu_put_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *entry, *next;
+
+ list_for_each_entry_safe(entry, next, head, list) {
+ if (entry->type == IOMMU_RESV_RESERVED)
+ kfree(entry);
+ }
+}
+
#ifdef CONFIG_INTEL_IOMMU_SVM
#define MAX_NR_PASID_BITS (20)
static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
@@ -5349,19 +5403,21 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
#endif /* CONFIG_INTEL_IOMMU_SVM */

static const struct iommu_ops intel_iommu_ops = {
- .capable = intel_iommu_capable,
- .domain_alloc = intel_iommu_domain_alloc,
- .domain_free = intel_iommu_domain_free,
- .attach_dev = intel_iommu_attach_device,
- .detach_dev = intel_iommu_detach_device,
- .map = intel_iommu_map,
- .unmap = intel_iommu_unmap,
- .map_sg = default_iommu_map_sg,
- .iova_to_phys = intel_iommu_iova_to_phys,
- .add_device = intel_iommu_add_device,
- .remove_device = intel_iommu_remove_device,
- .device_group = pci_device_group,
- .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
+ .capable = intel_iommu_capable,
+ .domain_alloc = intel_iommu_domain_alloc,
+ .domain_free = intel_iommu_domain_free,
+ .attach_dev = intel_iommu_attach_device,
+ .detach_dev = intel_iommu_detach_device,
+ .map = intel_iommu_map,
+ .unmap = intel_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = intel_iommu_iova_to_phys,
+ .add_device = intel_iommu_add_device,
+ .remove_device = intel_iommu_remove_device,
+ .get_resv_regions = intel_iommu_get_resv_regions,
+ .put_resv_regions = intel_iommu_put_resv_regions,
+ .device_group = pci_device_group,
+ .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};

static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 87d3060f..e6a8c225 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -36,6 +36,7 @@

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
+static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;

struct iommu_callback_data {
const struct iommu_ops *ops;
@@ -68,6 +69,13 @@ struct iommu_group_attribute {
const char *buf, size_t count);
};

+static const char * const iommu_group_resv_type_string[] = {
+ [IOMMU_RESV_DIRECT] = "direct",
+ [IOMMU_RESV_RESERVED] = "reserved",
+ [IOMMU_RESV_MSI] = "msi",
+ [IOMMU_RESV_SW_MSI] = "msi",
+};
+
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
@@ -86,6 +94,18 @@ static int __iommu_attach_group(struct iommu_domain *domain,
static void __iommu_detach_group(struct iommu_domain *domain,
struct iommu_group *group);

+static int __init iommu_set_def_domain_type(char *str)
+{
+ bool pt;
+
+ if (!str || strtobool(str, &pt))
+ return -EINVAL;
+
+ iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
+ return 0;
+}
+early_param("iommu.passthrough", iommu_set_def_domain_type);
+
static ssize_t iommu_group_attr_show(struct kobject *kobj,
struct attribute *__attr, char *buf)
{
@@ -133,8 +153,131 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
return sprintf(buf, "%s\n", group->name);
}

+/**
+ * iommu_insert_resv_region - Insert a new region in the
+ * list of reserved regions.
+ * @new: new region to insert
+ * @regions: list of regions
+ *
+ * The new element is sorted by address with respect to the other
+ * regions of the same type. In case it overlaps with another
+ * region of the same type, regions are merged. In case it
+ * overlaps with another region of different type, regions are
+ * not merged.
+ */
+static int iommu_insert_resv_region(struct iommu_resv_region *new,
+ struct list_head *regions)
+{
+ struct iommu_resv_region *region;
+ phys_addr_t start = new->start;
+ phys_addr_t end = new->start + new->length - 1;
+ struct list_head *pos = regions->next;
+
+ while (pos != regions) {
+ struct iommu_resv_region *entry =
+ list_entry(pos, struct iommu_resv_region, list);
+ phys_addr_t a = entry->start;
+ phys_addr_t b = entry->start + entry->length - 1;
+ int type = entry->type;
+
+ if (end < a) {
+ goto insert;
+ } else if (start > b) {
+ pos = pos->next;
+ } else if ((start >= a) && (end <= b)) {
+ if (new->type == type)
+ goto done;
+ else
+ pos = pos->next;
+ } else {
+ if (new->type == type) {
+ phys_addr_t new_start = min(a, start);
+ phys_addr_t new_end = max(b, end);
+
+ list_del(&entry->list);
+ entry->start = new_start;
+ entry->length = new_end - new_start + 1;
+ iommu_insert_resv_region(entry, regions);
+ } else {
+ pos = pos->next;
+ }
+ }
+ }
+insert:
+ region = iommu_alloc_resv_region(new->start, new->length,
+ new->prot, new->type);
+ if (!region)
+ return -ENOMEM;
+
+ list_add_tail(&region->list, pos);
+done:
+ return 0;
+}
+
+static int
+iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
+ struct list_head *group_resv_regions)
+{
+ struct iommu_resv_region *entry;
+ int ret;
+
+ list_for_each_entry(entry, dev_resv_regions, list) {
+ ret = iommu_insert_resv_region(entry, group_resv_regions);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+int iommu_get_group_resv_regions(struct iommu_group *group,
+ struct list_head *head)
+{
+ struct iommu_device *device;
+ int ret = 0;
+
+ mutex_lock(&group->mutex);
+ list_for_each_entry(device, &group->devices, list) {
+ struct list_head dev_resv_regions;
+
+ INIT_LIST_HEAD(&dev_resv_regions);
+ iommu_get_resv_regions(device->dev, &dev_resv_regions);
+ ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
+ iommu_put_resv_regions(device->dev, &dev_resv_regions);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&group->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
+
+static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
+ char *buf)
+{
+ struct iommu_resv_region *region, *next;
+ struct list_head group_resv_regions;
+ char *str = buf;
+
+ INIT_LIST_HEAD(&group_resv_regions);
+ iommu_get_group_resv_regions(group, &group_resv_regions);
+
+ list_for_each_entry_safe(region, next, &group_resv_regions, list) {
+ str += sprintf(str, "0x%016llx 0x%016llx %s\n",
+ (long long int)region->start,
+ (long long int)(region->start +
+ region->length - 1),
+ iommu_group_resv_type_string[region->type]);
+ kfree(region);
+ }
+
+ return (str - buf);
+}
+
static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

+static IOMMU_GROUP_ATTR(reserved_regions, 0444,
+ iommu_group_show_resv_regions, NULL);
+
static void iommu_group_release(struct kobject *kobj)
{
struct iommu_group *group = to_iommu_group(kobj);
@@ -212,6 +355,11 @@ struct iommu_group *iommu_group_alloc(void)
*/
kobject_put(&group->kobj);

+ ret = iommu_group_create_file(group,
+ &iommu_group_attr_reserved_regions);
+ if (ret)
+ return ERR_PTR(ret);
+
pr_debug("Allocated group %d\n", group->id);

return group;
@@ -318,7 +466,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
struct device *dev)
{
struct iommu_domain *domain = group->default_domain;
- struct iommu_dm_region *entry;
+ struct iommu_resv_region *entry;
struct list_head mappings;
unsigned long pg_size;
int ret = 0;
@@ -331,18 +479,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
pg_size = 1UL << __ffs(domain->pgsize_bitmap);
INIT_LIST_HEAD(&mappings);

- iommu_get_dm_regions(dev, &mappings);
+ iommu_get_resv_regions(dev, &mappings);

/* We need to consider overlapping regions for different devices */
list_for_each_entry(entry, &mappings, list) {
dma_addr_t start, end, addr;

- if (domain->ops->apply_dm_region)
- domain->ops->apply_dm_region(dev, domain, entry);
+ if (domain->ops->apply_resv_region)
+ domain->ops->apply_resv_region(dev, domain, entry);

start = ALIGN(entry->start, pg_size);
end = ALIGN(entry->start + entry->length, pg_size);

+ if (entry->type != IOMMU_RESV_DIRECT)
+ continue;
+
for (addr = start; addr < end; addr += pg_size) {
phys_addr_t phys_addr;

@@ -358,7 +509,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
}

out:
- iommu_put_dm_regions(dev, &mappings);
+ iommu_put_resv_regions(dev, &mappings);

return ret;
}
@@ -562,6 +713,19 @@ struct iommu_group *iommu_group_get(struct device *dev)
}
EXPORT_SYMBOL_GPL(iommu_group_get);

+/**
+ * iommu_group_ref_get - Increment reference on a group
+ * @group: the group to use, must not be NULL
+ *
+ * This function is called by iommu drivers to take additional references on an
+ * existing group. Returns the given group for convenience.
+ */
+struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
+{
+ kobject_get(group->devices_kobj);
+ return group;
+}
+
/**
* iommu_group_put - Decrement group reference
* @group: the group to use
@@ -845,10 +1009,19 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
* IOMMU driver.
*/
if (!group->default_domain) {
- group->default_domain = __iommu_domain_alloc(dev->bus,
- IOMMU_DOMAIN_DMA);
+ struct iommu_domain *dom;
+
+ dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
+ if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
+ dev_warn(dev,
+ "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
+ iommu_def_domain_type);
+ dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
+ }
+
+ group->default_domain = dom;
if (!group->domain)
- group->domain = group->default_domain;
+ group->domain = dom;
}

ret = iommu_group_add_device(group, dev);
@@ -1557,20 +1730,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

-void iommu_get_dm_regions(struct device *dev, struct list_head *list)
+void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;

- if (ops && ops->get_dm_regions)
- ops->get_dm_regions(dev, list);
+ if (ops && ops->get_resv_regions)
+ ops->get_resv_regions(dev, list);
}

-void iommu_put_dm_regions(struct device *dev, struct list_head *list)
+void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;

- if (ops && ops->put_dm_regions)
- ops->put_dm_regions(dev, list);
+ if (ops && ops->put_resv_regions)
+ ops->put_resv_regions(dev, list);
+}
+
+struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
+ size_t length, int prot,
+ enum iommu_resv_type type)
+{
+ struct iommu_resv_region *region;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return NULL;
+
+ INIT_LIST_HEAD(&region->list);
+ region->start = start;
+ region->length = length;
+ region->prot = prot;
+ region->type = type;
+ return region;
}

/* Request that a device is direct mapped by the IOMMU */
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index b12c12d7..9799daea 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -410,6 +410,8 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
data->m4u_group = iommu_group_alloc();
if (IS_ERR(data->m4u_group))
dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+ } else {
+ iommu_group_ref_get(data->m4u_group);
}
return data->m4u_group;
}
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index b8aeb076..c7063e9d 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -502,6 +502,8 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
data->m4u_group = iommu_group_alloc();
if (IS_ERR(data->m4u_group))
dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+ } else {
+ iommu_group_ref_get(data->m4u_group);
}
return data->m4u_group;
}
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 32c58906..36d3206d 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -27,6 +27,7 @@ int iommu_dma_init(void);

/* Domain management interface for IOMMU drivers */
int iommu_get_dma_cookie(struct iommu_domain *domain);
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
void iommu_put_dma_cookie(struct iommu_domain *domain);

/* Setup call for arch DMA mapping code */
@@ -66,6 +67,7 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);

/* The DMA API isn't _quite_ the whole story, though... */
void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
+void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);

#else

@@ -82,6 +84,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
return -ENODEV;
}

+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+ return -ENODEV;
+}
+
static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
{
}
@@ -90,6 +97,10 @@ static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
}

+static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
+{
+}
+
#endif /* CONFIG_IOMMU_DMA */
#endif /* __KERNEL__ */
#endif /* __DMA_IOMMU_H */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 436dc213..188599f5 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -117,18 +117,32 @@ enum iommu_attr {
DOMAIN_ATTR_MAX,
};

+/* These are the possible reserved region types */
+enum iommu_resv_type {
+ /* Memory regions which must be mapped 1:1 at all times */
+ IOMMU_RESV_DIRECT,
+ /* Arbitrary "never map this or give it to a device" address ranges */
+ IOMMU_RESV_RESERVED,
+ /* Hardware MSI region (untranslated) */
+ IOMMU_RESV_MSI,
+ /* Software-managed MSI translation window */
+ IOMMU_RESV_SW_MSI,
+};
+
/**
- * struct iommu_dm_region - descriptor for a direct mapped memory region
+ * struct iommu_resv_region - descriptor for a reserved memory region
* @list: Linked list pointers
* @start: System physical start address of the region
* @length: Length of the region in bytes
* @prot: IOMMU Protection flags (READ/WRITE/...)
+ * @type: Type of the reserved region
*/
-struct iommu_dm_region {
+struct iommu_resv_region {
struct list_head list;
phys_addr_t start;
size_t length;
int prot;
+ enum iommu_resv_type type;
};

#ifdef CONFIG_IOMMU_API
@@ -150,9 +164,9 @@ struct iommu_dm_region {
* @device_group: find iommu group for a particular device
* @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes
- * @get_dm_regions: Request list of direct mapping requirements for a device
- * @put_dm_regions: Free list of direct mapping requirements for a device
- * @apply_dm_region: Temporary helper call-back for iova reserved ranges
+ * @get_resv_regions: Request list of reserved regions for a device
+ * @put_resv_regions: Free list of reserved regions for a device
+ * @apply_resv_region: Temporary helper call-back for iova reserved ranges
* @domain_window_enable: Configure and enable a particular window for a domain
* @domain_window_disable: Disable a particular window for a domain
* @domain_set_windows: Set the number of windows for a domain
@@ -184,11 +198,12 @@ struct iommu_ops {
int (*domain_set_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);

- /* Request/Free a list of direct mapping requirements for a device */
- void (*get_dm_regions)(struct device *dev, struct list_head *list);
- void (*put_dm_regions)(struct device *dev, struct list_head *list);
- void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain,
- struct iommu_dm_region *region);
+ /* Request/Free a list of reserved regions for a device */
+ void (*get_resv_regions)(struct device *dev, struct list_head *list);
+ void (*put_resv_regions)(struct device *dev, struct list_head *list);
+ void (*apply_resv_region)(struct device *dev,
+ struct iommu_domain *domain,
+ struct iommu_resv_region *region);

/* Window handling functions */
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
@@ -233,9 +248,14 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t io
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);

-extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
-extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
+extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
+extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);
+extern struct iommu_resv_region *
+iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
+ enum iommu_resv_type type);
+extern int iommu_get_group_resv_regions(struct iommu_group *group,
+ struct list_head *head);

extern int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
@@ -253,6 +273,7 @@ extern void iommu_group_remove_device(struct device *dev);
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
+extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
struct notifier_block *nb);
@@ -439,16 +460,22 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
{
}

-static inline void iommu_get_dm_regions(struct device *dev,
+static inline void iommu_get_resv_regions(struct device *dev,
struct list_head *list)
{
}

-static inline void iommu_put_dm_regions(struct device *dev,
+static inline void iommu_put_resv_regions(struct device *dev,
struct list_head *list)
{
}

+static inline int iommu_get_group_resv_regions(struct iommu_group *group,
+ struct list_head *head)
+{
+ return -ENODEV;
+}
+
static inline int iommu_request_dm_for_dev(struct device *dev)
{
return -ENODEV;
--
2.14.1

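[Editor's note, not part of the patch] For context, a hedged sketch of how the iommu_get_msi_cookie() interface added above is meant to be consumed by a VFIO-style caller that runs its own unmanaged domain: look up the IOMMU_RESV_SW_MSI window a driver such as arm-smmu advertises for a device, then seed the domain's MSI cookie with its base so MSI doorbell writes are remapped automatically. The function name and error handling are illustrative assumptions, not code from this patch.

/*
 * Illustrative sketch only, assuming @domain is IOMMU_DOMAIN_UNMANAGED
 * and @dev is attached (or about to be attached) to it.
 */
#include <linux/dma-iommu.h>
#include <linux/iommu.h>
#include <linux/list.h>

static int setup_sw_msi_cookie(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_resv_region *region;
	dma_addr_t base = 0;
	bool found = false;
	LIST_HEAD(resv_regions);
	int ret = 0;

	/* Ask the IOMMU driver which regions it reserves for this device */
	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		if (region->type == IOMMU_RESV_SW_MSI) {
			base = region->start;	/* e.g. MSI_IOVA_BASE above */
			found = true;
			break;
		}
	}
	iommu_put_resv_regions(dev, &resv_regions);

	/* Hand the window base to the trivial MSI-cookie allocator */
	if (found)
		ret = iommu_get_msi_cookie(domain, base);
	return ret;
}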