target/linux/layerscape/patches-4.9/820-kvm-support-layerscape.patch
From fe22151c95c02c6bb145ea6c3685941e8fb09d60 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <yangbo.lu@nxp.com>
Date: Thu, 5 Jul 2018 17:43:16 +0800
Subject: [PATCH 32/32] kvm: support layerscape

This is an integrated patch for layerscape KVM support. It adds a
stage-2 pgprot_t parameter to kvm_phys_addr_ioremap(), derives stage-2
memory attributes from the corresponding stage-1 mapping through a new
stage1_to_stage2_pgprot() helper, makes __kvm_flush_dcache_pte() handle
PFNs without a struct page via ioremap_cache_ns(), and has the vgic ITS
advertise 17 DevBits while validating device and collection IDs in
vgic_its_check_id().

Signed-off-by: Laurentiu Tudor <laurentiu.tudor@nxp.com>
Signed-off-by: Bharat Bhushan <Bharat.Bhushan@nxp.com>
Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
---
 arch/arm/include/asm/kvm_mmu.h   |  3 +-
 arch/arm/kvm/mmu.c               | 56 ++++++++++++++++++++++++++++++--
 arch/arm64/include/asm/kvm_mmu.h | 14 ++++++--
 virt/kvm/arm/vgic/vgic-its.c     | 24 +++++++++++---
 virt/kvm/arm/vgic/vgic-v2.c      |  3 +-
 5 files changed, 88 insertions(+), 12 deletions(-)

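Note for reviewers (placed after the "---" marker, so it is not part of
the applied change): the common thread of the patch is to stop
hard-coding PAGE_S2_DEVICE and instead derive the stage-2 attributes of
a mapping from the stage-1 PTE that currently backs it. A minimal
sketch of that flow, reusing only the helper this patch adds and the
variable names from the kvm_arch_prepare_memory_region() hunk below
(locking and error handling simplified):

	pte_t *pte;
	spinlock_t *ptl;
	pgprot_t prot;

	/* Look up the stage-1 PTE that backs the userspace mapping. */
	pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
	/* Translate its memory-attribute index into a stage-2 pgprot. */
	prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
	pte_unmap_unlock(pte, ptl);

	/* Install the stage-2 mapping with matching attributes. */
	ret = kvm_phys_addr_ioremap(kvm, gpa, pa, vm_end - vm_start,
				    writable, prot);

Normal (cacheable) stage-1 attributes map to PAGE_S2, or to PAGE_S2_NS
when PTE_SHARED is clear, while any Device attribute index falls back
to PAGE_S2_DEVICE; see stage1_to_stage2_pgprot() in the arm/kvm/mmu.c
hunk.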
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -55,7 +55,8 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable);
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -1020,9 +1020,11 @@ static int stage2_pmdp_test_and_clear_yo
  * @guest_ipa:	The IPA at which to insert the mapping
  * @pa:		The physical address of the device
  * @size:	The size of the mapping
+ * @prot:	S2 page translation bits
  */
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable)
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot)
 {
 	phys_addr_t addr, end;
 	int ret = 0;
@@ -1033,7 +1035,7 @@ int kvm_phys_addr_ioremap(struct kvm *kv
 	pfn = __phys_to_pfn(pa);
 
 	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
-		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
+		pte_t pte = pfn_pte(pfn, prot);
 
 		if (writable)
 			pte = kvm_s2pte_mkwrite(pte);
@@ -1057,6 +1059,30 @@ out:
 	return ret;
 }
 
+#ifdef CONFIG_ARM64
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	switch (pgprot_val(prot) & PTE_ATTRINDX_MASK) {
+	case PTE_ATTRINDX(MT_DEVICE_nGnRE):
+	case PTE_ATTRINDX(MT_DEVICE_nGnRnE):
+	case PTE_ATTRINDX(MT_DEVICE_GRE):
+		return PAGE_S2_DEVICE;
+	case PTE_ATTRINDX(MT_NORMAL_NC):
+	case PTE_ATTRINDX(MT_NORMAL):
+		return (pgprot_val(prot) & PTE_SHARED)
+			? PAGE_S2
+			: PAGE_S2_NS;
+	}
+
+	return PAGE_S2_DEVICE;
+}
+#else
+static pgprot_t stage1_to_stage2_pgprot(pgprot_t prot)
+{
+	return PAGE_S2_DEVICE;
+}
+#endif
+
 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
 {
 	kvm_pfn_t pfn = *pfnp;
@@ -1308,6 +1334,19 @@ static int user_mem_abort(struct kvm_vcp
 		hugetlb = true;
 		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
 	} else {
+		if (!is_vm_hugetlb_page(vma)) {
+			pte_t *pte;
+			spinlock_t *ptl;
+			pgprot_t prot;
+
+			pte = get_locked_pte(current->mm, memslot->userspace_addr, &ptl);
+			prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+			pte_unmap_unlock(pte, ptl);
+#ifdef CONFIG_ARM64
+			if (pgprot_val(prot) == pgprot_val(PAGE_S2_NS))
+				mem_type = PAGE_S2_NS;
+#endif
+		}
 		/*
 		 * Pages belonging to memslots that don't have the same
 		 * alignment for userspace and IPA cannot be mapped using
@@ -1345,6 +1384,11 @@ static int user_mem_abort(struct kvm_vcp
 	if (is_error_noslot_pfn(pfn))
 		return -EFAULT;
 
+#ifdef CONFIG_ARM64
+	if (pgprot_val(mem_type) == pgprot_val(PAGE_S2_NS)) {
+		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
+	} else
+#endif
 	if (kvm_is_device_pfn(pfn)) {
 		mem_type = PAGE_S2_DEVICE;
 		flags |= KVM_S2PTE_FLAG_IS_IOMAP;
@@ -1882,6 +1926,9 @@ int kvm_arch_prepare_memory_region(struc
 			gpa_t gpa = mem->guest_phys_addr +
 				    (vm_start - mem->userspace_addr);
 			phys_addr_t pa;
+			pgprot_t prot;
+			pte_t *pte;
+			spinlock_t *ptl;
 
 			pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
 			pa += vm_start - vma->vm_start;
@@ -1891,10 +1938,13 @@ int kvm_arch_prepare_memory_region(struc
 				ret = -EINVAL;
 				goto out;
 			}
+			pte = get_locked_pte(current->mm, mem->userspace_addr, &ptl);
+			prot = stage1_to_stage2_pgprot(__pgprot(pte_val(*pte)));
+			pte_unmap_unlock(pte, ptl);
 
 			ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
 						    vm_end - vm_start,
-						    writable);
+						    writable, prot);
 			if (ret)
 				break;
 		}
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -167,7 +167,8 @@ void stage2_unmap_vm(struct kvm *kvm);
 int kvm_alloc_stage2_pgd(struct kvm *kvm);
 void kvm_free_stage2_pgd(struct kvm *kvm);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
-			  phys_addr_t pa, unsigned long size, bool writable);
+			  phys_addr_t pa, unsigned long size, bool writable,
+			  pgprot_t prot);
 
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
 
@@ -274,8 +275,15 @@ static inline void __coherent_cache_gues
 
 static inline void __kvm_flush_dcache_pte(pte_t pte)
 {
-	struct page *page = pte_page(pte);
-	kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+	if (pfn_valid(pte_pfn(pte))) {
+		struct page *page = pte_page(pte);
+		kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+	} else {
+		void __iomem *va = ioremap_cache_ns(pte_pfn(pte) << PAGE_SHIFT, PAGE_SIZE);
+
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+		iounmap(va);
+	}
 }
 
 static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -176,6 +176,8 @@ static struct its_itte *find_itte(struct
 
 #define GIC_LPI_OFFSET 8192
 
+#define VITS_TYPER_DEVBITS 17
+
 /*
  * Finds and returns a collection in the ITS collection table.
  * Must be called with the its_lock mutex held.
@@ -375,7 +377,7 @@ static unsigned long vgic_mmio_read_its_
 	 * To avoid memory waste in the guest, we keep the number of IDBits and
 	 * DevBits low - as least for the time being.
 	 */
-	reg |= 0x0f << GITS_TYPER_DEVBITS_SHIFT;
+	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
 	reg |= 0x0f << GITS_TYPER_IDBITS_SHIFT;
 
 	return extract_bytes(reg, addr & 7, len);
@@ -601,16 +603,30 @@ static int vgic_its_cmd_handle_movi(stru
  * Check whether an ID can be stored into the corresponding guest table.
  * For a direct table this is pretty easy, but gets a bit nasty for
  * indirect tables. We check whether the resulting guest physical address
- * is actually valid (covered by a memslot and guest accessbible).
+ * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
-static bool vgic_its_check_id(struct vgic_its *its, u64 baser, int id)
+static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id)
 {
 	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
+	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
 	int index;
-	u64 indirect_ptr;
 	gfn_t gfn;
 
+	switch (type) {
+	case GITS_BASER_TYPE_DEVICE:
+		if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
+			return false;
+		break;
+	case GITS_BASER_TYPE_COLLECTION:
+		/* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
+		if (id >= BIT_ULL(16))
+			return false;
+		break;
+	default:
+		return false;
+	}
+
 	if (!(baser & GITS_BASER_INDIRECT)) {
 		phys_addr_t addr;
 
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -290,7 +290,8 @@ int vgic_v2_map_resources(struct kvm *kv
 	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
 		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
 					    kvm_vgic_global_state.vcpu_base,
-					    KVM_VGIC_V2_CPU_SIZE, true);
+					    KVM_VGIC_V2_CPU_SIZE, true,
+					    PAGE_S2_DEVICE);
 		if (ret) {
 			kvm_err("Unable to remap VGIC CPU to VCPU\n");
 			goto out;