1 From 361974032ae1b0eec36c51a8f1cd9b447864fcbd Mon Sep 17 00:00:00 2001
2 From: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
3 Date: Fri, 5 Jan 2018 00:44:00 +0900
4 Subject: [PATCH 150/454] vcsm: Unify cache manipulating functions
6 Signed-off-by: Sugizaki Yukimasa <i.can.speak.c.and.basic@gmail.com>
8 drivers/char/broadcom/vc_sm/vmcs_sm.c | 309 +++++++++++---------------
9 1 file changed, 132 insertions(+), 177 deletions(-)
11 --- a/drivers/char/broadcom/vc_sm/vmcs_sm.c
12 +++ b/drivers/char/broadcom/vc_sm/vmcs_sm.c
13 @@ -1256,61 +1256,106 @@ static const struct vm_operations_struct
14 .fault = vcsm_vma_fault,
17 -/* Walks a VMA and clean each valid page from the cache */
18 -static void vcsm_vma_cache_clean_page_range(unsigned long addr,
20 +static int clean_invalid_mem_2d(const void __user *addr,
21 + const size_t block_count, const size_t block_size, const size_t stride,
22 + const unsigned cache_op)
28 - unsigned long pgd_next, pud_next, pmd_next;
34 - pgd = pgd_offset(current->mm, addr);
36 - pgd_next = pgd_addr_end(addr, end);
38 - if (pgd_none(*pgd) || pgd_bad(*pgd))
42 - pud = pud_offset(pgd, addr);
44 - pud_next = pud_addr_end(addr, pgd_next);
45 - if (pud_none(*pud) || pud_bad(*pud))
49 - pmd = pmd_offset(pud, addr);
51 - pmd_next = pmd_addr_end(addr, pud_next);
52 - if (pmd_none(*pmd) || pmd_bad(*pmd))
56 - pte = pte_offset_map(pmd, addr);
59 - || !pte_present(*pte))
62 - /* Clean + invalidate */
63 - dmac_flush_range((const void *) addr,
65 - (addr + PAGE_SIZE));
67 - } while (pte++, addr +=
68 - PAGE_SIZE, addr != pmd_next);
71 + void (*op_fn)(const void*, const void*);
73 - } while (pmd++, addr = pmd_next, addr != pud_next);
74 + if (block_size <= 0) {
75 + pr_err("[%s]: size cannot be 0\n", __func__);
80 + case VCSM_CACHE_OP_INV:
81 + op_fn = dmac_inv_range;
83 + case VCSM_CACHE_OP_CLEAN:
84 + op_fn = dmac_clean_range;
86 + case VCSM_CACHE_OP_FLUSH:
87 + op_fn = dmac_flush_range;
90 + pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
94 + for (i = 0; i < block_count; i ++, addr += stride)
95 + op_fn(addr, addr + block_size);
100 +static int clean_invalid_mem(const void __user *addr, const size_t size,
101 + const unsigned cache_op)
103 + return clean_invalid_mem_2d(addr, 1, size, 0, cache_op);
106 +static int clean_invalid_resource(const void __user *addr, const size_t size,
107 + const unsigned cache_op, const int usr_hdl,
108 + struct sm_resource_t *resource)
111 + enum sm_stats_t stat_attempt, stat_failure;
112 + void __user *res_addr;
114 + if (resource == NULL) {
115 + pr_err("[%s]: resource is NULL\n", __func__);
118 + if (resource->res_cached != VMCS_SM_CACHE_HOST &&
119 + resource->res_cached != VMCS_SM_CACHE_BOTH)
122 + switch (cache_op) {
123 + case VCSM_CACHE_OP_INV:
124 + stat_attempt = INVALID;
125 + stat_failure = INVALID_FAIL;
127 + case VCSM_CACHE_OP_CLEAN:
128 + /* Like the original VMCS_SM_CMD_CLEAN_INVALID ioctl handler does. */
129 + stat_attempt = FLUSH;
130 + stat_failure = FLUSH_FAIL;
132 + case VCSM_CACHE_OP_FLUSH:
133 + stat_attempt = FLUSH;
134 + stat_failure = FLUSH_FAIL;
137 + pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
140 + resource->res_stats[stat_attempt]++;
142 - } while (pud++, addr = pud_next, addr != pgd_next);
143 - } while (pgd++, addr = pgd_next, addr != end);
144 + if (size > resource->res_size) {
145 + pr_err("[%s]: size (0x%08zu) is larger than res_size (0x%08zu)\n",
146 + __func__, size, resource->res_size);
149 + res_addr = (void __user*) vmcs_sm_usr_address_from_pid_and_usr_handle(
150 + current->tgid, usr_hdl);
151 + if (res_addr == NULL) {
152 + pr_err("[%s]: Failed to get user address "
153 + "from pid (%d) and user handle (%d)\n", __func__, current->tgid,
154 + resource->res_handle);
157 + if (!(res_addr <= addr && addr + size <= res_addr + resource->res_size)) {
158 + pr_err("[%s]: Addr (0x%p-0x%p) out of range (0x%p-0x%p)\n",
159 + __func__, addr, addr + size, res_addr,
160 + res_addr + resource->res_size);
164 + err = clean_invalid_mem(addr, size, cache_op);
166 + resource->res_stats[stat_failure]++;
171 /* Map an allocated data into something that the user space. */
172 @@ -1952,14 +1997,13 @@ static int vc_sm_ioctl_unlock(struct sm_
173 list_for_each_entry(map, &resource->map_list,
176 - unsigned long start;
179 - start = map->vma->vm_start;
180 - end = map->vma->vm_end;
181 + const unsigned long start = map->vma->vm_start;
182 + const unsigned long end = map->vma->vm_end;
184 - vcsm_vma_cache_clean_page_range(
186 + ret = clean_invalid_mem((void __user*) start, end - start,
187 + VCSM_CACHE_OP_FLUSH);
192 up_read(&current->mm->mmap_sem);
193 @@ -2833,41 +2877,17 @@ static long vc_sm_ioctl(struct file *fil
194 /* Locate resource from GUID. */
196 vmcs_sm_acquire_resource(file_data, ioparam.handle);
198 - if ((resource != NULL) && resource->res_cached) {
199 - dma_addr_t phys_addr = 0;
201 - resource->res_stats[FLUSH]++;
204 - (dma_addr_t)((uint32_t)
205 - resource->res_base_mem &
207 - phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
209 - /* L1 cache flush */
210 - down_read(&current->mm->mmap_sem);
211 - vcsm_vma_cache_clean_page_range((unsigned long)
216 - up_read(&current->mm->mmap_sem);
218 - /* L2 cache flush */
219 - outer_clean_range(phys_addr,
221 - (size_t) ioparam.size);
222 - } else if (resource == NULL) {
223 + if (resource == NULL) {
229 - vmcs_sm_release_resource(resource, 0);
233 + ret = clean_invalid_resource((void __user*) ioparam.addr,
234 + ioparam.size, VCSM_CACHE_OP_FLUSH, ioparam.handle,
236 + vmcs_sm_release_resource(resource, 0);
242 @@ -2888,41 +2908,16 @@ static long vc_sm_ioctl(struct file *fil
243 /* Locate resource from GUID. */
245 vmcs_sm_acquire_resource(file_data, ioparam.handle);
247 - if ((resource != NULL) && resource->res_cached) {
248 - dma_addr_t phys_addr = 0;
250 - resource->res_stats[INVALID]++;
253 - (dma_addr_t)((uint32_t)
254 - resource->res_base_mem &
256 - phys_addr += (dma_addr_t)mm_vc_mem_phys_addr;
258 - /* L2 cache invalidate */
259 - outer_inv_range(phys_addr,
261 - (size_t) ioparam.size);
263 - /* L1 cache invalidate */
264 - down_read(&current->mm->mmap_sem);
265 - vcsm_vma_cache_clean_page_range((unsigned long)
270 - up_read(&current->mm->mmap_sem);
271 - } else if (resource == NULL) {
272 + if (resource == NULL) {
278 - vmcs_sm_release_resource(resource, 0);
282 + ret = clean_invalid_resource((void __user*) ioparam.addr,
283 + ioparam.size, VCSM_CACHE_OP_INV, ioparam.handle, resource);
284 + vmcs_sm_release_resource(resource, 0);
290 @@ -2941,43 +2936,27 @@ static long vc_sm_ioctl(struct file *fil
293 for (i = 0; i < sizeof(ioparam.s) / sizeof(*ioparam.s); i++) {
294 - switch (ioparam.s[i].cmd) {
295 - case VCSM_CACHE_OP_INV: /* L1/L2 invalidate virtual range */
296 - case VCSM_CACHE_OP_FLUSH: /* L1/L2 clean physical range */
297 - case VCSM_CACHE_OP_CLEAN: /* L1/L2 clean+invalidate all */
298 - /* Locate resource from GUID. */
300 - vmcs_sm_acquire_resource(file_data, ioparam.s[i].handle);
302 - if ((resource != NULL) && resource->res_cached) {
303 - unsigned long base = ioparam.s[i].addr & ~(PAGE_SIZE - 1);
304 - unsigned long end = (ioparam.s[i].addr + ioparam.s[i].size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
306 - resource->res_stats[ioparam.s[i].cmd == 1 ? INVALID : FLUSH]++;
308 - /* L1/L2 cache flush */
309 - down_read(&current->mm->mmap_sem);
310 - vcsm_vma_cache_clean_page_range(base, end);
311 - up_read(&current->mm->mmap_sem);
312 - } else if (resource == NULL) {
318 - vmcs_sm_release_resource(resource, 0);
323 + /* Locate resource from GUID. */
325 + vmcs_sm_acquire_resource(file_data, ioparam.s[i].handle);
326 + if (resource == NULL) {
331 + ret = clean_invalid_resource((void __user*) ioparam.s[i].addr,
332 + ioparam.s[i].size, ioparam.s[i].cmd,
333 + ioparam.s[i].handle, resource);
334 + vmcs_sm_release_resource(resource, 0);
340 /* Flush/Invalidate the cache for a given mapping. */
341 case VMCS_SM_CMD_CLEAN_INVALID2:
345 struct vmcs_sm_ioctl_clean_invalid2 ioparam;
346 struct vmcs_sm_ioctl_clean_invalid_block *block = NULL;
348 @@ -3006,36 +2985,12 @@ static long vc_sm_ioctl(struct file *fil
350 for (i = 0; i < ioparam.op_count; i++) {
351 const struct vmcs_sm_ioctl_clean_invalid_block * const op = block + i;
352 - void (*op_fn)(const void *, const void *);
354 - switch(op->invalidate_mode & 3) {
355 - case VCSM_CACHE_OP_INV:
356 - op_fn = dmac_inv_range;
358 - case VCSM_CACHE_OP_CLEAN:
359 - op_fn = dmac_clean_range;
361 - case VCSM_CACHE_OP_FLUSH:
362 - op_fn = dmac_flush_range;
369 - if ((op->invalidate_mode & ~3) != 0) {
377 - for (j = 0; j < op->block_count; ++j) {
378 - const char * const base = (const char *)op->start_address + j * op->inter_block_stride;
379 - const char * const end = base + op->block_size;
382 + ret = clean_invalid_mem_2d((void __user*) op->start_address,
383 + op->block_count, op->block_size,
384 + op->inter_block_stride, op->invalidate_mode);