/*
 *
 * (C) COPYRIGHT 2010, 2012-2016 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU licence.
 *
 * A copy of the licence is included with the program, and can also be obtained
 * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA  02110-1301, USA.
 *
 */





/**
 * @file mali_kbase_mem_linux.h
 * Base kernel memory APIs, Linux implementation.
 */

#ifndef _KBASE_MEM_LINUX_H_
#define _KBASE_MEM_LINUX_H_

/**
 * struct kbase_hwc_dma_mapping - A HWC dump mapping
 * @cpu_va: CPU virtual address of the mapping
 * @dma_pa: DMA address of the mapping
 * @size:   Size of the mapping, in bytes
 */
struct kbase_hwc_dma_mapping {
	void       *cpu_va;
	dma_addr_t  dma_pa;
	size_t      size;
};

struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
		u64 va_pages, u64 commit_pages, u64 extent, u64 *flags,
		u64 *gpu_va);
int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 *const pages);
int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
		void __user *phandle, u64 *gpu_va, u64 *va_pages,
		u64 *flags);
u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride, u64 nents, struct base_mem_aliasing_info *ai, u64 *num_pages);
int kbase_mem_flags_change(struct kbase_context *kctx, u64 gpu_addr, unsigned int flags, unsigned int mask);
int kbase_mem_commit(struct kbase_context *kctx, u64 gpu_addr, u64 new_pages, enum base_backing_threshold_status *failure_reason);
int kbase_mmap(struct file *file, struct vm_area_struct *vma);

/**
 * kbase_mem_evictable_init - Initialize the ephemeral memory eviction
 * mechanism.
 * @kctx: The kbase context to initialize.
 *
 * Return: Zero on success or -errno on failure.
 */
int kbase_mem_evictable_init(struct kbase_context *kctx);

/**
 * kbase_mem_evictable_deinit - De-initialize the ephemeral memory eviction
 * mechanism.
 * @kctx: The kbase context to de-initialize.
 */
void kbase_mem_evictable_deinit(struct kbase_context *kctx);
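
/*
 * Illustrative sketch, not part of the kbase API: these two calls are
 * expected to bracket the lifetime of a context, with the init call made
 * during context setup and the deinit call during teardown.
 * setup_rest_of_context() is a hypothetical stand-in for the remaining
 * per-context initialisation:
 *
 *	err = kbase_mem_evictable_init(kctx);
 *	if (err)
 *		return err;
 *
 *	err = setup_rest_of_context(kctx);
 *	if (err) {
 *		kbase_mem_evictable_deinit(kctx);
 *		return err;
 *	}
 */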

/**
 * kbase_mem_grow_gpu_mapping - Grow the GPU mapping of an allocation
 * @kctx:      Context the region belongs to
 * @reg:       The GPU region
 * @new_pages: The number of pages after the grow
 * @old_pages: The number of pages before the grow
 *
 * Return: 0 on success, -errno on error.
 *
 * Expand the GPU mapping to encompass the new physical pages which have
 * been added to the allocation.
 *
 * Note: Caller must be holding the region lock.
 */
int kbase_mem_grow_gpu_mapping(struct kbase_context *kctx,
		struct kbase_va_region *reg,
		u64 new_pages, u64 old_pages);
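
/*
 * Illustrative sketch, with assumptions noted: once new physical pages have
 * been committed to the allocation backing a region, the GPU mapping can be
 * extended to cover them. old_page_count and new_page_count are assumed to
 * be tracked by the caller, which must already hold the region lock as noted
 * above:
 *
 *	err = kbase_mem_grow_gpu_mapping(kctx, reg,
 *			new_page_count, old_page_count);
 *	if (err)
 *		return err;
 */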

/**
 * kbase_mem_evictable_make - Make a physical allocation eligible for eviction
 * @gpu_alloc: The physical allocation to make evictable
 *
 * Return: 0 on success, -errno on error.
 *
 * Take the provided region and make all the physical pages within it
 * reclaimable by the kernel, updating the per-process VM stats as well.
 * Remove any CPU mappings (as these can't be removed in the shrinker callback
 * as mmap_sem might already be taken), but leave the GPU mapping intact
 * until the shrinker reclaims the allocation.
 *
 * Note: Must be called while holding the region lock of the containing
 * context.
 */
int kbase_mem_evictable_make(struct kbase_mem_phy_alloc *gpu_alloc);

/**
 * kbase_mem_evictable_unmake - Remove a physical allocation's eligibility for
 * eviction.
 * @alloc: The physical allocation to remove eviction eligibility from.
 *
 * Return: True if the allocation had its backing restored, false if it
 * did not.
 *
 * Make the physical pages in the region no longer reclaimable and update the
 * per-process stats. If the shrinker has already evicted the memory, it is
 * re-allocated, provided the region is still alive.
 *
 * Note: Must be called while holding the region lock of the containing
 * context.
 */
bool kbase_mem_evictable_unmake(struct kbase_mem_phy_alloc *alloc);
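
/*
 * Illustrative sketch, with assumptions noted: an allocation whose contents
 * the process can afford to lose may be offered back to the kernel with
 * kbase_mem_evictable_make(), and claimed back with
 * kbase_mem_evictable_unmake() before it is next used. Here reg->gpu_alloc
 * is assumed to be the physical allocation backing the region, and the
 * caller is assumed to hold the region lock as both functions require:
 *
 *	err = kbase_mem_evictable_make(reg->gpu_alloc);
 *	if (err)
 *		return err;
 *
 *	and later, before the allocation is used again:
 *
 *	if (!kbase_mem_evictable_unmake(reg->gpu_alloc))
 *		return -ENOMEM;
 */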

/**
 * struct kbase_vmap_struct - State of a mapping made by kbase_vmap()
 * @gpu_addr:  GPU address of the start of the mapped range
 * @cpu_alloc: Physical allocation backing the CPU side of the mapping
 * @gpu_alloc: Physical allocation backing the GPU side of the mapping
 * @cpu_pages: Physical addresses of the pages in @cpu_alloc
 * @gpu_pages: Physical addresses of the pages in @gpu_alloc
 * @addr:      Kernel virtual address of the mapping
 * @size:      Size of the mapped range, in bytes
 * @is_cached: Whether the CPU mapping of the range is cached
 */
struct kbase_vmap_struct {
	u64 gpu_addr;
	struct kbase_mem_phy_alloc *cpu_alloc;
	struct kbase_mem_phy_alloc *gpu_alloc;
	phys_addr_t *cpu_pages;
	phys_addr_t *gpu_pages;
	void *addr;
	size_t size;
	bool is_cached;
};


/**
 * kbase_vmap_prot - Map a GPU VA range into the kernel safely, only if the
 * requested access permissions are supported
 * @kctx:         Context the VA range belongs to
 * @gpu_addr:     Start address of VA range
 * @size:         Size of VA range
 * @prot_request: Flags indicating how the caller will then access the memory
 * @map:          Structure to be given to kbase_vunmap() on freeing
 *
 * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error
 *
 * Map a GPU VA range into the kernel. The VA range must be contained within a
 * GPU memory region. Appropriate CPU cache-flushing operations are made as
 * required, dependent on the CPU mapping for the memory region.
 *
 * This is safer than using kmap() on the pages directly, because the pages
 * here are refcounted to prevent freeing (and hence reuse elsewhere in the
 * system) until kbase_vunmap() is called.
 *
 * The flags in @prot_request should use KBASE_REG_{CPU,GPU}_{RD,WR} to check
 * whether the region allows the intended access, with the mapping failing if
 * it is disallowed. This is essential for the security of imported memory,
 * particularly a user buffer from SHM mapped into the process as RO. In that
 * case, write access must be checked if the intention is for the kernel to
 * write to the memory.
 *
 * The checks are also there to help catch access errors on memory where
 * security is not a concern: imported memory that is always RW, and memory
 * that was allocated and owned by the process attached to @kctx. In this case,
 * it helps to identify memory that was mapped with the wrong access type.
 *
 * Note: KBASE_REG_GPU_{RD,WR} flags are currently supported for legacy cases
 * where either the security of memory is solely dependent on those flags, or
 * when userspace code was expecting only the GPU to access the memory (e.g. HW
 * workarounds).
 *
 */
void *kbase_vmap_prot(struct kbase_context *kctx, u64 gpu_addr, size_t size,
		      unsigned long prot_request, struct kbase_vmap_struct *map);
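
/*
 * Illustrative sketch, not a definitive recipe: mapping a small range for a
 * CPU-side read, checking that the region actually permits CPU reads. The
 * kctx and gpu_addr values are assumed to refer to an existing allocation:
 *
 *	struct kbase_vmap_struct map;
 *	u32 *ptr;
 *
 *	ptr = kbase_vmap_prot(kctx, gpu_addr, sizeof(*ptr),
 *			KBASE_REG_CPU_RD, &map);
 *	if (!ptr)
 *		return -EINVAL;
 *
 *	value = *ptr;
 *	kbase_vunmap(kctx, &map);
 */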

/**
 * kbase_vmap - Map a GPU VA range into the kernel safely
 * @kctx:     Context the VA range belongs to
 * @gpu_addr: Start address of VA range
 * @size:     Size of VA range
 * @map:      Structure to be given to kbase_vunmap() on freeing
 *
 * Return: Kernel-accessible CPU pointer to the VA range, or NULL on error
 *
 * Map a GPU VA range into the kernel. The VA range must be contained within a
 * GPU memory region. Appropriate CPU cache-flushing operations are made as
 * required, dependent on the CPU mapping for the memory region.
 *
 * This is safer than using kmap() on the pages directly, because the pages
 * here are refcounted to prevent freeing (and hence reuse elsewhere in the
 * system) until kbase_vunmap() is called.
 *
 * kbase_vmap_prot() should be used in preference, since kbase_vmap() makes no
 * checks to ensure the security of e.g. imported user buffers from RO SHM.
 */
void *kbase_vmap(struct kbase_context *kctx, u64 gpu_addr, size_t size,
		struct kbase_vmap_struct *map);

/**
 * kbase_vunmap - Unmap a GPU VA range from the kernel
 * @kctx: Context the VA range belongs to
 * @map:  Structure describing the mapping from the corresponding kbase_vmap()
 *        call
 *
 * Unmaps a GPU VA range from the kernel, given its @map structure obtained
 * from kbase_vmap(). Appropriate CPU cache-flushing operations are made as
 * required, dependent on the CPU mapping for the memory region.
 *
 * The reference taken on pages during kbase_vmap() is released.
 */
void kbase_vunmap(struct kbase_context *kctx, struct kbase_vmap_struct *map);

/**
 * kbase_va_alloc - Allocate memory from kernel space and map it onto the GPU
 * @kctx:   The context used for the allocation/mapping
 * @size:   The size of the allocation in bytes
 * @handle: An opaque structure used to contain the state needed to free the
 *          memory
 *
 * Return: The VA for kernel space and GPU MMU
 */
void *kbase_va_alloc(struct kbase_context *kctx, u32 size, struct kbase_hwc_dma_mapping *handle);

/**
 * kbase_va_free - Free/unmap memory allocated by kbase_va_alloc()
 * @kctx:   The context used for the allocation/mapping
 * @handle: An opaque structure returned by the kbase_va_alloc() function
 */
void kbase_va_free(struct kbase_context *kctx, struct kbase_hwc_dma_mapping *handle);
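
/*
 * Illustrative sketch: allocating a buffer suitable for a hardware counter
 * dump and releasing it again. The 4096-byte size is an arbitrary example
 * value:
 *
 *	struct kbase_hwc_dma_mapping handle;
 *	void *cpu_va;
 *
 *	cpu_va = kbase_va_alloc(kctx, 4096, &handle);
 *	if (!cpu_va)
 *		return -ENOMEM;
 *
 *	use the buffer through cpu_va, then:
 *
 *	kbase_va_free(kctx, &handle);
 */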

extern const struct vm_operations_struct kbase_vm_ops;

#endif				/* _KBASE_MEM_LINUX_H_ */