author     Jens Wiklander <jens.wiklander@linaro.org>    2018-09-26 15:32:50 +0200
committer  Jérôme Forissier <jerome.forissier@linaro.org>    2018-10-01 17:57:03 +0200
commit     162f445443d98d01e76769731b61470b4082638b (patch)
tree       ed0799cdf569cabda0a64609d25121f4a3ba1a2c
parent     0b020f945a19d1741d56b4d6a215f3763275e171 (diff)
core: add mobj_reg_shm_{inc,dec}_map()
mobj_reg_shm_inc_map() is used when a reg_shm mobj needs to be mapped and
mobj_reg_shm_dec_map() is called when the mapping isn't needed any longer.

Reviewed-by: Etienne Carriere <etienne.carriere@linaro.org>
Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
-rw-r--r--  core/arch/arm/include/mm/mobj.h  24
-rw-r--r--  core/arch/arm/mm/mobj.c  88
2 files changed, 111 insertions, 1 deletion
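For context, a minimal sketch of how a caller is expected to pair the two new
functions. The helper process_shm_buf() is hypothetical and not part of this
patch; mobj_get_va() is assumed to return a valid VA only while the mobj is
mapped:

/*
 * Hypothetical caller, not part of this patch: keep the reg_shm
 * mapped only while the buffer is actually accessed.
 */
static TEE_Result process_shm_buf(struct mobj *mobj, size_t offs, size_t len)
{
	TEE_Result res = mobj_reg_shm_inc_map(mobj);
	void *va = NULL;

	if (res)
		return res;

	va = mobj_get_va(mobj, offs);	/* valid only while mapped */
	if (va)
		memset(va, 0, len);	/* example access: clear the buffer */
	else
		res = TEE_ERROR_GENERIC;

	/* Each successful inc_map must be matched by a dec_map */
	mobj_reg_shm_dec_map(mobj);

	return res;
}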
diff --git a/core/arch/arm/include/mm/mobj.h b/core/arch/arm/include/mm/mobj.h
index 9d999ab5..ed49baba 100644
--- a/core/arch/arm/include/mm/mobj.h
+++ b/core/arch/arm/include/mm/mobj.h
@@ -143,6 +143,30 @@ TEE_Result mobj_reg_shm_release_by_cookie(uint64_t cookie);
TEE_Result mobj_reg_shm_map(struct mobj *mobj);
TEE_Result mobj_reg_shm_unmap(struct mobj *mobj);
+/**
+ * mobj_reg_shm_inc_map() - increase map count
+ * @mobj: pointer to a registered shared memory MOBJ
+ *
+ * Maps the MOBJ if it isn't mapped already and increases the map count.
+ * Each call to mobj_reg_shm_inc_map() is supposed to be matched by a call
+ * to mobj_reg_shm_dec_map().
+ *
+ * Returns TEE_SUCCESS on success or an error code on failure
+ */
+TEE_Result mobj_reg_shm_inc_map(struct mobj *mobj);
+
+/**
+ * mobj_reg_shm_dec_map() - decrease map count
+ * @mobj: pointer to a registered shared memory MOBJ
+ *
+ * Decreases the map count and also unmaps the MOBJ if the map count
+ * reaches 0. Each call to mobj_reg_shm_inc_map() is supposed to be
+ * matched by a call to mobj_reg_shm_dec_map().
+ *
+ * Returns TEE_SUCCESS on success or an error code on failure
+ */
+TEE_Result mobj_reg_shm_dec_map(struct mobj *mobj);
+
/*
* mapped_shm represents registered shared buffer
* which is mapped into OPTEE va space
diff --git a/core/arch/arm/mm/mobj.c b/core/arch/arm/mm/mobj.c
index 67c28814..21fa7b3f 100644
--- a/core/arch/arm/mm/mobj.c
+++ b/core/arch/arm/mm/mobj.c
@@ -310,6 +310,7 @@ struct mobj_reg_shm {
tee_mm_entry_t *mm;
paddr_t page_offset;
struct refcount refcount;
+ struct refcount mapcount;
int num_pages;
paddr_t pages[];
};
@@ -321,6 +322,7 @@ static SLIST_HEAD(reg_shm_head, mobj_reg_shm) reg_shm_list =
SLIST_HEAD_INITIALIZER(reg_shm_head);
static unsigned int reg_shm_slist_lock = SPINLOCK_UNLOCK;
+static unsigned int reg_shm_map_lock = SPINLOCK_UNLOCK;
static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj);
@@ -374,6 +376,20 @@ static void *mobj_reg_shm_get_va(struct mobj *mobj, size_t offst)
mrs->page_offset);
}
+static void reg_shm_unmap_helper(struct mobj_reg_shm *r)
+{
+ uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);
+
+ if (r->mm) {
+ core_mmu_unmap_pages(tee_mm_get_smem(r->mm),
+ r->mobj.size / SMALL_PAGE_SIZE);
+ tee_mm_free(r->mm);
+ r->mm = NULL;
+ }
+
+ cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
+}
+
static void reg_shm_free_helper(struct mobj_reg_shm *mobj_reg_shm,
bool unlocked)
{
@@ -389,7 +405,7 @@ static void reg_shm_free_helper(struct mobj_reg_shm *mobj_reg_shm,
if (!refcount_dec(&mobj_reg_shm->refcount))
panic();
- mobj_reg_shm_unmap(&mobj_reg_shm->mobj);
+ reg_shm_unmap_helper(mobj_reg_shm);
if (!unlocked)
exceptions = cpu_spin_lock_xsave(&reg_shm_slist_lock);
@@ -443,6 +459,14 @@ static struct mobj_reg_shm *to_mobj_reg_shm(struct mobj *mobj)
return container_of(mobj, struct mobj_reg_shm, mobj);
}
+static struct mobj_reg_shm *to_mobj_reg_shm_may_fail(struct mobj *mobj)
+{
+ if (mobj->ops != &mobj_reg_shm_ops)
+ return NULL;
+
+ return container_of(mobj, struct mobj_reg_shm, mobj);
+}
+
struct mobj *mobj_reg_shm_alloc(paddr_t *pages, size_t num_pages,
paddr_t page_offset, uint64_t cookie)
{
@@ -639,6 +663,68 @@ TEE_Result mobj_reg_shm_unmap(struct mobj *mobj)
return TEE_SUCCESS;
}
+TEE_Result mobj_reg_shm_inc_map(struct mobj *mobj)
+{
+ TEE_Result res = TEE_SUCCESS;
+ struct mobj_reg_shm *r = to_mobj_reg_shm_may_fail(mobj);
+
+ if (!r)
+ return TEE_ERROR_GENERIC;
+
+ if (refcount_inc(&r->mapcount))
+ return TEE_SUCCESS;
+
+ uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);
+
+ if (refcount_val(&r->mapcount))
+ goto out;
+
+ r->mm = tee_mm_alloc(&tee_mm_shm, SMALL_PAGE_SIZE * r->num_pages);
+ if (!r->mm) {
+ res = TEE_ERROR_OUT_OF_MEMORY;
+ goto out;
+ }
+
+ res = core_mmu_map_pages(tee_mm_get_smem(r->mm), r->pages,
+ r->num_pages, MEM_AREA_NSEC_SHM);
+ if (res) {
+ tee_mm_free(r->mm);
+ r->mm = NULL;
+ goto out;
+ }
+
+ refcount_set(&r->mapcount, 1);
+out:
+ cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
+
+ return res;
+}
+
+TEE_Result mobj_reg_shm_dec_map(struct mobj *mobj)
+{
+ struct mobj_reg_shm *r = to_mobj_reg_shm_may_fail(mobj);
+
+ if (!r)
+ return TEE_ERROR_GENERIC;
+
+ if (!refcount_dec(&r->mapcount))
+ return TEE_SUCCESS;
+
+ uint32_t exceptions = cpu_spin_lock_xsave(&reg_shm_map_lock);
+
+ if (!refcount_val(&r->mapcount)) {
+ core_mmu_unmap_pages(tee_mm_get_smem(r->mm),
+ r->mobj.size / SMALL_PAGE_SIZE);
+ tee_mm_free(r->mm);
+ r->mm = NULL;
+ }
+
+ cpu_spin_unlock_xrestore(&reg_shm_map_lock, exceptions);
+
+ return TEE_SUCCESS;
+}
+
struct mobj *mobj_mapped_shm_alloc(paddr_t *pages, size_t num_pages,
paddr_t page_offset, uint64_t cookie)
{
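The mapcount handling above is a lock-free fast path with a locked slow path:
refcount_inc() succeeds only when the count is already non-zero (the buffer is
mapped), otherwise the spinlock is taken and the count is re-checked before the
pages are mapped. A stand-alone sketch of that pattern using C11 atomics (all
names here are illustrative; OP-TEE's refcount and spinlock primitives differ
in detail):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint mapcount;			/* 0 == not mapped */
static atomic_flag map_lock = ATOMIC_FLAG_INIT;	/* stand-in spinlock */

/* Increment only if the count is already non-zero */
static bool inc_if_mapped(void)
{
	unsigned int old = atomic_load(&mapcount);

	while (old)
		if (atomic_compare_exchange_weak(&mapcount, &old, old + 1))
			return true;

	return false;
}

static int inc_map(void)
{
	/* Fast path: already mapped, just take another reference */
	if (inc_if_mapped())
		return 0;

	/* Slow path: serialize the first mapping behind a lock */
	while (atomic_flag_test_and_set(&map_lock))
		;	/* spin */

	if (!inc_if_mapped()) {
		/* ... allocate a VA range and map the pages here ... */
		atomic_store(&mapcount, 1);
	}

	atomic_flag_clear(&map_lock);
	return 0;
}

Re-running the conditional increment under the lock covers the case where
another thread completed the mapping while the lock was contended.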