Diffstat (limited to 'drivers/staging/gma500/gtt.c')
-rw-r--r--  drivers/staging/gma500/gtt.c  81
1 file changed, 67 insertions, 14 deletions
diff --git a/drivers/staging/gma500/gtt.c b/drivers/staging/gma500/gtt.c
index 461ead251bb..e770bd190a5 100644
--- a/drivers/staging/gma500/gtt.c
+++ b/drivers/staging/gma500/gtt.c
@@ -72,9 +72,8 @@ u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
* @r: our GTT range
*
* Take our preallocated GTT range and insert the GEM object into
- * the GTT.
- *
- * FIXME: gtt lock ?
+ * the GTT. This is protected via the gtt mutex which the caller
+ * must hold.
*/
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
{
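For context, "the caller must hold the gtt mutex" means psb_gtt_insert() itself takes no lock; a pin-style caller is expected to wrap it. A minimal caller-side sketch of that convention, assuming the dev_priv->gtt_mutex field used elsewhere in this driver (the function name and the error handling here are illustrative, not part of this patch):

/* Sketch of the locking convention only: the gtt_mutex name and the
 * pin-path shape are assumptions; this diff does not show the caller. */
static int psb_gtt_pin_sketch(struct gtt_range *gt)
{
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev_priv->gtt_mutex);	/* serialise all GTT updates */
	if (gt->in_gart == 0 && gt->stolen == 0) {
		ret = psb_gtt_attach_pages(gt);		/* pin backing pages */
		if (ret == 0) {
			ret = psb_gtt_insert(dev, gt);	/* write PTEs under the lock */
			if (ret)
				psb_gtt_detach_pages(gt);	/* unwind on failure */
		}
	}
	if (ret == 0)
		gt->in_gart++;
	mutex_unlock(&dev_priv->gtt_mutex);
	return ret;
}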
@@ -96,12 +95,17 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
set_pages_array_uc(pages, r->npage);
/* Write our page entries into the GTT itself */
- for (i = 0; i < r->npage; i++) {
- pte = psb_gtt_mask_pte(page_to_pfn(*pages++), 0/*type*/);
+ for (i = r->roll; i < r->npage; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ iowrite32(pte, gtt_slot++);
+ }
+ for (i = 0; i < r->roll; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
iowrite32(pte, gtt_slot++);
}
/* Make sure all the entries are set before we return */
ioread32(gtt_slot - 1);
+
return 0;
}
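The two write loops above implement a simple rotation: GTT slot j ends up pointing at backing page (roll + j) % npage. A standalone sketch of that index mapping, illustrative only and independent of any driver code:

#include <stdio.h>

/* Prints which backing page each GTT slot receives after the two loops
 * above, i.e. slot j -> page (roll + j) % npage. */
int main(void)
{
	int npage = 8, roll = 3, slot = 0, i;

	for (i = roll; i < npage; i++)
		printf("slot %d -> page %d\n", slot++, i);
	for (i = 0; i < roll; i++)
		printf("slot %d -> page %d\n", slot++, i);
	return 0;
}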
@@ -111,9 +115,9 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
* @r: our GTT range
*
* Remove a preallocated GTT range from the GTT. Overwrite all the
- * page table entries with the dummy page
+ * page table entries with the dummy page. This is protected via the gtt
+ * mutex which the caller must hold.
*/
-
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -132,11 +136,52 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
}
/**
+ * psb_gtt_roll - set scrolling position
+ * @dev: our DRM device
+ * @r: the gtt mapping we are using
+ * @roll: roll offset
+ *
+ * Roll an existing pinned mapping by moving the pages through the GTT.
+ * This allows us to implement hardware scrolling on the consoles without
+ * a 2D engine
+ */
+void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
+{
+ u32 *gtt_slot, pte;
+ int i;
+
+ if (roll >= r->npage) {
+ WARN_ON(1);
+ return;
+ }
+
+ r->roll = roll;
+
+ /* Not currently in the GTT - not to worry, we will write the mapping at
+ the right position when it gets pinned */
+ if (!r->stolen && !r->in_gart)
+ return;
+
+ gtt_slot = psb_gtt_entry(dev, r);
+
+ for (i = r->roll; i < r->npage; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ iowrite32(pte, gtt_slot++);
+ }
+ for (i = 0; i < r->roll; i++) {
+ pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+ iowrite32(pte, gtt_slot++);
+ }
+ ioread32(gtt_slot - 1);
+}
+
+/**
* psb_gtt_attach_pages - attach and pin GEM pages
* @gt: the gtt range
*
* Pin and build an in kernel list of the pages that back our GEM object.
- * While we hold this the pages cannot be swapped out
+ * While we hold this the pages cannot be swapped out. This is protected
+ * via the gtt mutex which the caller must hold.
*/
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
@@ -158,7 +203,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
gt->npage = pages;
for (i = 0; i < pages; i++) {
- /* FIXME: review flags later */
+ /* FIXME: needs updating as per mail from Hugh Dickins */
p = read_cache_page_gfp(mapping, i,
__GFP_COLD | GFP_KERNEL);
if (IS_ERR(p))
@@ -181,7 +226,8 @@ err:
*
* Undo the effect of psb_gtt_attach_pages. At this point the pages
* must have been removed from the GTT as they could now be paged out
- * and move bus address.
+ * and move bus address. This is protected via the gtt mutex which the
+ * caller must hold.
*/
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
@@ -300,6 +346,7 @@ struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
gt->resource.name = name;
gt->stolen = backed;
gt->in_gart = backed;
+ gt->roll = 0;
/* Ensure this is set for non GEM objects */
gt->gem.dev = dev;
ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
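The new roll field is zeroed here so freshly allocated ranges start unrolled. For orientation, a trimmed sketch of struct gtt_range limited to the fields this diff uses; the real definition in gtt.h has further members and the exact types here are assumptions:

/* Trimmed view of struct gtt_range, fields used by this diff only. */
struct gtt_range_sketch {
	struct resource resource;	/* GTT aperture allocation */
	struct drm_gem_object gem;	/* GEM object backing the range */
	struct page **pages;		/* backing pages once attached */
	int npage;			/* number of backing pages */
	int roll;			/* current scroll offset, in pages */
	int in_gart;			/* nonzero while mapped in the GTT */
	int stolen;			/* backed by stolen memory */
};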
@@ -390,15 +437,18 @@ int psb_gtt_init(struct drm_device *dev, int resume)
pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;
/*
- * FIXME: video mmu has hw bug to access 0x0D0000000,
- * then make gatt start at 0x0e000,0000
+ * The video mmu has a hw bug when accessing 0xD0000000,
+ * so make the gatt start at 0xE0000000. This doesn't actually
+ * matter for us but may do if the video acceleration ever
+ * gets opened up.
*/
pg->mmu_gatt_start = 0xE0000000;
pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
>> PAGE_SHIFT;
- /* CDV workaround */
+ /* Some CDV firmware doesn't currently report this, in which case
+ the system has 64 GTT pages */
if (pg->gtt_start == 0 || gtt_pages == 0) {
dev_err(dev->dev, "GTT PCI BAR not initialized.\n");
gtt_pages = 64;
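The 64-page fallback is roomier than it looks: with 32-bit entries (as the iowrite32() calls earlier in this diff imply) and 4 KiB pages, each GTT page holds 1024 PTEs, so 64 pages can map 256 MiB, comfortably covering the 128 MiB GATT fudged in below. A small worked check, illustrative arithmetic only:

#include <stdio.h>

/* Assumes 32-bit PTEs and 4 KiB pages, matching the iowrite32() usage
 * in psb_gtt_insert()/psb_gtt_roll() above. */
int main(void)
{
	unsigned long page_size = 4096, pte_size = 4, gtt_pages = 64;
	unsigned long ptes = gtt_pages * page_size / pte_size;	/* 65536 */
	unsigned long mappable = ptes * page_size;		/* 256 MiB */

	printf("%lu PTEs -> %lu MiB mappable\n", ptes, mappable >> 20);
	return 0;
}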
@@ -412,13 +462,16 @@ int psb_gtt_init(struct drm_device *dev, int resume)
if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
static struct resource fudge; /* Preferably peppermint */
-
/* This can occur on CDV SDV systems. Fudge it in this case.
We really don't care what imaginary space is being allocated
at this point */
dev_err(dev->dev, "GATT PCI BAR not initialized.\n");
pg->gatt_start = 0x40000000;
pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
+ /* This is a little confusing but in fact the GTT provides a view from
+ the GPU into memory, not vice versa. As such, the space we allocate
+ here is GPU address space and is not the same as the CPU address
+ space on CDV */
fudge.start = 0x40000000;
fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
fudge.name = "fudge";