author     Alex Deucher <alexander.deucher@amd.com>   2012-10-22 12:19:01 -0400
committer  Alex Deucher <alexander.deucher@amd.com>   2012-12-10 17:21:41 -0500
commit     3b6b59b610f0c0f351e68ec3eff9ab51ef75fb1a (patch)
tree       46cd9b78aa994668f7075fbe57a0b2431b7338dd /drivers/gpu/drm/radeon/ni.c
parent     2d6cc7296d4ee128ab0fa3b715f0afde511f49c2 (diff)
drm/radeon: add dma engine support for vm pt updates on ni (v5)
Async DMA has a special packet for contiguous pt updates
which saves overhead.

v2: leave the CP method enabled for now as doing the updates
    in the DMA rings is not working properly yet.
v3: update for 2 level pts
v4: rebase
v5: drop pte/pde packet.  doesn't seem to work on NI.

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
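
For readers skimming the diff below: the new non-GFX branch programs the async DMA engine directly via DMA_PACKET(DMA_PACKET_WRITE, ...) headers instead of CP PACKET3_ME_WRITE. A minimal, self-contained sketch of that header encoding follows; the bit layout and the WRITE opcode value are assumptions based on how the macro appears in the driver's nid.h header, not taken from this commit, so check them against the real definition.

#include <stdio.h>
#include <stdint.h>

/* Assumed layout of the NI async DMA packet header (cf. DMA_PACKET() in
 * nid.h): a 4-bit opcode, two flag bits, and a 20-bit dword count.  The
 * 20-bit count field is why the DMA path in the diff can clamp ndw at
 * 0xFFFFE, versus 0x3FFF for the CP ME_WRITE packet.
 */
#define DMA_PACKET(cmd, t, s, n) ((((cmd) & 0xF) << 28) | \
                                  (((t) & 0x1) << 23) |   \
                                  (((s) & 0x1) << 22) |   \
                                  (((n) & 0xFFFFF) << 0))
#define DMA_PACKET_WRITE 0x2    /* assumed opcode value */

int main(void)
{
	unsigned ndw = 0xFFFFE;  /* maximum payload dwords per packet */
	uint32_t hdr = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);

	/* The driver emits this header, then the destination address pe
	 * (low dword, then upper bits), then ndw payload dwords. */
	printf("DMA WRITE header for %u dwords: 0x%08x\n", ndw, hdr);
	return 0;
}
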
Diffstat (limited to 'drivers/gpu/drm/radeon/ni.c')
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 69
1 file changed, 48 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index b81aca44fd4..39e8be1d1e8 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1795,30 +1795,57 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
{
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-
- while (count) {
- unsigned ndw = 1 + count * 2;
- if (ndw > 0x3FFF)
- ndw = 0x3FFF;
-
- radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
- radeon_ring_write(ring, pe);
- radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
- for (; ndw > 1; ndw -= 2, --count, pe += 8) {
- uint64_t value = 0;
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
+ uint64_t value;
+ unsigned ndw;
+
+ if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+ while (count) {
+ ndw = 1 + count * 2;
+ if (ndw > 0x3FFF)
+ ndw = 0x3FFF;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
addr += incr;
-
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw));
+ radeon_ring_write(ring, pe);
+ radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
addr += incr;
+ value |= r600_flags;
+ radeon_ring_write(ring, value);
+ radeon_ring_write(ring, upper_32_bits(value));
}
-
- value |= r600_flags;
- radeon_ring_write(ring, value);
- radeon_ring_write(ring, upper_32_bits(value));
}
}
}
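
To put the two dword budgets in perspective, here is a small illustrative program (not driver code; the numbers are taken only from the clamping arithmetic in the hunk above) that counts how many ring packets each path needs to cover a given number of page-table entries:

#include <stdio.h>

/* Mirrors the ndw clamping in cayman_vm_set_page() above: the CP
 * ME_WRITE path budgets 1 + count * 2 dwords capped at 0x3FFF, the
 * async DMA WRITE path budgets count * 2 dwords capped at 0xFFFFE.
 * Each 64-bit PTE costs two payload dwords.
 */
static unsigned packets_needed(unsigned count, unsigned hdr_dw, unsigned max_ndw)
{
	unsigned packets = 0;

	while (count) {
		unsigned ndw = hdr_dw + count * 2;

		if (ndw > max_ndw)
			ndw = max_ndw;
		count -= (ndw - hdr_dw) / 2;	/* PTEs covered by this packet */
		packets++;
	}
	return packets;
}

int main(void)
{
	unsigned count = 512 * 1024;	/* 2 GB mapped with 4 KB pages = 524288 PTEs */

	printf("CP  path: %u packets\n", packets_needed(count, 1, 0x3FFF));
	printf("DMA path: %u packets\n", packets_needed(count, 0, 0xFFFFE));
	return 0;
}
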