summaryrefslogtreecommitdiff
path: root/ArmPkg/Library/ArmLib/AArch64
diff options
context:
space:
mode:
Diffstat (limited to 'ArmPkg/Library/ArmLib/AArch64')
-rw-r--r--ArmPkg/Library/ArmLib/AArch64/AArch64ArchTimer.c275
-rw-r--r--ArmPkg/Library/ArmLib/AArch64/AArch64ArchTimerSupport.S140
-rw-r--r--ArmPkg/Library/ArmLib/AArch64/AArch64Lib.c263
-rw-r--r--ArmPkg/Library/ArmLib/AArch64/AArch64Lib.h98
-rw-r--r--ArmPkg/Library/ArmLib/AArch64/AArch64Lib.inf44
-rw-r--r--ArmPkg/Library/ArmLib/AArch64/AArch64LibPrePi.inf48
-rw-r--r--ArmPkg/Library/ArmLib/AArch64/AArch64LibSec.inf43
-rw-r--r--ArmPkg/Library/ArmLib/AArch64/AArch64Mmu.c671
-rw-r--r--ArmPkg/Library/ArmLib/AArch64/AArch64Support.S503
-rw-r--r--ArmPkg/Library/ArmLib/AArch64/ArmLib.c53
-rw-r--r--ArmPkg/Library/ArmLib/AArch64/ArmLibPrivate.h82
-rw-r--r--ArmPkg/Library/ArmLib/AArch64/ArmLibSupportV8.S127
12 files changed, 2347 insertions, 0 deletions
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64ArchTimer.c b/ArmPkg/Library/ArmLib/AArch64/AArch64ArchTimer.c
new file mode 100644
index 000000000..fa4f7c741
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64ArchTimer.c
@@ -0,0 +1,275 @@
+/** @file
+*
+* Copyright (c) 2011-2013, ARM Limited. All rights reserved.
+*
+* This program and the accompanying materials
+* are licensed and made available under the terms and conditions of the BSD License
+* which accompanies this distribution. The full text of the license may be found at
+* http://opensource.org/licenses/bsd-license.php
+*
+* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+*
+**/
+
+#include <Uefi.h>
+#include <Chipset/AArch64.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/ArmLib.h>
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include "AArch64Lib.h"
+#include "ArmLibPrivate.h"
+#include <Library/ArmArchTimerLib.h>
+
+VOID
+EFIAPI
+ArmArchTimerReadReg (
+ IN ARM_ARCH_TIMER_REGS Reg,
+ OUT VOID *DstBuf
+ )
+{
+ // Check if the Generic/Architecture timer is implemented
+ if (ArmIsArchTimerImplemented ()) {
+
+ switch (Reg) {
+
+ case CntFrq:
+ *((UINTN *)DstBuf) = ArmReadCntFrq ();
+ break;
+
+ case CntPct:
+ *((UINT64 *)DstBuf) = ArmReadCntPct ();
+ break;
+
+ case CntkCtl:
+ *((UINTN *)DstBuf) = ArmReadCntkCtl();
+ break;
+
+ case CntpTval:
+ *((UINTN *)DstBuf) = ArmReadCntpTval ();
+ break;
+
+ case CntpCtl:
+ *((UINTN *)DstBuf) = ArmReadCntpCtl ();
+ break;
+
+ case CntvTval:
+ *((UINTN *)DstBuf) = ArmReadCntvTval ();
+ break;
+
+ case CntvCtl:
+ *((UINTN *)DstBuf) = ArmReadCntvCtl ();
+ break;
+
+ case CntvCt:
+ *((UINT64 *)DstBuf) = ArmReadCntvCt ();
+ break;
+
+ case CntpCval:
+ *((UINT64 *)DstBuf) = ArmReadCntpCval ();
+ break;
+
+ case CntvCval:
+ *((UINT64 *)DstBuf) = ArmReadCntvCval ();
+ break;
+
+ case CntvOff:
+ *((UINT64 *)DstBuf) = ArmReadCntvOff ();
+ break;
+
+ case CnthCtl:
+ case CnthpTval:
+ case CnthpCtl:
+ case CnthpCval:
+ DEBUG ((EFI_D_ERROR, "The register is related to Hypervisor Mode. Can't perform requested operation\n "));
+ break;
+
+ default:
+ DEBUG ((EFI_D_ERROR, "Unknown ARM Generic Timer register %x. \n ", Reg));
+ }
+ } else {
+ DEBUG ((EFI_D_ERROR, "Attempt to read ARM Generic Timer registers. But ARM Generic Timer extension is not implemented \n "));
+ ASSERT (0);
+ }
+}
+
+VOID
+EFIAPI
+ArmArchTimerWriteReg (
+ IN ARM_ARCH_TIMER_REGS Reg,
+ IN VOID *SrcBuf
+ )
+{
+ // Check if the Generic/Architecture timer is implemented
+ if (ArmIsArchTimerImplemented ()) {
+
+ switch (Reg) {
+
+ case CntFrq:
+ ArmWriteCntFrq (*((UINTN *)SrcBuf));
+ break;
+
+ case CntPct:
+ DEBUG ((EFI_D_ERROR, "Can't write to Read Only Register: CNTPCT \n"));
+ break;
+
+ case CntkCtl:
+ ArmWriteCntkCtl (*((UINTN *)SrcBuf));
+ break;
+
+ case CntpTval:
+ ArmWriteCntpTval (*((UINTN *)SrcBuf));
+ break;
+
+ case CntpCtl:
+ ArmWriteCntpCtl (*((UINTN *)SrcBuf));
+ break;
+
+ case CntvTval:
+ ArmWriteCntvTval (*((UINTN *)SrcBuf));
+ break;
+
+ case CntvCtl:
+ ArmWriteCntvCtl (*((UINTN *)SrcBuf));
+ break;
+
+ case CntvCt:
+ DEBUG ((EFI_D_ERROR, "Can't write to Read Only Register: CNTVCT \n"));
+ break;
+
+ case CntpCval:
+ ArmWriteCntpCval (*((UINT64 *)SrcBuf) );
+ break;
+
+ case CntvCval:
+ ArmWriteCntvCval (*((UINT64 *)SrcBuf) );
+ break;
+
+ case CntvOff:
+ ArmWriteCntvOff (*((UINT64 *)SrcBuf));
+ break;
+
+ case CnthCtl:
+ case CnthpTval:
+ case CnthpCtl:
+ case CnthpCval:
+ DEBUG ((EFI_D_ERROR, "The register is related to Hypervisor Mode. Can't perform requested operation\n "));
+ break;
+
+ default:
+ DEBUG ((EFI_D_ERROR, "Unknown ARM Generic Timer register %x. \n ", Reg));
+ }
+ } else {
+ DEBUG ((EFI_D_ERROR, "Attempt to write to ARM Generic Timer registers. But ARM Generic Timer extension is not implemented \n "));
+ ASSERT (0);
+ }
+}
+
+VOID
+EFIAPI
+ArmArchTimerEnableTimer (
+ VOID
+ )
+{
+ UINTN TimerCtrlReg;
+
+ ArmArchTimerReadReg (CntpCtl, (VOID *)&TimerCtrlReg);
+ TimerCtrlReg |= ARM_ARCH_TIMER_ENABLE;
+ ArmArchTimerWriteReg (CntpCtl, (VOID *)&TimerCtrlReg);
+}
+
+VOID
+EFIAPI
+ArmArchTimerDisableTimer (
+ VOID
+ )
+{
+ UINTN TimerCtrlReg;
+
+ ArmArchTimerReadReg (CntpCtl, (VOID *)&TimerCtrlReg);
+ TimerCtrlReg &= ~ARM_ARCH_TIMER_ENABLE;
+ ArmArchTimerWriteReg (CntpCtl, (VOID *)&TimerCtrlReg);
+}
+
+VOID
+EFIAPI
+ArmArchTimerSetTimerFreq (
+ IN UINTN FreqInHz
+ )
+{
+ ArmArchTimerWriteReg (CntFrq, (VOID *)&FreqInHz);
+}
+
+UINTN
+EFIAPI
+ArmArchTimerGetTimerFreq (
+ VOID
+ )
+{
+ UINTN ArchTimerFreq = 0;
+ ArmArchTimerReadReg (CntFrq, (VOID *)&ArchTimerFreq);
+ return ArchTimerFreq;
+}
+
+UINTN
+EFIAPI
+ArmArchTimerGetTimerVal (
+ VOID
+ )
+{
+ UINTN ArchTimerVal;
+ ArmArchTimerReadReg (CntpTval, (VOID *)&ArchTimerVal);
+ return ArchTimerVal;
+}
+
+
+VOID
+EFIAPI
+ArmArchTimerSetTimerVal (
+ IN UINTN Val
+ )
+{
+ ArmArchTimerWriteReg (CntpTval, (VOID *)&Val);
+}
+
+UINT64
+EFIAPI
+ArmArchTimerGetSystemCount (
+ VOID
+ )
+{
+ UINT64 SystemCount;
+ ArmArchTimerReadReg (CntPct, (VOID *)&SystemCount);
+ return SystemCount;
+}
+
+UINTN
+EFIAPI
+ArmArchTimerGetTimerCtrlReg (
+ VOID
+ )
+{
+ UINTN Val;
+ ArmArchTimerReadReg (CntpCtl, (VOID *)&Val);
+ return Val;
+}
+
+VOID
+EFIAPI
+ArmArchTimerSetTimerCtrlReg (
+ UINTN Val
+ )
+{
+ ArmArchTimerWriteReg (CntpCtl, (VOID *)&Val);
+}
+
+VOID
+EFIAPI
+ArmArchTimerSetCompareVal (
+ IN UINT64 Val
+ )
+{
+ ArmArchTimerWriteReg (CntpCval, (VOID *)&Val);
+}
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64ArchTimerSupport.S b/ArmPkg/Library/ArmLib/AArch64/AArch64ArchTimerSupport.S
new file mode 100644
index 000000000..c6087aa61
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64ArchTimerSupport.S
@@ -0,0 +1,140 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#------------------------------------------------------------------------------
+
+.text
+.align 2
+
+ASM_GLOBAL ASM_PFX(ArmReadCntFrq)
+ASM_GLOBAL ASM_PFX(ArmWriteCntFrq)
+ASM_GLOBAL ASM_PFX(ArmReadCntPct)
+ASM_GLOBAL ASM_PFX(ArmReadCntkCtl)
+ASM_GLOBAL ASM_PFX(ArmWriteCntkCtl)
+ASM_GLOBAL ASM_PFX(ArmReadCntpTval)
+ASM_GLOBAL ASM_PFX(ArmWriteCntpTval)
+ASM_GLOBAL ASM_PFX(ArmReadCntpCtl)
+ASM_GLOBAL ASM_PFX(ArmWriteCntpCtl)
+ASM_GLOBAL ASM_PFX(ArmReadCntvTval)
+ASM_GLOBAL ASM_PFX(ArmWriteCntvTval)
+ASM_GLOBAL ASM_PFX(ArmReadCntvCtl)
+ASM_GLOBAL ASM_PFX(ArmWriteCntvCtl)
+ASM_GLOBAL ASM_PFX(ArmReadCntvCt)
+ASM_GLOBAL ASM_PFX(ArmReadCntpCval)
+ASM_GLOBAL ASM_PFX(ArmWriteCntpCval)
+ASM_GLOBAL ASM_PFX(ArmReadCntvCval)
+ASM_GLOBAL ASM_PFX(ArmWriteCntvCval)
+ASM_GLOBAL ASM_PFX(ArmReadCntvOff)
+ASM_GLOBAL ASM_PFX(ArmWriteCntvOff)
+
+ASM_PFX(ArmReadCntFrq):
+ mrs x0, cntfrq_el0 // Read CNTFRQ
+ ret
+
+
+# NOTE - Can only write while at highest implemented EL level (EL3 on model). Else ReadOnly (EL2, EL1, EL0)
+ASM_PFX(ArmWriteCntFrq):
+ msr cntfrq_el0, x0 // Write to CNTFRQ
+ ret
+
+
+ASM_PFX(ArmReadCntPct):
+ mrs x0, cntpct_el0 // Read CNTPCT (Physical counter register)
+ ret
+
+
+ASM_PFX(ArmReadCntkCtl):
+ mrs x0, cntkctl_el1 // Read CNTK_CTL (Timer PL1 Control Register)
+ ret
+
+
+ASM_PFX(ArmWriteCntkCtl):
+ msr cntkctl_el1, x0 // Write to CNTKCTL_EL1 (Timer PL1 Control Register); was a copy/paste 'mrs' that read instead of wrote
+ ret
+
+
+ASM_PFX(ArmReadCntpTval):
+ mrs x0, cntp_tval_el0 // Read CNTP_TVAL (PL1 physical timer value register)
+ ret
+
+
+ASM_PFX(ArmWriteCntpTval):
+ msr cntp_tval_el0, x0 // Write to CNTP_TVAL (PL1 physical timer value register)
+ ret
+
+
+ASM_PFX(ArmReadCntpCtl):
+ mrs x0, cntp_ctl_el0 // Read CNTP_CTL (PL1 Physical Timer Control Register)
+ ret
+
+
+ASM_PFX(ArmWriteCntpCtl):
+ msr cntp_ctl_el0, x0 // Write to CNTP_CTL (PL1 Physical Timer Control Register)
+ ret
+
+
+ASM_PFX(ArmReadCntvTval):
+ mrs x0, cntv_tval_el0 // Read CNTV_TVAL (Virtual Timer Value register)
+ ret
+
+
+ASM_PFX(ArmWriteCntvTval):
+ msr cntv_tval_el0, x0 // Write to CNTV_TVAL (Virtual Timer Value register)
+ ret
+
+
+ASM_PFX(ArmReadCntvCtl):
+ mrs x0, cntv_ctl_el0 // Read CNTV_CTL (Virtual Timer Control Register)
+ ret
+
+
+ASM_PFX(ArmWriteCntvCtl):
+ msr cntv_ctl_el0, x0 // Write to CNTV_CTL (Virtual Timer Control Register)
+ ret
+
+
+ASM_PFX(ArmReadCntvCt):
+ mrs x0, cntvct_el0 // Read CNTVCT (Virtual Count Register)
+ ret
+
+
+ASM_PFX(ArmReadCntpCval):
+ mrs x0, cntp_cval_el0 // Read CNTP_CVAL (Physical Timer Compare Value Register)
+ ret
+
+
+ASM_PFX(ArmWriteCntpCval):
+ msr cntp_cval_el0, x0 // Write to CNTP_CVAL (Physical Timer Compare Value Register)
+ ret
+
+
+ASM_PFX(ArmReadCntvCval):
+ mrs x0, cntv_cval_el0 // Read CNTV_CVAL (Virtual Timer Compare Value Register)
+ ret
+
+
+ASM_PFX(ArmWriteCntvCval):
+ msr cntv_cval_el0, x0 // Write to CNTV_CVAL (Virtual Timer Compare Value Register)
+ ret
+
+
+ASM_PFX(ArmReadCntvOff):
+ mrs x0, cntvoff_el2 // Read CNTVOFF (virtual Offset register)
+ ret
+
+
+ASM_PFX(ArmWriteCntvOff):
+ msr cntvoff_el2, x0 // Write to CNTVOFF (Virtual Offset register)
+ ret
+
+
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.c b/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.c
new file mode 100644
index 000000000..fd7f14f9c
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.c
@@ -0,0 +1,263 @@
+/** @file
+
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+ Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include <Uefi.h>
+#include <Chipset/AArch64.h>
+#include <Library/ArmLib.h>
+#include <Library/BaseLib.h>
+#include <Library/IoLib.h>
+#include "AArch64Lib.h"
+#include "ArmLibPrivate.h"
+
+ARM_CACHE_TYPE
+EFIAPI
+ArmCacheType (
+ VOID
+ )
+{
+ return ARM_CACHE_TYPE_WRITE_BACK;
+}
+
+ARM_CACHE_ARCHITECTURE
+EFIAPI
+ArmCacheArchitecture (
+ VOID
+ )
+{
+ UINT32 CLIDR = ReadCLIDR ();
+
+ return (ARM_CACHE_ARCHITECTURE)CLIDR; // BugBug Fix Me
+}
+
+BOOLEAN
+EFIAPI
+ArmDataCachePresent (
+  VOID
+  )
+{
+  UINT32 CLIDR = ReadCLIDR ();
+
+  if ((CLIDR & 0x2) == 0x2) {
+    // Data cache exists (CLIDR Ctype1 bit1 => data or separate I/D caches)
+    return TRUE;
+  }
+  if ((CLIDR & 0x7) == 0x4) {
+    // Unified cache
+    return TRUE;
+  }
+
+  return FALSE;
+}
+
+UINTN
+EFIAPI
+ArmDataCacheSize (
+ VOID
+ )
+{
+ UINT32 NumSets;
+ UINT32 Associativity;
+ UINT32 LineSize;
+ UINT32 CCSIDR = ReadCCSIDR (0);
+
+ LineSize = (1 << ((CCSIDR & 0x7) + 2));
+ Associativity = ((CCSIDR >> 3) & 0x3ff) + 1;
+ NumSets = ((CCSIDR >> 13) & 0x7fff) + 1;
+
+ // LineSize is in words (4 byte chunks)
+ return NumSets * Associativity * LineSize * 4;
+}
+
+UINTN
+EFIAPI
+ArmDataCacheAssociativity (
+ VOID
+ )
+{
+ UINT32 CCSIDR = ReadCCSIDR (0);
+
+ return ((CCSIDR >> 3) & 0x3ff) + 1;
+}
+
+UINTN
+ArmDataCacheSets (
+ VOID
+ )
+{
+ UINT32 CCSIDR = ReadCCSIDR (0);
+
+ return ((CCSIDR >> 13) & 0x7fff) + 1;
+}
+
+UINTN
+EFIAPI
+ArmDataCacheLineLength (
+ VOID
+ )
+{
+ UINT32 CCSIDR = ReadCCSIDR (0) & 7;
+
+ // * 4 converts to bytes
+ return (1 << (CCSIDR + 2)) * 4;
+}
+
+BOOLEAN
+EFIAPI
+ArmInstructionCachePresent (
+ VOID
+ )
+{
+ UINT32 CLIDR = ReadCLIDR ();
+
+ if ((CLIDR & 1) == 1) {
+ // Instruction cache exists
+ return TRUE;
+ }
+ if ((CLIDR & 0x7) == 0x4) {
+ // Unified cache
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+UINTN
+EFIAPI
+ArmInstructionCacheSize (
+ VOID
+ )
+{
+ UINT32 NumSets;
+ UINT32 Associativity;
+ UINT32 LineSize;
+ UINT32 CCSIDR = ReadCCSIDR (1);
+
+ LineSize = (1 << ((CCSIDR & 0x7) + 2));
+ Associativity = ((CCSIDR >> 3) & 0x3ff) + 1;
+ NumSets = ((CCSIDR >> 13) & 0x7fff) + 1;
+
+ // LineSize is in words (4 byte chunks)
+ return NumSets * Associativity * LineSize * 4;
+}
+
+UINTN
+EFIAPI
+ArmInstructionCacheAssociativity (
+ VOID
+ )
+{
+ UINT32 CCSIDR = ReadCCSIDR (1);
+
+ return ((CCSIDR >> 3) & 0x3ff) + 1;
+}
+
+UINTN
+EFIAPI
+ArmInstructionCacheSets (
+ VOID
+ )
+{
+ UINT32 CCSIDR = ReadCCSIDR (1);
+
+ return ((CCSIDR >> 13) & 0x7fff) + 1;
+}
+
+UINTN
+EFIAPI
+ArmInstructionCacheLineLength (
+ VOID
+ )
+{
+ UINT32 CCSIDR = ReadCCSIDR (1) & 7;
+
+ // * 4 converts to bytes
+ return (1 << (CCSIDR + 2)) * 4;
+}
+
+
+VOID
+AArch64DataCacheOperation (
+ IN AARCH64_CACHE_OPERATION DataCacheOperation
+ )
+{
+ UINTN SavedInterruptState;
+
+ SavedInterruptState = ArmGetInterruptState ();
+ ArmDisableInterrupts();
+
+ AArch64AllDataCachesOperation (DataCacheOperation);
+
+ ArmDrainWriteBuffer ();
+
+ if (SavedInterruptState) {
+ ArmEnableInterrupts ();
+ }
+}
+
+
+VOID
+AArch64PoUDataCacheOperation (
+ IN AARCH64_CACHE_OPERATION DataCacheOperation
+ )
+{
+ UINTN SavedInterruptState;
+
+ SavedInterruptState = ArmGetInterruptState ();
+ ArmDisableInterrupts ();
+
+ AArch64PerformPoUDataCacheOperation (DataCacheOperation);
+
+ ArmDrainWriteBuffer ();
+
+ if (SavedInterruptState) {
+ ArmEnableInterrupts ();
+ }
+}
+
+VOID
+EFIAPI
+ArmInvalidateDataCache (
+ VOID
+ )
+{
+ AArch64DataCacheOperation (ArmInvalidateDataCacheEntryBySetWay);
+}
+
+VOID
+EFIAPI
+ArmCleanInvalidateDataCache (
+ VOID
+ )
+{
+ AArch64DataCacheOperation (ArmCleanInvalidateDataCacheEntryBySetWay);
+}
+
+VOID
+EFIAPI
+ArmCleanDataCache (
+ VOID
+ )
+{
+ AArch64DataCacheOperation (ArmCleanDataCacheEntryBySetWay);
+}
+
+VOID
+EFIAPI
+ArmCleanDataCacheToPoU (
+ VOID
+ )
+{
+ AArch64PoUDataCacheOperation (ArmCleanDataCacheEntryBySetWay);
+}
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.h b/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.h
new file mode 100644
index 000000000..04e3be042
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.h
@@ -0,0 +1,98 @@
+/** @file
+
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+ Portions Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef __AARCH64_LIB_H__
+#define __AARCH64_LIB_H__
+
+typedef VOID (*AARCH64_CACHE_OPERATION)(UINTN);
+
+VOID
+EFIAPI
+ArmDrainWriteBuffer (
+ VOID
+ );
+
+VOID
+EFIAPI
+ArmInvalidateDataCacheEntryBySetWay (
+ IN UINTN SetWayFormat
+ );
+
+VOID
+EFIAPI
+ArmCleanDataCacheEntryBySetWay (
+ IN UINTN SetWayFormat
+ );
+
+VOID
+EFIAPI
+ArmCleanDataCacheToPoUEntryBySetWay (
+ IN UINTN SetWayFormat
+ );
+
+VOID
+EFIAPI
+ArmCleanInvalidateDataCacheEntryBySetWay (
+ IN UINTN SetWayFormat
+ );
+
+VOID
+EFIAPI
+ArmEnableAsynchronousAbort (
+ VOID
+ );
+
+UINTN
+EFIAPI
+ArmDisableAsynchronousAbort (
+ VOID
+ );
+
+VOID
+EFIAPI
+ArmEnableIrq (
+ VOID
+ );
+
+UINTN
+EFIAPI
+ArmDisableIrq (
+ VOID
+ );
+
+VOID
+EFIAPI
+ArmEnableFiq (
+ VOID
+ );
+
+UINTN
+EFIAPI
+ArmDisableFiq (
+ VOID
+ );
+
+VOID
+AArch64PerformPoUDataCacheOperation (
+ IN AARCH64_CACHE_OPERATION DataCacheOperation
+ );
+
+VOID
+AArch64AllDataCachesOperation (
+ IN AARCH64_CACHE_OPERATION DataCacheOperation
+ );
+
+#endif // __AARCH64_LIB_H__
+
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.inf b/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.inf
new file mode 100644
index 000000000..dca0b22dd
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64Lib.inf
@@ -0,0 +1,44 @@
+#/** @file
+#
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
+# Portions copyright (c) 2011-2013, ARM Limited. All rights reserved.
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#
+#**/
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = AArch64Lib
+ FILE_GUID = ef20ddf5-b334-47b3-94cf-52ff44c29138
+ MODULE_TYPE = DXE_DRIVER
+ VERSION_STRING = 1.0
+ LIBRARY_CLASS = ArmLib
+
+[Sources.AARCH64]
+ AArch64Lib.c
+ AArch64Mmu.c
+ AArch64ArchTimer.c
+ ArmLibSupportV8.S | GCC
+ ../Common/AArch64/ArmLibSupport.S | GCC
+ AArch64Support.S | GCC
+ AArch64ArchTimerSupport.S | GCC
+
+[Packages]
+ ArmPkg/ArmPkg.dec
+ MdePkg/MdePkg.dec
+
+[LibraryClasses]
+ MemoryAllocationLib
+
+[Protocols]
+ gEfiCpuArchProtocolGuid
+
+[FixedPcd]
+ gArmTokenSpaceGuid.PcdArmCacheOperationThreshold
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64LibPrePi.inf b/ArmPkg/Library/ArmLib/AArch64/AArch64LibPrePi.inf
new file mode 100644
index 000000000..42f7e5628
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64LibPrePi.inf
@@ -0,0 +1,48 @@
+#/** @file
+#
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
+# Portions copyright (c) 2011-2013, ARM Ltd. All rights reserved.<BR>
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#
+#**/
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = AArch64LibPrePi
+ FILE_GUID = fd72688d-dbd8-4cf2-91a3-15171dea7816
+ MODULE_TYPE = BASE
+ VERSION_STRING = 1.0
+ LIBRARY_CLASS = ArmLib
+
+[Sources.common]
+ ArmLibSupportV8.S | GCC
+ AArch64Support.S | GCC
+
+ ../Common/AArch64/ArmLibSupport.S | GCC
+ ../Common/ArmLib.c
+
+ AArch64Lib.c
+ AArch64Mmu.c
+
+ AArch64ArchTimer.c
+ AArch64ArchTimerSupport.S | GCC
+
+[Packages]
+ ArmPkg/ArmPkg.dec
+ MdePkg/MdePkg.dec
+
+[LibraryClasses]
+ PrePiLib
+
+[Protocols]
+ gEfiCpuArchProtocolGuid
+
+[FixedPcd]
+ gArmTokenSpaceGuid.PcdArmCacheOperationThreshold
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64LibSec.inf b/ArmPkg/Library/ArmLib/AArch64/AArch64LibSec.inf
new file mode 100644
index 000000000..9bb0bd21d
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64LibSec.inf
@@ -0,0 +1,43 @@
+#/* @file
+#
+# Copyright (c) 2011-2013, ARM Limited. All rights reserved.
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#*/
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = AArch64Lib
+ FILE_GUID = eb7441e4-3ddf-48b8-a009-14f428b19e49
+ MODULE_TYPE = BASE
+ VERSION_STRING = 1.0
+ LIBRARY_CLASS = ArmLib
+
+[Sources.common]
+ ArmLibSupportV8.S | GCC
+ AArch64Support.S | GCC
+ ArmLib.c
+
+ ../Common/AArch64/ArmLibSupport.S | GCC
+
+ AArch64Lib.c
+
+ AArch64ArchTimer.c
+ AArch64ArchTimerSupport.S | GCC
+
+[Packages]
+ ArmPkg/ArmPkg.dec
+ MdePkg/MdePkg.dec
+
+[Protocols]
+ gEfiCpuArchProtocolGuid
+
+[FixedPcd]
+ gArmTokenSpaceGuid.PcdArmCacheOperationThreshold
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64Mmu.c b/ArmPkg/Library/ArmLib/AArch64/AArch64Mmu.c
new file mode 100644
index 000000000..bca397dcf
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64Mmu.c
@@ -0,0 +1,671 @@
+/** @file
+* File managing the MMU for ARMv8 architecture
+*
+* Copyright (c) 2011-2013, ARM Limited. All rights reserved.
+*
+* This program and the accompanying materials
+* are licensed and made available under the terms and conditions of the BSD License
+* which accompanies this distribution. The full text of the license may be found at
+* http://opensource.org/licenses/bsd-license.php
+*
+* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+*
+**/
+
+#include <Uefi.h>
+#include <Chipset/AArch64.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/ArmLib.h>
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include "AArch64Lib.h"
+#include "ArmLibPrivate.h"
+
+// We use this index definition to define an invalid block entry
+#define TT_ATTR_INDX_INVALID ((UINT32)~0)
+
+STATIC
+UINT64
+ArmMemoryAttributeToPageAttribute (
+ IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
+ )
+{
+ switch (Attributes) {
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
+ return TT_ATTR_INDX_MEMORY_WRITE_BACK;
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
+ return TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
+ case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
+ return TT_ATTR_INDX_DEVICE_MEMORY;
+ case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
+ return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
+ return TT_ATTR_INDX_MEMORY_WRITE_BACK;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
+ return TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
+ return TT_ATTR_INDX_DEVICE_MEMORY;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
+ return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
+ default:
+ ASSERT(0);
+ return TT_ATTR_INDX_DEVICE_MEMORY;
+ }
+}
+
+UINT64
+PageAttributeToGcdAttribute (
+ IN UINT64 PageAttributes
+ )
+{
+ UINT64 GcdAttributes;
+
+ switch (PageAttributes & TT_ATTR_INDX_MASK) {
+ case TT_ATTR_INDX_DEVICE_MEMORY:
+ GcdAttributes = EFI_MEMORY_UC;
+ break;
+ case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
+ GcdAttributes = EFI_MEMORY_WC;
+ break;
+ case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
+ GcdAttributes = EFI_MEMORY_WT;
+ break;
+ case TT_ATTR_INDX_MEMORY_WRITE_BACK:
+ GcdAttributes = EFI_MEMORY_WB;
+ break;
+ default:
+ DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
+ ASSERT (0);
+ // The Global Coherency Domain (GCD) value is defined as a bit set.
+ // Returning 0 means no attribute has been set.
+ GcdAttributes = 0;
+ }
+
+ // Determine protection attributes
+ if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
+ // Read only cases map to write-protect
+ GcdAttributes |= EFI_MEMORY_WP;
+ }
+
+ // Process eXecute Never attribute
+ if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {
+ GcdAttributes |= EFI_MEMORY_XP;
+ }
+
+ return GcdAttributes;
+}
+
+UINT64
+GcdAttributeToPageAttribute (
+ IN UINT64 GcdAttributes
+ )
+{
+ UINT64 PageAttributes;
+
+ switch (GcdAttributes & 0xFF) {
+ case EFI_MEMORY_UC:
+ PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
+ break;
+ case EFI_MEMORY_WC:
+ PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
+ break;
+ case EFI_MEMORY_WT:
+ PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
+ break;
+ case EFI_MEMORY_WB:
+ PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK;
+ break;
+ default:
+ DEBUG ((EFI_D_ERROR, "GcdAttributeToPageAttribute: 0x%X attributes is not supported.\n", GcdAttributes));
+ ASSERT (0);
+ // If no match has been found then we mark the memory as device memory.
+ // The only side effect of using device memory should be a slow down in the performance.
+ PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
+ }
+
+ // Determine protection attributes
+ if (GcdAttributes & EFI_MEMORY_WP) {
+ // Read only cases map to write-protect
+ PageAttributes |= TT_AP_RO_RO;
+ }
+
+ // Process eXecute Never attribute
+ if (GcdAttributes & EFI_MEMORY_XP) {
+ PageAttributes |= (TT_PXN_MASK | TT_UXN_MASK);
+ }
+
+ return PageAttributes;
+}
+
+ARM_MEMORY_REGION_ATTRIBUTES
+GcdAttributeToArmAttribute (
+ IN UINT64 GcdAttributes
+ )
+{
+ switch (GcdAttributes & 0xFF) {
+ case EFI_MEMORY_UC:
+ return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
+ case EFI_MEMORY_WC:
+ return ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED;
+ case EFI_MEMORY_WT:
+ return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH;
+ case EFI_MEMORY_WB:
+ return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;
+ default:
+ DEBUG ((EFI_D_ERROR, "GcdAttributeToArmAttribute: 0x%lX attributes is not supported.\n", GcdAttributes));
+ ASSERT (0);
+ return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
+ }
+}
+
+// Describe the T0SZ values for each translation table level
+typedef struct {
+ UINTN MinT0SZ;
+ UINTN MaxT0SZ;
+ UINTN LargestT0SZ; // Generally (MaxT0SZ == LargestT0SZ) but at the Level3 Table
+ // the MaxT0SZ is not at the boundary of the table
+} T0SZ_DESCRIPTION_PER_LEVEL;
+
+// Map table for the corresponding Level of Table
+STATIC CONST T0SZ_DESCRIPTION_PER_LEVEL T0SZPerTableLevel[] = {
+ { 16, 24, 24 }, // Table Level 0
+ { 25, 33, 33 }, // Table Level 1
+ { 34, 39, 42 } // Table Level 2
+};
+
+VOID
+GetRootTranslationTableInfo (
+ IN UINTN T0SZ,
+ OUT UINTN *TableLevel,
+ OUT UINTN *TableEntryCount
+ )
+{
+ UINTN Index;
+
+ // Identify the level of the root table from the given T0SZ
+ for (Index = 0; Index < sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL); Index++) {
+ if (T0SZ <= T0SZPerTableLevel[Index].MaxT0SZ) {
+ break;
+ }
+ }
+
+ // If we have not found the corresponding maximum T0SZ then we use the last one
+ if (Index == sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL)) {
+ Index--;
+ }
+
+ // Get the level of the root table
+ if (TableLevel) {
+ *TableLevel = Index;
+ }
+
+ // The Size of the Table is 2^(T0SZ-LargestT0SZ)
+ if (TableEntryCount) {
+ *TableEntryCount = 1 << (T0SZPerTableLevel[Index].LargestT0SZ - T0SZ + 1);
+ }
+}
+
+STATIC
+VOID
+LookupAddresstoRootTable (
+ IN UINT64 MaxAddress,
+ OUT UINTN *T0SZ,
+ OUT UINTN *TableEntryCount
+ )
+{
+ UINTN TopBit;
+
+ // Check the parameters are not NULL
+ ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));
+
+ // Look for the highest bit set in MaxAddress
+ for (TopBit = 63; TopBit != 0; TopBit--) {
+ if ((1ULL << TopBit) & MaxAddress) {
+ // MaxAddress top bit is found
+ TopBit = TopBit + 1;
+ break;
+ }
+ }
+ ASSERT (TopBit != 0);
+
+ // Calculate T0SZ from the top bit of the MaxAddress
+ *T0SZ = 64 - TopBit;
+
+ // Get the Table info from T0SZ
+ GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
+}
+
+STATIC
+UINT64*
+GetBlockEntryListFromAddress (
+ IN UINT64 *RootTable,
+ IN UINT64 RegionStart,
+ OUT UINTN *TableLevel,
+ IN OUT UINT64 *BlockEntrySize,
+ IN OUT UINT64 **LastBlockEntry
+ )
+{
+ UINTN RootTableLevel;
+ UINTN RootTableEntryCount;
+ UINT64 *TranslationTable;
+ UINT64 *BlockEntry;
+ UINT64 BlockEntryAddress;
+ UINTN BaseAddressAlignment;
+ UINTN PageLevel;
+ UINTN Index;
+ UINTN IndexLevel;
+ UINTN T0SZ;
+ UINT64 Attributes;
+ UINT64 TableAttributes;
+
+ // Initialize variable
+ BlockEntry = NULL;
+
+ // Ensure the parameters are valid
+ ASSERT (TableLevel && BlockEntrySize && LastBlockEntry);
+
+ // Ensure the Region is aligned on 4KB boundary
+ ASSERT ((RegionStart & (SIZE_4KB - 1)) == 0);
+
+ // Ensure the required size is aligned on 4KB boundary
+ ASSERT ((*BlockEntrySize & (SIZE_4KB - 1)) == 0);
+
+ //
+ // Calculate LastBlockEntry from T0SZ - this is the last block entry of the root Translation table
+ //
+ T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
+ // Get the Table info from T0SZ
+ GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);
+ // The last block of the root table depends on the number of entry in this table
+ *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(RootTable, RootTableEntryCount);
+
+ // If the start address is 0x0 then we use the size of the region to identify the alignment
+ if (RegionStart == 0) {
+ // Identify the highest possible alignment for the Region Size
+ for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {
+ if ((1 << BaseAddressAlignment) & *BlockEntrySize) {
+ break;
+ }
+ }
+ } else {
+ // Identify the highest possible alignment for the Base Address
+ for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {
+ if ((1 << BaseAddressAlignment) & RegionStart) {
+ break;
+ }
+ }
+ }
+
+ // Identify the Page Level the RegionStart must belongs to
+ PageLevel = 3 - ((BaseAddressAlignment - 12) / 9);
+
+ // If the required size is smaller than the current block size then we need to go to the page below.
+ // The PageLevel was calculated on the Base Address alignment but did not take in account the alignment
+ // of the allocation size
+ if (*BlockEntrySize < TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel)) {
+ // It does not fit so we need to go a page level above
+ PageLevel++;
+ }
+
+ // Expose the found PageLevel to the caller
+ *TableLevel = PageLevel;
+
+ // Now, we have the Table Level we can get the Block Size associated to this table
+ *BlockEntrySize = TT_BLOCK_ENTRY_SIZE_AT_LEVEL (PageLevel);
+
+ //
+ // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
+ //
+
+ TranslationTable = RootTable;
+ for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
+ BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);
+
+ if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
+ // Go to the next table
+ TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
+
+ // If we are at the last level then update the output
+ if (IndexLevel == PageLevel) {
+ // And get the appropriate BlockEntry at the next level
+ BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel + 1, RegionStart);
+
+ // Set the last block for this new table
+ *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable, TT_ENTRY_COUNT);
+ }
+ } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
+ // If we are not at the last level then we need to split this BlockEntry
+ if (IndexLevel != PageLevel) {
+ // Retrieve the attributes from the block entry
+ Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;
+
+ // Convert the block entry attributes into Table descriptor attributes
+ TableAttributes = TT_TABLE_AP_NO_PERMISSION;
+ if (Attributes & TT_PXN_MASK) {
+ TableAttributes = TT_TABLE_PXN;
+ }
+ if (Attributes & TT_UXN_MASK) {
+ TableAttributes = TT_TABLE_XN;
+ }
+ if (Attributes & TT_NS) {
+ TableAttributes = TT_TABLE_NS;
+ }
+
+ // Get the address corresponding at this entry
+ BlockEntryAddress = RegionStart;
+ BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
+ // Shift back to right to set zero before the effective address
+ BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
+
+ // Set the correct entry type for the next page level
+ if ((IndexLevel + 1) == 3) {
+ Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
+ } else {
+ Attributes |= TT_TYPE_BLOCK_ENTRY;
+ }
+
+ // Create a new translation table
+ TranslationTable = (UINT64*)AllocatePages (EFI_SIZE_TO_PAGES((TT_ENTRY_COUNT * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE));
+ if (TranslationTable == NULL) {
+ return NULL;
+ }
+ TranslationTable = (UINT64*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
+
+ // Fill the BlockEntry with the new TranslationTable
+ *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY;
+ // Update the last block entry with the newly created translation table
+ *LastBlockEntry = TT_LAST_BLOCK_ADDRESS(TranslationTable, TT_ENTRY_COUNT);
+
+ // Populate the newly created lower level table
+ BlockEntry = TranslationTable;
+ for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
+ *BlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
+ BlockEntry++;
+ }
+ // Block Entry points at the beginning of the Translation Table
+ BlockEntry = TranslationTable;
+ }
+ } else {
+ // Case of Invalid Entry and we are at a page level above of the one targetted.
+ if (IndexLevel != PageLevel) {
+ // Create a new translation table
+ TranslationTable = (UINT64*)AllocatePages (EFI_SIZE_TO_PAGES((TT_ENTRY_COUNT * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE));
+ if (TranslationTable == NULL) {
+ return NULL;
+ }
+ TranslationTable = (UINT64*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
+
+ ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));
+
+ // Fill the new BlockEntry with the TranslationTable
+ *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
+ }
+ }
+ }
+
+ return BlockEntry;
+}
+
+/** Map one memory region into the given translation tables.
+
+  Walks the region in chunks: for each chunk GetBlockEntryListFromAddress()
+  returns the table level, the block size at that level and the first/last
+  block entries to fill, allocating lower-level tables on demand.
+
+  NOTE(review): the output address written into each block entry is
+  RegionStart, i.e. MemoryRegion->VirtualBase; PhysicalBase is never read
+  here, so this effectively assumes an identity (VA == PA) mapping — confirm
+  against the callers.
+
+  @param RootTable     Root (TTBR0-level) translation table to populate.
+  @param MemoryRegion  Region to map; Length must be non-zero and a multiple
+                       of 4KB (asserted below).
+
+  @retval RETURN_SUCCESS           The whole region was mapped.
+  @retval RETURN_OUT_OF_RESOURCES  A lower-level table could not be allocated.
+**/
+STATIC
+RETURN_STATUS
+FillTranslationTable (
+  IN UINT64 *RootTable,
+  IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
+  )
+{
+  UINT64 Attributes;
+  UINT32 Type;
+  UINT64 RegionStart;
+  UINT64 RemainingRegionLength;
+  UINT64 *BlockEntry;
+  UINT64 *LastBlockEntry;
+  UINT64 BlockEntrySize;
+  UINTN TableLevel;
+
+  // Ensure the Length is aligned on 4KB boundary
+  ASSERT ((MemoryRegion->Length > 0) && ((MemoryRegion->Length & (SIZE_4KB - 1)) == 0));
+
+  // Variable initialization
+  Attributes = ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF;
+  RemainingRegionLength = MemoryRegion->Length;
+  RegionStart = MemoryRegion->VirtualBase;
+
+  do {
+    // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
+    // such as the the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
+    // BlockEntrySize is in/out: in = remaining size to map, out = block size at the chosen level.
+    BlockEntrySize = RemainingRegionLength;
+    BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
+    if (BlockEntry == NULL) {
+      // GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables
+      return RETURN_OUT_OF_RESOURCES;
+    }
+
+    // Level 3 entries use a distinct descriptor type encoding from level 0-2 block entries.
+    if (TableLevel != 3) {
+      Type = TT_TYPE_BLOCK_ENTRY;
+    } else {
+      Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
+    }
+
+    do {
+      // Fill the Block Entry with attribute and output block address
+      *BlockEntry = (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;
+
+      // Go to the next BlockEntry
+      RegionStart += BlockEntrySize;
+      RemainingRegionLength -= BlockEntrySize;
+      BlockEntry++;
+      // Stop when the table's last entry is passed, or when less than one
+      // block remains (the outer loop will then pick a finer-grained level).
+    } while ((RemainingRegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
+  } while (RemainingRegionLength != 0);
+
+  return RETURN_SUCCESS;
+}
+
+/** Remap [BaseAddress, BaseAddress + Length) with new memory attributes.
+
+  Builds an identity-mapped region descriptor, rewrites the live translation
+  tables pointed to by TTBR0, then flushes caches and TLBs so the new
+  descriptors take effect.
+
+  NOTE(review): VirtualMask is not consulted anywhere in this function; the
+  region is mapped with VirtualBase == PhysicalBase == BaseAddress. Confirm
+  callers expect this.
+
+  @param BaseAddress  Start of the region (identity mapped).
+  @param Length       Size of the region in bytes.
+  @param Attributes   GCD attributes to apply (converted to ARM attributes).
+  @param VirtualMask  Currently unused.
+
+  @retval RETURN_SUCCESS  Attributes updated.
+  @return Errors propagated from FillTranslationTable().
+**/
+RETURN_STATUS
+SetMemoryAttributes (
+  IN EFI_PHYSICAL_ADDRESS BaseAddress,
+  IN UINT64 Length,
+  IN UINT64 Attributes,
+  IN EFI_PHYSICAL_ADDRESS VirtualMask
+  )
+{
+  ARM_MEMORY_REGION_DESCRIPTOR Region;
+  UINT64 *RootTable;
+  RETURN_STATUS FillStatus;
+
+  Region.PhysicalBase = BaseAddress;
+  Region.VirtualBase  = BaseAddress;
+  Region.Length       = Length;
+  Region.Attributes   = GcdAttributeToArmAttribute (Attributes);
+
+  // Update the tables the MMU is actually walking.
+  RootTable = ArmGetTTBR0BaseAddress ();
+
+  FillStatus = FillTranslationTable (RootTable, &Region);
+  if (RETURN_ERROR (FillStatus)) {
+    return FillStatus;
+  }
+
+  // Push the updated descriptors out to memory and drop any stale cached
+  // copies or TLB entries before they can be observed by table walks.
+  ArmCleanInvalidateDataCache ();
+  ArmInvalidateInstructionCache ();
+  ArmInvalidateTlb ();
+
+  return RETURN_SUCCESS;
+}
+
+/** Build the stage 1 translation tables for MemoryTable and enable the MMU.
+
+  TCR and TTBR0 are programmed before the tables are populated because the
+  table-building helpers retrieve T0SZ and the root table location from those
+  registers; the MMU itself is only enabled once the tables are complete.
+
+  @param[in]  MemoryTable           Zero-length-terminated array of regions to map.
+  @param[out] TranslationTableBase  If not NULL, receives the root table address.
+  @param[out] TranslationTableSize  If not NULL, receives the root table size in bytes.
+
+  @retval RETURN_SUCCESS           Tables built, MMU and caches enabled.
+  @retval RETURN_UNSUPPORTED       Address space wider than supported, running
+                                   at an unexpected EL, or the root table is
+                                   not covered by any MemoryTable region.
+  @retval RETURN_OUT_OF_RESOURCES  Root table allocation failed.
+**/
+RETURN_STATUS
+EFIAPI
+ArmConfigureMmu (
+  IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
+  OUT VOID **TranslationTableBase OPTIONAL,
+  OUT UINTN *TranslationTableSize OPTIONAL
+  )
+{
+  VOID* TranslationTable;
+  UINTN TranslationTablePageCount;
+  UINT32 TranslationTableAttribute;
+  ARM_MEMORY_REGION_DESCRIPTOR *MemoryTableEntry;
+  UINT64 MaxAddress;
+  UINT64 TopAddress;
+  UINTN T0SZ;
+  UINTN RootTableEntryCount;
+  UINT64 TCR;
+  RETURN_STATUS Status;
+
+  ASSERT (MemoryTable != NULL);
+
+  // Identify the highest address of the memory table
+  MaxAddress = MemoryTable->PhysicalBase + MemoryTable->Length - 1;
+  MemoryTableEntry = MemoryTable;
+  while (MemoryTableEntry->Length != 0) {
+    TopAddress = MemoryTableEntry->PhysicalBase + MemoryTableEntry->Length - 1;
+    if (TopAddress > MaxAddress) {
+      MaxAddress = TopAddress;
+    }
+    MemoryTableEntry++;
+  }
+
+  // Lookup the Table Level to get the information
+  LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);
+
+  //
+  // Set TCR that allows us to retrieve T0SZ in the subsequent functions
+  //
+  // Ideally we will be running at EL2, but should support EL1 as well.
+  // UEFI should not run at EL3.
+  if (ArmReadCurrentEL () == AARCH64_EL2) {
+    //Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
+    TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;
+
+    // Set the Physical Address Size using MaxAddress
+    if (MaxAddress < SIZE_4GB) {
+      TCR |= TCR_PS_4GB;
+    } else if (MaxAddress < SIZE_64GB) {
+      TCR |= TCR_PS_64GB;
+    } else if (MaxAddress < SIZE_1TB) {
+      TCR |= TCR_PS_1TB;
+    } else if (MaxAddress < SIZE_4TB) {
+      TCR |= TCR_PS_4TB;
+    } else if (MaxAddress < SIZE_16TB) {
+      TCR |= TCR_PS_16TB;
+    } else if (MaxAddress < SIZE_256TB) {
+      TCR |= TCR_PS_256TB;
+    } else {
+      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
+      ASSERT (0); // Bigger than 48-bit memory space are not supported
+      return RETURN_UNSUPPORTED;
+    }
+  } else if (ArmReadCurrentEL () == AARCH64_EL1) {
+    TCR = T0SZ | TCR_TG0_4KB;
+
+    // Set the Physical Address Size using MaxAddress
+    if (MaxAddress < SIZE_4GB) {
+      TCR |= TCR_IPS_4GB;
+    } else if (MaxAddress < SIZE_64GB) {
+      TCR |= TCR_IPS_64GB;
+    } else if (MaxAddress < SIZE_1TB) {
+      TCR |= TCR_IPS_1TB;
+    } else if (MaxAddress < SIZE_4TB) {
+      TCR |= TCR_IPS_4TB;
+    } else if (MaxAddress < SIZE_16TB) {
+      TCR |= TCR_IPS_16TB;
+    } else if (MaxAddress < SIZE_256TB) {
+      TCR |= TCR_IPS_256TB;
+    } else {
+      DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n", MaxAddress));
+      ASSERT (0); // Bigger than 48-bit memory space are not supported
+      return RETURN_UNSUPPORTED;
+    }
+  } else {
+    ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
+    return RETURN_UNSUPPORTED;
+  }
+
+  // Set TCR
+  ArmSetTCR (TCR);
+
+  // Allocate pages for translation table
+  TranslationTablePageCount = EFI_SIZE_TO_PAGES((RootTableEntryCount * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE);
+  TranslationTable = AllocatePages (TranslationTablePageCount);
+  if (TranslationTable == NULL) {
+    return RETURN_OUT_OF_RESOURCES;
+  }
+  // NOTE(review): masking aligns the pointer down; if the mask ever changes
+  // the pointer, the FreePages() below would not receive the address returned
+  // by AllocatePages() -- confirm the alignment requirement is <= page size.
+  TranslationTable = (VOID*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
+  // We set TTBR0 just after allocating the table to retrieve its location from the subsequent
+  // functions without needing to pass this value across the functions. The MMU is only enabled
+  // after the translation tables are populated.
+  ArmSetTTBR0 (TranslationTable);
+
+  if (TranslationTableBase != NULL) {
+    *TranslationTableBase = TranslationTable;
+  }
+
+  if (TranslationTableSize != NULL) {
+    *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
+  }
+
+  ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));
+
+  // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
+  ArmDisableMmu ();
+  ArmDisableDataCache ();
+  ArmDisableInstructionCache ();
+
+  // Make sure nothing sneaked into the cache
+  ArmCleanInvalidateDataCache ();
+  ArmInvalidateInstructionCache ();
+
+  TranslationTableAttribute = TT_ATTR_INDX_INVALID;
+  while (MemoryTable->Length != 0) {
+    // Find the memory attribute for the Translation Table
+    if (((UINTN)TranslationTable >= MemoryTable->PhysicalBase) &&
+        ((UINTN)TranslationTable <= MemoryTable->PhysicalBase - 1 + MemoryTable->Length)) {
+      TranslationTableAttribute = MemoryTable->Attributes;
+    }
+
+    Status = FillTranslationTable (TranslationTable, MemoryTable);
+    if (RETURN_ERROR (Status)) {
+      goto FREE_TRANSLATION_TABLE;
+    }
+    MemoryTable++;
+  }
+
+  // Translate the Memory Attributes into Translation Table Register Attributes
+  if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED) ||
+      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED)) {
+    TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_NON_CACHEABLE | TCR_RGN_INNER_NON_CACHEABLE;
+  } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK) ||
+      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK)) {
+    TCR |= TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WRITE_BACK_ALLOC | TCR_RGN_INNER_WRITE_BACK_ALLOC;
+  } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH) ||
+      (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH)) {
+    TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_WRITE_THROUGH | TCR_RGN_INNER_WRITE_THROUGH;
+  } else {
+    // If we failed to find a mapping that contains the root translation table then it probably means the translation table
+    // is not mapped in the given memory map.
+    ASSERT (0);
+    Status = RETURN_UNSUPPORTED;
+    goto FREE_TRANSLATION_TABLE;
+  }
+
+  // Write the updated TCR back to the register: the value programmed earlier
+  // (needed so the table-building code could read T0SZ) did not yet contain
+  // the shareability/cacheability attributes selected above, and without this
+  // second write those attributes would never take effect for table walks.
+  ArmSetTCR (TCR);
+
+  ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) | // mapped to EFI_MEMORY_UC
+              MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
+              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
+              MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)); // mapped to EFI_MEMORY_WB
+
+  ArmDisableAlignmentCheck ();
+  ArmEnableInstructionCache ();
+  ArmEnableDataCache ();
+
+  ArmEnableMmu ();
+  return RETURN_SUCCESS;
+
+FREE_TRANSLATION_TABLE:
+  FreePages (TranslationTable, TranslationTablePageCount);
+  return Status;
+}
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64Support.S b/ArmPkg/Library/ArmLib/AArch64/AArch64Support.S
new file mode 100644
index 000000000..ad9fdda52
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64Support.S
@@ -0,0 +1,503 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
+# Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#------------------------------------------------------------------------------
+
+#include <Chipset/AArch64.h>
+#include <AsmMacroIoLibV8.h>
+
+.text
+.align 3
+
+GCC_ASM_EXPORT (ArmInvalidateInstructionCache)
+GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryByMVA)
+GCC_ASM_EXPORT (ArmCleanDataCacheEntryByMVA)
+GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryByMVA)
+GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryBySetWay)
+GCC_ASM_EXPORT (ArmCleanDataCacheEntryBySetWay)
+GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryBySetWay)
+GCC_ASM_EXPORT (ArmDrainWriteBuffer)
+GCC_ASM_EXPORT (ArmEnableMmu)
+GCC_ASM_EXPORT (ArmDisableMmu)
+GCC_ASM_EXPORT (ArmDisableCachesAndMmu)
+GCC_ASM_EXPORT (ArmMmuEnabled)
+GCC_ASM_EXPORT (ArmEnableDataCache)
+GCC_ASM_EXPORT (ArmDisableDataCache)
+GCC_ASM_EXPORT (ArmEnableInstructionCache)
+GCC_ASM_EXPORT (ArmDisableInstructionCache)
+GCC_ASM_EXPORT (ArmDisableAlignmentCheck)
+GCC_ASM_EXPORT (ArmEnableAlignmentCheck)
+GCC_ASM_EXPORT (ArmEnableBranchPrediction)
+GCC_ASM_EXPORT (ArmDisableBranchPrediction)
+GCC_ASM_EXPORT (AArch64AllDataCachesOperation)
+GCC_ASM_EXPORT (AArch64PerformPoUDataCacheOperation)
+GCC_ASM_EXPORT (ArmDataMemoryBarrier)
+GCC_ASM_EXPORT (ArmDataSyncronizationBarrier)
+GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)
+GCC_ASM_EXPORT (ArmWriteVBar)
+GCC_ASM_EXPORT (ArmVFPImplemented)
+GCC_ASM_EXPORT (ArmEnableVFP)
+GCC_ASM_EXPORT (ArmCallWFI)
+GCC_ASM_EXPORT (ArmInvalidateInstructionAndDataTlb)
+GCC_ASM_EXPORT (ArmReadMpidr)
+GCC_ASM_EXPORT (ArmReadTpidrurw)
+GCC_ASM_EXPORT (ArmWriteTpidrurw)
+GCC_ASM_EXPORT (ArmIsArchTimerImplemented)
+GCC_ASM_EXPORT (ArmReadIdPfr0)
+GCC_ASM_EXPORT (ArmReadIdPfr1)
+GCC_ASM_EXPORT (ArmWriteHcr)
+GCC_ASM_EXPORT (ArmReadCurrentEL)
+
+// SCTLR_ELx control bits used below:
+//   M = MMU enable, A = alignment check, C = data cache, I = instruction cache.
+.set CTRL_M_BIT,      (1 << 0)
+.set CTRL_A_BIT,      (1 << 1)
+.set CTRL_C_BIT,      (1 << 2)
+.set CTRL_I_BIT,      (1 << 12)
+// NOTE(review): CTRL_V_BIT duplicates CTRL_I_BIT's value. On AArch32 the
+// SCTLR V bit is bit 13, and AArch64 SCTLR has no V bit; this constant is
+// unused in this file -- confirm before relying on it.
+.set CTRL_V_BIT,      (1 << 12)
+// CPACR_EL1.FPEN (bits 21:20) == 0b11: do not trap FP/SIMD accesses.
+.set CPACR_VFP_BITS,  (3 << 20)
+
+// VOID ArmInvalidateDataCacheEntryByMVA (UINTN Address);
+// Invalidate the data cache line containing Address, then synchronise.
+ASM_PFX(ArmInvalidateDataCacheEntryByMVA):
+  dc      ivac, x0   // Invalidate single data cache line
+  dsb     sy
+  isb
+  ret
+
+
+// VOID ArmCleanDataCacheEntryByMVA (UINTN Address);
+ASM_PFX(ArmCleanDataCacheEntryByMVA):
+  dc      cvac, x0    // Clean single data cache line
+  dsb     sy
+  isb
+  ret
+
+
+// VOID ArmCleanInvalidateDataCacheEntryByMVA (UINTN Address);
+ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):
+  dc      civac, x0   // Clean and invalidate single data cache line
+  dsb     sy
+  isb
+  ret
+
+
+// VOID ArmInvalidateDataCacheEntryBySetWay (UINTN SetWayFormat);
+// x0 is in DC xSW set/way/level encoding (as built by the walker below).
+ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):
+  dc      isw, x0     // Invalidate this line
+  dsb     sy
+  isb
+  ret
+
+
+// VOID ArmCleanInvalidateDataCacheEntryBySetWay (UINTN SetWayFormat);
+ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):
+  dc      cisw, x0    // Clean and Invalidate this line
+  dsb     sy
+  isb
+  ret
+
+
+// VOID ArmCleanDataCacheEntryBySetWay (UINTN SetWayFormat);
+ASM_PFX(ArmCleanDataCacheEntryBySetWay):
+  dc      csw, x0     // Clean this line
+  dsb     sy
+  isb
+  ret
+
+
+// VOID ArmInvalidateInstructionCache (VOID);
+ASM_PFX(ArmInvalidateInstructionCache):
+  ic      iallu       // Invalidate entire instruction cache
+  dsb     sy
+  isb
+  ret
+
+
+// VOID ArmEnableMmu (VOID);
+// EL1_OR_EL2_OR_EL3(x1) dispatches to local label 1:/2:/3: for the current
+// exception level (EL1/EL2/EL3); see AsmMacroIoLibV8.h. Invalidates the TLBs
+// for the current EL before setting SCTLR_ELx.M.
+ASM_PFX(ArmEnableMmu):
+   EL1_OR_EL2_OR_EL3(x1)
+1: mrs     x0, sctlr_el1       // Read System control register EL1
+   b       4f
+2: mrs     x0, sctlr_el2       // Read System control register EL2
+   b       4f
+3: mrs     x0, sctlr_el3       // Read System control register EL3
+4: orr     x0, x0, #CTRL_M_BIT // Set MMU enable bit
+   EL1_OR_EL2_OR_EL3(x1)
+1: tlbi    vmalle1             // Invalidate stale EL1 TLB entries first
+   isb
+   msr     sctlr_el1, x0       // Write back
+   b       4f
+2: tlbi    alle2
+   isb
+   msr     sctlr_el2, x0       // Write back
+   b       4f
+3: tlbi    alle3
+   isb
+   msr     sctlr_el3, x0       // Write back
+4: dsb     sy
+   isb
+   ret
+
+
+// VOID ArmDisableMmu (VOID);
+// Clears SCTLR_ELx.M, then invalidates the TLBs for the current EL.
+ASM_PFX(ArmDisableMmu):
+   EL1_OR_EL2_OR_EL3(x1)
+1: mrs     x0, sctlr_el1        // Read System Control Register EL1
+   b       4f
+2: mrs     x0, sctlr_el2        // Read System Control Register EL2
+   b       4f
+3: mrs     x0, sctlr_el3        // Read System Control Register EL3
+4: bic     x0, x0, #CTRL_M_BIT  // Clear MMU enable bit
+   EL1_OR_EL2_OR_EL3(x1)
+1: msr     sctlr_el1, x0        // Write back
+   tlbi    vmalle1
+   b       4f
+2: msr     sctlr_el2, x0        // Write back
+   tlbi    alle2
+   b       4f
+3: msr     sctlr_el3, x0        // Write back
+   tlbi    alle3
+4: dsb     sy
+   isb
+   ret
+
+
+// VOID ArmDisableCachesAndMmu (VOID);
+// Clears the M, C and I bits of SCTLR_ELx in a single write.
+ASM_PFX(ArmDisableCachesAndMmu):
+   EL1_OR_EL2_OR_EL3(x1)
+1: mrs     x0, sctlr_el1        // Get control register EL1
+   b       4f
+2: mrs     x0, sctlr_el2        // Get control register EL2
+   b       4f
+3: mrs     x0, sctlr_el3        // Get control register EL3
+4: bic     x0, x0, #CTRL_M_BIT  // Disable MMU
+   bic     x0, x0, #CTRL_C_BIT  // Disable D Cache
+   bic     x0, x0, #CTRL_I_BIT  // Disable I Cache
+   EL1_OR_EL2_OR_EL3(x1)
+1: msr     sctlr_el1, x0        // Write back control register
+   b       4f
+2: msr     sctlr_el2, x0        // Write back control register
+   b       4f
+3: msr     sctlr_el3, x0        // Write back control register
+4: dsb     sy
+   isb
+   ret
+
+
+// UINTN ArmMmuEnabled (VOID);
+// Returns CTRL_M_BIT (nonzero) when the MMU is enabled at the current EL,
+// zero otherwise.
+ASM_PFX(ArmMmuEnabled):
+   EL1_OR_EL2_OR_EL3(x1)
+1: mrs     x0, sctlr_el1        // Get control register EL1
+   b       4f
+2: mrs     x0, sctlr_el2        // Get control register EL2
+   b       4f
+3: mrs     x0, sctlr_el3        // Get control register EL3
+4: and     x0, x0, #CTRL_M_BIT
+   ret
+
+
+// VOID ArmEnableDataCache (VOID);
+// Read-modify-write of SCTLR_ELx to set the C (data cache) bit.
+ASM_PFX(ArmEnableDataCache):
+   EL1_OR_EL2_OR_EL3(x1)
+1: mrs     x0, sctlr_el1        // Get control register EL1
+   b       4f
+2: mrs     x0, sctlr_el2        // Get control register EL2
+   b       4f
+3: mrs     x0, sctlr_el3        // Get control register EL3
+4: orr     x0, x0, #CTRL_C_BIT  // Set C bit
+   EL1_OR_EL2_OR_EL3(x1)
+1: msr     sctlr_el1, x0        // Write back control register
+   b       4f
+2: msr     sctlr_el2, x0        // Write back control register
+   b       4f
+3: msr     sctlr_el3, x0        // Write back control register
+4: dsb     sy
+   isb
+   ret
+
+
+// VOID ArmDisableDataCache (VOID);
+ASM_PFX(ArmDisableDataCache):
+   EL1_OR_EL2_OR_EL3(x1)
+1: mrs     x0, sctlr_el1        // Get control register EL1
+   b       4f
+2: mrs     x0, sctlr_el2        // Get control register EL2
+   b       4f
+3: mrs     x0, sctlr_el3        // Get control register EL3
+4: bic     x0, x0, #CTRL_C_BIT  // Clear C bit
+   EL1_OR_EL2_OR_EL3(x1)
+1: msr     sctlr_el1, x0        // Write back control register
+   b       4f
+2: msr     sctlr_el2, x0        // Write back control register
+   b       4f
+3: msr     sctlr_el3, x0        // Write back control register
+4: dsb     sy
+   isb
+   ret
+
+
+// VOID ArmEnableInstructionCache (VOID);
+ASM_PFX(ArmEnableInstructionCache):
+   EL1_OR_EL2_OR_EL3(x1)
+1: mrs     x0, sctlr_el1        // Get control register EL1
+   b       4f
+2: mrs     x0, sctlr_el2        // Get control register EL2
+   b       4f
+3: mrs     x0, sctlr_el3        // Get control register EL3
+4: orr     x0, x0, #CTRL_I_BIT  // Set I bit
+   EL1_OR_EL2_OR_EL3(x1)
+1: msr     sctlr_el1, x0        // Write back control register
+   b       4f
+2: msr     sctlr_el2, x0        // Write back control register
+   b       4f
+3: msr     sctlr_el3, x0        // Write back control register
+4: dsb     sy
+   isb
+   ret
+
+
+// VOID ArmDisableInstructionCache (VOID);
+ASM_PFX(ArmDisableInstructionCache):
+   EL1_OR_EL2_OR_EL3(x1)
+1: mrs     x0, sctlr_el1        // Get control register EL1
+   b       4f
+2: mrs     x0, sctlr_el2        // Get control register EL2
+   b       4f
+3: mrs     x0, sctlr_el3        // Get control register EL3
+4: bic     x0, x0, #CTRL_I_BIT  // Clear I bit
+   EL1_OR_EL2_OR_EL3(x1)
+1: msr     sctlr_el1, x0        // Write back control register
+   b       4f
+2: msr     sctlr_el2, x0        // Write back control register
+   b       4f
+3: msr     sctlr_el3, x0        // Write back control register
+4: dsb     sy
+   isb
+   ret
+
+
+// VOID ArmEnableAlignmentCheck (VOID);
+// NOTE(review): unlike the other SCTLR helpers in this file, this one only
+// handles EL1 and EL2 (EL1_OR_EL2, no EL3 case) -- confirm this is intended.
+ASM_PFX(ArmEnableAlignmentCheck):
+   EL1_OR_EL2(x1)
+1: mrs     x0, sctlr_el1        // Get control register EL1
+   b       3f
+2: mrs     x0, sctlr_el2        // Get control register EL2
+3: orr     x0, x0, #CTRL_A_BIT  // Set A (alignment check) bit
+   EL1_OR_EL2(x1)
+1: msr     sctlr_el1, x0        // Write back control register
+   b       3f
+2: msr     sctlr_el2, x0        // Write back control register
+3: dsb     sy
+   isb
+   ret
+
+
+// VOID ArmDisableAlignmentCheck (VOID);
+ASM_PFX(ArmDisableAlignmentCheck):
+   EL1_OR_EL2_OR_EL3(x1)
+1: mrs     x0, sctlr_el1        // Get control register EL1
+   b       4f
+2: mrs     x0, sctlr_el2        // Get control register EL2
+   b       4f
+3: mrs     x0, sctlr_el3        // Get control register EL3
+4: bic     x0, x0, #CTRL_A_BIT  // Clear A (alignment check) bit
+   EL1_OR_EL2_OR_EL3(x1)
+1: msr     sctlr_el1, x0        // Write back control register
+   b       4f
+2: msr     sctlr_el2, x0        // Write back control register
+   b       4f
+3: msr     sctlr_el3, x0        // Write back control register
+4: dsb     sy
+   isb
+   ret
+
+
+// Always turned on in AArch64. Else implementation specific. Leave in for C compatibility for now
+ASM_PFX(ArmEnableBranchPrediction):
+  ret
+
+
+// Always turned on in AArch64. Else implementation specific. Leave in for C compatibility for now.
+ASM_PFX(ArmDisableBranchPrediction):
+  ret
+
+
+// VOID AArch64AllDataCachesOperation (AARCH64_CACHE_OPERATION Operation);
+// Walks every data/unified cache level up to the Level of Coherency (LoC),
+// calling the set/way operation passed in x0 for every set and way.
+ASM_PFX(AArch64AllDataCachesOperation):
+// We can use regs 0-7 and 9-15 without having to save/restore.
+// Save our link register on the stack.
+  str   x30, [sp, #-0x10]!
+  mov   x1, x0                  // Save Function call in x1
+  mrs   x6, clidr_el1           // Read EL1 CLIDR
+  and   x3, x6, #0x7000000      // Mask out all but Level of Coherency (LoC), CLIDR bits [26:24]
+  lsr   x3, x3, #23             // x3 = LoC * 2 (cache level counter below advances in steps of 2)
+  cbz   x3, L_Finished          // No need to clean if LoC is 0
+  mov   x10, #0                 // Start clean at cache level 0
+  b     Loop1
+
+// VOID AArch64PerformPoUDataCacheOperation (AARCH64_CACHE_OPERATION Operation);
+// Same walk as above, but only up to the Point of Unification (CLIDR LoUU).
+ASM_PFX(AArch64PerformPoUDataCacheOperation):
+// We can use regs 0-7 and 9-15 without having to save/restore.
+// Save our link register on the stack.
+  str   x30, [sp, #-0x10]!
+  mov   x1, x0                  // Save Function call in x1
+  mrs   x6, clidr_el1           // Read EL1 CLIDR
+  and   x3, x6, #0x38000000     // Mask out all but Point of Unification (PoU), CLIDR bits [29:27]
+  lsr   x3, x3, #26             // x3 = LoUU * 2 (cache level counter below advances in steps of 2)
+  cbz   x3, L_Finished          // No need to clean if the PoU level is 0
+  mov   x10, #0                 // Start clean at cache level 0
+
+// Shared walker: x1 = operation, x3 = 2 * last level, x10 = 2 * current level.
+Loop1:
+  add   x2, x10, x10, lsr #1    // Work out 3x cachelevel for cache info
+  lsr   x12, x6, x2             // bottom 3 bits are the Cache type for this level
+  and   x12, x12, #7            // get those 3 bits alone
+  cmp   x12, #2                 // what cache at this level?
+  b.lt  L_Skip                  // no cache or only instruction cache at this level
+  msr   csselr_el1, x10         // write the Cache Size selection register with current level (CSSELR)
+  isb                           // isb to sync the change to the CacheSizeID reg
+  mrs   x12, ccsidr_el1         // reads current Cache Size ID register (CCSIDR)
+  and   x2, x12, #0x7           // extract the line length field
+  add   x2, x2, #4              // add 4 for the line length offset (log2 16 bytes)
+  mov   x4, #0x400
+  sub   x4, x4, #1
+  and   x4, x4, x12, lsr #3     // x4 is the max number on the way size (right aligned)
+  clz   w5, w4                  // w5 is the bit position of the way size increment
+  mov   x7, #0x00008000
+  sub   x7, x7, #1
+  and   x7, x7, x12, lsr #13    // x7 is the max number of the index size (right aligned)
+
+Loop2:
+  mov   x9, x4                  // x9 working copy of the max way size (right aligned)
+
+Loop3:
+  lsl   x11, x9, x5
+  orr   x0, x10, x11            // factor in the way number and cache number
+  lsl   x11, x7, x2
+  orr   x0, x0, x11             // factor in the index number
+
+  blr   x1                      // Goto requested cache operation
+
+  subs  x9, x9, #1              // decrement the way number
+  b.ge  Loop3
+  subs  x7, x7, #1              // decrement the index
+  b.ge  Loop2
+L_Skip:
+  add   x10, x10, #2            // increment the cache number
+  cmp   x3, x10
+  b.gt  Loop1
+
+L_Finished:
+  dsb   sy
+  isb
+  ldr   x30, [sp], #0x10        // Restore saved link register
+  ret
+
+
+// VOID ArmDataMemoryBarrier (VOID);
+ASM_PFX(ArmDataMemoryBarrier):
+  dmb   sy
+  ret
+
+
+// VOID ArmDataSyncronizationBarrier (VOID);  [sic - exported name keeps the typo]
+// ArmDrainWriteBuffer is an alias of the same entry, kept for API compatibility.
+ASM_PFX(ArmDataSyncronizationBarrier):
+ASM_PFX(ArmDrainWriteBuffer):
+  dsb   sy
+  ret
+
+
+// VOID ArmInstructionSynchronizationBarrier (VOID);
+ASM_PFX(ArmInstructionSynchronizationBarrier):
+  isb
+  ret
+
+
+// VOID ArmWriteVBar (UINTN VectorTableBase);
+// Programs VBAR_ELx for the current exception level.
+ASM_PFX(ArmWriteVBar):
+   EL1_OR_EL2_OR_EL3(x1)
+1: msr   vbar_el1, x0            // Set the Address of the EL1 Vector Table in the VBAR register
+   b     4f
+2: msr   vbar_el2, x0            // Set the Address of the EL2 Vector Table in the VBAR register
+   b     4f
+3: msr   vbar_el3, x0            // Set the Address of the EL3 Vector Table in the VBAR register
+4: isb
+   ret
+
+// VOID ArmEnableVFP (VOID);
+// If FP/SIMD is implemented (ID_AA64PFR0.FP == 0), clears the CPACR/CPTR
+// bits so FP accesses are not trapped at the current exception level.
+ASM_PFX(ArmEnableVFP):
+  // Check whether floating-point is implemented in the processor.
+  mov   x1, x30                 // Save LR
+  bl    ArmReadIdPfr0           // Read EL1 Processor Feature Register (PFR0)
+  mov   x30, x1                 // Restore LR
+  ands  x0, x0, #AARCH64_PFR0_FP// Extract bits indicating VFP implementation
+  cmp   x0, #0                  // VFP is implemented if '0'.
+  b.ne  4f                      // Exit if VFP not implemented.
+  // VFP is implemented.
+  // Make sure VFP exceptions are not trapped (to any exception level).
+  mrs   x0, cpacr_el1           // Read EL1 Coprocessor Access Control Register (CPACR)
+  orr   x0, x0, #CPACR_VFP_BITS // Disable VFP traps to EL1
+  msr   cpacr_el1, x0           // Write back EL1 Coprocessor Access Control Register (CPACR)
+  mov   x1, #AARCH64_CPTR_TFP   // TFP Bit for trapping VFP Exceptions
+  EL1_OR_EL2_OR_EL3(x2)
+1:ret                           // Not configurable in EL1
+2:mrs   x0, cptr_el2            // Disable VFP traps to EL2
+  bic   x0, x0, x1
+  msr   cptr_el2, x0
+  ret
+3:mrs   x0, cptr_el3            // Disable VFP traps to EL3
+  bic   x0, x0, x1
+  msr   cptr_el3, x0
+4:ret
+
+
+// VOID ArmCallWFI (VOID);
+ASM_PFX(ArmCallWFI):
+  wfi
+  ret
+
+
+// VOID ArmInvalidateInstructionAndDataTlb (VOID);
+// Invalidates all TLB entries at the current exception level.
+ASM_PFX(ArmInvalidateInstructionAndDataTlb):
+   EL1_OR_EL2_OR_EL3(x0)
+1: tlbi  vmalle1
+   b     4f
+2: tlbi  alle2
+   b     4f
+3: tlbi  alle3
+4: dsb   sy
+   isb
+   ret
+
+
+// UINTN ArmReadMpidr (VOID);
+ASM_PFX(ArmReadMpidr):
+  mrs   x0, mpidr_el1           // read EL1 MPIDR
+  ret
+
+
+// UINTN ArmReadTpidrurw (VOID);
+// Keep old function names for C compatibilty for now. Change later?
+ASM_PFX(ArmReadTpidrurw):
+  mrs   x0, tpidr_el0           // read tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)
+  ret
+
+
+// VOID ArmWriteTpidrurw (UINTN Value);
+// Keep old function names for C compatibilty for now. Change later?
+ASM_PFX(ArmWriteTpidrurw):
+  msr   tpidr_el0, x0           // write tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)
+  ret
+
+
+// BOOLEAN ArmIsArchTimerImplemented (VOID);
+// Arch timers are mandatory on AArch64, so always return TRUE.
+ASM_PFX(ArmIsArchTimerImplemented):
+  mov   x0, #1
+  ret
+
+
+// UINTN ArmReadIdPfr0 (VOID);
+ASM_PFX(ArmReadIdPfr0):
+  mrs   x0, id_aa64pfr0_el1   // Read ID_AA64PFR0 Register
+  ret
+
+
+// UINTN ArmReadIdPfr1 (VOID);
+// Q: id_aa64pfr1_el1 not defined yet. What does this funtion want to access?
+// A: used to setup arch timer. Check if we have security extensions, permissions to set stuff.
+// See: ArmPkg/Library/ArmArchTimerLib/AArch64/ArmArchTimerLib.c
+// Not defined yet, but stick in here for now, should read all zeros.
+ASM_PFX(ArmReadIdPfr1):
+  mrs   x0, id_aa64pfr1_el1   // Read ID_PFR1 Register
+  ret
+
+// VOID ArmWriteHcr(UINTN Hcr)
+ASM_PFX(ArmWriteHcr):
+  msr   hcr_el2, x0        // Write the passed HCR value
+  ret
+
+// UINTN ArmReadCurrentEL(VOID)
+ASM_PFX(ArmReadCurrentEL):
+  mrs   x0, CurrentEL
+  ret
+
+// Unreferenced catch-all spin loop.
+dead:
+  b     dead
+
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED
diff --git a/ArmPkg/Library/ArmLib/AArch64/ArmLib.c b/ArmPkg/Library/ArmLib/AArch64/ArmLib.c
new file mode 100644
index 000000000..fa95d352d
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/ArmLib.c
@@ -0,0 +1,53 @@
+/** @file
+
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+ portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include <Base.h>
+
+#include <Library/ArmLib.h>
+#include <Library/DebugLib.h>
+#include <Library/PcdLib.h>
+
+#include "ArmLibPrivate.h"
+
+/** Populate CacheInfo with the properties of the CPU caches.
+
+  Every field is filled from the corresponding ArmCache*() / ArmData*() /
+  ArmInstruction*() accessor. A NULL CacheInfo is silently ignored.
+
+  @param[out] CacheInfo  Structure to populate; may be NULL.
+**/
+VOID
+EFIAPI
+ArmCacheInformation (
+  OUT ARM_CACHE_INFO  *CacheInfo
+  )
+{
+  if (CacheInfo == NULL) {
+    return;
+  }
+
+  CacheInfo->Type                          = ArmCacheType ();
+  CacheInfo->Architecture                  = ArmCacheArchitecture ();
+  CacheInfo->DataCachePresent              = ArmDataCachePresent ();
+  CacheInfo->DataCacheSize                 = ArmDataCacheSize ();
+  CacheInfo->DataCacheAssociativity        = ArmDataCacheAssociativity ();
+  CacheInfo->DataCacheLineLength           = ArmDataCacheLineLength ();
+  CacheInfo->InstructionCachePresent       = ArmInstructionCachePresent ();
+  CacheInfo->InstructionCacheSize          = ArmInstructionCacheSize ();
+  CacheInfo->InstructionCacheAssociativity = ArmInstructionCacheAssociativity ();
+  CacheInfo->InstructionCacheLineLength    = ArmInstructionCacheLineLength ();
+}
+
+/** Set the requested bits in the Auxiliary Control Register.
+
+  Performs a read-modify-write: ACTLR |= Bits.
+
+  @param Bits  Mask of bits to set in the Auxiliary Control Register.
+**/
+VOID
+EFIAPI
+ArmSetAuxCrBit (
+  IN  UINT32    Bits
+  )
+{
+  ArmWriteAuxCr (ArmReadAuxCr () | Bits);
+}
diff --git a/ArmPkg/Library/ArmLib/AArch64/ArmLibPrivate.h b/ArmPkg/Library/ArmLib/AArch64/ArmLibPrivate.h
new file mode 100644
index 000000000..d2804fc10
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/ArmLibPrivate.h
@@ -0,0 +1,82 @@
+/** @file
+
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+ Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
+
+ This program and the accompanying materials
+ are licensed and made available under the terms and conditions of the BSD License
+ which accompanies this distribution. The full text of the license may be found at
+ http://opensource.org/licenses/bsd-license.php
+
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef __ARM_LIB_PRIVATE_H__
+#define __ARM_LIB_PRIVATE_H__
+
+// Encodings for the cache size / associativity / line-length fields decoded
+// by the SIZE_FIELD_TO_* macros below.
+// NOTE(review): this field layout matches the legacy ARMv5/ARMv6-style Cache
+// Type Register, not the AArch64 CCSIDR_EL1 format -- confirm which register
+// value these macros are actually applied to.
+#define CACHE_SIZE_4_KB (3UL)
+#define CACHE_SIZE_8_KB (4UL)
+#define CACHE_SIZE_16_KB (5UL)
+#define CACHE_SIZE_32_KB (6UL)
+#define CACHE_SIZE_64_KB (7UL)
+#define CACHE_SIZE_128_KB (8UL)
+
+#define CACHE_ASSOCIATIVITY_DIRECT (0UL)
+#define CACHE_ASSOCIATIVITY_4_WAY (2UL)
+#define CACHE_ASSOCIATIVITY_8_WAY (3UL)
+
+#define CACHE_PRESENT (0UL)
+#define CACHE_NOT_PRESENT (1UL)
+
+#define CACHE_LINE_LENGTH_32_BYTES (2UL)
+
+// Extract the sub-fields of one 12-bit cache-size descriptor.
+#define SIZE_FIELD_TO_CACHE_SIZE(x) (((x) >> 6) & 0x0F)
+#define SIZE_FIELD_TO_CACHE_ASSOCIATIVITY(x) (((x) >> 3) & 0x07)
+#define SIZE_FIELD_TO_CACHE_PRESENCE(x) (((x) >> 2) & 0x01)
+#define SIZE_FIELD_TO_CACHE_LINE_LENGTH(x) (((x) >> 0) & 0x03)
+
+// Split a cache type register value into its data (bits 23:12) and
+// instruction (bits 11:0) size descriptors.
+#define DATA_CACHE_SIZE_FIELD(x) (((x) >> 12) & 0x0FFF)
+#define INSTRUCTION_CACHE_SIZE_FIELD(x) (((x) >> 0) & 0x0FFF)
+
+#define DATA_CACHE_SIZE(x) (SIZE_FIELD_TO_CACHE_SIZE(DATA_CACHE_SIZE_FIELD(x)))
+#define DATA_CACHE_ASSOCIATIVITY(x) (SIZE_FIELD_TO_CACHE_ASSOCIATIVITY(DATA_CACHE_SIZE_FIELD(x)))
+#define DATA_CACHE_PRESENT(x) (SIZE_FIELD_TO_CACHE_PRESENCE(DATA_CACHE_SIZE_FIELD(x)))
+#define DATA_CACHE_LINE_LENGTH(x) (SIZE_FIELD_TO_CACHE_LINE_LENGTH(DATA_CACHE_SIZE_FIELD(x)))
+
+#define INSTRUCTION_CACHE_SIZE(x) (SIZE_FIELD_TO_CACHE_SIZE(INSTRUCTION_CACHE_SIZE_FIELD(x)))
+#define INSTRUCTION_CACHE_ASSOCIATIVITY(x) (SIZE_FIELD_TO_CACHE_ASSOCIATIVITY(INSTRUCTION_CACHE_SIZE_FIELD(x)))
+#define INSTRUCTION_CACHE_PRESENT(x) (SIZE_FIELD_TO_CACHE_PRESENCE(INSTRUCTION_CACHE_SIZE_FIELD(x)))
+#define INSTRUCTION_CACHE_LINE_LENGTH(x) (SIZE_FIELD_TO_CACHE_LINE_LENGTH(INSTRUCTION_CACHE_SIZE_FIELD(x)))
+
+#define CACHE_TYPE(x) (((x) >> 25) & 0x0F)
+#define CACHE_TYPE_WRITE_BACK (0x0EUL)
+
+#define CACHE_ARCHITECTURE(x) (((x) >> 24) & 0x01)
+#define CACHE_ARCHITECTURE_UNIFIED (0UL)
+#define CACHE_ARCHITECTURE_SEPARATE (1UL)
+
+
+// NOTE(review): CPSR is an AArch32 concept; on AArch64 these presumably
+// operate on DAIF/PSTATE -- confirm against the assembly implementations.
+VOID
+CPSRMaskInsert (
+  IN  UINT32  Mask,
+  IN  UINT32  Value
+  );
+
+UINT32
+CPSRRead (
+  VOID
+  );
+
+// Read CCSIDR_EL1 for the cache level/type selected by CSSELR.
+UINT32
+ReadCCSIDR (
+  IN UINT32 CSSELR
+  );
+
+// Read the Cache Level ID register (CLIDR_EL1).
+UINT32
+ReadCLIDR (
+  VOID
+  );
+
+#endif // __ARM_LIB_PRIVATE_H__
diff --git a/ArmPkg/Library/ArmLib/AArch64/ArmLibSupportV8.S b/ArmPkg/Library/ArmLib/AArch64/ArmLibSupportV8.S
new file mode 100644
index 000000000..aee3dc118
--- /dev/null
+++ b/ArmPkg/Library/ArmLib/AArch64/ArmLibSupportV8.S
@@ -0,0 +1,127 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+# Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.
+#
+# This program and the accompanying materials
+# are licensed and made available under the terms and conditions of the BSD License
+# which accompanies this distribution. The full text of the license may be found at
+# http://opensource.org/licenses/bsd-license.php
+#
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+#
+#------------------------------------------------------------------------------
+
+#include <AsmMacroIoLib.h>
+
+.text
+.align 3
+
+GCC_ASM_EXPORT (ArmIsMpCore)
+GCC_ASM_EXPORT (ArmEnableAsynchronousAbort)
+GCC_ASM_EXPORT (ArmDisableAsynchronousAbort)
+GCC_ASM_EXPORT (ArmEnableIrq)
+GCC_ASM_EXPORT (ArmDisableIrq)
+GCC_ASM_EXPORT (ArmEnableFiq)
+GCC_ASM_EXPORT (ArmDisableFiq)
+GCC_ASM_EXPORT (ArmEnableInterrupts)
+GCC_ASM_EXPORT (ArmDisableInterrupts)
+GCC_ASM_EXPORT (ArmDisableAllExceptions)
+GCC_ASM_EXPORT (ReadCCSIDR)
+GCC_ASM_EXPORT (ReadCLIDR)
+
+#------------------------------------------------------------------------------
+
+.set MPIDR_U_BIT, (30)
+.set MPIDR_U_MASK, (1 << MPIDR_U_BIT)
+.set DAIF_FIQ_BIT, (1 << 0)
+.set DAIF_IRQ_BIT, (1 << 1)
+.set DAIF_ABORT_BIT, (1 << 2)
+.set DAIF_DEBUG_BIT, (1 << 3)
+.set DAIF_INT_BITS, (DAIF_FIQ_BIT | DAIF_IRQ_BIT)
+.set DAIF_ALL, (DAIF_DEBUG_BIT | DAIF_ABORT_BIT | DAIF_INT_BITS)
+
+
+ASM_PFX(ArmIsMpCore):
+  mrs x0, mpidr_el1 // Read EL1 Multiprocessor Affinity Register (MPIDR_EL1)
+  and x0, x0, #MPIDR_U_MASK // Isolate the U bit (bit 30): 0 = part of a multiprocessor system
+  lsr x0, x0, #MPIDR_U_BIT // Shift the U bit down to bit 0
+  eor x0, x0, #1 // Invert it: return 1 when MP (U == 0), 0 when uniprocessor
+  ret
+
+
+ASM_PFX(ArmEnableAsynchronousAbort):
+  msr daifclr, #DAIF_ABORT_BIT // Clear PSTATE.A: unmask asynchronous (SError) aborts
+  isb // Instruction Synchronization Barrier: make the mask change take effect immediately
+  ret
+
+
+ASM_PFX(ArmDisableAsynchronousAbort):
+  msr daifset, #DAIF_ABORT_BIT // Set PSTATE.A: mask asynchronous (SError) aborts
+  isb // Ensure the mask change is visible before returning
+  ret
+
+
+ASM_PFX(ArmEnableIrq):
+  msr daifclr, #DAIF_IRQ_BIT // Clear PSTATE.I: unmask IRQ interrupts
+  isb // Ensure the mask change is visible before returning
+  ret
+
+
+ASM_PFX(ArmDisableIrq):
+  msr daifset, #DAIF_IRQ_BIT // Set PSTATE.I: mask IRQ interrupts
+  isb // Ensure the mask change is visible before returning
+  ret
+
+
+ASM_PFX(ArmEnableFiq):
+  msr daifclr, #DAIF_FIQ_BIT // Clear PSTATE.F: unmask FIQ interrupts
+  isb // Ensure the mask change is visible before returning
+  ret
+
+
+ASM_PFX(ArmDisableFiq):
+  msr daifset, #DAIF_FIQ_BIT // Set PSTATE.F: mask FIQ interrupts
+  isb // Ensure the mask change is visible before returning
+  ret
+
+
+ASM_PFX(ArmEnableInterrupts):
+  msr daifclr, #DAIF_INT_BITS // Clear PSTATE.I and PSTATE.F: unmask both IRQ and FIQ
+  isb // Ensure the mask change is visible before returning
+  ret
+
+
+ASM_PFX(ArmDisableInterrupts):
+  msr daifset, #DAIF_INT_BITS // Set PSTATE.I and PSTATE.F: mask both IRQ and FIQ
+  isb // Ensure the mask change is visible before returning
+  ret
+
+
+ASM_PFX(ArmDisableAllExceptions):
+  msr daifset, #DAIF_ALL // Set D, A, I and F: mask debug, SError, IRQ and FIQ
+  isb // Ensure the mask change is visible before returning
+  ret
+
+
+// UINT32
+// ReadCCSIDR (
+//   IN UINT32 CSSELR
+//   )
+ASM_PFX(ReadCCSIDR):
+  msr csselr_el1, x0 // Select cache level/type by writing the Cache Size Selection Register (CSSELR_EL1)
+  isb // Ensure the CSSELR_EL1 write takes effect before the CCSIDR_EL1 read
+  mrs x0, ccsidr_el1 // Read the Cache Size ID Register (CCSIDR_EL1) for the selected cache
+  ret
+
+
+// UINT32
+// ReadCLIDR (
+//   VOID
+//   )
+ASM_PFX(ReadCLIDR):
+  mrs x0, clidr_el1 // Read the Cache Level ID Register (CLIDR_EL1)
+  ret
+
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED