aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJan Beulich <jbeulich@suse.com>2014-10-06 15:47:12 +0200
committerJan Beulich <jbeulich@suse.com>2014-10-06 15:47:12 +0200
commitd65a9d68949be6042ff745826f95a4512b92b2ee (patch)
tree92e19505b3b6f5357fba77c51687fa41b123fefd
parent7a8f483f462266b0bb0813a3a816cac27910c833 (diff)
parent1aa064329a4cf313ed9f0b5a0306237585dbddb9 (diff)
Merge branch 'staging' of xenbits.xen.org:/home/xen/git/xen into staging
-rw-r--r--tools/libxl/libxl.c2
-rw-r--r--xen/arch/arm/arm64/domctl.c3
-rw-r--r--xen/arch/arm/domain_build.c7
-rw-r--r--xen/arch/arm/p2m.c137
-rw-r--r--xen/arch/arm/setup.c10
-rw-r--r--xen/arch/arm/vgic-v3.c108
-rw-r--r--xen/include/asm-arm/cpufeature.h3
7 files changed, 207 insertions, 63 deletions
diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index 3f0a7ffba2..9c72df27d9 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -4764,7 +4764,7 @@ int libxl_wait_for_memory_target(libxl_ctx *ctx, uint32_t domid, int wait_secs)
rc = ERROR_FAIL;
out:
- return 0;
+ return rc;
}
int libxl_get_physinfo(libxl_ctx *ctx, libxl_physinfo *physinfo)
diff --git a/xen/arch/arm/arm64/domctl.c b/xen/arch/arm/arm64/domctl.c
index 41e2562898..c0ff248bae 100644
--- a/xen/arch/arm/arm64/domctl.c
+++ b/xen/arch/arm/arm64/domctl.c
@@ -11,6 +11,7 @@
#include <xen/sched.h>
#include <xen/hypercall.h>
#include <public/domctl.h>
+#include <asm/cpufeature.h>
static long switch_mode(struct domain *d, enum domain_type type)
{
@@ -35,6 +36,8 @@ long subarch_do_domctl(struct xen_domctl *domctl, struct domain *d,
switch ( domctl->u.address_size.size )
{
case 32:
+ if ( !cpu_has_el1_32 )
+ return -EINVAL;
return switch_mode(d, DOMAIN_32BIT);
case 64:
return switch_mode(d, DOMAIN_64BIT);
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 90abc3ad12..138ca89a30 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -17,6 +17,7 @@
#include <asm/platform.h>
#include <asm/psci.h>
#include <asm/setup.h>
+#include <asm/cpufeature.h>
#include <asm/gic.h>
#include <xen/irq.h>
@@ -1274,6 +1275,12 @@ int construct_dom0(struct domain *d)
return rc;
#ifdef CONFIG_ARM_64
+ /* If AArch32 mode is not supported at EL1, do not allow a 32-bit domain. */
+ if ( !(cpu_has_el1_32) && kinfo.type == DOMAIN_32BIT )
+ {
+ printk("Platform does not support 32-bit domain\n");
+ return -EINVAL;
+ }
d->arch.type = kinfo.type;
#endif
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 7044431937..1585d359ca 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -227,6 +227,76 @@ int p2m_pod_decrease_reservation(struct domain *d,
return -ENOSYS;
}
+static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a)
+{
+ /* First apply type permissions */
+ switch ( t )
+ {
+ case p2m_ram_rw:
+ e->p2m.xn = 0;
+ e->p2m.write = 1;
+ break;
+
+ case p2m_ram_ro:
+ e->p2m.xn = 0;
+ e->p2m.write = 0;
+ break;
+
+ case p2m_iommu_map_rw:
+ case p2m_map_foreign:
+ case p2m_grant_map_rw:
+ case p2m_mmio_direct:
+ e->p2m.xn = 1;
+ e->p2m.write = 1;
+ break;
+
+ case p2m_iommu_map_ro:
+ case p2m_grant_map_ro:
+ case p2m_invalid:
+ e->p2m.xn = 1;
+ e->p2m.write = 0;
+ break;
+
+ case p2m_max_real_type:
+ BUG();
+ break;
+ }
+
+ /* Then restrict with access permissions */
+ switch ( a )
+ {
+ case p2m_access_rwx:
+ break;
+ case p2m_access_wx:
+ e->p2m.read = 0;
+ break;
+ case p2m_access_rw:
+ e->p2m.xn = 1;
+ break;
+ case p2m_access_w:
+ e->p2m.read = 0;
+ e->p2m.xn = 1;
+ break;
+ case p2m_access_rx:
+ case p2m_access_rx2rw:
+ e->p2m.write = 0;
+ break;
+ case p2m_access_x:
+ e->p2m.write = 0;
+ e->p2m.read = 0;
+ break;
+ case p2m_access_r:
+ e->p2m.write = 0;
+ e->p2m.xn = 1;
+ break;
+ case p2m_access_n:
+ case p2m_access_n2rwx:
+ e->p2m.read = e->p2m.write = 0;
+ e->p2m.xn = 1;
+ break;
+ }
+}
+
static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
p2m_type_t t)
{
@@ -258,37 +328,8 @@ static lpae_t mfn_to_p2m_entry(unsigned long mfn, unsigned int mattr,
break;
}
- switch (t)
- {
- case p2m_ram_rw:
- e.p2m.xn = 0;
- e.p2m.write = 1;
- break;
-
- case p2m_ram_ro:
- e.p2m.xn = 0;
- e.p2m.write = 0;
- break;
-
- case p2m_iommu_map_rw:
- case p2m_map_foreign:
- case p2m_grant_map_rw:
- case p2m_mmio_direct:
- e.p2m.xn = 1;
- e.p2m.write = 1;
- break;
-
- case p2m_iommu_map_ro:
- case p2m_grant_map_ro:
- case p2m_invalid:
- e.p2m.xn = 1;
- e.p2m.write = 0;
- break;
-
- case p2m_max_real_type:
- BUG();
- break;
- }
+ /* We pass p2m_access_rwx as a placeholder for now. */
+ p2m_set_permission(&e, t, p2m_access_rwx);
ASSERT(!(pa & ~PAGE_MASK));
ASSERT(!(pa & ~PADDR_MASK));
@@ -451,6 +492,26 @@ static const paddr_t level_masks[] =
static const paddr_t level_shifts[] =
{ ZEROETH_SHIFT, FIRST_SHIFT, SECOND_SHIFT, THIRD_SHIFT };
+static int p2m_shatter_page(struct domain *d,
+ lpae_t *entry,
+ unsigned int level,
+ bool_t flush_cache)
+{
+ const paddr_t level_shift = level_shifts[level];
+ int rc = p2m_create_table(d, entry,
+ level_shift - PAGE_SHIFT, flush_cache);
+
+ if ( !rc )
+ {
+ struct p2m_domain *p2m = &d->arch.p2m;
+ p2m->stats.shattered[level]++;
+ p2m->stats.mappings[level]--;
+ p2m->stats.mappings[level+1] += LPAE_ENTRIES;
+ }
+
+ return rc;
+}
+
/*
* 0 == (P2M_ONE_DESCEND) continue to descend the tree
* +ve == (P2M_ONE_PROGRESS_*) handled at this level, continue, flush,
@@ -582,14 +643,9 @@ static int apply_one_level(struct domain *d,
if ( p2m_mapping(orig_pte) )
{
*flush = true;
- rc = p2m_create_table(d, entry,
- level_shift - PAGE_SHIFT, flush_cache);
+ rc = p2m_shatter_page(d, entry, level, flush_cache);
if ( rc < 0 )
return rc;
-
- p2m->stats.shattered[level]++;
- p2m->stats.mappings[level]--;
- p2m->stats.mappings[level+1] += LPAE_ENTRIES;
} /* else: an existing table mapping -> descend */
BUG_ON(!p2m_table(*entry));
@@ -624,15 +680,10 @@ static int apply_one_level(struct domain *d,
* and descend.
*/
*flush = true;
- rc = p2m_create_table(d, entry,
- level_shift - PAGE_SHIFT, flush_cache);
+ rc = p2m_shatter_page(d, entry, level, flush_cache);
if ( rc < 0 )
return rc;
- p2m->stats.shattered[level]++;
- p2m->stats.mappings[level]--;
- p2m->stats.mappings[level+1] += LPAE_ENTRIES;
-
return P2M_ONE_DESCEND;
}
}
diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
index c43c776a08..d2dcc3a71f 100644
--- a/xen/arch/arm/setup.c
+++ b/xen/arch/arm/setup.c
@@ -127,8 +127,9 @@ static void __init processor_id(void)
printk("32-bit Execution:\n");
printk(" Processor Features: %08"PRIx32":%08"PRIx32"\n",
boot_cpu_data.pfr32.bits[0], boot_cpu_data.pfr32.bits[1]);
- printk(" Instruction Sets:%s%s%s%s%s\n",
+ printk(" Instruction Sets:%s%s%s%s%s%s\n",
cpu_has_aarch32 ? " AArch32" : "",
+ cpu_has_arm ? " A32" : "",
cpu_has_thumb ? " Thumb" : "",
cpu_has_thumb2 ? " Thumb-2" : "",
cpu_has_thumbee ? " ThumbEE" : "",
@@ -852,8 +853,11 @@ void arch_get_xen_caps(xen_capabilities_info_t *info)
snprintf(s, sizeof(s), "xen-%d.%d-aarch64 ", major, minor);
safe_strcat(*info, s);
#endif
- snprintf(s, sizeof(s), "xen-%d.%d-armv7l ", major, minor);
- safe_strcat(*info, s);
+ if ( cpu_has_aarch32 )
+ {
+ snprintf(s, sizeof(s), "xen-%d.%d-armv7l ", major, minor);
+ safe_strcat(*info, s);
+ }
}
/*
diff --git a/xen/arch/arm/vgic-v3.c b/xen/arch/arm/vgic-v3.c
index ac8cf078e4..ff99e5092c 100644
--- a/xen/arch/arm/vgic-v3.c
+++ b/xen/arch/arm/vgic-v3.c
@@ -45,10 +45,42 @@
#define GICV3_GICR_PIDR2 GICV3_GICD_PIDR2
#define GICV3_GICR_PIDR4 GICV3_GICD_PIDR4
+static struct vcpu *vgic_v3_irouter_to_vcpu(struct vcpu *v, uint64_t irouter)
+{
+ irouter &= ~(GICD_IROUTER_SPI_MODE_ANY);
+ irouter = irouter & MPIDR_AFF0_MASK;
+
+ return v->domain->vcpu[irouter];
+}
+
+static uint64_t vgic_v3_vcpu_to_irouter(struct vcpu *v,
+ unsigned int vcpu_id)
+{
+ uint64_t irq_affinity;
+ struct vcpu *v_target;
+
+ v_target = v->domain->vcpu[vcpu_id];
+ irq_affinity = (MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 3) << 32 |
+ MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 2) << 16 |
+ MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 1) << 8 |
+ MPIDR_AFFINITY_LEVEL(v_target->arch.vmpidr, 0));
+
+ return irq_affinity;
+}
+
static struct vcpu *vgic_v3_get_target_vcpu(struct vcpu *v, unsigned int irq)
{
- /* TODO: Return vcpu0 always */
- return v->domain->vcpu[0];
+ uint64_t target;
+ struct vgic_irq_rank *rank = vgic_rank_irq(v, irq);
+
+ ASSERT(spin_is_locked(&rank->lock));
+
+ target = rank->v3.irouter[irq % 32];
+ target &= ~(GICD_IROUTER_SPI_MODE_ANY);
+ target &= MPIDR_AFF0_MASK;
+ ASSERT(target >= 0 && target < v->domain->max_vcpus);
+
+ return v->domain->vcpu[target];
}
static int __vgic_v3_rdistr_rd_mmio_read(struct vcpu *v, mmio_info_t *info,
@@ -353,9 +385,9 @@ static int __vgic_v3_distr_common_mmio_write(struct vcpu *v, mmio_info_t *info,
vgic_lock_rank(v, rank, flags);
tr = rank->ienable;
rank->ienable |= *r;
- vgic_unlock_rank(v, rank, flags);
/* The IRQ number is extracted from the offset, so shift by the register size */
vgic_enable_irqs(v, (*r) & (~tr), (reg - GICD_ISENABLER) >> DABT_WORD);
+ vgic_unlock_rank(v, rank, flags);
return 1;
case GICD_ICENABLER ... GICD_ICENABLERN:
if ( dabt.size != DABT_WORD ) goto bad_width;
@@ -364,9 +396,9 @@ static int __vgic_v3_distr_common_mmio_write(struct vcpu *v, mmio_info_t *info,
vgic_lock_rank(v, rank, flags);
tr = rank->ienable;
rank->ienable &= ~*r;
- vgic_unlock_rank(v, rank, flags);
/* The IRQ number is extracted from the offset, so shift by the register size */
vgic_disable_irqs(v, (*r) & tr, (reg - GICD_ICENABLER) >> DABT_WORD);
+ vgic_unlock_rank(v, rank, flags);
return 1;
case GICD_ISPENDR ... GICD_ISPENDRN:
if ( dabt.size != DABT_WORD ) goto bad_width;
@@ -620,6 +652,8 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
register_t *r = select_user_reg(regs, dabt.reg);
struct vgic_irq_rank *rank;
unsigned long flags;
+ uint64_t irouter;
+ unsigned int vcpu_id;
int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);
switch ( gicd_reg )
@@ -672,8 +706,17 @@ static int vgic_v3_distr_mmio_read(struct vcpu *v, mmio_info_t *info)
DABT_DOUBLE_WORD);
if ( rank == NULL ) goto read_as_zero;
vgic_lock_rank(v, rank, flags);
- *r = rank->v3.irouter[REG_RANK_INDEX(64,
- (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)];
+ irouter = rank->v3.irouter[REG_RANK_INDEX(64,
+ (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)];
+ /* XXX: bit[31] stores the IRQ routing mode (any); just return the mode bit. */
+ if ( irouter & GICD_IROUTER_SPI_MODE_ANY )
+ {
+ *r = GICD_IROUTER_SPI_MODE_ANY;
+ vgic_unlock_rank(v, rank, flags);
+ return 1;
+ }
+ vcpu_id = irouter;
+ *r = vgic_v3_vcpu_to_irouter(v, vcpu_id);
vgic_unlock_rank(v, rank, flags);
return 1;
case GICD_NSACR ... GICD_NSACRN:
@@ -754,6 +797,8 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
register_t *r = select_user_reg(regs, dabt.reg);
struct vgic_irq_rank *rank;
unsigned long flags;
+ uint64_t new_irouter, new_target, old_target;
+ struct vcpu *old_vcpu, *new_vcpu;
int gicd_reg = (int)(info->gpa - v->domain->arch.vgic.dbase);
switch ( gicd_reg )
@@ -810,16 +855,43 @@ static int vgic_v3_distr_mmio_write(struct vcpu *v, mmio_info_t *info)
rank = vgic_rank_offset(v, 64, gicd_reg - GICD_IROUTER,
DABT_DOUBLE_WORD);
if ( rank == NULL ) goto write_ignore_64;
- if ( *r )
+ BUG_ON(v->domain->max_vcpus > 8);
+ new_irouter = *r;
+ vgic_lock_rank(v, rank, flags);
+
+ old_target = rank->v3.irouter[REG_RANK_INDEX(64,
+ (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)];
+ old_target &= ~(GICD_IROUTER_SPI_MODE_ANY);
+ if ( new_irouter & GICD_IROUTER_SPI_MODE_ANY )
{
- /* TODO: Ignored. We don't support irq delivery for vcpu != 0 */
- gdprintk(XENLOG_DEBUG,
- "SPI delivery to secondary cpus not supported\n");
- goto write_ignore_64;
+ /*
+ * IRQ routing mode is set: route to any one processor in the
+ * system. We choose vcpu 0 and set the IRQ mode bit[31] in irouter.
+ */
+ new_target = 0;
+ new_vcpu = v->domain->vcpu[0];
+ new_irouter = GICD_IROUTER_SPI_MODE_ANY;
+ }
+ else
+ {
+ new_target = new_irouter & MPIDR_AFF0_MASK;
+ if ( new_target >= v->domain->max_vcpus )
+ {
+ printk("vGICv3: vGICD: wrong irouter at offset %#08x\n val 0x%lx vcpu %x",
+ gicd_reg, new_target, v->domain->max_vcpus);
+ vgic_unlock_rank(v, rank, flags);
+ return 0;
+ }
+ new_vcpu = vgic_v3_irouter_to_vcpu(v, new_irouter);
+ }
+
+ rank->v3.irouter[REG_RANK_INDEX(64, (gicd_reg - GICD_IROUTER),
+ DABT_DOUBLE_WORD)] = new_irouter;
+ if ( old_target != new_target )
+ {
+ old_vcpu = v->domain->vcpu[old_target];
+ vgic_migrate_irq(old_vcpu, new_vcpu, (gicd_reg - GICD_IROUTER)/8);
}
- vgic_lock_rank(v, rank, flags);
- rank->v3.irouter[REG_RANK_INDEX(64,
- (gicd_reg - GICD_IROUTER), DABT_DOUBLE_WORD)] = *r;
vgic_unlock_rank(v, rank, flags);
return 1;
case GICD_NSACR ... GICD_NSACRN:
@@ -965,8 +1037,14 @@ static int vgic_v3_vcpu_init(struct vcpu *v)
static int vgic_v3_domain_init(struct domain *d)
{
- int i;
+ int i, idx;
+ /* By default deliver to CPU0 */
+ for ( i = 0; i < DOMAIN_NR_RANKS(d); i++ )
+ {
+ for ( idx = 0; idx < 32; idx++ )
+ d->arch.vgic.shared_irqs[i].v3.irouter[idx] = 0;
+ }
/* We rely on gicv init to get dbase and size */
register_mmio_handler(d, &vgic_distr_mmio_handler, d->arch.vgic.dbase,
d->arch.vgic.dbase_size);
diff --git a/xen/include/asm-arm/cpufeature.h b/xen/include/asm-arm/cpufeature.h
index 7a6d3de37d..7b519cddc6 100644
--- a/xen/include/asm-arm/cpufeature.h
+++ b/xen/include/asm-arm/cpufeature.h
@@ -21,11 +21,12 @@
#define cpu_feature32(c, feat) ((c)->pfr32.feat)
#define boot_cpu_feature32(feat) (boot_cpu_data.pfr32.feat)
-#define cpu_has_aarch32 (boot_cpu_feature32(arm) == 1)
+#define cpu_has_arm (boot_cpu_feature32(arm) == 1)
#define cpu_has_thumb (boot_cpu_feature32(thumb) >= 1)
#define cpu_has_thumb2 (boot_cpu_feature32(thumb) >= 3)
#define cpu_has_jazelle (boot_cpu_feature32(jazelle) >= 0)
#define cpu_has_thumbee (boot_cpu_feature32(thumbee) == 1)
+#define cpu_has_aarch32 (cpu_has_arm || cpu_has_thumb)
#ifdef CONFIG_ARM_32
#define cpu_has_gentimer (boot_cpu_feature32(gentimer) == 1)