[PATCH 16/19] KVM: PPC: Book3S HV: add get/set accessors for the EQ configuration
Cédric Le Goater
clg at kaod.org
Wed Feb 6 04:45:04 AEDT 2019
On 2/4/19 6:24 AM, David Gibson wrote:
> On Mon, Jan 07, 2019 at 07:43:28PM +0100, Cédric Le Goater wrote:
>> These are used to capture the XIVE END table of the KVM device. They
>> rely on an OPAL call to retrieve from the XIVE interrupt controller
>> the EQ toggle bit and index, which are updated by the HW when events
>> are enqueued in the guest RAM.
>>
>> Signed-off-by: Cédric Le Goater <clg at kaod.org>
>> ---
>> arch/powerpc/include/uapi/asm/kvm.h | 21 ++++
>> arch/powerpc/kvm/book3s_xive_native.c | 166 ++++++++++++++++++++++++++
>> 2 files changed, 187 insertions(+)
>>
>> diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
>> index faf024f39858..95302558ce10 100644
>> --- a/arch/powerpc/include/uapi/asm/kvm.h
>> +++ b/arch/powerpc/include/uapi/asm/kvm.h
>> @@ -684,6 +684,7 @@ struct kvm_ppc_cpu_char {
>> #define KVM_DEV_XIVE_GRP_SOURCES 2 /* 64-bit source attributes */
>> #define KVM_DEV_XIVE_GRP_SYNC 3 /* 64-bit source attributes */
>> #define KVM_DEV_XIVE_GRP_EAS 4 /* 64-bit eas attributes */
>> +#define KVM_DEV_XIVE_GRP_EQ 5 /* 64-bit eq attributes */
>>
>> /* Layout of 64-bit XIVE source attribute values */
>> #define KVM_XIVE_LEVEL_SENSITIVE (1ULL << 0)
>> @@ -699,4 +700,24 @@ struct kvm_ppc_cpu_char {
>> #define KVM_XIVE_EAS_EISN_SHIFT 33
>> #define KVM_XIVE_EAS_EISN_MASK 0xfffffffe00000000ULL
>>
>> +/* Layout of 64-bit eq attribute */
>> +#define KVM_XIVE_EQ_PRIORITY_SHIFT 0
>> +#define KVM_XIVE_EQ_PRIORITY_MASK 0x7
>> +#define KVM_XIVE_EQ_SERVER_SHIFT 3
>> +#define KVM_XIVE_EQ_SERVER_MASK 0xfffffff8ULL
>> +
>> +/* Layout of 64-bit eq attribute values */
>> +struct kvm_ppc_xive_eq {
>> + __u32 flags;
>> + __u32 qsize;
>> + __u64 qpage;
>> + __u32 qtoggle;
>> + __u32 qindex;
>
> Should we pad this in case a) we discover some fields in the EQ that
> we thought weren't relevant to the guest actually are, or b) future
> XIVE extensions add something we need to migrate?
The underlying XIVE END structure is 32 bytes. I will double the
size.
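
Something like the sketch below, sized to twice the 32-byte HW END so
that fields we missed, or ones added by future XIVE extensions, can
still be migrated (the exact pad size is illustrative, not a final
layout):

    struct kvm_ppc_xive_eq {
            __u32 flags;
            __u32 qsize;
            __u64 qpage;
            __u32 qtoggle;
            __u32 qindex;
            __u8  pad[40];  /* reserved for future use, should be zero */
    };
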
Thanks,
C.
>
>> +};
>> +
>> +#define KVM_XIVE_EQ_FLAG_ENABLED 0x00000001
>> +#define KVM_XIVE_EQ_FLAG_ALWAYS_NOTIFY 0x00000002
>> +#define KVM_XIVE_EQ_FLAG_ESCALATE 0x00000004
>> +
>> +
>> #endif /* __LINUX_KVM_POWERPC_H */
>> diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
>> index 0468b605baa7..f4eb71eafc57 100644
>> --- a/arch/powerpc/kvm/book3s_xive_native.c
>> +++ b/arch/powerpc/kvm/book3s_xive_native.c
>> @@ -607,6 +607,164 @@ static int kvmppc_xive_native_get_eas(struct kvmppc_xive *xive, long irq,
>> return 0;
>> }
>>
>> +static int kvmppc_xive_native_set_queue(struct kvmppc_xive *xive, long eq_idx,
>> + u64 addr)
>> +{
>> + struct kvm *kvm = xive->kvm;
>> + struct kvm_vcpu *vcpu;
>> + struct kvmppc_xive_vcpu *xc;
>> + void __user *ubufp = (void __user *) addr;
>> + u32 server;
>> + u8 priority;
>> + struct kvm_ppc_xive_eq kvm_eq;
>> + int rc;
>> + void *qaddr = NULL;
>> + struct page *page;
>> + struct xive_q *q;
>> +
>> + /*
>> + * Demangle priority/server tuple from the EQ index
>> + */
>> + priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
>> + KVM_XIVE_EQ_PRIORITY_SHIFT;
>> + server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
>> + KVM_XIVE_EQ_SERVER_SHIFT;
>> +
>> + if (copy_from_user(&kvm_eq, ubufp, sizeof(kvm_eq)))
>> + return -EFAULT;
>> +
>> + vcpu = kvmppc_xive_find_server(kvm, server);
>> + if (!vcpu) {
>> + pr_err("Can't find server %d\n", server);
>> + return -ENOENT;
>> + }
>> + xc = vcpu->arch.xive_vcpu;
>> +
>> + if (priority != xive_prio_from_guest(priority)) {
>> + pr_err("Trying to restore invalid queue %d for VCPU %d\n",
>> + priority, server);
>> + return -EINVAL;
>> + }
>> + q = &xc->queues[priority];
>> +
>> + pr_devel("%s VCPU %d priority %d fl:%x sz:%d addr:%llx g:%d idx:%d\n",
>> + __func__, server, priority, kvm_eq.flags,
>> + kvm_eq.qsize, kvm_eq.qpage, kvm_eq.qtoggle, kvm_eq.qindex);
>> +
>> + rc = xive_native_validate_queue_size(kvm_eq.qsize);
>> + if (rc || !kvm_eq.qsize) {
>> + pr_err("invalid queue size %d\n", kvm_eq.qsize);
>> + return rc ? rc : -EINVAL;
>> + }
>> +
>> + page = gfn_to_page(kvm, gpa_to_gfn(kvm_eq.qpage));
>> + if (is_error_page(page)) {
>> + pr_warn("Couldn't get guest page for %llx!\n", kvm_eq.qpage);
>> + return -ENOMEM;
>> + }
>> + qaddr = page_to_virt(page) + (kvm_eq.qpage & ~PAGE_MASK);
>> +
>> + /* Backup queue page guest address for migration */
>> + q->guest_qpage = kvm_eq.qpage;
>> + q->guest_qsize = kvm_eq.qsize;
>> +
>> + rc = xive_native_configure_queue(xc->vp_id, q, priority,
>> + (__be32 *) qaddr, kvm_eq.qsize, true);
>> + if (rc) {
>> + pr_err("Failed to configure queue %d for VCPU %d: %d\n",
>> + priority, xc->server_num, rc);
>> + put_page(page);
>> + return rc;
>> + }
>> +
>> + rc = xive_native_set_queue_state(xc->vp_id, priority, kvm_eq.qtoggle,
>> + kvm_eq.qindex);
>> + if (rc)
>> + goto error;
>> +
>> + rc = kvmppc_xive_attach_escalation(vcpu, priority);
>> +error:
>> + if (rc)
>> + xive_native_cleanup_queue(vcpu, priority);
>> + return rc;
>> +}
>> +
>> +static int kvmppc_xive_native_get_queue(struct kvmppc_xive *xive, long eq_idx,
>> + u64 addr)
>> +{
>> + struct kvm *kvm = xive->kvm;
>> + struct kvm_vcpu *vcpu;
>> + struct kvmppc_xive_vcpu *xc;
>> + struct xive_q *q;
>> + void __user *ubufp = (void __user *) addr;
>> + u32 server;
>> + u8 priority;
>> + struct kvm_ppc_xive_eq kvm_eq;
>> + u64 qpage;
>> + u64 qsize;
>> + u64 qeoi_page;
>> + u32 escalate_irq;
>> + u64 qflags;
>> + int rc;
>> +
>> + /*
>> + * Demangle priority/server tuple from the EQ index
>> + */
>> + priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
>> + KVM_XIVE_EQ_PRIORITY_SHIFT;
>> + server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
>> + KVM_XIVE_EQ_SERVER_SHIFT;
>> +
>> + vcpu = kvmppc_xive_find_server(kvm, server);
>> + if (!vcpu) {
>> + pr_err("Can't find server %d\n", server);
>> + return -ENOENT;
>> + }
>> + xc = vcpu->arch.xive_vcpu;
>> +
>> + if (priority != xive_prio_from_guest(priority)) {
>> + pr_err("invalid priority for queue %d for VCPU %d\n",
>> + priority, server);
>> + return -EINVAL;
>> + }
>> + q = &xc->queues[priority];
>> +
>> + memset(&kvm_eq, 0, sizeof(kvm_eq));
>> +
>> + if (!q->qpage)
>> + return 0;
>> +
>> + rc = xive_native_get_queue_info(xc->vp_id, priority, &qpage, &qsize,
>> + &qeoi_page, &escalate_irq, &qflags);
>> + if (rc)
>> + return rc;
>> +
>> + kvm_eq.flags = 0;
>> + if (qflags & OPAL_XIVE_EQ_ENABLED)
>> + kvm_eq.flags |= KVM_XIVE_EQ_FLAG_ENABLED;
>> + if (qflags & OPAL_XIVE_EQ_ALWAYS_NOTIFY)
>> + kvm_eq.flags |= KVM_XIVE_EQ_FLAG_ALWAYS_NOTIFY;
>> + if (qflags & OPAL_XIVE_EQ_ESCALATE)
>> + kvm_eq.flags |= KVM_XIVE_EQ_FLAG_ESCALATE;
>> +
>> + kvm_eq.qsize = q->guest_qsize;
>> + kvm_eq.qpage = q->guest_qpage;
>> +
>> + rc = xive_native_get_queue_state(xc->vp_id, priority, &kvm_eq.qtoggle,
>> + &kvm_eq.qindex);
>> + if (rc)
>> + return rc;
>> +
>> + pr_devel("%s VCPU %d priority %d fl:%x sz:%d addr:%llx g:%d idx:%d\n",
>> + __func__, server, priority, kvm_eq.flags,
>> + kvm_eq.qsize, kvm_eq.qpage, kvm_eq.qtoggle, kvm_eq.qindex);
>> +
>> + if (copy_to_user(ubufp, &kvm_eq, sizeof(kvm_eq)))
>> + return -EFAULT;
>> +
>> + return 0;
>> +}
>> +
>> static int kvmppc_xive_native_set_attr(struct kvm_device *dev,
>> struct kvm_device_attr *attr)
>> {
>> @@ -628,6 +786,9 @@ static int kvmppc_xive_native_set_attr(struct kvm_device *dev,
>> return kvmppc_xive_native_sync(xive, attr->attr, attr->addr);
>> case KVM_DEV_XIVE_GRP_EAS:
>> return kvmppc_xive_native_set_eas(xive, attr->attr, attr->addr);
>> + case KVM_DEV_XIVE_GRP_EQ:
>> + return kvmppc_xive_native_set_queue(xive, attr->attr,
>> + attr->addr);
>> }
>> return -ENXIO;
>> }
>> @@ -650,6 +811,9 @@ static int kvmppc_xive_native_get_attr(struct kvm_device *dev,
>> break;
>> case KVM_DEV_XIVE_GRP_EAS:
>> return kvmppc_xive_native_get_eas(xive, attr->attr, attr->addr);
>> + case KVM_DEV_XIVE_GRP_EQ:
>> + return kvmppc_xive_native_get_queue(xive, attr->attr,
>> + attr->addr);
>> }
>> return -ENXIO;
>> }
>> @@ -674,6 +838,8 @@ static int kvmppc_xive_native_has_attr(struct kvm_device *dev,
>> attr->attr < KVMPPC_XIVE_NR_IRQS)
>> return 0;
>> break;
>> + case KVM_DEV_XIVE_GRP_EQ:
>> + return 0;
>> }
>> return -ENXIO;
>> }
>
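
For reference, this is roughly how userspace would drive the new
accessors. It is only a sketch, not QEMU code: the helper name and the
fd handling are made up, only the attribute encoding mirrors the patch:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>
    #include <asm/kvm.h>

    /* Capture the EQ state of one (server, priority) tuple through the
     * KVM device attribute interface. 'xive_fd' is the fd returned by
     * KVM_CREATE_DEVICE for the XIVE native device.
     */
    static int xive_native_get_queue(int xive_fd, uint32_t server,
                                     uint8_t priority,
                                     struct kvm_ppc_xive_eq *eq)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_DEV_XIVE_GRP_EQ,
                    /* Mangle the tuple into the EQ index, the mirror
                     * of the demangling done on the KVM side. */
                    .attr  = ((uint64_t) server << KVM_XIVE_EQ_SERVER_SHIFT) |
                             (priority << KVM_XIVE_EQ_PRIORITY_SHIFT),
                    .addr  = (uint64_t) (uintptr_t) eq,
            };

            return ioctl(xive_fd, KVM_GET_DEVICE_ATTR, &attr);
    }

Restore is symmetric: fill the struct from the migration stream and use
KVM_SET_DEVICE_ATTR instead.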