diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index f9f47c91ee034404ea1a1e38837e9129a15d2e00..623160d51e5498d77eff8380e41f252422616b3c 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -384,33 +384,43 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
 		struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
 
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
-		if (test_bit(i, &val)) {
-			/*
-			 * pending_latch is set irrespective of irq type
-			 * (level or edge) to avoid dependency that VM should
-			 * restore irq config before pending info.
-			 */
-			irq->pending_latch = true;
-			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
-		} else {
+
+		/*
+		 * pending_latch is set irrespective of irq type
+		 * (level or edge) to avoid dependency that VM should
+		 * restore irq config before pending info.
+		 */
+		irq->pending_latch = test_bit(i, &val);
+
+		if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+			irq_set_irqchip_state(irq->host_irq,
+					      IRQCHIP_STATE_PENDING,
+					      irq->pending_latch);
+			irq->pending_latch = false;
+		}
+
 #ifdef CONFIG_VIRT_VTIMER_IRQ_BYPASS
-			/**
-			 * workaround: On reset, userspace clears pending status
-			 * for all PPIs and SGIs by writing all 0's to
-			 * GICR_ISPENDR0. The pending state of vtimer interrupt
-			 * is somehow staying in redistributor and we have to
-			 * explicitly clear it...
-			 *
-			 * P.S., irq->vtimer_info is NULL on restore.
-			 */
-			if (irq->vtimer_info)
-				WARN_ON_ONCE(irq_set_irqchip_state(irq->host_irq,
-							IRQCHIP_STATE_PENDING,
-							false));
-#endif
+		/*
+		 * workaround: On reset, userspace clears pending status
+		 * for all PPIs and SGIs by writing all 0's to
+		 * GICR_ISPENDR0. The pending state of vtimer interrupt
+		 * is somehow staying in redistributor and we have to
+		 * explicitly clear it...
+		 *
+		 * P.S., irq->vtimer_info is NULL on restore.
+		 */
+		if (irq->vtimer_info) {
+			WARN_ON_ONCE(irq_set_irqchip_state(irq->host_irq,
+							   IRQCHIP_STATE_PENDING,
+							   irq->pending_latch));
 			irq->pending_latch = false;
-			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 		}
+#endif
+
+		if (irq->pending_latch)
+			vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+		else
+			raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 
 		vgic_put_irq(vcpu->kvm, irq);
 	}
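
For reference, below is a small user-space model of the control flow this hunk produces. It is a sketch only, not kernel code: the type and helper names (model_irq, restore_pending, hw_set_pending, is_sgi, has_vtimer) and the printf output are hypothetical stand-ins for vgic_irq, irq_set_irqchip_state(), vgic_queue_irq_unlock() and irq->vtimer_info, and the irq_lock handling and the CONFIG_VIRT_VTIMER_IRQ_BYPASS guard are omitted. Only the ordering of the decisions mirrors the patch: pending_latch is always written first, HW-backed interrupts (vSGIs and the bypassed vtimer) then consume the latch by forwarding or clearing the state in hardware, and a single queue-or-unlock decision remains at the end.

/* Stand-alone model of the restored-pending decision flow (not kernel code). */
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the vgic structures and helpers. */
struct model_irq {
	unsigned int intid;
	bool hw;            /* backed by a host interrupt, e.g. a GICv4 vSGI   */
	bool has_vtimer;    /* stands in for irq->vtimer_info != NULL          */
	bool pending_latch;
};

static bool is_sgi(unsigned int intid) { return intid < 16; }

/* Models irq_set_irqchip_state(..., IRQCHIP_STATE_PENDING, val). */
static void hw_set_pending(struct model_irq *irq, bool val)
{
	printf("INTID %u: forward pending=%d to hardware\n", irq->intid, val);
}

/* Mirrors the post-patch flow: latch first, let HW-backed IRQs consume the
 * latch, then queue only what is still soft-pending. */
static void restore_pending(struct model_irq *irq, bool pending)
{
	irq->pending_latch = pending;

	if (irq->hw && is_sgi(irq->intid)) {
		hw_set_pending(irq, irq->pending_latch);
		irq->pending_latch = false;
	}

	if (irq->has_vtimer) {
		hw_set_pending(irq, irq->pending_latch);
		irq->pending_latch = false;
	}

	if (irq->pending_latch)
		printf("INTID %u: queue on the virtual CPU interface\n", irq->intid);
	else
		printf("INTID %u: nothing to queue\n", irq->intid);
}

int main(void)
{
	struct model_irq vsgi   = { .intid = 3,  .hw = true };
	struct model_irq vtimer = { .intid = 27, .has_vtimer = true };
	struct model_irq ppi    = { .intid = 20 };

	restore_pending(&vsgi, true);    /* forwarded to HW, not queued */
	restore_pending(&vtimer, false); /* explicitly cleared in HW    */
	restore_pending(&ppi, true);     /* queued via pending_latch    */
	return 0;
}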