lguest: experiments in fast_eoi

handle_level_irq does the following:
	set_bit(irq, lguest_data.blocked_interrupts);
	enable interrupts
	call irq handler
	disable interrupts
	clear_bit(irq, lguest_data.blocked_interrupts);
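
For reference, a rough C sketch of how those steps fall out of the current
lguest irq_chip (a sketch only, not the real handle_level_irq; it assumes
this kernel's declarations from linux/irq.h, and the point is just that
mask_ack and unmask are disable_lguest_irq() and enable_lguest_irq()):

	#include <linux/irq.h>

	/* Sketch only: the lguest-relevant steps of the level flow. */
	static void level_flow_sketch(unsigned int irq, struct irq_desc *desc)
	{
		desc->chip->mask_ack(irq);	/* disable_lguest_irq(): set_bit */
		/* interrupts are enabled around the handler, as listed above */
		handle_IRQ_event(irq, desc->action);
		desc->chip->unmask(irq);	/* enable_lguest_irq(): clear_bit */
	}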

If we set the blocked_interrupts bit in the Host when the interrupt
is delivered, we can instead use handle_fasteoi_irq, which does:

	enable interrupts
	call irq handler
	disable interrupts
	clear_bit(irq, lguest_data.blocked_interrupts);
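
And the corresponding sketch for the fasteoi case (again a sketch, not the
real handle_fasteoi_irq): the Host has already set the bit at delivery time,
and the new .eoi hook (lg_eoi() in the patch below) does the clearing:

	/* Sketch only: the lguest-relevant steps of the fasteoi flow. */
	static void fasteoi_flow_sketch(unsigned int irq, struct irq_desc *desc)
	{
		/* blocked_interrupts bit was already set by the Host on delivery */
		/* interrupts are enabled around the handler, as above */
		handle_IRQ_event(irq, desc->action);
		desc->chip->eoi(irq);		/* lg_eoi(): clear_bit */
	}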

The main difference is that the Host can now see more of the interrupt
path: the blocked_interrupts bit stays set from delivery until the Guest's
eoi, so only the short tail after we clear blocked_interrupts is invisible
to it.
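
To illustrate what that buys us, here is a hypothetical Host-side helper
(not part of this patch, and the name is made up) showing how the Host
could tell that the Guest is still inside a handler, by reading the same
word that mark_interrupt_blocked() below writes; it assumes the
declarations from drivers/lguest/lg.h:

	/* Hypothetical: true if the Guest is between delivery of irq and its eoi. */
	static bool guest_handling_irq(struct lg_cpu *cpu, unsigned int irq)
	{
		unsigned long word = 0;
		unsigned long __user *uword;

		uword = cpu->lg->lguest_data->blocked_interrupts + irq/BITS_PER_LONG;
		if (get_user(word, uword))
			return false;
		return word & (1UL << (irq % BITS_PER_LONG));
	}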

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
---
 arch/x86/lguest/boot.c                |   13 +++++++++++--
 drivers/lguest/interrupts_and_traps.c |   20 ++++++++++++++++++++
 2 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -613,15 +613,23 @@ static void disable_lguest_irq(unsigned 
 
 static void enable_lguest_irq(unsigned int irq)
 {
+	/* FIXME: This can race with hypervisor, must really be SMP
+	 * op, even on UP. */
 	clear_bit(irq, lguest_data.blocked_interrupts);
 }
 
+static void lg_eoi(unsigned int irq)
+{
+	BUG_ON(!test_bit(irq, lguest_data.blocked_interrupts));
+	enable_lguest_irq(irq);
+}
+
 /* This structure describes the lguest IRQ controller. */
 static struct irq_chip lguest_irq_controller = {
 	.name		= "lguest",
 	.mask		= disable_lguest_irq,
-	.mask_ack	= disable_lguest_irq,
 	.unmask		= enable_lguest_irq,
+	.eoi		= lg_eoi,
 };
 
 /* This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
@@ -649,7 +657,7 @@ void lguest_setup_irq(unsigned int irq)
 {
 	irq_to_desc_alloc_cpu(irq, 0);
 	set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
-				      handle_level_irq, "level");
+				      handle_fasteoi_irq, "virtio");
 }
 
 /*
@@ -773,6 +781,7 @@ static void lguest_time_irq(unsigned int
 	local_irq_save(flags);
 	lguest_clockevent.event_handler(&lguest_clockevent);
 	local_irq_restore(flags);
+	lg_eoi(0);
 }
 
 /* At some point in the boot process, we get asked to set up our timing
diff --git a/drivers/lguest/interrupts_and_traps.c b/drivers/lguest/interrupts_and_traps.c
--- a/drivers/lguest/interrupts_and_traps.c
+++ b/drivers/lguest/interrupts_and_traps.c
@@ -48,6 +48,23 @@ static void push_guest_stack(struct lg_c
 	lgwrite(cpu, *gstack, u32, val);
 }
 
+/* They're not running now, so fortunately this doesn't need to be atomic. */
+static void mark_interrupt_blocked(struct lg_cpu *cpu, unsigned int irq)
+{
+	unsigned long word;
+	unsigned long __user *uword;
+
+	/* Only need to transfer one word in and out. */
+	uword = cpu->lg->lguest_data->blocked_interrupts + irq/BITS_PER_LONG;
+
+	get_user(word, uword);
+	/* Not quite a BUG_ON: Launcher could play with Guest mem. */
+	if (word & (1 << (irq % BITS_PER_LONG)))
+		kill_guest(cpu, "Interrupt %u already blocked?", irq);
+	word |= (1 << (irq % BITS_PER_LONG));
+	put_user(word, uword);
+}
+
 /*H:210 The set_guest_interrupt() routine actually delivers the interrupt or
  * trap.  The mechanics of delivering traps and interrupts to the Guest are the
  * same, except some traps have an "error code" which gets pushed onto the
@@ -191,6 +208,9 @@ void try_deliver_interrupt(struct lg_cpu
 	if (idt_present(idt->a, idt->b)) {
 		/* OK, mark it no longer pending and deliver it. */
 		clear_bit(irq, cpu->irqs_pending);
+		/* Mark the interrupt as blocked: they unset this when they're
+		 * finished with the interrupt. */
+		mark_interrupt_blocked(cpu, irq);
 		/* set_guest_interrupt() takes the interrupt descriptor and a
 		 * flag to say whether this interrupt pushes an error code onto
 		 * the stack as well: virtual interrupts never do. */
