F_St"^n	>*Un!6_5ǅc[`Dbethߑ<d;{\AVbmo\;"W$Ch@h.3m.1UZ=o~FzgPCl<4-L=7dkc[PWʜYO}E<,lN<ܠ[PLx ;!z>]2.K*FA	4:p?+E
O9u !X1ʍT٬>l)
^ǵ@KNKsAaې?p,85{uL׈qYMZ+P56V+&-ʦr?(b=V8Rtxg❥[Du"nl&.9ò^øI79K3xRczj	pX
%X&n4(,/U4_[ h׶х~I>RG;%kp	]jƝNS '15m&q>Ģb6nI!bRjq
bLCߪ]ac7-b!p2[iΛ˪/_L#	51nH/q]_( -S-Ze\Rϴ44_~n>?^qB$<O:y
4jqpuVaʩ
pNbԊSN@r^bAAEa4;_6ݑOidu'V1Ģ9`l]閱11<,Y(_u1?ҁ7Uh\?Ftqﴤ@/>aB0:
Kcv+ZLmN]Eu8OT)O*"tn;'%ii%a$U#^Rʫ5_5X?866@6Onw;RND>sc"U#ޔp6'1ԇcs9YjweSauTl$D[ߪ]:+Ⓨ?_V]LAF 1lgwV0ρ30,!%UQm\s-zYxd;Z8iP6p`^P?UUËTF8sS9\UMKIjw&oZL@m! UUA.Atg+WEU~r}ˡ
@-N/'.`՝2UA-*ǫNF{'Opl->YwA6cCI65XEM6M_xC3U{hH)ǅAy![J\7Lbݧ3;>u>RRBˌ^9Gi\(GuU.kT<xMO` 4=u@a|b\LY
;}VBt-T>m}L,.@{J`=4J!֝)rӭ9ISsnXp*=?Y:N=6x cpumask_of_cpu(new_cpu);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		set_pending_irq(irq, cpumask_of_cpu(new_cpu));
 	}
 }
 
@@ -534,16 +529,12 @@ tryanotherirq:
 	cpus_and(tmp, target_cpu_mask, allowed_mask);
 
 	if (!cpus_empty(tmp)) {
-		irq_desc_t *desc = irq_desc + selected_irq;
-		unsigned long flags;
 
 		Dprintk("irq = %d moved to cpu = %d\n",
 				selected_irq, min_loaded);
 		/* mark for change destination */
-		spin_lock_irqsave(&desc->lock, flags);
-		pending_irq_cpumask[selected_irq] =
-					cpumask_of_cpu(min_loaded);
-		spin_unlock_irqrestore(&desc->lock, flags);
+		set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
+
 		/* Since we made a change, come back sooner to 
 		 * check for more variation.
 		 */
@@ -575,6 +566,7 @@ static int balanced_irq(void *unused)
 	/* push everything to CPU 0 to give us a starting point.  */
 	for (i = 0 ; i < NR_IRQS ; i++) {
 		pending_irq_cpumask[i] = cpumask_of_cpu(0);
+		set_pending_irq(i, cpumask_of_cpu(0));
 	}
 
 	for ( ; ; ) {
diff -puN include/linux/irq.h~x86-x86_64-deferred-handling-of-writes-to-proc-irq-xx-smp_affinitypatch-added-to-mm-tree-fix-5 include/linux/irq.h
--- 25/include/linux/irq.h~x86-x86_64-deferred-handling-of-writes-to-proc-irq-xx-smp_affinitypatch-added-to-mm-tree-fix-5	2005-05-03 19:13:04.000000000 -0700
+++ 25-akpm/include/linux/irq.h	2005-05-03 19:13:04.000000000 -0700
@@ -67,6 +67,9 @@ typedef struct irq_desc {
 	unsigned int irq_count;		/* For detecting broken interrupts */
 	unsigned int irqs_unhandled;
 	spinlock_t lock;
+#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
+	unsigned int move_irq;		/* Flag: need to re-target intr dest */
+#endif
 } ____cacheline_aligned irq_desc_t;
 
 extern irq_desc_t irq_desc [NR_IRQS];
@@ -78,7 +81,6 @@ irq_descp (int irq)
 	return irq_desc + irq;
 }
 
-
 #include <asm/hw_irq.h> /* the arch dependent stuff */
 
 extern int setup_irq(unsigned int irq, struct irqaction * new);
@@ -101,19 +103,33 @@ static inline void set_native_irq_info(i
 
 #if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
 extern cpumask_t pending_irq_cpumask[NR_IRQS];
-#endif
 
-#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
+{
+	irq_desc_t *desc = irq_desc + irq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	desc->move_irq = 1;
+	pending_irq_cpumask[irq] = mask;
+	spin_unlock_irqrestore(&desc->lock, flags);
+}
+
 static inline void
 move_native_irq(int irq)
 {
 	cpumask_t tmp;
 	irq_desc_t *desc = irq_descp(irq);
 
+	if (likely (!desc->move_irq))
+		return;
+
+	desc->move_irq = 0;
+
 	if (likely(cpus_empty(pending_irq_cpumask[irq])))
 		return;
 
-	if (unlikely(!desc->handler->set_affinity))
+	if (!desc->handler->set_affinity)
 		return;
 
 	/* note - we hold the desc->lock */
@@ -165,10 +181,11 @@ static inline void set_irq_info(int irq,
 }
 #endif // CONFIG_PCI_MSI
 
-#else	// CONFIG_GENERIC_PENDING_IRQ
+#else	// CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE
 
 #define move_irq(x)
 #define move_native_irq(x)
+#define set_pending_irq(x,y)
 static inline void set_irq_info(int irq, cpumask_t mask)
 {
 	set_native_irq_info(irq, mask);
diff -puN kernel/irq/proc.c~x86-x86_64-deferred-handling-of-writes-to-proc-irq-xx-smp_affinitypatch-added-to-mm-tree-fix-5 kernel/irq/proc.c
--- 25/kernel/irq/proc.c~x86-x86_64-deferred-handling-of-writes-to-proc-irq-xx-smp_affinitypatch-added-to-mm-tree-fix-5	2005-05-03 19:13:04.000000000 -0700
+++ 25-akpm/kernel/irq/proc.c	2005-05-03 19:13:04.000000000 -0700
@@ -22,16 +22,11 @@ static struct proc_dir_entry *smp_affini
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
 {
-	irq_desc_t	*desc = irq_descp(irq);
-	unsigned long flags;
-
 	/*
 	 * Save these away for later use. Re-program when the
 	 * interrupt is pending
 	 */
-	spin_lock_irqsave(&desc->lock, flags);
-	pending_irq_cpumask[irq] = mask_val;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	set_pending_irq(irq, mask_val);
 }
 #else
 void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
_
