diff -Naurp -X /home/jbarnes/dontdiff 210-nr-free-pages-fix.patch/arch/ia64/mm/tlb.c 230-tlb-global-flush-fix.patch/arch/ia64/mm/tlb.c
--- 210-nr-free-pages-fix.patch/arch/ia64/mm/tlb.c	Tue Jan 20 19:50:18 2004
+++ 230-tlb-global-flush-fix.patch/arch/ia64/mm/tlb.c	Wed Jan 21 09:23:16 2004
@@ -60,7 +60,7 @@ wrap_mmu_context (struct mm_struct *mm)
 	for_each_process(tsk) {
 		if (!tsk->mm)
 			continue;
-		tsk_context = tsk->mm->context;
+		tsk_context = tsk->mm->context.ctx;
 		if (tsk_context == ia64_ctx.next) {
 			if (++ia64_ctx.next >= ia64_ctx.limit) {
 				/* empty range: reset the range limit and start over */
diff -Naurp -X /home/jbarnes/dontdiff 210-nr-free-pages-fix.patch/arch/ia64/sn/kernel/sn2/sn2_smp.c 230-tlb-global-flush-fix.patch/arch/ia64/sn/kernel/sn2/sn2_smp.c
--- 210-nr-free-pages-fix.patch/arch/ia64/sn/kernel/sn2/sn2_smp.c	Tue Jan 20 19:50:39 2004
+++ 230-tlb-global-flush-fix.patch/arch/ia64/sn/kernel/sn2/sn2_smp.c	Wed Jan 21 09:23:16 2004
@@ -74,6 +74,7 @@ sn2_global_tlb_purge (unsigned long star
 	int			cnode, mycnode, nasid, flushed=0;
 	volatile unsigned	long	*ptc0, *ptc1;
 	unsigned long		flags=0, data0, data1;
+	struct mm_struct	*mm=current->active_mm;
 
 	data0 = (1UL<<SH_PTC_0_A_SHFT) |
 		(nbits<<SH_PTC_0_PS_SHFT) |
@@ -89,9 +90,8 @@ sn2_global_tlb_purge (unsigned long star
 
 	do {
 		data1 = start | (1UL<<SH_PTC_1_START_SHFT);
-		for (cnode = 0; cnode < numnodes; cnode++) {
-			if (is_headless_node(cnode))
-				continue;
+		for (cnode = find_first_bit(&mm->context.node_history, numnodes); cnode < numnodes;
+		     cnode = find_next_bit(&mm->context.node_history, numnodes, cnode + 1)) {
 			if (cnode == mycnode) {
 				asm volatile ("ptc.ga %0,%1;;srlz.i;;" :: "r"(start), "r"(nbits<<2) : "memory");
 			} else {
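
The rewritten loop no longer broadcasts the purge to every populated node: it walks only the nodes recorded in the mm's node_history bitmap, i.e. the nodes this address space has actually been activated on. A minimal sketch of the bitmap-walk idiom, with stand-in values (node_history, numnodes, and the set bits below are hypothetical, not taken from the patch):

	/* Visit only the set bits: nodes 0, 2 and 4 in this made-up history. */
	unsigned long node_history = (1UL << 0) | (1UL << 2) | (1UL << 4);
	int numnodes = 8;
	int cnode;

	for (cnode = find_first_bit(&node_history, numnodes); cnode < numnodes;
	     cnode = find_next_bit(&node_history, numnodes, cnode + 1)) {
		/* local node: ptc.ga inline; remote node: write its PTC registers */
	}

find_next_bit() searches from its offset argument inclusive, so stepping with cnode + 1 skips the bit just visited.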
diff -Naurp -X /home/jbarnes/dontdiff 210-nr-free-pages-fix.patch/include/asm-ia64/mmu.h 230-tlb-global-flush-fix.patch/include/asm-ia64/mmu.h
--- 210-nr-free-pages-fix.patch/include/asm-ia64/mmu.h	Tue Jan 20 19:49:18 2004
+++ 230-tlb-global-flush-fix.patch/include/asm-ia64/mmu.h	Wed Jan 21 09:23:16 2004
@@ -1,11 +1,20 @@
 #ifndef __MMU_H
 #define __MMU_H
 
+#ifdef CONFIG_NUMA
+#include <linux/cpumask.h>
+#endif
+
 /*
- * Type for a context number.  We declare it volatile to ensure proper ordering when it's
- * accessed outside of spinlock'd critical sections (e.g., as done in activate_mm() and
- * init_new_context()).
+ * Per-mm context: the context number plus, on NUMA, the set of nodes this mm has run
+ * on.  The ctx field is volatile to ensure proper ordering when it's accessed outside
+ * of spinlock'd critical sections (e.g., as done in activate_mm() and init_new_context()).
  */
-typedef volatile unsigned long mm_context_t;
+typedef struct {
+	volatile unsigned long ctx;
+#ifdef CONFIG_NUMA
+	cpumask_t node_history;			/* ZZZ change to nodemask_t when avail */
+#endif
+} mm_context_t;
 
 #endif
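
mm_context_t thus grows from a bare volatile counter into a per-mm structure: the context number keeps its old semantics in the ctx field, and on NUMA a node bitmap travels with it. A small sketch of how the two fields cooperate (stand-in values, CONFIG_NUMA assumed; not code from the patch):

	mm_context_t c;

	memset(&c, 0, sizeof(c));	/* what clear_mm_context() does */
	c.ctx = 1;			/* any nonzero value is a live context number */
	set_bit(2, &c.node_history);	/* record that the mm ran on node 2 */

Zeroing the whole structure both invalidates the context number (0 means "allocate a fresh one") and forgets the node history, which is exactly what the global flush path wants.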
diff -Naurp -X /home/jbarnes/dontdiff 210-nr-free-pages-fix.patch/include/asm-ia64/mmu_context.h 230-tlb-global-flush-fix.patch/include/asm-ia64/mmu_context.h
--- 210-nr-free-pages-fix.patch/include/asm-ia64/mmu_context.h	Tue Jan 20 19:49:24 2004
+++ 230-tlb-global-flush-fix.patch/include/asm-ia64/mmu_context.h	Wed Jan 21 09:23:16 2004
@@ -75,6 +75,12 @@ enter_lazy_tlb (struct mm_struct *mm, st
 {
 }
 
+static inline void
+clear_mm_context (struct mm_struct *mm)
+{
+	memset(&mm->context, 0, sizeof(mm->context));
+}
+
 /*
  * When the context counter wraps around all TLBs need to be flushed because an old
  * context number might have been reused. This is signalled by the ia64_need_tlb_flush
@@ -92,27 +98,28 @@ delayed_tlb_flush (void)
 	}
 }
 
-static inline mm_context_t
+static inline unsigned long
 get_mmu_context (struct mm_struct *mm)
 {
 	unsigned long flags;
-	mm_context_t context = mm->context;
+	mm_context_t *context = &mm->context;
+	unsigned long ctx = context->ctx;
 
-	if (context)
-		return context;
+	if (ctx)
+		return ctx;
 
 	spin_lock_irqsave(&ia64_ctx.lock, flags);
 	{
 		/* re-check, now that we've got the lock: */
-		context = mm->context;
-		if (context == 0) {
+		ctx = context->ctx;
+		if (ctx == 0) {
 			if (ia64_ctx.next >= ia64_ctx.limit)
 				wrap_mmu_context(mm);
-			mm->context = context = ia64_ctx.next++;
+			context->ctx = ctx = ia64_ctx.next++;
 		}
 	}
 	spin_unlock_irqrestore(&ia64_ctx.lock, flags);
-	return context;
+	return ctx;
 }
 
 /*
@@ -123,7 +130,7 @@ static inline int
 init_new_context (struct task_struct *p, struct mm_struct *mm)
 {
 	MMU_TRACE('N', smp_processor_id(), mm, 0);
-	mm->context = 0;
+	clear_mm_context(mm);
 	return 0;
 }
 
@@ -135,7 +142,7 @@ destroy_context (struct mm_struct *mm)
 }
 
 static inline void
-reload_context (mm_context_t context)
+reload_context (unsigned long context)
 {
 	unsigned long rid;
 	unsigned long rid_incr = 0;
@@ -165,15 +172,18 @@ reload_context (mm_context_t context)
 static inline void
 activate_context (struct mm_struct *mm)
 {
-	mm_context_t context;
+	unsigned long ctx;
 
+#ifdef CONFIG_NUMA
+	set_bit(numa_node_id(), &mm->context.node_history);
+#endif
 	do {
-		context = get_mmu_context(mm);
-		MMU_TRACE('A', smp_processor_id(), mm, context);
-		reload_context(context);
-		MMU_TRACE('a', smp_processor_id(), mm, context);
+		ctx = get_mmu_context(mm);
+		MMU_TRACE('A', smp_processor_id(), mm, ctx);
+		reload_context(ctx);
+		MMU_TRACE('a', smp_processor_id(), mm, ctx);
 		/* in the unlikely event of a TLB-flush by another thread, redo the load: */
-	} while (unlikely(context != mm->context));
+	} while (unlikely(ctx != mm->context.ctx));
 }
 
 #define deactivate_mm(tsk,mm)					\
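
Two details of activate_context() are worth noting. The node bit is set before the context is loaded, presumably so a racing sn2_global_tlb_purge() already sees this node by the time any translation can be inserted here. And the do/while re-checks mm->context.ctx because flush_tlb_mm() on another CPU may call clear_mm_context() at any moment; the interleaving the loop catches, as a timeline (illustration, not code from the patch):

	CPU 0 (activate_context)	CPU 1 (flush_tlb_mm)
	ctx = get_mmu_context(mm);
					clear_mm_context(mm);	/* ctx -> 0 */
	reload_context(ctx);		/* stale region IDs now loaded */
	ctx != mm->context.ctx		/* mismatch: retry with a fresh ctx */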
diff -Naurp -X /home/jbarnes/dontdiff 210-nr-free-pages-fix.patch/include/asm-ia64/tlbflush.h 230-tlb-global-flush-fix.patch/include/asm-ia64/tlbflush.h
--- 210-nr-free-pages-fix.patch/include/asm-ia64/tlbflush.h	Tue Jan 20 19:50:32 2004
+++ 230-tlb-global-flush-fix.patch/include/asm-ia64/tlbflush.h	Wed Jan 21 09:23:16 2004
@@ -52,7 +52,7 @@ flush_tlb_mm (struct mm_struct *mm)
 	if (!mm)
 		goto out;
 
-	mm->context = 0;
+	clear_mm_context(mm);
 
 	if (atomic_read(&mm->mm_users) == 0)
 		goto out;		/* happens as a result of exit_mmap() */
diff -Naurp -X /home/jbarnes/dontdiff 210-nr-free-pages-fix.patch/mm/memory.c 230-tlb-global-flush-fix.patch/mm/memory.c
--- 210-nr-free-pages-fix.patch/mm/memory.c	Tue Jan 20 19:49:54 2004
+++ 230-tlb-global-flush-fix.patch/mm/memory.c	Wed Jan 21 09:23:16 2004
@@ -573,9 +573,10 @@ int unmap_vmas(struct mmu_gather **tlbp,
 			if ((long)zap_bytes > 0)
 				continue;
 			if (need_resched()) {
+				int fullmm = (*tlbp)->fullmm;
 				tlb_finish_mmu(*tlbp, tlb_start, start);
 				cond_resched_lock(&mm->page_table_lock);
-				*tlbp = tlb_gather_mmu(mm, 0);
+				*tlbp = tlb_gather_mmu(mm, fullmm);
 				tlb_start_valid = 0;
 			}
 			zap_bytes = ZAP_BLOCK_SIZE;
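
The second argument of tlb_gather_mmu() marks the gather as a full-mm teardown; before this change the restart after cond_resched_lock() always passed 0, silently demoting an exit-time full flush to a ranged one. The corrected restart pattern, sketched against the asm-generic mmu_gather of this era (whose fullmm field the hunk above reads):

	struct mmu_gather *tlb = tlb_gather_mmu(mm, 1);	/* full-mm unmap */
	/* ... unmap a batch of vmas ... */
	if (need_resched()) {
		int fullmm = tlb->fullmm;	/* latch before finishing */
		tlb_finish_mmu(tlb, start, end);
		cond_resched_lock(&mm->page_table_lock);
		tlb = tlb_gather_mmu(mm, fullmm);	/* keep full-mm mode */
	}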
