
From: Dave McCracken <dmccr@us.ibm.com>

It occurred to me that a simple way to improve objrmap performance would be
to sort the vma chains off address_space by offset.  Here's a patch that
does it.  Tests show no measurable cost, and it could significantly reduce
the impact of the worst case scenario (100 mappings * 100 processes) we've
all worried about.



 25-akpm/mm/mmap.c |   18 ++++++++++++++++--
 1 files changed, 16 insertions(+), 2 deletions(-)

diff -puN mm/mmap.c~objrmap-sort-vma-list mm/mmap.c
--- 25/mm/mmap.c~objrmap-sort-vma-list	Wed Apr  2 13:32:18 2003
+++ 25-akpm/mm/mmap.c	Wed Apr  2 13:32:18 2003
@@ -311,14 +311,28 @@ static inline void __vma_link_file(struc
 	if (file) {
 		struct inode * inode = file->f_dentry->d_inode;
 		struct address_space *mapping = inode->i_mapping;
+		struct list_head *vmlist, *vmhead;
 
 		if (vma->vm_flags & VM_DENYWRITE)
 			atomic_dec(&inode->i_writecount);
 
 		if (vma->vm_flags & VM_SHARED)
-			list_add_tail(&vma->shared, &mapping->i_mmap_shared);
+			vmhead = &mapping->i_mmap_shared;
 		else
-			list_add_tail(&vma->shared, &mapping->i_mmap);
+			vmhead = &mapping->i_mmap;
+
+		/* Walk the list we are actually inserting into (not
+		 * unconditionally i_mmap_shared) and stop at the first
+		 * vma whose offset is >= ours. */
+		list_for_each(vmlist, vmhead) {
+			struct vm_area_struct *vmtemp;
+			vmtemp = list_entry(vmlist, struct vm_area_struct, shared);
+			if (vmtemp->vm_pgoff >= vma->vm_pgoff)
+				break;
+		}
+		/* list_add_tail() links the new vma *before* vmlist,
+		 * preserving ascending vm_pgoff order.  If no entry was
+		 * >= ours, vmlist == vmhead and this appends at the
+		 * tail, so no special case is needed. */
+		list_add_tail(&vma->shared, vmlist);
 	}
 }

_
