    Subject: [patch] page_cache_size not atomic_t
    All write accesses to page_cache_size are serialized by the pagecache_lock
    spinlock. It makes sense to increase/decrease page_cache_size only while
    modifying the hash, so I don't think we'll ever need to change
    page_cache_size outside the pagecache_lock spinlock.
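
    To make the locking argument concrete, here is a minimal sketch of the
    caller pattern (an illustrative fragment, not the exact 2.3.11 code;
    page_hash() here just stands for the hash-bucket lookup the real caller
    does): the hash is only ever edited with pagecache_lock held, so the
    counter update inside __add_page_to_hash_queue() is already serialized.

	/*
	 * Sketch of a page-cache insertion: the counter is touched only
	 * while the hash chain itself is being modified, and that always
	 * happens under pagecache_lock.
	 */
	spin_lock(&pagecache_lock);
	__add_page_to_hash_queue(page, page_hash(inode, offset));
	/* ... link the page into the inode's page list ... */
	spin_unlock(&pagecache_lock);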

    For this reason I think it's a good idea to make page_cache_size a plain
    `int', thus avoiding unnecessary and poorly scaling locked cycles on the
    bus in SMP.
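
    To give an idea of what this buys (a sketch assuming an SMP i386 build;
    other architectures differ in the details): atomic_inc() has to emit a
    lock-prefixed increment, i.e. a locked bus/cache cycle on every update,
    while a plain int increment under the already-held spinlock is just an
    ordinary increment. Readers such as /proc/meminfo take no lock at all
    and only need an approximate value, and a naturally aligned word read
    doesn't tear on the architectures we support.

	/* writer side, runs with pagecache_lock already held */
	page_cache_size++;	/* plain increment, no lock prefix */

	/* reader side, lockless and approximate (as in /proc/meminfo) */
	unsigned long cached_kb = page_cache_size << (PAGE_SHIFT - 10);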

    Here is a patch against 2.3.11-pre6 (I checked that it compiles).

    diff -urN 2.3.11-pre6/fs/proc/array.c tmp/fs/proc/array.c
    --- 2.3.11-pre6/fs/proc/array.c Tue Jul 20 18:37:45 1999
    +++ tmp/fs/proc/array.c Wed Jul 21 02:14:53 1999
    @@ -348,7 +348,7 @@
    len = sprintf(buffer, " total: used: free: shared: buffers: cached:\n"
    "Mem: %8lu %8lu %8lu %8lu %8lu %8lu\n"
    "Swap: %8lu %8lu %8lu\n",
    - i.totalram, i.totalram-i.freeram, i.freeram, i.sharedram, i.bufferram, atomic_read(&page_cache_size)*PAGE_SIZE,
    + i.totalram, i.totalram-i.freeram, i.freeram, i.sharedram, i.bufferram, page_cache_size*PAGE_SIZE,
    i.totalswap, i.totalswap-i.freeswap, i.freeswap);
    /*
    * Tagged format, for easy grepping and expansion. The above will go away
    @@ -366,7 +366,7 @@
    i.freeram >> 10,
    i.sharedram >> 10,
    i.bufferram >> 10,
    - atomic_read(&page_cache_size) << (PAGE_SHIFT - 10),
    + page_cache_size << (PAGE_SHIFT - 10),
    i.totalswap >> 10,
    i.freeswap >> 10);
    }
    diff -urN 2.3.11-pre6/include/linux/mm.h tmp/include/linux/mm.h
    --- 2.3.11-pre6/include/linux/mm.h Tue Jul 20 18:47:12 1999
    +++ tmp/include/linux/mm.h Wed Jul 21 02:15:10 1999
    @@ -414,7 +414,7 @@

    #define buffer_under_min() ((atomic_read(&buffermem) >> PAGE_SHIFT) * 100 < \
    buffer_mem.min_percent * num_physpages)
    -#define pgcache_under_min() (atomic_read(&page_cache_size) * 100 < \
    +#define pgcache_under_min() (page_cache_size * 100 < \
    page_cache.min_percent * num_physpages)

    #endif /* __KERNEL__ */
    diff -urN 2.3.11-pre6/include/linux/pagemap.h tmp/include/linux/pagemap.h
    --- 2.3.11-pre6/include/linux/pagemap.h Tue Jul 20 18:47:12 1999
    +++ tmp/include/linux/pagemap.h Wed Jul 21 02:21:31 1999
    @@ -43,7 +43,7 @@
    #define PAGE_HASH_BITS (page_hash_bits)
    #define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS)

    -extern atomic_t page_cache_size; /* # of pages currently in the hash table */
    +extern int page_cache_size; /* # of pages currently in the hash table */
    extern struct page **page_hash_table;

    extern void page_cache_init(unsigned long);
    diff -urN 2.3.11-pre6/include/linux/swap.h tmp/include/linux/swap.h
    --- 2.3.11-pre6/include/linux/swap.h Tue Jul 20 18:47:12 1999
    +++ tmp/include/linux/swap.h Wed Jul 21 02:21:13 1999
    @@ -66,7 +66,6 @@
    extern int nr_free_pages;
    extern atomic_t nr_async_pages;
    extern struct inode swapper_inode;
    -extern atomic_t page_cache_size;
    extern atomic_t buffermem;

    /* Incomplete types for prototype declarations: */
    diff -urN 2.3.11-pre6/mm/filemap.c tmp/mm/filemap.c
    --- 2.3.11-pre6/mm/filemap.c Tue Jul 20 18:37:45 1999
    +++ tmp/mm/filemap.c Wed Jul 21 02:17:05 1999
    @@ -35,7 +35,7 @@
    * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
    */

    -atomic_t page_cache_size = ATOMIC_INIT(0);
    +int page_cache_size;
    unsigned int page_hash_bits;
    struct page **page_hash_table;

    @@ -44,7 +44,7 @@

    void __add_page_to_hash_queue(struct page * page, struct page **p)
    {
    - atomic_inc(&page_cache_size);
    + page_cache_size++; /* serialized by the pagecache_lock */
    if((page->next_hash = *p) != NULL)
    (*p)->pprev_hash = &page->next_hash;
    *p = page;
    @@ -61,7 +61,7 @@
    *page->pprev_hash = page->next_hash;
    page->pprev_hash = NULL;
    }
    - atomic_dec(&page_cache_size);
    + page_cache_size--; /* serialized by the pagecache_lock */
    }

    static void remove_page_from_inode_queue(struct page * page)
    diff -urN 2.3.11-pre6/mm/mmap.c tmp/mm/mmap.c
    --- 2.3.11-pre6/mm/mmap.c Tue Jul 13 02:02:40 1999
    +++ tmp/mm/mmap.c Wed Jul 21 02:17:20 1999
    @@ -63,7 +63,7 @@
    return 1;

    free = atomic_read(&buffermem) >> PAGE_SHIFT;
    - free += atomic_read(&page_cache_size);
    + free += page_cache_size;
    free += nr_free_pages;
    free += nr_swap_pages;
    free -= (page_cache.min_percent + buffer_mem.min_percent + 2)*num_physpages/100;
    Andrea

