Subject: [PATCH] deinline put_page if CONFIG_HUGETLB_PAGE=y
On Friday 16 April 2004 22:30, Denis Vlasenko wrote:
> This is next version of 'inline hunter', a tool designed to find
> inlines which are large. It has bug fixes and improvements suggested
> by readers of lkml. Tarball and results are below sig.
>
> Matt, you may simply replace earlier version with this one.
> --
> vda
>
>  Size Uses Wasted Name and definition
> ===== ==== ====== ================================================
>    56  461  16560 copy_from_user include/asm/uaccess.h
>   122  119  12036 skb_dequeue include/linux/skbuff.h
>   164   78  11088 skb_queue_purge include/linux/skbuff.h
>    97  141  10780 netif_wake_queue include/linux/netdevice.h
>    43  468  10741 copy_to_user include/asm/uaccess.h
>    43  461  10580 copy_from_user include/asm/uaccess.h
>   145   77   9500 put_page include/linux/mm.h
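
For reference, the Wasted column above is consistent with

	wasted = (size - 20) * (uses - 1)

i.e. the tool apparently charges about 20 bytes of call overhead per
call site; that constant is inferred from the numbers here, not stated
in the quoted mail. For put_page that gives
(145 - 20) * (77 - 1) = 125 * 76 = 9500.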

This patch deinlines put_page() for the CONFIG_HUGETLB_PAGE=y case: with
the compound-page handling it is 145 bytes and has 77 users (see above),
so the body moves to mm/page_alloc.c and include/linux/mm.h keeps only a
declaration.
--
vda
diff -urN linux-2.6.5.orig/include/linux/mm.h linux-2.6.5.mm_inline1/include/linux/mm.h
--- linux-2.6.5.orig/include/linux/mm.h Sun Apr 4 06:36:15 2004
+++ linux-2.6.5.mm_inline1/include/linux/mm.h Fri Apr 16 22:49:18 2004
@@ -253,22 +253,7 @@
 	atomic_inc(&page->count);
 }
 
-static inline void put_page(struct page *page)
-{
-	if (PageCompound(page)) {
-		page = (struct page *)page->lru.next;
-		if (put_page_testzero(page)) {
-			if (page->lru.prev) {	/* destructor? */
-				(*(void (*)(struct page *))page->lru.prev)(page);
-			} else {
-				__page_cache_release(page);
-			}
-		}
-		return;
-	}
-	if (!PageReserved(page) && put_page_testzero(page))
-		__page_cache_release(page);
-}
+void put_page(struct page *page);
 
 #else /* CONFIG_HUGETLB_PAGE */

diff -urN linux-2.6.5.orig/mm/page_alloc.c linux-2.6.5.mm_inline1/mm/page_alloc.c
--- linux-2.6.5.orig/mm/page_alloc.c Sun Apr 4 06:36:17 2004
+++ linux-2.6.5.mm_inline1/mm/page_alloc.c Fri Apr 16 22:49:51 2004
@@ -109,6 +109,24 @@
  * This is only for debug at present. This usage means that zero-order pages
  * may not be compound.
  */
+
+void put_page(struct page *page)
+{
+	if (PageCompound(page)) {
+		page = (struct page *)page->lru.next;
+		if (put_page_testzero(page)) {
+			if (page->lru.prev) {	/* destructor? */
+				(*(void (*)(struct page *))page->lru.prev)(page);
+			} else {
+				__page_cache_release(page);
+			}
+		}
+		return;
+	}
+	if (!PageReserved(page) && put_page_testzero(page))
+		__page_cache_release(page);
+}
+
 static void prep_compound_page(struct page *page, unsigned long order)
 {
 	int i;
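
For reference, a sketch of how the put_page declarations in
include/linux/mm.h read after this patch. The !CONFIG_HUGETLB_PAGE
branch below is untouched by the patch and is quoted from memory of
2.6.5, so treat it as illustrative rather than authoritative:

#ifdef CONFIG_HUGETLB_PAGE
/* compound-page aware put_page: body now lives in mm/page_alloc.c */
void put_page(struct page *page);
#else	/* CONFIG_HUGETLB_PAGE */
/* no hugetlb means no compound pages, so the short inline stays */
static inline void put_page(struct page *page)
{
	if (!PageReserved(page) && put_page_testzero(page))
		__page_cache_release(page);
}
#endif	/* CONFIG_HUGETLB_PAGE */

Callers are unaffected in either configuration; only the location of the
CONFIG_HUGETLB_PAGE version's body changes.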