int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
        int node_id;
        int ret;

        if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
            zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
                return ZONE_RECLAIM_FULL;

        if (!zone_reclaimable(zone))
                return ZONE_RECLAIM_FULL;

        /*
         * Do not scan if the allocation should not be delayed.
         */
        if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC))
                return ZONE_RECLAIM_NOSCAN;

        /*
         * Only run zone reclaim on the local zone or on zones that do not
         * have associated processors. This will favor the local processor
         * over remote processors and spread off node memory allocations
         * as wide as possible.
         */
        node_id = zone_to_nid(zone);
        if (node_state(node_id, N_CPU) && node_id != numa_node_id())
                return ZONE_RECLAIM_NOSCAN;

        if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags))
                return ZONE_RECLAIM_NOSCAN;

        ret = __zone_reclaim(zone, gfp_mask, order);
        clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags);

        if (!ret)
                count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);

        return ret;
}
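
The gating above relies on a handful of constants defined near this code. For reference, the ZONE_RECLAIM_* return codes and the zone_reclaim_mode bits (tunable from user space through /proc/sys/vm/zone_reclaim_mode) look roughly like this in kernels of the same generation (mm/vmscan.c and mm/internal.h); verify against your own tree:

/* Return codes of zone_reclaim(), reproduced here for reference */
#define ZONE_RECLAIM_NOSCAN     -2      /* did not try to reclaim */
#define ZONE_RECLAIM_FULL       -1      /* nothing left worth reclaiming */
#define ZONE_RECLAIM_SOME       0       /* reclaimed some, but not enough */
#define ZONE_RECLAIM_SUCCESS    1       /* reclaimed enough pages */

/* Bits of the zone_reclaim_mode sysctl */
#define RECLAIM_OFF     0
#define RECLAIM_ZONE    (1<<0)  /* Run shrink_inactive_list on the zone */
#define RECLAIM_WRITE   (1<<1)  /* Writeout pages during reclaim */
#define RECLAIM_UNMAP   (1<<2)  /* Unmap pages during reclaim */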
/*
 * Try to free up some pages from this zone through reclaim.
 */
static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
{
        /* Minimum pages needed in order to stay on node */
        const unsigned long nr_pages = 1 << order;
        struct task_struct *p = current;
        struct reclaim_state reclaim_state;
        struct scan_control sc = {
                .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
                .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)),
                .order = order,
                .priority = ZONE_RECLAIM_PRIORITY,
                .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
                .may_unmap = !!(zone_reclaim_mode & RECLAIM_UNMAP),
                .may_swap = 1,
        };

        cond_resched();
        /*
         * We need to be able to allocate from the reserves for RECLAIM_UNMAP
         * and we also need to be able to write out pages for RECLAIM_WRITE
         * and RECLAIM_UNMAP.
         */
        p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
        lockdep_set_current_reclaim_state(gfp_mask);
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;

        if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
                /*
                 * Free memory by calling shrink zone with increasing
                 * priorities until we have enough memory freed.
                 */
                do {
                        shrink_zone(zone, &sc, true);
                } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
        }

        p->reclaim_state = NULL;
        current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
        lockdep_clear_current_reclaim_state();
        return sc.nr_reclaimed >= nr_pages;
}
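
The do/while above is the heart of the function: sc.priority starts at ZONE_RECLAIM_PRIORITY (4 in this era of the kernel) and each decrement roughly doubles the fraction of the LRU that shrink_zone() is willing to scan (about lru_size >> priority pages per pass). Below is a minimal user-space sketch of that back-off, not kernel code; the LRU size, the one-in-1024 hit rate, and the page counts are all made up for illustration:

#include <stdio.h>

#define ZONE_RECLAIM_PRIORITY 4

int main(void)
{
        unsigned long lru_size = 1UL << 20;  /* pretend 1M file pages on the LRU */
        unsigned long nr_pages = 1UL << 9;   /* want a 512-page allocation to fit */
        unsigned long nr_reclaimed = 0;
        int priority = ZONE_RECLAIM_PRIORITY;

        do {
                unsigned long scanned = lru_size >> priority;
                /* pretend one in 1024 scanned pages is reclaimable */
                nr_reclaimed += scanned / 1024;
                printf("priority %d: scanned %lu, reclaimed so far %lu\n",
                       priority, scanned, nr_reclaimed);
        } while (nr_reclaimed < nr_pages && --priority >= 0);

        printf("%s\n", nr_reclaimed >= nr_pages ? "enough freed" : "gave up");
        return 0;
}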
/* Work out how many page cache pages we can reclaim in this reclaim_mode */
static unsigned long zone_pagecache_reclaimable(struct zone *zone)
{
        unsigned long nr_pagecache_reclaimable;
        unsigned long delta = 0;

        /*
         * If RECLAIM_UNMAP is set, then all file pages are considered
         * potentially reclaimable. Otherwise, we have to worry about
         * pages like swapcache and zone_unmapped_file_pages() provides
         * a better estimate
         */
        if (zone_reclaim_mode & RECLAIM_UNMAP)
                nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
        else
                nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);

        /* If we can't clean pages, remove dirty pages from consideration */
        if (!(zone_reclaim_mode & RECLAIM_WRITE))
                delta += zone_page_state(zone, NR_FILE_DIRTY);

        /* Watch for any possible underflows due to delta */
        if (unlikely(delta > nr_pagecache_reclaimable))
                delta = nr_pagecache_reclaimable;

        return nr_pagecache_reclaimable - delta;
}
static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
{
        unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
        unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
                zone_page_state(zone, NR_ACTIVE_FILE);

        /*
         * It's possible for there to be more file mapped pages than
         * accounted for by the pages on the file LRU lists because
         * tmpfs pages accounted for as ANON can also be FILE_MAPPED
         */
        return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
}