start_kernel的在mm_init->kmem_cache_init之后,还调用了kmem_cache_init_late函数。

该函数接着kmem_cache_init的处理进度,继续展开,如下:

点击(此处)折叠或打开

  1. void __init kmem_cache_init_late(void)
  2. {
  3.     struct kmem_cache *cachep;

  4.     /* 6) resize the head arrays to their final sizes */
  5.     /* kmem_cache_init stopped after step 5); pick up at 6) here and walk every cache on cache_chain */
  6.     mutex_lock(&cache_chain_mutex);
  7.     list_for_each_entry(cachep, &cache_chain, next)
  8.         /* the local caches had a fixed size during early init; recompute it from the object size now */
  9.         if (enable_cpucache(cachep, GFP_NOWAIT))
  10.             BUG();
  11.     mutex_unlock(&cache_chain_mutex);

  12.     /* from this point on the general-purpose caches are fully operational */
  13.     g_cpucache_up = FULL;

  14.     /* Annotate slab for lockdep -- annotate the malloc caches */
  15.     init_lock_keys();

  16.     /*
  17.      * Register a cpu startup notifier callback that initializes
  18.      * cpu_cache_get for all new cpus
  19.      */
  20.     /* register a CPU-hotplug callback so local caches are set up when a CPU comes online */
  21.     register_cpu_notifier(&cpucache_notifier);

  22. #ifdef CONFIG_NUMA
  23.     /*
  24.      * Register a memory hotplug callback that initializes and frees
  25.      * nodelists.
  26.      */
  27.     hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
  28. #endif

  29.     /*
  30.      * The reap timers are started later, with a module init call: That part
  31.      * of the kernel is not yet operational.
  32.      */
  33. }

点击(此处)折叠或打开

  1. /* Called with cache_chain_mutex held always */
  2. /* Compute suitable limit/shared values for this cache's per-CPU local caches and apply them */
  3. static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
  4. {
  5.     int err;
  6.     int limit, shared;

  7.     /*
  8.      * The head array serves three purposes:
  9.      * - create a LIFO ordering, i.e. return objects that are cache-warm
  10.      * - reduce the number of spinlock operations.
  11.      * - reduce the number of linked list operations on the slab and
  12.      * bufctl chains: array operations are cheaper.
  13.      * The numbers are guessed, we should auto-tune as described by
  14.      * Bonwick.
  15.      */
  16.     /* choose the local-cache entry limit from the object size: bigger objects, fewer cached entries */
  17.     if (cachep->buffer_size > 131072)
  18.         limit = 1;
  19.     else if (cachep->buffer_size > PAGE_SIZE)
  20.         limit = 8;
  21.     else if (cachep->buffer_size > 1024)
  22.         limit = 24;
  23.     else if (cachep->buffer_size > 256)
  24.         limit = 54;
  25.     else
  26.         limit = 120;

  27.     /*
  28.      * CPU bound tasks (e.g. network routing) can exhibit cpu bound
  29.      * allocation behaviour: Most allocs on one cpu, most free operations
  30.      * on another cpu. For these cases, an efficient object passing between
  31.      * cpus is necessary. This is provided by a shared array. The array
  32.      * replaces Bonwick's magazine layer.
  33.      * On uniprocessor, it's functionally equivalent (but less efficient)
  34.      * to a larger limit. Thus disabled by default.
  35.      */
  36.     shared = 0;
  37.     /* on multi-CPU systems enable a shared local cache for small objects */
  38.     if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
  39.         shared = 8;

  40. #if DEBUG
  41.     /*
  42.      * With debugging enabled, large batchcount lead to excessively long
  43.      * periods with disabled local interrupts. Limit the batchcount
  44.      */
  45.     if (limit > 32)
  46.         limit = 32;
  47. #endif
  48.     /* apply the new settings; batchcount is roughly half of limit */
  49.     err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
  50.     if (err)
  51.         printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
  52.          cachep->name, -err);
  53.     return err;
  54. }

点击(此处)折叠或打开

  1. /* Always called with the cache_chain_mutex held */
  2. /* Replace the per-CPU local caches, then rebuild the shared caches and the three slab lists */
  3. static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
  4.                 int batchcount, int shared, gfp_t gfp)
  5. {
  6.     struct ccupdate_struct *new;
  7.     int i;

  8.     new = kzalloc(sizeof(*new), gfp);
  9.     if (!new)
  10.         return -ENOMEM;

  11.     for_each_online_cpu(i) {
  12.         /* allocate a new struct array_cache for each online cpu */
  13.         new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
  14.                         batchcount, gfp);
  15.         if (!new->new[i]) {
  16.             for (i--; i >= 0; i--)
  17.                 kfree(new->new[i]);
  18.             kfree(new);
  19.             return -ENOMEM;
  20.         }
  21.     }
  22.     new->cachep = cachep;

  23.     on_each_cpu(do_ccupdate_local, (void *)new, 1);

  24.     check_irq_on();
  25.     cachep->batchcount = batchcount;
  26.     cachep->limit = limit;
  27.     cachep->shared = shared;

  28.     /* free the old local caches; do_ccupdate_local swapped them into new->new[] */
  29.     for_each_online_cpu(i) {
  30.         struct array_cache *ccold = new->new[i];
  31.         if (!ccold)
  32.             continue;
  33.         spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
  34.         /* return the objects still sitting in the old local cache to the slabs */
  35.         free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
  36.         spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
  37.         /* free the old struct array_cache object itself */
  38.         kfree(ccold);
  39.     }
  40.     kfree(new);
  41.     /* (re)initialize the shared local caches and the three slab lists per node */
  42.     return alloc_kmemlist(cachep, gfp);
  43. }

点击(此处)折叠或打开

  1. /* Runs on every CPU via on_each_cpu(): swap in that CPU's new struct array_cache */
  2. static void do_ccupdate_local(void *info)
  3. {
  4.     struct ccupdate_struct *new = info;
  5.     struct array_cache *old;

  6.     check_irq_off();
  7.     old = cpu_cache_get(new->cachep);
  8.     /* point this CPU's slot at the new struct array_cache */
  9.     new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
  10.     /* hand the old one back to the caller through new->new[] so it can be freed */
  11.     new->new[smp_processor_id()] = old;
  12. }

点击(此处)折叠或打开

  1. /*
  2.  * This initializes kmem_list3 or resizes various caches for all nodes.
  3.  */
  4. /* Build the shared local cache, alien caches and the three slab lists for every online node */
  5. static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
  6. {
  7.     int node;
  8.     struct kmem_list3 *l3;
  9.     struct array_cache *new_shared;
  10.     struct array_cache **new_alien = NULL;

  11.     for_each_online_node(node) {

  12.                 if (use_alien_caches) {
  13.                         new_alien = alloc_alien_cache(node, cachep->limit, gfp);
  14.                         if (!new_alien)
  15.                                 goto fail;
  16.                 }

  17.         new_shared = NULL;
  18.         /* allocate the shared local cache (0xbaadf00d presumably poisons the unused batchcount — TODO confirm) */
  19.         if (cachep->shared) {
  20.             new_shared = alloc_arraycache(node,
  21.                 cachep->shared*cachep->batchcount,
  22.                     0xbaadf00d, gfp);
  23.             if (!new_shared) {
  24.                 free_alien_cache(new_alien);
  25.                 goto fail;
  26.             }
  27.         }
  28.         /* look up this node's existing slab lists, if any */
  29.         l3 = cachep->nodelists[node];
  30.         if (l3) {
  31.             struct array_cache *shared = l3->shared;

  32.             spin_lock_irq(&l3->list_lock);
  33.             /* give the objects held in the old shared local cache back to the slabs */
  34.             if (shared)
  35.                 free_block(cachep, shared->entry,
  36.                         shared->avail, node);
  37.             /* install the new shared local cache */
  38.             l3->shared = new_shared;
  39.             if (!l3->alien) {
  40.                 l3->alien = new_alien;
  41.                 new_alien = NULL;
  42.             }
  43.             /* recompute the upper bound on free objects kept on this node */
  44.             l3->free_limit = (1 + nr_cpus_node(node)) *
  45.                     cachep->batchcount + cachep->num;
  46.             spin_unlock_irq(&l3->list_lock);
  47.             kfree(shared);
  48.             free_alien_cache(new_alien);
  49.             continue;
  50.         }
  51.         /* no list yet: allocate a fresh kmem_list3 (the three slab lists) for this node */
  52.         l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
  53.         if (!l3) {
  54.             free_alien_cache(new_alien);
  55.             kfree(new_shared);
  56.             goto fail;
  57.         }
  58.         /* initialize its lists, lock and counters */
  59.         kmem_list3_init(l3);
  60.         l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
  61.                 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
  62.         l3->shared = new_shared;
  63.         l3->alien = new_alien;
  64.         l3->free_limit = (1 + nr_cpus_node(node)) *
  65.                     cachep->batchcount + cachep->num;
  66.         cachep->nodelists[node] = l3;
  67.     }
  68.     return 0;

  69. fail:
  70.     if (!cachep->next.next) {
  71.         /* Cache is not active yet. Roll back what we did */
  72.         node--;
  73.         while (node >= 0) {
  74.             if (cachep->nodelists[node]) {
  75.                 l3 = cachep->nodelists[node];

  76.                 kfree(l3->shared);
  77.                 free_alien_cache(l3->alien);
  78.                 kfree(l3);
  79.                 cachep->nodelists[node] = NULL;
  80.             }
  81.             node--;
  82.         }
  83.     }
  84.     return -ENOMEM;
  85. }








01-04 06:08