Today, with folios being an overlay on struct page, folio_alloc() is simple: we allocate a compound page and cast the result to a struct folio pointer.
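For comparison, a minimal sketch of the current behaviour (alloc_pages(), __GFP_COMP and page_folio() are existing kernel APIs; the real folio_alloc() has a little more to it):

struct folio *folio_alloc(gfp_t gfp, unsigned int order)
{
        struct page *page = alloc_pages(gfp | __GFP_COMP, order);

        if (!page)
                return NULL;
        /* A folio is just a compound page viewed through another type. */
        return page_folio(page);
}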
Once we live in a MatthewWilcox/Memdescs world, we need to do two allocations. First, we allocate a struct folio from slab. Then we call into the MatthewWilcox/BuddyAllocator to allocate the memory, which will set the memdesc value in each page to refer to the folio. I envisage a function like this:
struct folio *folio_alloc(gfp_t gfp, unsigned int order)
{
        /* Large folios carry extra state, so they come from a bigger cache. */
        struct kmem_cache *folio_slab = order > 0 ? folio_large_slab :
                                                    folio_small_slab;
        struct page *page;
        struct folio *folio = kmem_cache_alloc(folio_slab, gfp);

        if (!folio)
                return NULL;
        /* Allocate the memory; each page's memdesc is set to point at us. */
        page = alloc_page_desc(gfp, order, MEMDESC_TYPE_FOLIO(folio));
        if (!page) {
                kmem_cache_free(folio_slab, folio);
                return NULL;
        }

        folio->pfn = page_to_pfn(page);
        folio->flags = pfn_flags(folio->pfn);
        INIT_LIST_HEAD(&folio->lru);
        folio->mapping = NULL;
        folio->index = -1;
        folio->private = NULL;
        atomic_set(&folio->_mapcount, -1);
        folio->memcg = NULL;
        folio->order = order;
        atomic_set(&folio->_pincount, 0);
        if (order > 0) {
                atomic_set(&folio->_entire_mapcount, 0);
                atomic_set(&folio->_nr_pages_mapped, 0);
                INIT_LIST_HEAD(&folio->_deferred_split_list);
        }
        /* Publish the folio only once it is fully initialised. */
        atomic_set_release(&folio->_refcount, 1);
        return folio;
}
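Note the two slab caches: order-0 folios never need _entire_mapcount, _nr_pages_mapped or the deferred split list, so they can come from a smaller cache than large folios.

The natural counterpart is the free path, which undoes the two allocations in the opposite order. This is only a sketch; free_page_desc() is a hypothetical name mirroring alloc_page_desc() above:

void folio_free(struct folio *folio)
{
        struct kmem_cache *folio_slab = folio->order > 0 ? folio_large_slab :
                                                           folio_small_slab;

        /* Return the memory to the buddy allocator first ... */
        free_page_desc(pfn_to_page(folio->pfn), folio->order);
        /* ... then free the descriptor itself. */
        kmem_cache_free(folio_slab, folio);
}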