Today, with folios being an overlay on struct page, folio_alloc() is simple: we allocate a compound page and cast the result to a struct folio pointer.
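For comparison, a minimal sketch of that present-day version, relying on struct folio sharing its layout with struct page (the real kernel helper also handles details like NUMA policy, omitted here):

struct folio *folio_alloc(gfp_t gfp, unsigned int order)
{
	/* __GFP_COMP makes this a compound page; the folio overlays it. */
	struct page *page = alloc_pages(gfp | __GFP_COMP, order);

	return (struct folio *)page;
}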
Once we live in a MatthewWilcox/Memdescs world, we need to do two allocations. First, we allocate a struct folio from slab. Then we call into the MatthewWilcox/BuddyAllocator to allocate the memory, which will set the memdesc value in each page to refer to the folio. I envisage a function like this:
struct folio *folio_alloc(gfp_t gfp, unsigned int order)
{
	struct page *page;
	struct folio *folio = kmem_cache_alloc(folio_slab, gfp);

	if (!folio)
		return NULL;
	/* Allocate the pages and point each page's memdesc at this folio. */
	page = alloc_page_desc(gfp, order, MEMDESC_TYPE_FOLIO, folio);
	if (!page) {
		kmem_cache_free(folio_slab, folio);
		return NULL;
	}
	folio->pfn = page_to_pfn(page);
	folio->order = order;
	folio->flags = pfn_flags(folio->pfn);
	INIT_LIST_HEAD(&folio->lru);
	folio->mapping = NULL;
	folio->private = NULL;
	atomic_set(&folio->_mapcount, -1);
	folio->memcg_data = 0;
	/* Large folios track additional per-folio mapping state. */
	if (order > 0) {
		atomic_set(&folio->_entire_mapcount, 0);
		atomic_set(&folio->_nr_pages_mapped, 0);
		INIT_LIST_HEAD(&folio->_deferred_split_list);
	}
	atomic_set(&folio->_refcount, 1);
	return folio;
}
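Presumably the free path would mirror the two allocations. A hedged sketch, where free_page_desc() is a hypothetical counterpart to alloc_page_desc() and folio_slab is the cache used above:

void folio_free(struct folio *folio)
{
	/* free_page_desc() is hypothetical: it would return the pages to
	 * the buddy allocator and clear each page's memdesc. */
	free_page_desc(pfn_to_page(folio->pfn), folio->order);
	/* Then release the separately allocated descriptor. */
	kmem_cache_free(folio_slab, folio);
}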