memblock: drop memblock_alloc_*_nopanic() variants
authorMike Rapoport <rppt@linux.ibm.com>
Tue, 12 Mar 2019 06:30:42 +0000 (23:30 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 12 Mar 2019 17:04:02 +0000 (10:04 -0700)
As all the memblock allocation functions return NULL in case of error
rather than panic(), the duplicates with _nopanic suffix can be removed.

Link: http://lkml.kernel.org/r/1548057848-15136-22-git-send-email-rppt@linux.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Petr Mladek <pmladek@suse.com> [printk]
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: Christoph Hellwig <hch@lst.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Guo Ren <guoren@kernel.org>
Cc: Guo Ren <ren_guo@c-sky.com> [c-sky]
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Juergen Gross <jgross@suse.com> [Xen]
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Rob Herring <robh@kernel.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
14 files changed:
arch/arc/kernel/unwind.c
arch/sh/mm/init.c
arch/x86/kernel/setup_percpu.c
arch/x86/mm/kasan_init_64.c
drivers/firmware/memmap.c
drivers/usb/early/xhci-dbc.c
include/linux/memblock.h
kernel/dma/swiotlb.c
kernel/printk/printk.c
mm/memblock.c
mm/page_alloc.c
mm/page_ext.c
mm/percpu.c
mm/sparse.c

index d34f69eb1a955f593925d3f6e187206cdaa3ff20..271e9fafa4796ba18cc4ffb158cfc5d9aebbf3a4 100644 (file)
@@ -181,8 +181,7 @@ static void init_unwind_hdr(struct unwind_table *table,
  */
 static void *__init unw_hdr_alloc_early(unsigned long sz)
 {
-       return memblock_alloc_from_nopanic(sz, sizeof(unsigned int),
-                                          MAX_DMA_ADDRESS);
+       return memblock_alloc_from(sz, sizeof(unsigned int), MAX_DMA_ADDRESS);
 }
 
 static void *unw_hdr_alloc(unsigned long sz)
index fceefd92016fae0dd5e44ad63ebfec8813884d3d..70621324db4128f5f805f80a3b58f1360fe2d527 100644 (file)
@@ -202,7 +202,7 @@ void __init allocate_pgdat(unsigned int nid)
        get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-       NODE_DATA(nid) = memblock_alloc_try_nid_nopanic(
+       NODE_DATA(nid) = memblock_alloc_try_nid(
                                sizeof(struct pglist_data),
                                SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
                                MEMBLOCK_ALLOC_ACCESSIBLE, nid);
index 13af08827eefc61e4252f7ffc6cc014f661e1f27..4bf46575568a237678eb4a710d885cd4e033d07f 100644 (file)
@@ -106,22 +106,22 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
        void *ptr;
 
        if (!node_online(node) || !NODE_DATA(node)) {
-               ptr = memblock_alloc_from_nopanic(size, align, goal);
+               ptr = memblock_alloc_from(size, align, goal);
                pr_info("cpu %d has no node %d or node-local memory\n",
                        cpu, node);
                pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
                         cpu, size, __pa(ptr));
        } else {
-               ptr = memblock_alloc_try_nid_nopanic(size, align, goal,
-                                                    MEMBLOCK_ALLOC_ACCESSIBLE,
-                                                    node);
+               ptr = memblock_alloc_try_nid(size, align, goal,
+                                            MEMBLOCK_ALLOC_ACCESSIBLE,
+                                            node);
 
                pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
                         cpu, size, node, __pa(ptr));
        }
        return ptr;
 #else
-       return memblock_alloc_from_nopanic(size, align, goal);
+       return memblock_alloc_from(size, align, goal);
 #endif
 }
 
index 462fde83b515e60c8b83aea1fa57827eccd09f22..8dc0fc0b1382b6cde08e2774449b39b0607a1a64 100644 (file)
@@ -24,14 +24,16 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
 static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 
-static __init void *early_alloc(size_t size, int nid, bool panic)
+static __init void *early_alloc(size_t size, int nid, bool should_panic)
 {
-       if (panic)
-               return memblock_alloc_try_nid(size, size,
-                       __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
-       else
-               return memblock_alloc_try_nid_nopanic(size, size,
+       void *ptr = memblock_alloc_try_nid(size, size,
                        __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+
+       if (!ptr && should_panic)
+               panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
+                     (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));
+
+       return ptr;
 }
 
 static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
index ec4fd253a4e92aa0375d10d8fd70873ed54f9f26..d168c87c7d3085655d1fd627a6b65b792129cb03 100644 (file)
@@ -333,7 +333,7 @@ int __init firmware_map_add_early(u64 start, u64 end, const char *type)
 {
        struct firmware_map_entry *entry;
 
-       entry = memblock_alloc_nopanic(sizeof(struct firmware_map_entry),
+       entry = memblock_alloc(sizeof(struct firmware_map_entry),
                               SMP_CACHE_BYTES);
        if (WARN_ON(!entry))
                return -ENOMEM;
index d2652dccc69975b6733fa597863561dc5bb92dc0..c9cfb100ecdca4e2d75800340cf8b90ee418f2fe 100644 (file)
@@ -94,7 +94,7 @@ static void * __init xdbc_get_page(dma_addr_t *dma_addr)
 {
        void *virt;
 
-       virt = memblock_alloc_nopanic(PAGE_SIZE, PAGE_SIZE);
+       virt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!virt)
                return NULL;
 
index c077227e6d5387f005a277747eaa5485451a454b..db69ad97aa2e56738c66133cbb5d69255b693207 100644 (file)
@@ -335,9 +335,6 @@ static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
 void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
                                 phys_addr_t min_addr, phys_addr_t max_addr,
                                 int nid);
-void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align,
-                                    phys_addr_t min_addr, phys_addr_t max_addr,
-                                    int nid);
 void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align,
                             phys_addr_t min_addr, phys_addr_t max_addr,
                             int nid);
@@ -364,36 +361,12 @@ static inline void * __init memblock_alloc_from(phys_addr_t size,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
 }
 
-static inline void * __init memblock_alloc_nopanic(phys_addr_t size,
-                                                  phys_addr_t align)
-{
-       return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
-                                             MEMBLOCK_ALLOC_ACCESSIBLE,
-                                             NUMA_NO_NODE);
-}
-
 static inline void * __init memblock_alloc_low(phys_addr_t size,
                                               phys_addr_t align)
 {
        return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
                                      ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE);
 }
-static inline void * __init memblock_alloc_low_nopanic(phys_addr_t size,
-                                                      phys_addr_t align)
-{
-       return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT,
-                                             ARCH_LOW_ADDRESS_LIMIT,
-                                             NUMA_NO_NODE);
-}
-
-static inline void * __init memblock_alloc_from_nopanic(phys_addr_t size,
-                                                       phys_addr_t align,
-                                                       phys_addr_t min_addr)
-{
-       return memblock_alloc_try_nid_nopanic(size, align, min_addr,
-                                             MEMBLOCK_ALLOC_ACCESSIBLE,
-                                             NUMA_NO_NODE);
-}
 
 static inline void * __init memblock_alloc_node(phys_addr_t size,
                                                phys_addr_t align, int nid)
@@ -402,14 +375,6 @@ static inline void * __init memblock_alloc_node(phys_addr_t size,
                                      MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 }
 
-static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size,
-                                                       int nid)
-{
-       return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
-                                             MEMBLOCK_LOW_LIMIT,
-                                             MEMBLOCK_ALLOC_ACCESSIBLE, nid);
-}
-
 static inline void __init memblock_free_early(phys_addr_t base,
                                              phys_addr_t size)
 {
index 56ac77a80b1fec2b1a3c51af7fbf995fe67a5186..53012db1e53c6915ee29b0bcd7e543def476232b 100644 (file)
@@ -256,7 +256,7 @@ swiotlb_init(int verbose)
        bytes = io_tlb_nslabs << IO_TLB_SHIFT;
 
        /* Get IO TLB memory from the low pages */
-       vstart = memblock_alloc_low_nopanic(PAGE_ALIGN(bytes), PAGE_SIZE);
+       vstart = memblock_alloc_low(PAGE_ALIGN(bytes), PAGE_SIZE);
        if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
                return;
 
index 8eee85bb26877f407b43d7f6a0a7c806956b0075..6b7654b8001fc4d07dc8bcd4a00eddee31e09891 100644 (file)
@@ -1143,14 +1143,7 @@ void __init setup_log_buf(int early)
        if (!new_log_buf_len)
                return;
 
-       if (early) {
-               new_log_buf =
-                       memblock_alloc(new_log_buf_len, LOG_ALIGN);
-       } else {
-               new_log_buf = memblock_alloc_nopanic(new_log_buf_len,
-                                                         LOG_ALIGN);
-       }
-
+       new_log_buf = memblock_alloc(new_log_buf_len, LOG_ALIGN);
        if (unlikely(!new_log_buf)) {
                pr_err("log_buf_len: %lu bytes not available\n",
                        new_log_buf_len);
index a838c50ca9a866ce9e3347e25296ef4070ebdec8..0ab30d0185bc83ae8510ad2f590406889e820f11 100644 (file)
@@ -1433,41 +1433,6 @@ void * __init memblock_alloc_try_nid_raw(
        return ptr;
 }
 
-/**
- * memblock_alloc_try_nid_nopanic - allocate boot memory block
- * @size: size of memory block to be allocated in bytes
- * @align: alignment of the region and block's size
- * @min_addr: the lower bound of the memory region from where the allocation
- *       is preferred (phys address)
- * @max_addr: the upper bound of the memory region from where the allocation
- *           is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
- *           allocate only from memory limited by memblock.current_limit value
- * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
- *
- * Public function, provides additional debug information (including caller
- * info), if enabled. This function zeroes the allocated memory.
- *
- * Return:
- * Virtual address of allocated memory block on success, NULL on failure.
- */
-void * __init memblock_alloc_try_nid_nopanic(
-                               phys_addr_t size, phys_addr_t align,
-                               phys_addr_t min_addr, phys_addr_t max_addr,
-                               int nid)
-{
-       void *ptr;
-
-       memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
-                    __func__, (u64)size, (u64)align, nid, &min_addr,
-                    &max_addr, (void *)_RET_IP_);
-
-       ptr = memblock_alloc_internal(size, align,
-                                          min_addr, max_addr, nid);
-       if (ptr)
-               memset(ptr, 0, size);
-       return ptr;
-}
-
 /**
  * memblock_alloc_try_nid - allocate boot memory block
  * @size: size of memory block to be allocated in bytes
index 3eb01dedfb5059ac59d8825c6f6eb18c11720a2c..03fcf73d47dabde0987f3542c3c87fca33bf5a5d 100644 (file)
@@ -6445,8 +6445,8 @@ static void __ref setup_usemap(struct pglist_data *pgdat,
        zone->pageblock_flags = NULL;
        if (usemapsize) {
                zone->pageblock_flags =
-                       memblock_alloc_node_nopanic(usemapsize,
-                                                        pgdat->node_id);
+                       memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
+                                           pgdat->node_id);
                if (!zone->pageblock_flags)
                        panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
                              usemapsize, zone->name, pgdat->node_id);
@@ -6679,7 +6679,8 @@ static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
                end = pgdat_end_pfn(pgdat);
                end = ALIGN(end, MAX_ORDER_NR_PAGES);
                size =  (end - start) * sizeof(struct page);
-               map = memblock_alloc_node_nopanic(size, pgdat->node_id);
+               map = memblock_alloc_node(size, SMP_CACHE_BYTES,
+                                         pgdat->node_id);
                if (!map)
                        panic("Failed to allocate %ld bytes for node %d memory map\n",
                              size, pgdat->node_id);
@@ -7959,8 +7960,7 @@ void *__init alloc_large_system_hash(const char *tablename,
                size = bucketsize << log2qty;
                if (flags & HASH_EARLY) {
                        if (flags & HASH_ZERO)
-                               table = memblock_alloc_nopanic(size,
-                                                              SMP_CACHE_BYTES);
+                               table = memblock_alloc(size, SMP_CACHE_BYTES);
                        else
                                table = memblock_alloc_raw(size,
                                                           SMP_CACHE_BYTES);
index ab4244920e0f83ac71134d9a3de2c9b2888faaea..d8f1aca4ad43696aa8f368c335a04c69918b77ef 100644 (file)
@@ -161,7 +161,7 @@ static int __init alloc_node_page_ext(int nid)
 
        table_size = get_entry_size() * nr_pages;
 
-       base = memblock_alloc_try_nid_nopanic(
+       base = memblock_alloc_try_nid(
                        table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
                        MEMBLOCK_ALLOC_ACCESSIBLE, nid);
        if (!base)
index 3f9fb3086a9b1cd2ed6839664eb65738e2a57225..2e6fc8d552c96d58f615be2fd3addadefd01f5c0 100644 (file)
@@ -1905,7 +1905,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
                          __alignof__(ai->groups[0].cpu_map[0]));
        ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
 
-       ptr = memblock_alloc_nopanic(PFN_ALIGN(ai_size), PAGE_SIZE);
+       ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
        if (!ptr)
                return NULL;
        ai = ptr;
@@ -2496,7 +2496,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
        size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
        areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
 
-       areas = memblock_alloc_nopanic(areas_size, SMP_CACHE_BYTES);
+       areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
        if (!areas) {
                rc = -ENOMEM;
                goto out_free;
@@ -2729,8 +2729,7 @@ EXPORT_SYMBOL(__per_cpu_offset);
 static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
                                       size_t align)
 {
-       return  memblock_alloc_from_nopanic(
-                       size, align, __pa(MAX_DMA_ADDRESS));
+       return  memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
 }
 
 static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
@@ -2778,9 +2777,7 @@ void __init setup_per_cpu_areas(void)
        void *fc;
 
        ai = pcpu_alloc_alloc_info(1, 1);
-       fc = memblock_alloc_from_nopanic(unit_size,
-                                             PAGE_SIZE,
-                                             __pa(MAX_DMA_ADDRESS));
+       fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
        if (!ai || !fc)
                panic("Failed to allocate memory for percpu areas.");
        /* kmemleak tracks the percpu allocations separately */
index 7397fb4e78b4e09de3dcd00de9eb752577ff2241..69904aa6165bf13b89a44d6abf84609fea2076ba 100644 (file)
@@ -330,9 +330,7 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
        limit = goal + (1UL << PA_SECTION_SHIFT);
        nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
 again:
-       p = memblock_alloc_try_nid_nopanic(size,
-                                               SMP_CACHE_BYTES, goal, limit,
-                                               nid);
+       p = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
        if (!p && limit) {
                limit = 0;
                goto again;
@@ -386,7 +384,7 @@ static unsigned long * __init
 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
                                         unsigned long size)
 {
-       return memblock_alloc_node_nopanic(size, pgdat->node_id);
+       return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
 }
 
 static void __init check_usemap_section_nr(int nid, unsigned long *usemap)