Back to home page

Quest Cross Reference

 
 

    


Warning: the cross-references for /kernel/lwip/core/mem.c need to be fixed.

0001 /**
0002  * @file
0003  * Dynamic memory manager
0004  *
0005  * This is a lightweight replacement for the standard C library malloc().
0006  *
0007  * If you want to use the standard C library malloc() instead, define
0008  * MEM_LIBC_MALLOC to 1 in your lwipopts.h
0009  *
0010  * To let mem_malloc() use pools (prevents fragmentation and is much faster than
0011  * a heap but might waste some memory), define MEM_USE_POOLS to 1, define
0012  * MEM_USE_CUSTOM_POOLS to 1 and create a file "lwippools.h" that includes a list
0013  * of pools like this (more pools can be added between _START and _END):
0014  *
0015  * Define three pools with sizes 256, 512, and 1512 bytes
0016  * LWIP_MALLOC_MEMPOOL_START
0017  * LWIP_MALLOC_MEMPOOL(20, 256)
0018  * LWIP_MALLOC_MEMPOOL(10, 512)
0019  * LWIP_MALLOC_MEMPOOL(5, 1512)
0020  * LWIP_MALLOC_MEMPOOL_END
0021  */
0022 
0023 /*
0024  * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
0025  * All rights reserved.
0026  *
0027  * Redistribution and use in source and binary forms, with or without modification,
0028  * are permitted provided that the following conditions are met:
0029  *
0030  * 1. Redistributions of source code must retain the above copyright notice,
0031  *    this list of conditions and the following disclaimer.
0032  * 2. Redistributions in binary form must reproduce the above copyright notice,
0033  *    this list of conditions and the following disclaimer in the documentation
0034  *    and/or other materials provided with the distribution.
0035  * 3. The name of the author may not be used to endorse or promote products
0036  *    derived from this software without specific prior written permission.
0037  *
0038  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
0039  * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
0040  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
0041  * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
0042  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
0043  * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
0044  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
0045  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
0046  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
0047  * OF SUCH DAMAGE.
0048  *
0049  * This file is part of the lwIP TCP/IP stack.
0050  *
0051  * Author: Adam Dunkels <adam@sics.se>
0052  *         Simon Goldschmidt
0053  *
0054  */
0055 
0056 #include "lwip/opt.h"
0057 
0058 #if !MEM_LIBC_MALLOC /* don't build if not configured for use in lwipopts.h */
0059 
0060 #include "lwip/def.h"
0061 #include "lwip/mem.h"
0062 #include "lwip/sys.h"
0063 #include "lwip/stats.h"
0064 
0065 #include <string.h>
0066 
0067 #if MEM_USE_POOLS
0068 /* lwIP head implemented with different sized pools */
0069 
0070 /**
0071  * Allocate memory: determine the smallest pool that is big enough
0072  * to contain an element of 'size' and get an element from that pool.
0073  *
0074  * @param size the size in bytes of the memory needed
0075  * @return a pointer to the allocated memory or NULL if the pool is empty
0076  */
0077 void *
0078 mem_malloc(mem_size_t size)
0079 {
0080   struct memp_malloc_helper *element;
0081   memp_t poolnr;
0082   mem_size_t required_size = size + sizeof(struct memp_malloc_helper);
0083 
0084   for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr++) {
0085 #if MEM_USE_POOLS_TRY_BIGGER_POOL
0086 again:
0087 #endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
0088     /* is this pool big enough to hold an element of the required size
0089        plus a struct memp_malloc_helper that saves the pool this element came from? */
0090     if (required_size <= memp_sizes[poolnr]) {
0091       break;
0092     }
0093   }
0094   if (poolnr > MEMP_POOL_LAST) {
0095     LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
0096     return NULL;
0097   }
0098   element = (struct memp_malloc_helper*)memp_malloc(poolnr);
0099   if (element == NULL) {
0100     /* No need to DEBUGF or ASSERT: This error is already
0101        taken care of in memp.c */
0102 #if MEM_USE_POOLS_TRY_BIGGER_POOL
0103     /** Try a bigger pool if this one is empty! */
0104     if (poolnr < MEMP_POOL_LAST) {
0105       poolnr++;
0106       goto again;
0107     }
0108 #endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
0109     return NULL;
0110   }
0111 
0112   /* save the pool number this element came from */
0113   element->poolnr = poolnr;
0114   /* and return a pointer to the memory directly after the struct memp_malloc_helper */
0115   element++;
0116 
0117   return element;
0118 }
0119 
0120 /**
0121  * Free memory previously allocated by mem_malloc. Loads the pool number
0122  * and calls memp_free with that pool number to put the element back into
0123  * its pool
0124  *
0125  * @param rmem the memory element to free
0126  */
0127 void
0128 mem_free(void *rmem)
0129 {
0130   struct memp_malloc_helper *hmem = (struct memp_malloc_helper*)rmem;
0131 
0132   LWIP_ASSERT("rmem != NULL", (rmem != NULL));
0133   LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
0134 
0135   /* get the original struct memp_malloc_helper */
0136   hmem--;
0137 
0138   LWIP_ASSERT("hmem != NULL", (hmem != NULL));
0139   LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
0140   LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));
0141 
0142   /* and put it in the pool we saved earlier */
0143   memp_free(hmem->poolnr, hmem);
0144 }
0145 
0146 #else /* MEM_USE_POOLS */
0147 /* lwIP replacement for your libc malloc() */
0148 
/**
 * The heap is made up as a list of structs of this type.
 * This does not have to be aligned since for getting its size,
 * we only use the macro SIZEOF_STRUCT_MEM, which automatically aligns.
 *
 * 'next' and 'prev' are byte offsets into the heap ('ram'), not pointers,
 * so the list survives the heap base being relocated/aligned.
 */
struct mem {
  /** index (-> ram[next]) of the next struct */
  mem_size_t next;
  /** index (-> ram[prev]) of the previous struct */
  mem_size_t prev;
  /** 1: this area is used; 0: this area is unused */
  u8_t used;
};
0162 
/** All allocated blocks will be MIN_SIZE bytes big, at least!
 * MIN_SIZE can be overridden to suit your needs. Smaller values save space,
 * larger values could prevent too small blocks to fragment the RAM too much. */
#ifndef MIN_SIZE
#define MIN_SIZE             12
#endif /* MIN_SIZE */
/* some alignment macros: we define them here for better source code layout */
#define MIN_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MIN_SIZE)
#define SIZEOF_STRUCT_MEM    LWIP_MEM_ALIGN_SIZE(sizeof(struct mem))
#define MEM_SIZE_ALIGNED     LWIP_MEM_ALIGN_SIZE(MEM_SIZE)

/** the heap. we need one struct mem at the end and some room for alignment */
static u8_t ram_heap[MEM_SIZE_ALIGNED + (2*SIZEOF_STRUCT_MEM) + MEM_ALIGNMENT];
/** pointer to the heap (ram_heap): for alignment, ram is now a pointer instead of an array */
static u8_t *ram;
/** the last entry, always unused! */
static struct mem *ram_end;
/** pointer to the lowest free block, this is used for faster search */
static struct mem *lfree;

/** concurrent access protection (binary semaphore created in mem_init()) */
static sys_sem_t mem_sem;

#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT

/** set to 1 by mem_free()/mem_realloc() so that a concurrently running
 * mem_malloc() knows the heap changed under it and restarts its scan */
static volatile u8_t mem_free_count;

/* Allow mem_free from other (e.g. interrupt) context */
#define LWIP_MEM_FREE_DECL_PROTECT()  SYS_ARCH_DECL_PROTECT(lev_free)
#define LWIP_MEM_FREE_PROTECT()       SYS_ARCH_PROTECT(lev_free)
#define LWIP_MEM_FREE_UNPROTECT()     SYS_ARCH_UNPROTECT(lev_free)
#define LWIP_MEM_ALLOC_DECL_PROTECT() SYS_ARCH_DECL_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_PROTECT()      SYS_ARCH_PROTECT(lev_alloc)
#define LWIP_MEM_ALLOC_UNPROTECT()    SYS_ARCH_UNPROTECT(lev_alloc)

#else /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

/* Protect the heap only by using a semaphore */
#define LWIP_MEM_FREE_DECL_PROTECT()
#define LWIP_MEM_FREE_PROTECT()    sys_arch_sem_wait(mem_sem, 0)
#define LWIP_MEM_FREE_UNPROTECT()  sys_sem_signal(mem_sem)
/* mem_malloc is protected using semaphore AND LWIP_MEM_ALLOC_PROTECT */
#define LWIP_MEM_ALLOC_DECL_PROTECT()
#define LWIP_MEM_ALLOC_PROTECT()
#define LWIP_MEM_ALLOC_UNPROTECT()

#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
0210 
0211 
/**
 * "Plug holes" by combining adjacent empty struct mems.
 * After this function is through, there should not exist
 * one empty struct mem pointing to another empty struct mem.
 *
 * @param mem this points to a struct mem which just has been freed
 * @internal this function is only called by mem_free() and mem_realloc()
 *
 * This assumes access to the heap is protected by the calling function
 * already.
 */
static void
plug_holes(struct mem *mem)
{
  struct mem *nmem;
  struct mem *pmem;

  LWIP_ASSERT("plug_holes: mem >= ram", (u8_t *)mem >= ram);
  LWIP_ASSERT("plug_holes: mem < ram_end", (u8_t *)mem < (u8_t *)ram_end);
  LWIP_ASSERT("plug_holes: mem->used == 0", mem->used == 0);

  /* plug hole forward */
  LWIP_ASSERT("plug_holes: mem->next <= MEM_SIZE_ALIGNED", mem->next <= MEM_SIZE_ALIGNED);

  nmem = (struct mem *)&ram[mem->next];
  if (mem != nmem && nmem->used == 0 && (u8_t *)nmem != (u8_t *)ram_end) {
    /* if mem->next is unused and not end of ram, combine mem and mem->next */
    if (lfree == nmem) {
      /* the absorbed block was the lowest free block; 'mem' takes its place */
      lfree = mem;
    }
    mem->next = nmem->next;
    /* relink the successor's back-offset to the enlarged block */
    ((struct mem *)&ram[nmem->next])->prev = (u8_t *)mem - ram;
  }

  /* plug hole backward */
  pmem = (struct mem *)&ram[mem->prev];
  if (pmem != mem && pmem->used == 0) {
    /* if mem->prev is unused, combine mem and mem->prev */
    if (lfree == mem) {
      lfree = pmem;
    }
    pmem->next = mem->next;
    /* relink the successor's back-offset to the merged predecessor */
    ((struct mem *)&ram[mem->next])->prev = (u8_t *)pmem - ram;
  }
}
0257 
/**
 * Initialize the heap: set up the start block, the end sentinel and the
 * lowest-free pointer, and create the semaphore protecting the heap.
 * (Note: despite older comments, the heap memory itself is not zeroed here.)
 */
void
mem_init(void)
{
  struct mem *mem;

  LWIP_ASSERT("Sanity check alignment",
    (SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);

  /* align the heap */
  ram = LWIP_MEM_ALIGN(ram_heap);
  /* initialize the start of the heap: one big free block spanning it all */
  mem = (struct mem *)ram;
  mem->next = MEM_SIZE_ALIGNED;
  mem->prev = 0;
  mem->used = 0;
  /* initialize the end of the heap: a sentinel always marked 'used' so
     allocation/coalescing never walks past the heap */
  ram_end = (struct mem *)&ram[MEM_SIZE_ALIGNED];
  ram_end->used = 1;
  ram_end->next = MEM_SIZE_ALIGNED;
  ram_end->prev = MEM_SIZE_ALIGNED;

  /* binary semaphore serializing heap access (see LWIP_MEM_FREE_PROTECT) */
  mem_sem = sys_sem_new(1);

  /* initialize the lowest-free pointer to the start of the heap */
  lfree = (struct mem *)ram;

  MEM_STATS_AVAIL(avail, MEM_SIZE_ALIGNED);
}
0289 
/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *             call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();

  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);

  LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  /* runtime range check mirroring the assert above, so builds with
     assertions disabled still reject pointers outside the heap */
  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return;
  }
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* Get the corresponding struct mem (the header sits just before the data) ... */
  mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... which has to be in a used state ... */
  LWIP_ASSERT("mem_free: mem->used", mem->used);
  /* ... and is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

  /* stats: the freed region spans from this header up to offset mem->next */
  MEM_STATS_DEC_USED(used, mem->next - ((u8_t *)mem - ram));

  /* finally, see if prev or next are free also */
  plug_holes(mem);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* tell a concurrently scanning mem_malloc() that the heap changed */
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}
0343 
/**
 * In contrast to its name, mem_realloc can only shrink memory, not expand it.
 * Since the only use (for now) is in pbuf_realloc (which also can only shrink),
 * this shouldn't be a problem!
 *
 * @param rmem pointer to memory allocated by mem_malloc that is to be shrunk
 * @param newsize required size after shrinking (needs to be smaller than or
 *                equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment
 *         or NULL if newsize is > old size, in which case rmem is NOT touched
 *         or freed!
 */
void *
mem_realloc(void *rmem, mem_size_t newsize)
{
  mem_size_t size;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
  /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
  LWIP_MEM_FREE_DECL_PROTECT();

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = LWIP_MEM_ALIGN_SIZE(newsize);

  if(newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }

  if (newsize > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  LWIP_ASSERT("mem_realloc: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
   (u8_t *)rmem < (u8_t *)ram_end);

  /* runtime range check mirroring the assert above (active even when
     assertions are compiled out) */
  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_realloc: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... and its offset pointer */
  ptr = (u8_t *)mem - ram;

  /* current user-data size of this block (region minus its header) */
  size = mem->next - ptr - SIZEOF_STRUCT_MEM;
  LWIP_ASSERT("mem_realloc can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();

  MEM_STATS_DEC_USED(used, (size - newsize));

  mem2 = (struct mem *)&ram[mem->next];
  if(mem2->used == 0) {
    /* The next struct is unused, we can simply move it back a little */
    mem_size_t next;
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrunk mem */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    if (lfree == mem2) {
      /* lfree pointed at the block we are moving; follow it */
      lfree = (struct mem *)&ram[ptr2];
    }
    mem2 = (struct mem *)&ram[ptr2];
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)&ram[mem2->next])->prev = ptr2;
    }
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     *       region that couldn't hold data, but when mem->next gets freed,
     *       the 2 regions would be combined, resulting in more free memory */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    mem2 = (struct mem *)&ram[ptr2];
    if (mem2 < lfree) {
      /* the newly created free block is now the lowest */
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)&ram[mem2->next])->prev = ptr2;
    }
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but size between mem and mem2 is not big enough
    to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* tell a concurrently scanning mem_malloc() that the heap changed */
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
  return rmem;
}
0471 
/**
 * Adam's mem_malloc() plus solution for bug #17922
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size)
{
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  u8_t local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_ALLOC_DECL_PROTECT();

  if (size == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  size = LWIP_MEM_ALIGN_SIZE(size);

  if(size < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    size = MIN_SIZE_ALIGNED;
  }

  if (size > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  /* protect the heap from concurrent access */
  sys_arch_sem_wait(mem_sem, 0);
  LWIP_MEM_ALLOC_PROTECT();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* run as long as a mem_free disturbed mem_malloc */
  do {
    local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

    /* Scan through the heap searching for a free block that is big enough,
     * beginning with the lowest free block.
     */
    for (ptr = (u8_t *)lfree - ram; ptr < MEM_SIZE_ALIGNED - size;
         ptr = ((struct mem *)&ram[ptr])->next) {
      mem = (struct mem *)&ram[ptr];
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
      mem_free_count = 0;
      LWIP_MEM_ALLOC_UNPROTECT();
      /* allow mem_free to run */
      LWIP_MEM_ALLOC_PROTECT();
      if (mem_free_count != 0) {
        /* remember that the scan was disturbed: retry the whole loop later */
        local_mem_free_count = mem_free_count;
      }
      mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */

      if ((!mem->used) &&
          (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
        /* mem is not used and at least perfect fit is possible:
         * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

        if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
          /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
           * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
           * -> split large block, create empty remainder,
           * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
           * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
           * struct mem would fit in but no data between mem2 and mem2->next
           * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
           *       region that couldn't hold data, but when mem->next gets freed,
           *       the 2 regions would be combined, resulting in more free memory
           */
          ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
          /* create mem2 struct */
          mem2 = (struct mem *)&ram[ptr2];
          mem2->used = 0;
          mem2->next = mem->next;
          mem2->prev = ptr;
          /* and insert it between mem and mem->next */
          mem->next = ptr2;
          mem->used = 1;

          if (mem2->next != MEM_SIZE_ALIGNED) {
            ((struct mem *)&ram[mem2->next])->prev = ptr2;
          }
          MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
        } else {
          /* (a mem2 struct does not fit into the user data space of mem and mem->next will always
           * be used at this point: if not we have 2 unused structs in a row, plug_holes should have
           * taken care of this).
           * -> near fit or exact fit: do not split, no mem2 creation
           * also can't move mem->next directly behind mem, since mem->next
           * will always be used at this point!
           */
          mem->used = 1;
          MEM_STATS_INC_USED(used, mem->next - ((u8_t *)mem - ram));
        }

        if (mem == lfree) {
          /* Find next free block after mem and update lowest free pointer */
          while (lfree->used && lfree != ram_end) {
            LWIP_MEM_ALLOC_UNPROTECT();
            /* prevent high interrupt latency... */
            LWIP_MEM_ALLOC_PROTECT();
            lfree = (struct mem *)&ram[lfree->next];
          }
          LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
        }
        LWIP_MEM_ALLOC_UNPROTECT();
        sys_sem_signal(mem_sem);
        LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
         (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
        LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
         ((mem_ptr_t)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
        LWIP_ASSERT("mem_malloc: sanity check alignment",
          (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);

        /* return a pointer to the user data area just behind the header */
        return (u8_t *)mem + SIZEOF_STRUCT_MEM;
      }
    }
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
    /* if we got interrupted by a mem_free, try again */
  } while(local_mem_free_count != 0);
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
  MEM_STATS_INC(err);
  LWIP_MEM_ALLOC_UNPROTECT();
  sys_sem_signal(mem_sem);
  return NULL;
}
0608 
0609 #endif /* MEM_USE_POOLS */
0610 /**
0611  * Contiguously allocates enough space for count objects that are size bytes
0612  * of memory each and returns a pointer to the allocated memory.
0613  *
0614  * The allocated memory is filled with bytes of value zero.
0615  *
0616  * @param count number of objects to allocate
0617  * @param size size of the objects to allocate
0618  * @return pointer to allocated memory / NULL pointer if there is an error
0619  */
0620 void *mem_calloc(mem_size_t count, mem_size_t size)
0621 {
0622   void *p;
0623 
0624   /* allocate 'count' objects of size 'size' */
0625   p = mem_malloc(count * size);
0626   if (p) {
0627     /* zero the memory */
0628     memset(p, 0, count * size);
0629   }
0630   return p;
0631 }
0632 
0633 #endif /* !MEM_LIBC_MALLOC */