author    Peter D'Hoye <peter.dhoye@gmail.com>    2009-07-21 14:01:51 +0000
committer Peter D'Hoye <peter.dhoye@gmail.com>    2009-07-21 14:01:51 +0000
commit    1a4b0cadd66849f83eecd6be2c59df234a4eabc8 (patch)
tree      078d6bf9c840f98ec6ab56fa746163ee85b54b98
parent    c8195d80bf750d13ec37139a24bec915144d3109 (diff)
By Wincent Balin: add a new (better) memory allocation library for pdbox. Next commit will remove dbestfit and make pdbox use this one.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@21993 a1c6a512-1295-4272-9138-f99709370657
-rw-r--r--  apps/plugins/pdbox/TLSF-2.4.4/COPYING         23
-rw-r--r--  apps/plugins/pdbox/TLSF-2.4.4/Changelog      119
-rw-r--r--  apps/plugins/pdbox/TLSF-2.4.4/README         163
-rw-r--r--  apps/plugins/pdbox/TLSF-2.4.4/TODO             9
-rw-r--r--  apps/plugins/pdbox/TLSF-2.4.4/src/target.h    12
-rw-r--r--  apps/plugins/pdbox/TLSF-2.4.4/src/tlsf.c     995
-rw-r--r--  apps/plugins/pdbox/TLSF-2.4.4/src/tlsf.h      39
7 files changed, 1360 insertions, 0 deletions
diff --git a/apps/plugins/pdbox/TLSF-2.4.4/COPYING b/apps/plugins/pdbox/TLSF-2.4.4/COPYING
new file mode 100644
index 0000000000..78fdbdc061
--- /dev/null
+++ b/apps/plugins/pdbox/TLSF-2.4.4/COPYING
@@ -0,0 +1,23 @@
+ LICENSE INFORMATION
+
+TLSF is released as LGPL and GPL. A copy of both licences can be found in this
+directory. For the GPL licence, the following exception applies.
+
+
+TLSF is free software; you can redistribute it and/or modify it under terms of
+the GNU General Public License as published by the Free Software Foundation;
+either version 2, or (at your option) any later version. TLSF is distributed
+in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
+implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+the GNU General Public License for more details. You should have received a
+copy of the GNU General Public License along with TLSF; see file COPYING. If
+not, write to the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+USA.
+
+As a special exception, including TLSF header files in a file, or linking with
+other object files to produce an executable application, is merely considered
+normal use of the library, and does *not* fall under the heading of "derived
+work". Therefore, it does not by itself cause the resulting executable
+application to be covered by the GNU General Public License. This exception
+does not, however, invalidate any other reasons why the executable file might
+be covered by the GNU General Public License.
diff --git a/apps/plugins/pdbox/TLSF-2.4.4/Changelog b/apps/plugins/pdbox/TLSF-2.4.4/Changelog
new file mode 100644
index 0000000000..0cdb34a8ab
--- /dev/null
+++ b/apps/plugins/pdbox/TLSF-2.4.4/Changelog
@@ -0,0 +1,119 @@
+ Version History
+ ---------------
+ -v2.4.4 (October 13 2008)
+ * Corrected minor syntactic bug on statistic gathering code.
+ Reported by Tim Cussins and P. Mantegazza.
+
+ -v2.4.3 (July 30 2008)
+ * Minor fixes to compile with the greenhills compiler.
+ Reported by "Kaya, Sinan SEA" <sinan.kaya@siemens.com>
+ * Small change in the license in order to include TLSF in the RTEMS
+ project.
+
+ -v2.4.2 (May 16 2008) (Herman ten Brugge)
+ * Memory usage statistics added again, with cleaner and more compacted
+ code.
+
+ -v2.4.1 (April 30 2008)
+ * Fixed a bug in the tlsf_realloc function: init the pool automatically
+ on the first call.
+ Reported by: Alejandro Mery <amery@geeks.cl>
+
+ -v2.4 (Feb 19 2008)
+ * "rtl_*" functions renamed to "tlsf_*".
+ * Added the add_new_area function to insert new memory areas to an
+ existing memory pool.
+ * A single TLSF pool can manage non-contiguous memory areas.
+ * Support for mmap and sbrk added.
+ * The init_memory_pool is no longer needed when used on a system
+ with mmap or sbrk.
+ * Removed the get_used_size counting. The same functionality can be
+ implemented outside the TLSF code.
+
+ -v2.3.2 (Sep 27 2007)
+ * Minor cosmetic code improvements.
+
+ -v2.3.1 (Jul 30 2007)
+ * Fixed some minor bugs in the version 2.3. Herman ten Brugge
+ <hermantenbrugge@home.nl>
+
+ -v2.3 (Jul 28 2007) Released a new version with all the contributions
+ received from Herman ten Brugge <hermantenbrugge@home.nl>
+ (This is his summary of changes in the TLSF's code):
+ * Add 64 bit support. It now runs on x86_64 and solaris64.
+ * I also tested this on vxworks/32 and solaris/32 and i386/32
+ processors.
+ * Remove assembly code. I could not measure any performance difference
+ on my core2 processor. This also makes the code more portable.
+ * Moved defines/typedefs from tlsf.h to tlsf.c
+ * Changed MIN_BLOCK_SIZE to sizeof (free_ptr_t) and BHDR_OVERHEAD to
+ (sizeof (bhdr_t) - MIN_BLOCK_SIZE). This does not change the fact
+ that the minimum size is still sizeof (bhdr_t).
+ * Changed all C++ comment style to C style. (// -> /* ... */)
+ * Used ls_bit instead of ffs and ms_bit instead of fls. I did this to
+ avoid confusion with the standard ffs function which returns
+ different values.
+ * Created set_bit/clear_bit functions because they are not present
+ on x86_64.
+ * Added locking support + extra file target.h to show how to use it.
+ * Added get_used_size function
+ * Added rtl_realloc and rtl_calloc function
+ * Implemented realloc clever support.
+ * Added some test code in the example directory.
+
+ -- Thank you very much for your help Herman!
+
+ -v2.2.1 (Oct 23 2006)
+ * Support for ARMv5 implemented by Adam Scislowicz
+ <proteuskor@gmail.com>. Thank you for your contribution.
+
+ - v2.2.0 (Jun 30 2006) Miguel Masmano & Ismael Ripoll.
+
+ * Blocks smaller than 128 bytes are stored on a single
+ segregated list. The already existing bits maps and data
+ structures are used.
+ * Minor code speed-up improvements.
+ * Worst case response time both on malloc and free improved.
+ * External fragmentation also improved!
+ * Segregated lists are AGAIN sorted by LIFO order. Version
+ 2.1b was proven to be no better than 2.1.
+
+ - v2.1b: The allocation policy has always been a LIFO Good-Fit, that
+ is, between several free blocks in the same range, TLSF will
+ always allocate the most recently released. In this version of
+ TLSF, we have implemented a FIFO Good-Fit. However,
+ fragmentation doesn't seem to be altered, so is it worth it?
+
+ - v2.1: Realloc and calloc included again in TLSF 2.0.
+
+ - v2.0: In this version, TLSF has been programmed from scratch.
+ Now the allocator is provided as a single file. Realloc and
+ calloc are no longer implemented.
+
+
+ - v1.4: Created the section "Version History". Studied the real
+ behaviour of actual applications (regular applications tend
+ to require small memory blocks, less than 16 bytes, whereas
+ TLSF is optimised to be used with blocks larger than 16
+ bytes): added special lists to deal with blocks smaller than
+ 16 bytes.
+
+
+ - v1.3: Change of concept: now the main TLSF structure is created
+ at the beginning of the block instead of being a
+ static structure, allowing multiple TLSF instances to work
+ at the same time. Now, TLSF uses specific processor
+ instructions to deal with bitmaps. TLSF sanity functions
+ added to find TLSF overflows. The TLSF code will not be
+ RTLinux-oriented any more.
+
+ - v1.1 ... v1.2: Many little bugs fixed, code cleaned and split
+ into several files because of cosmetic requirements.
+ Starting from TLSF v1.1, MaRTE OS
+ (http://marte.unican.es) uses the TLSF allocator
+ as its default memory allocator.
+
+ - v0.1 ... v1.0: First implementations were created for testing and
+ research purposes. Basically TLSF is implemented to
+ be used by RTLinux-GPL (www.rtlinux-gpl.org), so
+ it is RTLinux-oriented.
diff --git a/apps/plugins/pdbox/TLSF-2.4.4/README b/apps/plugins/pdbox/TLSF-2.4.4/README
new file mode 100644
index 0000000000..d755905b16
--- /dev/null
+++ b/apps/plugins/pdbox/TLSF-2.4.4/README
@@ -0,0 +1,163 @@
+
+TLSF Memory Storage allocator implementation.
+Version 2.4 Feb 2008
+
+Authors: Miguel Masmano, Ismael Ripoll & Alfons Crespo.
+Copyright UPVLC, OCERA Consortium.
+
+TLSF is released under the GPL/LGPL licences. The exact terms of the licences
+are described in the COPYING file.
+
+This component provides basic memory allocation functions:
+malloc and free, as defined in the standard "C" library.
+
+This allocator was designed to provide real-time performance, that is:
+1.- Bounded-time malloc and free.
+2.- Fast response time.
+3.- Efficient memory management, that is, low fragmentation.
+
+
+The worst-case response time for both malloc and free is O(1).
+
+
+
+How to use it:
+
+This code is prepared to be used as stand-alone code that can be
+linked with a regular application, or it can be compiled as a Linux
+module (which requires the BigPhysicalArea patch). Initially the
+module was designed to work jointly with RTLinux-GPL, but it can be
+used as a stand-alone Linux module.
+
+When compiled as part of a regular Linux process, the API is:
+
+Initialisation and destruction functions
+----------------------------------------
+
+init_memory_pool must be called before any request or release call (unless mmap/sbrk support is enabled, see below):
+
+- size_t init_memory_pool(size_t, void *);
+- void destroy_memory_pool(void *);
+
+Request and release functions
+-----------------------------
+
+As listed below, there are two functions for each traditional memory
+allocation function (malloc, free, realloc, and calloc). One with the
+prefix "tlsf_" and the other with the suffix "_ex".
+
+The versions with the prefix "tlsf_" provide the expected behaviour,
+that is, allocating/releasing memory from the default memory pool. The
+default memory pool is the last pool initialised by the
+init_memory_pool function.
+
+On the other hand, the functions with the suffix "_ex" enable the use of several memory pools.
+
+- void *tlsf_malloc(size_t);
+- void *malloc_ex(size_t, void *);
+
+- void tlsf_free(void *ptr);
+- void free_ex(void *, void *);
+
+- void *tlsf_realloc(void *ptr, size_t size);
+- void *realloc_ex(void *, size_t, void *);
+
+- void *tlsf_calloc(size_t nelem, size_t elem_size);
+- void *calloc_ex(size_t, size_t, void *);
+
+EXAMPLE OF USE:
+
+char memory_pool[1024*1024];
+
+{
+ ...
+
+ init_memory_pool(1024*1024, memory_pool);
+
+ ...
+
+ ptr1=malloc_ex(100, memory_pool);
+ ptr2=tlsf_malloc(100); // This function will use memory_pool
+
+ ...
+
+ tlsf_free(ptr2);
+ free_ex(ptr1, memory_pool);
+}
+
+Growing the memory pool
+-----------------------
+
+Starting from version 2.4, the function add_new_area adds a new
+memory area to an existing memory pool.
+
+- size_t add_new_area(void *, size_t, void *);
+
+This feature is pretty useful when an existing memory pool is running
+low and we want to add more free memory to it.
+EXAMPLE OF USE:
+
+char memory_pool[1024*1024];
+char memory_pool2[1024*1024];
+
+{
+ ...
+
+ init_memory_pool(1024*1024, memory_pool);
+
+ ...
+
+ ptr[0]=malloc_ex(1024*256, memory_pool);
+ ptr[1]=malloc_ex(1024*512, memory_pool);
+ add_new_area(memory_pool2, 1024*1024, memory_pool);
+ // Now we have an extra free memory area of 1 MB,
+ // so the next malloc should not fail
+ ptr[2]=malloc_ex(1024*512, memory_pool);
+
+ ...
+
+}
+
+
+SBRK and MMAP support
+---------------------
+
+Version 2.4 can use the functions sbrk and mmap to _automatically_
+grow the memory pool before running out of memory.
+
+So, when this feature is enabled, a malloc operation will only fail
+with an "out-of-memory" error when the operating system itself runs
+out of memory.
+
+To enable this support, compile tlsf.c with the flags -DUSE_MMAP=1 and/or
+-DUSE_SBRK=1, depending on whether you want to use "mmap", "sbrk", or both.
+
+** By default (default Makefile) this feature is enabled.
+
+EXAMPLE OF USE:
+
+gcc -c tlsf.c -o tlsf.o -O2 -Wall -DUSE_MMAP=1 -DUSE_SBRK=1
+
+---
+
+If the sbrk/mmap support is enabled and we are _only_ going to use one
+memory pool, it is not necessary to call init_memory_pool.
+
+EXAMPLE OF USE (with MMAP/SBRK support enabled):
+
+{
+ ...
+
+ ptr2=tlsf_malloc(100); // The pool is created automatically via mmap/sbrk
+
+ ...
+
+ tlsf_free(ptr2);
+}
+
+
+
+
+This work has been supported by the following projects:
+EUROPEAN: IST-2001-35102(OCERA) http://www.ocera.org.
+SPANISH: TIN2005-08665-C3-03
diff --git a/apps/plugins/pdbox/TLSF-2.4.4/TODO b/apps/plugins/pdbox/TLSF-2.4.4/TODO
new file mode 100644
index 0000000000..d7c07b8421
--- /dev/null
+++ b/apps/plugins/pdbox/TLSF-2.4.4/TODO
@@ -0,0 +1,9 @@
+To do list
+==========
+
+* Add mmap/sbrk support (DONE - V2.4).
+
+* TLSF rounds up the request size to the head of a free list.
+ This has been shown to be a good policy for small blocks (<2048 bytes),
+ but for larger blocks it may cause excessive fragmentation (see the
+ rounding sketch below). A deeper analysis should be done.
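The rounding sketch referred to in the last TODO item, worked through from the
MAPPING_SEARCH code in src/tlsf.c below (the ms_bit here is a naive stand-in
for the table-based version in tlsf.c, and the request sizes are illustrative
only):

#include <stdio.h>

#define MAX_LOG2_SLI 5  /* same value as in src/tlsf.c */

/* Index of the most significant set bit (simplified ms_bit stand-in). */
static int ms_bit(unsigned int x)
{
    int r = -1;
    while (x) {
        r++;
        x >>= 1;
    }
    return r;
}

/* Round a request up to the head of its free list, as MAPPING_SEARCH
 * does for requests of SMALL_BLOCK (128) bytes or more. */
static size_t round_to_list_head(size_t r)
{
    size_t t = ((size_t) 1 << (ms_bit((unsigned int) r) - MAX_LOG2_SLI)) - 1;
    return (r + t) & ~t;
}

int main(void)
{
    /* 300 bytes is padded to 304 (8-byte list granularity), but 70000 bytes
     * is padded to 71680 (2048-byte granularity): the large-block
     * fragmentation concern raised in the TODO item above. */
    printf("%u -> %u\n", 300u, (unsigned) round_to_list_head(300));
    printf("%u -> %u\n", 70000u, (unsigned) round_to_list_head(70000));
    return 0;
}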
diff --git a/apps/plugins/pdbox/TLSF-2.4.4/src/target.h b/apps/plugins/pdbox/TLSF-2.4.4/src/target.h
new file mode 100644
index 0000000000..1afd62aef2
--- /dev/null
+++ b/apps/plugins/pdbox/TLSF-2.4.4/src/target.h
@@ -0,0 +1,12 @@
+#ifndef _TARGET_H_
+#define _TARGET_H_
+
+#include <pthread.h>
+
+#define TLSF_MLOCK_T pthread_mutex_t
+#define TLSF_CREATE_LOCK(l) pthread_mutex_init (l, NULL)
+#define TLSF_DESTROY_LOCK(l) pthread_mutex_destroy(l)
+#define TLSF_ACQUIRE_LOCK(l) pthread_mutex_lock(l)
+#define TLSF_RELEASE_LOCK(l) pthread_mutex_unlock(l)
+
+#endif
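target.h is only pulled in when tlsf.c is compiled with -DTLSF_USE_LOCKS=1;
otherwise the lock macros in tlsf.c collapse to no-ops. As a sketch of how the
same five macros might be mapped onto a platform without pthreads, here is a
hypothetical variant using C11 <threads.h> (an illustration only, not something
this commit provides or requires):

/* Hypothetical target.h variant for a platform with C11 threads instead of
 * pthreads (assumption for illustration; the committed file above uses
 * pthreads). */
#ifndef _TARGET_H_
#define _TARGET_H_

#include <threads.h>

#define TLSF_MLOCK_T            mtx_t
#define TLSF_CREATE_LOCK(l)     mtx_init(l, mtx_plain)
#define TLSF_DESTROY_LOCK(l)    mtx_destroy(l)
#define TLSF_ACQUIRE_LOCK(l)    mtx_lock(l)
#define TLSF_RELEASE_LOCK(l)    mtx_unlock(l)

#endif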
diff --git a/apps/plugins/pdbox/TLSF-2.4.4/src/tlsf.c b/apps/plugins/pdbox/TLSF-2.4.4/src/tlsf.c
new file mode 100644
index 0000000000..47b461bac5
--- /dev/null
+++ b/apps/plugins/pdbox/TLSF-2.4.4/src/tlsf.c
@@ -0,0 +1,995 @@
+/*
+ * Two Levels Segregate Fit memory allocator (TLSF)
+ * Version 2.4.4
+ *
+ * Written by Miguel Masmano Tello <mimastel@doctor.upv.es>
+ *
+ * Thanks to Ismael Ripoll for his suggestions and reviews
+ *
+ * Copyright (C) 2008, 2007, 2006, 2005, 2004
+ *
+ * This code is released using a dual license strategy: GPL/LGPL
+ * You can choose the licence that better fits your requirements.
+ *
+ * Released under the terms of the GNU General Public License Version 2.0
+ * Released under the terms of the GNU Lesser General Public License Version 2.1
+ *
+ */
+
+/*
+ * Code contributions:
+ *
+ * (Jul 28 2007) Herman ten Brugge <hermantenbrugge@home.nl>:
+ *
+ * - Add 64 bit support. It now runs on x86_64 and solaris64.
+ * - I also tested this on vxworks/32 and solaris/32 and i386/32 processors.
+ * - Remove assembly code. I could not measure any performance difference
+ * on my core2 processor. This also makes the code more portable.
+ * - Moved defines/typedefs from tlsf.h to tlsf.c
+ * - Changed MIN_BLOCK_SIZE to sizeof (free_ptr_t) and BHDR_OVERHEAD to
+ * (sizeof (bhdr_t) - MIN_BLOCK_SIZE). This does not change the fact
+ * that the minimum size is still sizeof
+ * (bhdr_t).
+ * - Changed all C++ comment style to C style. (// -> /.* ... *./)
+ * - Used ls_bit instead of ffs and ms_bit instead of fls. I did this to
+ * avoid confusion with the standard ffs function which returns
+ * different values.
+ * - Created set_bit/clear_bit functions because they are not present
+ * on x86_64.
+ * - Added locking support + extra file target.h to show how to use it.
+ * - Added get_used_size function (REMOVED in 2.4)
+ * - Added rtl_realloc and rtl_calloc function
+ * - Implemented realloc clever support.
+ * - Added some test code in the example directory.
+ *
+ *
+ * (Oct 23 2006) Adam Scislowicz:
+ *
+ * - Support for ARMv5 implemented
+ *
+ */
+
+/*#define USE_SBRK (0) */
+/*#define USE_MMAP (0) */
+
+#include <stdio.h>
+#include <string.h>
+
+#ifndef TLSF_USE_LOCKS
+#define TLSF_USE_LOCKS (0)
+#endif
+
+#ifndef TLSF_STATISTIC
+#define TLSF_STATISTIC (0)
+#endif
+
+#ifndef USE_MMAP
+#define USE_MMAP (0)
+#endif
+
+#ifndef USE_SBRK
+#define USE_SBRK (0)
+#endif
+
+
+#if TLSF_USE_LOCKS
+#include "target.h"
+#else
+#define TLSF_CREATE_LOCK(_unused_) do{}while(0)
+#define TLSF_DESTROY_LOCK(_unused_) do{}while(0)
+#define TLSF_ACQUIRE_LOCK(_unused_) do{}while(0)
+#define TLSF_RELEASE_LOCK(_unused_) do{}while(0)
+#endif
+
+#if TLSF_STATISTIC
+#define TLSF_ADD_SIZE(tlsf, b) do { \
+ tlsf->used_size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD; \
+ if (tlsf->used_size > tlsf->max_size) \
+ tlsf->max_size = tlsf->used_size; \
+ } while(0)
+
+#define TLSF_REMOVE_SIZE(tlsf, b) do { \
+ tlsf->used_size -= (b->size & BLOCK_SIZE) + BHDR_OVERHEAD; \
+ } while(0)
+#else
+#define TLSF_ADD_SIZE(tlsf, b) do{}while(0)
+#define TLSF_REMOVE_SIZE(tlsf, b) do{}while(0)
+#endif
+
+#if USE_MMAP || USE_SBRK
+#include <unistd.h>
+#endif
+
+#if USE_MMAP
+#include <sys/mman.h>
+#endif
+
+#include "tlsf.h"
+
+#if !defined(__GNUC__)
+#ifndef __inline__
+#define __inline__
+#endif
+#endif
+
+/* The debug functions can only be used when _DEBUG_TLSF_ is set. */
+#ifndef _DEBUG_TLSF_
+#define _DEBUG_TLSF_ (0)
+#endif
+
+/*************************************************************************/
+/* Definition of the structures used by TLSF */
+
+
+/* Some IMPORTANT TLSF parameters */
+/* Unlike previous TLSF versions, they are now static */
+#define BLOCK_ALIGN (sizeof(void *) * 2)
+
+#define MAX_FLI (30)
+#define MAX_LOG2_SLI (5)
+#define MAX_SLI (1 << MAX_LOG2_SLI) /* MAX_SLI = 2^MAX_LOG2_SLI */
+
+#define FLI_OFFSET (6) /* the TLSF structure will only manage blocks bigger */
+/* than 128 bytes */
+#define SMALL_BLOCK (128)
+#define REAL_FLI (MAX_FLI - FLI_OFFSET)
+#define MIN_BLOCK_SIZE (sizeof (free_ptr_t))
+#define BHDR_OVERHEAD (sizeof (bhdr_t) - MIN_BLOCK_SIZE)
+#define TLSF_SIGNATURE (0x2A59FA59)
+
+#define PTR_MASK (sizeof(void *) - 1)
+#define BLOCK_SIZE (0xFFFFFFFF - PTR_MASK)
+
+#define GET_NEXT_BLOCK(_addr, _r) ((bhdr_t *) ((char *) (_addr) + (_r)))
+#define MEM_ALIGN ((BLOCK_ALIGN) - 1)
+#define ROUNDUP_SIZE(_r) (((_r) + MEM_ALIGN) & ~MEM_ALIGN)
+#define ROUNDDOWN_SIZE(_r) ((_r) & ~MEM_ALIGN)
+#define ROUNDUP(_x, _v) ((((~(_x)) + 1) & ((_v)-1)) + (_x))
+
+#define BLOCK_STATE (0x1)
+#define PREV_STATE (0x2)
+
+/* bit 0 of the block size */
+#define FREE_BLOCK (0x1)
+#define USED_BLOCK (0x0)
+
+/* bit 1 of the block size */
+#define PREV_FREE (0x2)
+#define PREV_USED (0x0)
+
+
+#define DEFAULT_AREA_SIZE (1024*10)
+
+#ifdef USE_MMAP
+#define PAGE_SIZE (getpagesize())
+#endif
+
+#define PRINT_MSG(fmt, args...) printf(fmt, ## args)
+#define ERROR_MSG(fmt, args...) printf(fmt, ## args)
+
+typedef unsigned int u32_t; /* NOTE: Make sure that this type is 4 bytes long on your computer */
+typedef unsigned char u8_t; /* NOTE: Make sure that this type is 1 byte on your computer */
+
+typedef struct free_ptr_struct {
+ struct bhdr_struct *prev;
+ struct bhdr_struct *next;
+} free_ptr_t;
+
+typedef struct bhdr_struct {
+ /* This pointer is only valid if the first bit of size is set */
+ struct bhdr_struct *prev_hdr;
+ /* The size is stored in bytes */
+ size_t size; /* bit 0 indicates whether the block is used and */
+ /* bit 1 indicates whether the previous block is free */
+ union {
+ struct free_ptr_struct free_ptr;
+ u8_t buffer[1]; /*sizeof(struct free_ptr_struct)]; */
+ } ptr;
+} bhdr_t;
+
+/* This structure is embedded at the beginning of each area, giving us
+ * enough information to cope with a set of areas */
+
+typedef struct area_info_struct {
+ bhdr_t *end;
+ struct area_info_struct *next;
+} area_info_t;
+
+typedef struct TLSF_struct {
+ /* the TLSF's structure signature */
+ u32_t tlsf_signature;
+
+#if TLSF_USE_LOCKS
+ TLSF_MLOCK_T lock;
+#endif
+
+#if TLSF_STATISTIC
+ /* These cannot be calculated outside tlsf because we
+ * do not know the sizes when freeing/reallocing memory. */
+ size_t used_size;
+ size_t max_size;
+#endif
+
+ /* A linked list holding all the existing areas */
+ area_info_t *area_head;
+
+ /* the first-level bitmap */
+ /* This array should have a size of REAL_FLI bits */
+ u32_t fl_bitmap;
+
+ /* the second-level bitmap */
+ u32_t sl_bitmap[REAL_FLI];
+
+ bhdr_t *matrix[REAL_FLI][MAX_SLI];
+} tlsf_t;
+
+
+/******************************************************************/
+/************** Helping functions **************************/
+/******************************************************************/
+static __inline__ void set_bit(int nr, u32_t * addr);
+static __inline__ void clear_bit(int nr, u32_t * addr);
+static __inline__ int ls_bit(int x);
+static __inline__ int ms_bit(int x);
+static __inline__ void MAPPING_SEARCH(size_t * _r, int *_fl, int *_sl);
+static __inline__ void MAPPING_INSERT(size_t _r, int *_fl, int *_sl);
+static __inline__ bhdr_t *FIND_SUITABLE_BLOCK(tlsf_t * _tlsf, int *_fl, int *_sl);
+static __inline__ bhdr_t *process_area(void *area, size_t size);
+#if USE_SBRK || USE_MMAP
+static __inline__ void *get_new_area(size_t * size);
+#endif
+
+static const int table[] = {
+ -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4,
+ 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5,
+ 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6,
+ 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6,
+ 6, 6, 6, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7,
+ 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7,
+ 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7,
+ 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7,
+ 7, 7, 7, 7, 7, 7, 7
+};
+
+static __inline__ int ls_bit(int i)
+{
+ unsigned int a;
+ unsigned int x = i & -i;
+
+ a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 16 : 24);
+ return table[x >> a] + a;
+}
+
+static __inline__ int ms_bit(int i)
+{
+ unsigned int a;
+ unsigned int x = (unsigned int) i;
+
+ a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 16 : 24);
+ return table[x >> a] + a;
+}
+
+static __inline__ void set_bit(int nr, u32_t * addr)
+{
+ addr[nr >> 5] |= 1 << (nr & 0x1f);
+}
+
+static __inline__ void clear_bit(int nr, u32_t * addr)
+{
+ addr[nr >> 5] &= ~(1 << (nr & 0x1f));
+}
+
+static __inline__ void MAPPING_SEARCH(size_t * _r, int *_fl, int *_sl)
+{
+ int _t;
+
+ if (*_r < SMALL_BLOCK) {
+ *_fl = 0;
+ *_sl = *_r / (SMALL_BLOCK / MAX_SLI);
+ } else {
+ _t = (1 << (ms_bit(*_r) - MAX_LOG2_SLI)) - 1;
+ *_r = *_r + _t;
+ *_fl = ms_bit(*_r);
+ *_sl = (*_r >> (*_fl - MAX_LOG2_SLI)) - MAX_SLI;
+ *_fl -= FLI_OFFSET;
+ /*if ((*_fl -= FLI_OFFSET) < 0) // FL will always be > 0!
+ *_fl = *_sl = 0;
+ */
+ *_r &= ~_t;
+ }
+}
+
+static __inline__ void MAPPING_INSERT(size_t _r, int *_fl, int *_sl)
+{
+ if (_r < SMALL_BLOCK) {
+ *_fl = 0;
+ *_sl = _r / (SMALL_BLOCK / MAX_SLI);
+ } else {
+ *_fl = ms_bit(_r);
+ *_sl = (_r >> (*_fl - MAX_LOG2_SLI)) - MAX_SLI;
+ *_fl -= FLI_OFFSET;
+ }
+}
+
+
+static __inline__ bhdr_t *FIND_SUITABLE_BLOCK(tlsf_t * _tlsf, int *_fl, int *_sl)
+{
+ u32_t _tmp = _tlsf->sl_bitmap[*_fl] & (~0 << *_sl);
+ bhdr_t *_b = NULL;
+
+ if (_tmp) {
+ *_sl = ls_bit(_tmp);
+ _b = _tlsf->matrix[*_fl][*_sl];
+ } else {
+ *_fl = ls_bit(_tlsf->fl_bitmap & (~0 << (*_fl + 1)));
+ if (*_fl > 0) { /* likely */
+ *_sl = ls_bit(_tlsf->sl_bitmap[*_fl]);
+ _b = _tlsf->matrix[*_fl][*_sl];
+ }
+ }
+ return _b;
+}
+
+
+#define EXTRACT_BLOCK_HDR(_b, _tlsf, _fl, _sl) do { \
+ _tlsf -> matrix [_fl] [_sl] = _b -> ptr.free_ptr.next; \
+ if (_tlsf -> matrix[_fl][_sl]) \
+ _tlsf -> matrix[_fl][_sl] -> ptr.free_ptr.prev = NULL; \
+ else { \
+ clear_bit (_sl, &_tlsf -> sl_bitmap [_fl]); \
+ if (!_tlsf -> sl_bitmap [_fl]) \
+ clear_bit (_fl, &_tlsf -> fl_bitmap); \
+ } \
+ _b -> ptr.free_ptr.prev = NULL; \
+ _b -> ptr.free_ptr.next = NULL; \
+ }while(0)
+
+
+#define EXTRACT_BLOCK(_b, _tlsf, _fl, _sl) do { \
+ if (_b -> ptr.free_ptr.next) \
+ _b -> ptr.free_ptr.next -> ptr.free_ptr.prev = _b -> ptr.free_ptr.prev; \
+ if (_b -> ptr.free_ptr.prev) \
+ _b -> ptr.free_ptr.prev -> ptr.free_ptr.next = _b -> ptr.free_ptr.next; \
+ if (_tlsf -> matrix [_fl][_sl] == _b) { \
+ _tlsf -> matrix [_fl][_sl] = _b -> ptr.free_ptr.next; \
+ if (!_tlsf -> matrix [_fl][_sl]) { \
+ clear_bit (_sl, &_tlsf -> sl_bitmap[_fl]); \
+ if (!_tlsf -> sl_bitmap [_fl]) \
+ clear_bit (_fl, &_tlsf -> fl_bitmap); \
+ } \
+ } \
+ _b -> ptr.free_ptr.prev = NULL; \
+ _b -> ptr.free_ptr.next = NULL; \
+ } while(0)
+
+#define INSERT_BLOCK(_b, _tlsf, _fl, _sl) do { \
+ _b -> ptr.free_ptr.prev = NULL; \
+ _b -> ptr.free_ptr.next = _tlsf -> matrix [_fl][_sl]; \
+ if (_tlsf -> matrix [_fl][_sl]) \
+ _tlsf -> matrix [_fl][_sl] -> ptr.free_ptr.prev = _b; \
+ _tlsf -> matrix [_fl][_sl] = _b; \
+ set_bit (_sl, &_tlsf -> sl_bitmap [_fl]); \
+ set_bit (_fl, &_tlsf -> fl_bitmap); \
+ } while(0)
+
+#if USE_SBRK || USE_MMAP
+static __inline__ void *get_new_area(size_t * size)
+{
+ void *area;
+
+#if USE_SBRK
+ area = (void *)sbrk(0);
+ if (((void *)sbrk(*size)) != ((void *) -1))
+ return area;
+#endif
+
+#if USE_MMAP
+ *size = ROUNDUP(*size, PAGE_SIZE);
+ if ((area = mmap(0, *size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)) != MAP_FAILED)
+ return area;
+#endif
+ return ((void *) ~0);
+}
+#endif
+
+static __inline__ bhdr_t *process_area(void *area, size_t size)
+{
+ bhdr_t *b, *lb, *ib;
+ area_info_t *ai;
+
+ ib = (bhdr_t *) area;
+ ib->size =
+ (sizeof(area_info_t) <
+ MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(sizeof(area_info_t)) | USED_BLOCK | PREV_USED;
+ b = (bhdr_t *) GET_NEXT_BLOCK(ib->ptr.buffer, ib->size & BLOCK_SIZE);
+ b->size = ROUNDDOWN_SIZE(size - 3 * BHDR_OVERHEAD - (ib->size & BLOCK_SIZE)) | USED_BLOCK | PREV_USED;
+ b->ptr.free_ptr.prev = b->ptr.free_ptr.next = 0;
+ lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
+ lb->prev_hdr = b;
+ lb->size = 0 | USED_BLOCK | PREV_FREE;
+ ai = (area_info_t *) ib->ptr.buffer;
+ ai->next = 0;
+ ai->end = lb;
+ return ib;
+}
+
+/******************************************************************/
+/******************** Begin of the allocator code *****************/
+/******************************************************************/
+
+static char *mp = NULL; /* Default memory pool. */
+
+/******************************************************************/
+size_t init_memory_pool(size_t mem_pool_size, void *mem_pool)
+{
+/******************************************************************/
+ tlsf_t *tlsf;
+ bhdr_t *b, *ib;
+
+ if (!mem_pool || !mem_pool_size || mem_pool_size < sizeof(tlsf_t) + BHDR_OVERHEAD * 8) {
+ ERROR_MSG("init_memory_pool (): memory_pool invalid\n");
+ return -1;
+ }
+
+ if (((unsigned long) mem_pool & PTR_MASK)) {
+ ERROR_MSG("init_memory_pool (): mem_pool must be aligned to a word\n");
+ return -1;
+ }
+ tlsf = (tlsf_t *) mem_pool;
+ /* Check if already initialised */
+ if (tlsf->tlsf_signature == TLSF_SIGNATURE) {
+ mp = mem_pool;
+ b = GET_NEXT_BLOCK(mp, ROUNDUP_SIZE(sizeof(tlsf_t)));
+ return b->size & BLOCK_SIZE;
+ }
+
+ mp = mem_pool;
+
+ /* Zeroing the memory pool */
+ memset(mem_pool, 0, sizeof(tlsf_t));
+
+ tlsf->tlsf_signature = TLSF_SIGNATURE;
+
+ TLSF_CREATE_LOCK(&tlsf->lock);
+
+ ib = process_area(GET_NEXT_BLOCK
+ (mem_pool, ROUNDUP_SIZE(sizeof(tlsf_t))), ROUNDDOWN_SIZE(mem_pool_size - sizeof(tlsf_t)));
+ b = GET_NEXT_BLOCK(ib->ptr.buffer, ib->size & BLOCK_SIZE);
+ free_ex(b->ptr.buffer, tlsf);
+ tlsf->area_head = (area_info_t *) ib->ptr.buffer;
+
+#if TLSF_STATISTIC
+ tlsf->used_size = mem_pool_size - (b->size & BLOCK_SIZE);
+ tlsf->max_size = tlsf->used_size;
+#endif
+
+ return (b->size & BLOCK_SIZE);
+}
+
+/******************************************************************/
+size_t add_new_area(void *area, size_t area_size, void *mem_pool)
+{
+/******************************************************************/
+ tlsf_t *tlsf = (tlsf_t *) mem_pool;
+ area_info_t *ptr, *ptr_prev, *ai;
+ bhdr_t *ib0, *b0, *lb0, *ib1, *b1, *lb1, *next_b;
+
+ memset(area, 0, area_size);
+ ptr = tlsf->area_head;
+ ptr_prev = 0;
+
+ ib0 = process_area(area, area_size);
+ b0 = GET_NEXT_BLOCK(ib0->ptr.buffer, ib0->size & BLOCK_SIZE);
+ lb0 = GET_NEXT_BLOCK(b0->ptr.buffer, b0->size & BLOCK_SIZE);
+
+ /* Before inserting the new area, we have to merge this area with the
+ already existing ones */
+
+ while (ptr) {
+ ib1 = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
+ b1 = GET_NEXT_BLOCK(ib1->ptr.buffer, ib1->size & BLOCK_SIZE);
+ lb1 = ptr->end;
+
+ /* Merging the new area with the next physically contiguous one */
+ if ((unsigned long) ib1 == (unsigned long) lb0 + BHDR_OVERHEAD) {
+ if (tlsf->area_head == ptr) {
+ tlsf->area_head = ptr->next;
+ ptr = ptr->next;
+ } else {
+ ptr_prev->next = ptr->next;
+ ptr = ptr->next;
+ }
+
+ b0->size =
+ ROUNDDOWN_SIZE((b0->size & BLOCK_SIZE) +
+ (ib1->size & BLOCK_SIZE) + 2 * BHDR_OVERHEAD) | USED_BLOCK | PREV_USED;
+
+ b1->prev_hdr = b0;
+ lb0 = lb1;
+
+ continue;
+ }
+
+ /* Merging the new area with the previous physically contiguous
+ one */
+ if ((unsigned long) lb1->ptr.buffer == (unsigned long) ib0) {
+ if (tlsf->area_head == ptr) {
+ tlsf->area_head = ptr->next;
+ ptr = ptr->next;
+ } else {
+ ptr_prev->next = ptr->next;
+ ptr = ptr->next;
+ }
+
+ lb1->size =
+ ROUNDDOWN_SIZE((b0->size & BLOCK_SIZE) +
+ (ib0->size & BLOCK_SIZE) + 2 * BHDR_OVERHEAD) | USED_BLOCK | (lb1->size & PREV_STATE);
+ next_b = GET_NEXT_BLOCK(lb1->ptr.buffer, lb1->size & BLOCK_SIZE);
+ next_b->prev_hdr = lb1;
+ b0 = lb1;
+ ib0 = ib1;
+
+ continue;
+ }
+ ptr_prev = ptr;
+ ptr = ptr->next;
+ }
+
+ /* Inserting the area in the list of linked areas */
+ ai = (area_info_t *) ib0->ptr.buffer;
+ ai->next = tlsf->area_head;
+ ai->end = lb0;
+ tlsf->area_head = ai;
+ free_ex(b0->ptr.buffer, mem_pool);
+ return (b0->size & BLOCK_SIZE);
+}
+
+
+/******************************************************************/
+size_t get_used_size(void *mem_pool)
+{
+/******************************************************************/
+#if TLSF_STATISTIC
+ return ((tlsf_t *) mem_pool)->used_size;
+#else
+ return 0;
+#endif
+}
+
+/******************************************************************/
+size_t get_max_size(void *mem_pool)
+{
+/******************************************************************/
+#if TLSF_STATISTIC
+ return ((tlsf_t *) mem_pool)->max_size;
+#else
+ return 0;
+#endif
+}
+
+/******************************************************************/
+void destroy_memory_pool(void *mem_pool)
+{
+/******************************************************************/
+ tlsf_t *tlsf = (tlsf_t *) mem_pool;
+
+ tlsf->tlsf_signature = 0;
+
+ TLSF_DESTROY_LOCK(&tlsf->lock);
+
+}
+
+
+/******************************************************************/
+void *tlsf_malloc(size_t size)
+{
+/******************************************************************/
+ void *ret;
+
+#if USE_MMAP || USE_SBRK
+ if (!mp) {
+ size_t area_size;
+ void *area;
+
+ area_size = sizeof(tlsf_t) + BHDR_OVERHEAD * 8; /* Just a safety constant */
+ area_size = (area_size > DEFAULT_AREA_SIZE) ? area_size : DEFAULT_AREA_SIZE;
+ area = get_new_area(&area_size);
+ if (area == ((void *) ~0))
+ return NULL; /* Not enough system memory */
+ init_memory_pool(area_size, area);
+ }
+#endif
+
+ TLSF_ACQUIRE_LOCK(&((tlsf_t *)mp)->lock);
+
+ ret = malloc_ex(size, mp);
+
+ TLSF_RELEASE_LOCK(&((tlsf_t *)mp)->lock);
+
+ return ret;
+}
+
+/******************************************************************/
+void tlsf_free(void *ptr)
+{
+/******************************************************************/
+
+ TLSF_ACQUIRE_LOCK(&((tlsf_t *)mp)->lock);
+
+ free_ex(ptr, mp);
+
+ TLSF_RELEASE_LOCK(&((tlsf_t *)mp)->lock);
+
+}
+
+/******************************************************************/
+void *tlsf_realloc(void *ptr, size_t size)
+{
+/******************************************************************/
+ void *ret;
+
+#if USE_MMAP || USE_SBRK
+ if (!mp) {
+ return tlsf_malloc(size);
+ }
+#endif
+
+ TLSF_ACQUIRE_LOCK(&((tlsf_t *)mp)->lock);
+
+ ret = realloc_ex(ptr, size, mp);
+
+ TLSF_RELEASE_LOCK(&((tlsf_t *)mp)->lock);
+
+ return ret;
+}
+
+/******************************************************************/
+void *tlsf_calloc(size_t nelem, size_t elem_size)
+{
+/******************************************************************/
+ void *ret;
+
+ TLSF_ACQUIRE_LOCK(&((tlsf_t *)mp)->lock);
+
+ ret = calloc_ex(nelem, elem_size, mp);
+
+ TLSF_RELEASE_LOCK(&((tlsf_t *)mp)->lock);
+
+ return ret;
+}
+
+/******************************************************************/
+void *malloc_ex(size_t size, void *mem_pool)
+{
+/******************************************************************/
+ tlsf_t *tlsf = (tlsf_t *) mem_pool;
+ bhdr_t *b, *b2, *next_b;
+ int fl, sl;
+ size_t tmp_size;
+
+ size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);
+
+ /* Rounding up the requested size and calculating fl and sl */
+ MAPPING_SEARCH(&size, &fl, &sl);
+
+ /* Search for a free block; recall that this function changes the values of fl and sl,
+ so they are no longer valid when the function fails */
+ b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
+
+#if USE_MMAP || USE_SBRK
+ if (!b) {
+ size_t area_size;
+ void *area;
+ /* Growing the pool size when needed */
+ area_size = size + BHDR_OVERHEAD * 8; /* size plus enough room for the required headers. */
+ area_size = (area_size > DEFAULT_AREA_SIZE) ? area_size : DEFAULT_AREA_SIZE;
+ area = get_new_area(&area_size); /* Call sbrk or mmap */
+ if (area == ((void *) ~0))
+ return NULL; /* Not enough system memory */
+ add_new_area(area, area_size, mem_pool);
+ /* Rounding up the requested size and calculating fl and sl */
+ MAPPING_SEARCH(&size, &fl, &sl);
+ /* Searching a free block */
+ b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
+ }
+#endif
+ if (!b)
+ return NULL; /* Not found */
+
+ EXTRACT_BLOCK_HDR(b, tlsf, fl, sl);
+
+ /*-- found: */
+ next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
+ /* Should the block be split? */
+ tmp_size = (b->size & BLOCK_SIZE) - size;
+ if (tmp_size >= sizeof(bhdr_t)) {
+ tmp_size -= BHDR_OVERHEAD;
+ b2 = GET_NEXT_BLOCK(b->ptr.buffer, size);
+ b2->size = tmp_size | FREE_BLOCK | PREV_USED;
+ next_b->prev_hdr = b2;
+ MAPPING_INSERT(tmp_size, &fl, &sl);
+ INSERT_BLOCK(b2, tlsf, fl, sl);
+
+ b->size = size | (b->size & PREV_STATE);
+ } else {
+ next_b->size &= (~PREV_FREE);
+ b->size &= (~FREE_BLOCK); /* Now it's used */
+ }
+
+ TLSF_ADD_SIZE(tlsf, b);
+
+ return (void *) b->ptr.buffer;
+}
+
+/******************************************************************/
+void free_ex(void *ptr, void *mem_pool)
+{
+/******************************************************************/
+ tlsf_t *tlsf = (tlsf_t *) mem_pool;
+ bhdr_t *b, *tmp_b;
+ int fl = 0, sl = 0;
+
+ if (!ptr) {
+ return;
+ }
+ b = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
+ b->size |= FREE_BLOCK;
+
+ TLSF_REMOVE_SIZE(tlsf, b);
+
+ b->ptr.free_ptr.prev = NULL;
+ b->ptr.free_ptr.next = NULL;
+ tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
+ if (tmp_b->size & FREE_BLOCK) {
+ MAPPING_INSERT(tmp_b->size & BLOCK_SIZE, &fl, &sl);
+ EXTRACT_BLOCK(tmp_b, tlsf, fl, sl);
+ b->size += (tmp_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
+ }
+ if (b->size & PREV_FREE) {
+ tmp_b = b->prev_hdr;
+ MAPPING_INSERT(tmp_b->size & BLOCK_SIZE, &fl, &sl);
+ EXTRACT_BLOCK(tmp_b, tlsf, fl, sl);
+ tmp_b->size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
+ b = tmp_b;
+ }
+ MAPPING_INSERT(b->size & BLOCK_SIZE, &fl, &sl);
+ INSERT_BLOCK(b, tlsf, fl, sl);
+
+ tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
+ tmp_b->size |= PREV_FREE;
+ tmp_b->prev_hdr = b;
+}
+
+/******************************************************************/
+void *realloc_ex(void *ptr, size_t new_size, void *mem_pool)
+{
+/******************************************************************/
+ tlsf_t *tlsf = (tlsf_t *) mem_pool;
+ void *ptr_aux;
+ unsigned int cpsize;
+ bhdr_t *b, *tmp_b, *next_b;
+ int fl, sl;
+ size_t tmp_size;
+
+ if (!ptr) {
+ if (new_size)
+ return (void *) malloc_ex(new_size, mem_pool);
+ if (!new_size)
+ return NULL;
+ } else if (!new_size) {
+ free_ex(ptr, mem_pool);
+ return NULL;
+ }
+
+ b = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
+ next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
+ new_size = (new_size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(new_size);
+ tmp_size = (b->size & BLOCK_SIZE);
+ if (new_size <= tmp_size) {
+ TLSF_REMOVE_SIZE(tlsf, b);
+ if (next_b->size & FREE_BLOCK) {
+ MAPPING_INSERT(next_b->size & BLOCK_SIZE, &fl, &sl);
+ EXTRACT_BLOCK(next_b, tlsf, fl, sl);
+ tmp_size += (next_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
+ next_b = GET_NEXT_BLOCK(next_b->ptr.buffer, next_b->size & BLOCK_SIZE);
+ /* We always re-enter this free block because tmp_size will
+ be greater than sizeof (bhdr_t) */
+ }
+ tmp_size -= new_size;
+ if (tmp_size >= sizeof(bhdr_t)) {
+ tmp_size -= BHDR_OVERHEAD;
+ tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, new_size);
+ tmp_b->size = tmp_size | FREE_BLOCK | PREV_USED;
+ next_b->prev_hdr = tmp_b;
+ next_b->size |= PREV_FREE;
+ MAPPING_INSERT(tmp_size, &fl, &sl);
+ INSERT_BLOCK(tmp_b, tlsf, fl, sl);
+ b->size = new_size | (b->size & PREV_STATE);
+ }
+ TLSF_ADD_SIZE(tlsf, b);
+ return (void *) b->ptr.buffer;
+ }
+ if ((next_b->size & FREE_BLOCK)) {
+ if (new_size <= (tmp_size + (next_b->size & BLOCK_SIZE))) {
+ TLSF_REMOVE_SIZE(tlsf, b);
+ MAPPING_INSERT(next_b->size & BLOCK_SIZE, &fl, &sl);
+ EXTRACT_BLOCK(next_b, tlsf, fl, sl);
+ b->size += (next_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
+ next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
+ next_b->prev_hdr = b;
+ next_b->size &= ~PREV_FREE;
+ tmp_size = (b->size & BLOCK_SIZE) - new_size;
+ if (tmp_size >= sizeof(bhdr_t)) {
+ tmp_size -= BHDR_OVERHEAD;
+ tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, new_size);
+ tmp_b->size = tmp_size | FREE_BLOCK | PREV_USED;
+ next_b->prev_hdr = tmp_b;
+ next_b->size |= PREV_FREE;
+ MAPPING_INSERT(tmp_size, &fl, &sl);
+ INSERT_BLOCK(tmp_b, tlsf, fl, sl);
+ b->size = new_size | (b->size & PREV_STATE);
+ }
+ TLSF_ADD_SIZE(tlsf, b);
+ return (void *) b->ptr.buffer;
+ }
+ }
+
+ ptr_aux = malloc_ex(new_size, mem_pool);
+
+ cpsize = ((b->size & BLOCK_SIZE) > new_size) ? new_size : (b->size & BLOCK_SIZE);
+
+ memcpy(ptr_aux, ptr, cpsize);
+
+ free_ex(ptr, mem_pool);
+ return ptr_aux;
+}
+
+
+/******************************************************************/
+void *calloc_ex(size_t nelem, size_t elem_size, void *mem_pool)
+{
+/******************************************************************/
+ void *ptr;
+
+ if (nelem <= 0 || elem_size <= 0)
+ return NULL;
+
+ if (!(ptr = malloc_ex(nelem * elem_size, mem_pool)))
+ return NULL;
+ memset(ptr, 0, nelem * elem_size);
+
+ return ptr;
+}
+
+
+
+#if _DEBUG_TLSF_
+
+/*************** DEBUG FUNCTIONS **************/
+
+/* The following functions have been designed to ease the debugging of */
+/* the TLSF structure. For non-development purposes, they are of */
+/* little use. To enable them, _DEBUG_TLSF_ must be set. */
+
+extern void dump_memory_region(unsigned char *mem_ptr, unsigned int size);
+extern void print_block(bhdr_t * b);
+extern void print_tlsf(tlsf_t * tlsf);
+void print_all_blocks(tlsf_t * tlsf);
+
+void dump_memory_region(unsigned char *mem_ptr, unsigned int size)
+{
+
+ unsigned long begin = (unsigned long) mem_ptr;
+ unsigned long end = (unsigned long) mem_ptr + size;
+ int column = 0;
+
+ begin >>= 2;
+ begin <<= 2;
+
+ end >>= 2;
+ end++;
+ end <<= 2;
+
+ PRINT_MSG("\nMemory region dumped: 0x%lx - 0x%lx\n\n", begin, end);
+
+ column = 0;
+ PRINT_MSG("0x%lx ", begin);
+
+ while (begin < end) {
+ if (((unsigned char *) begin)[0] == 0)
+ PRINT_MSG("00");
+ else
+ PRINT_MSG("%02x", ((unsigned char *) begin)[0]);
+ if (((unsigned char *) begin)[1] == 0)
+ PRINT_MSG("00 ");
+ else
+ PRINT_MSG("%02x ", ((unsigned char *) begin)[1]);
+ begin += 2;
+ column++;
+ if (column == 8) {
+ PRINT_MSG("\n0x%lx ", begin);
+ column = 0;
+ }
+
+ }
+ PRINT_MSG("\n\n");
+}
+
+void print_block(bhdr_t * b)
+{
+ if (!b)
+ return;
+ PRINT_MSG(">> [%p] (", b);
+ if ((b->size & BLOCK_SIZE))
+ PRINT_MSG("%lu bytes, ", (unsigned long) (b->size & BLOCK_SIZE));
+ else
+ PRINT_MSG("sentinel, ");
+ if ((b->size & BLOCK_STATE) == FREE_BLOCK)
+ PRINT_MSG("free [%p, %p], ", b->ptr.free_ptr.prev, b->ptr.free_ptr.next);
+ else
+ PRINT_MSG("used, ");
+ if ((b->size & PREV_STATE) == PREV_FREE)
+ PRINT_MSG("prev. free [%p])\n", b->prev_hdr);
+ else
+ PRINT_MSG("prev used)\n");
+}
+
+void print_tlsf(tlsf_t * tlsf)
+{
+ bhdr_t *next;
+ int i, j;
+
+ PRINT_MSG("\nTLSF at %p\n", tlsf);
+
+ PRINT_MSG("FL bitmap: 0x%x\n\n", (unsigned) tlsf->fl_bitmap);
+
+ for (i = 0; i < REAL_FLI; i++) {
+ if (tlsf->sl_bitmap[i])
+ PRINT_MSG("SL bitmap 0x%x\n", (unsigned) tlsf->sl_bitmap[i]);
+ for (j = 0; j < MAX_SLI; j++) {
+ next = tlsf->matrix[i][j];
+ if (next)
+ PRINT_MSG("-> [%d][%d]\n", i, j);
+ while (next) {
+ print_block(next);
+ next = next->ptr.free_ptr.next;
+ }
+ }
+ }
+}
+
+void print_all_blocks(tlsf_t * tlsf)
+{
+ area_info_t *ai;
+ bhdr_t *next;
+ PRINT_MSG("\nTLSF at %p\nALL BLOCKS\n\n", tlsf);
+ ai = tlsf->area_head;
+ while (ai) {
+ next = (bhdr_t *) ((char *) ai - BHDR_OVERHEAD);
+ while (next) {
+ print_block(next);
+ if ((next->size & BLOCK_SIZE))
+ next = GET_NEXT_BLOCK(next->ptr.buffer, next->size & BLOCK_SIZE);
+ else
+ next = NULL;
+ }
+ ai = ai->next;
+ }
+}
+
+#endif
diff --git a/apps/plugins/pdbox/TLSF-2.4.4/src/tlsf.h b/apps/plugins/pdbox/TLSF-2.4.4/src/tlsf.h
new file mode 100644
index 0000000000..5d016f4369
--- /dev/null
+++ b/apps/plugins/pdbox/TLSF-2.4.4/src/tlsf.h
@@ -0,0 +1,39 @@
+/*
+ * Two Levels Segregate Fit memory allocator (TLSF)
+ * Version 2.4.4
+ *
+ * Written by Miguel Masmano Tello <mimastel@doctor.upv.es>
+ *
+ * Thanks to Ismael Ripoll for his suggestions and reviews
+ *
+ * Copyright (C) 2008, 2007, 2006, 2005, 2004
+ *
+ * This code is released using a dual license strategy: GPL/LGPL
+ * You can choose the licence that better fits your requirements.
+ *
+ * Released under the terms of the GNU General Public License Version 2.0
+ * Released under the terms of the GNU Lesser General Public License Version 2.1
+ *
+ */
+
+#ifndef _TLSF_H_
+#define _TLSF_H_
+
+#include <sys/types.h>
+
+extern size_t init_memory_pool(size_t, void *);
+extern size_t get_used_size(void *);
+extern size_t get_max_size(void *);
+extern void destroy_memory_pool(void *);
+extern size_t add_new_area(void *, size_t, void *);
+extern void *malloc_ex(size_t, void *);
+extern void free_ex(void *, void *);
+extern void *realloc_ex(void *, size_t, void *);
+extern void *calloc_ex(size_t, size_t, void *);
+
+extern void *tlsf_malloc(size_t size);
+extern void tlsf_free(void *ptr);
+extern void *tlsf_realloc(void *ptr, size_t size);
+extern void *tlsf_calloc(size_t nelem, size_t elem_size);
+
+#endif
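The statistics functions declared above, get_used_size and get_max_size, are
not covered by the README. A minimal usage sketch (pool size and names are
illustrative), assuming tlsf.c is built with -DTLSF_STATISTIC=1 so that the
counters are maintained; without that flag both functions simply return 0:

#include <stdio.h>
#include "tlsf.h"

static char pool[64 * 1024];    /* pool size chosen arbitrarily */

int main(void)
{
    void *p;

    init_memory_pool(sizeof(pool), pool);

    p = malloc_ex(1000, pool);
    printf("used: %u  peak: %u\n",
           (unsigned) get_used_size(pool),
           (unsigned) get_max_size(pool));

    free_ex(p, pool);
    printf("used after free: %u\n", (unsigned) get_used_size(pool));

    destroy_memory_pool(pool);
    return 0;
}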