Introducing global context.

For now, only the static variables of the allocator are moved into a
global structure.

The long-term plan is to support multiple context models with different
advantages and disadvantages, so users can select the most appropriate
context model for their own use case. The context model must be
selected at compile time using compiler defines.

Currently only one model is implemented, and it will be the default
context model: the context is stored in global variables, so only
a single context is available, and it always exists.

JerryScript-DCO-1.0-Signed-off-by: Zoltan Herczeg zherczeg.u-szeged@partner.samsung.com
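
The JERRY_CONTEXT and JERRY_HEAP_CONTEXT accessors used throughout the diff
come from the jcontext.h header included by the changed files (not shown in
this excerpt). The sketch below illustrates roughly how the default,
global-variable context model could be laid out. The member names are taken
from the hunks that follow, but the struct layout, the include set, the
jerry_global_context / jerry_global_heap object names and the placement of
JMEM_HEAP_AREA_SIZE are assumptions for illustration, not the committed code.

/* Sketch of a possible jcontext.h for the default context model.
 * Assumption: member names mirror the JERRY_CONTEXT () uses in the diff;
 * object names, includes and guards are illustrative only. */
#ifndef JCONTEXT_H
#define JCONTEXT_H

#include "jrt.h"
#include "jmem-allocator.h"
#include "jmem-config.h"
#include "jmem-heap.h"
#include "jmem-poolman.h"

/* Former static/global variables of the allocator, gathered into one place. */
typedef struct
{
  jmem_free_unused_memory_callback_t jmem_free_unused_memory_callback; /**< 'free unused memory' callback */
  jmem_heap_free_t *jmem_heap_list_skip_p; /**< speeds up deallocation */
  jmem_pools_chunk_t *jmem_free_chunk_p; /**< list of free pool chunks */
  size_t jmem_heap_allocated_size; /**< size of allocated regions */
  size_t jmem_heap_limit; /**< current limit of heap usage */

#ifdef JMEM_STATS
  jmem_heap_stats_t jmem_heap_stats; /**< heap memory usage statistics */
  jmem_pools_stats_t jmem_pools_stats; /**< pools' memory usage statistics */
#endif /* JMEM_STATS */

#ifdef JERRY_VALGRIND_FREYA
  bool valgrind_freya_mempool_request; /**< a pool manager request is in progress */
#endif /* JERRY_VALGRIND_FREYA */
} jerry_context_t;

/* Calculate heap area size, leaving space for a pointer to the free list.
 * Assumption: this define may actually live in another jmem header. */
#define JMEM_HEAP_AREA_SIZE (JMEM_HEAP_SIZE - JMEM_ALIGNMENT)

/* The heap area lives in its own structure so it can still be placed into a
 * dedicated linker section (JERRY_HEAP_SECTION_ATTR) independently of the
 * rest of the context. */
typedef struct
{
  jmem_heap_free_t first; /**< first node of the free region list */
  uint8_t area[JMEM_HEAP_AREA_SIZE] __attribute__ ((aligned (JMEM_ALIGNMENT))); /**< heap area */
} jmem_heap_t;

/* Default context model: a single instance stored in global variables, so
 * exactly one context exists at all times. Alternative context models would
 * be selected here with compiler defines and would only need to change how
 * the two accessor macros resolve a field. */
extern jerry_context_t jerry_global_context;
extern jmem_heap_t jerry_global_heap;

#define JERRY_CONTEXT(field) (jerry_global_context.field)
#define JERRY_HEAP_CONTEXT(field) (jerry_global_heap.field)

#endif /* !JCONTEXT_H */

With such a header, call sites simply read JERRY_CONTEXT (jmem_heap_limit)
instead of jmem_heap_limit, as the hunks below show, which keeps the change
mechanical and leaves the accessor macros as the single point to adapt for
future context models.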
Zoltan Herczeg
2016-07-19 05:10:12 -07:00
parent ca39875690
commit 3a78d30897
7 changed files with 361 additions and 238 deletions
+8 -13
@@ -18,20 +18,15 @@
* Allocator implementation
*/
#include "jrt.h"
#include "jrt-libc-includes.h"
#include "jcontext.h"
#include "jmem-allocator.h"
#include "jmem-heap.h"
#include "jmem-poolman.h"
#include "jrt-libc-includes.h"
#define JMEM_ALLOCATOR_INTERNAL
#include "jmem-allocator-internal.h"
/**
* The 'try to give memory back' callback
*/
static jmem_free_unused_memory_callback_t jmem_free_unused_memory_callback = NULL;
/**
* Initialize memory allocators.
*/
@@ -94,9 +89,9 @@ void
jmem_register_free_unused_memory_callback (jmem_free_unused_memory_callback_t callback) /**< callback routine */
{
/* Currently only one callback is supported */
JERRY_ASSERT (jmem_free_unused_memory_callback == NULL);
JERRY_ASSERT (JERRY_CONTEXT (jmem_free_unused_memory_callback) == NULL);
jmem_free_unused_memory_callback = callback;
JERRY_CONTEXT (jmem_free_unused_memory_callback) = callback;
} /* jmem_register_free_unused_memory_callback */
/**
@@ -106,9 +101,9 @@ void
jmem_unregister_free_unused_memory_callback (jmem_free_unused_memory_callback_t callback) /**< callback routine */
{
/* Currently only one callback is supported */
JERRY_ASSERT (jmem_free_unused_memory_callback == callback);
JERRY_ASSERT (JERRY_CONTEXT (jmem_free_unused_memory_callback) == callback);
jmem_free_unused_memory_callback = NULL;
JERRY_CONTEXT (jmem_free_unused_memory_callback) = NULL;
} /* jmem_unregister_free_unused_memory_callback */
/**
@@ -117,9 +112,9 @@ jmem_unregister_free_unused_memory_callback (jmem_free_unused_memory_callback_t
void
jmem_run_free_unused_memory_callbacks (jmem_free_unused_memory_severity_t severity) /**< severity of the request */
{
if (jmem_free_unused_memory_callback != NULL)
if (JERRY_CONTEXT (jmem_free_unused_memory_callback) != NULL)
{
jmem_free_unused_memory_callback (severity);
JERRY_CONTEXT (jmem_free_unused_memory_callback) (severity);
}
jmem_pools_collect_empty ();
+18 -1
@@ -69,7 +69,24 @@ typedef enum
} jmem_free_unused_memory_severity_t;
/**
* A 'try give memory back' callback routine type.
* Free region node
*/
typedef struct
{
uint32_t next_offset; /* Offset of next region in list */
uint32_t size; /* Size of region */
} jmem_heap_free_t;
/**
* Node for free chunk list
*/
typedef struct mem_pools_chunk
{
struct mem_pools_chunk *next_p; /* pointer to next pool chunk */
} jmem_pools_chunk_t;
/**
* A free memory callback routine type.
*/
typedef void (*jmem_free_unused_memory_callback_t) (jmem_free_unused_memory_severity_t);
+110 -148
@@ -18,12 +18,12 @@
* Heap implementation
*/
#include "jrt.h"
#include "jrt-bit-fields.h"
#include "jrt-libc-includes.h"
#include "jcontext.h"
#include "jmem-allocator.h"
#include "jmem-config.h"
#include "jmem-heap.h"
#include "jrt-bit-fields.h"
#include "jrt-libc-includes.h"
#define JMEM_ALLOCATOR_INTERNAL
#include "jmem-allocator-internal.h"
@@ -54,22 +54,17 @@
#ifdef JERRY_VALGRIND_FREYA
# include "memcheck.h"
/**
* Tells whether a pool manager allocator request is in progress.
*/
static bool valgrind_freya_mempool_request = false;
/**
* Called by pool manager before a heap allocation or free.
*/
void jmem_heap_valgrind_freya_mempool_request (void)
{
valgrind_freya_mempool_request = true;
JERRY_CONTEXT (valgrind_freya_mempool_request) = true;
} /* jmem_heap_valgrind_freya_mempool_request */
# define VALGRIND_FREYA_CHECK_MEMPOOL_REQUEST \
bool mempool_request = valgrind_freya_mempool_request; \
valgrind_freya_mempool_request = false
bool mempool_request = JERRY_CONTEXT (valgrind_freya_mempool_request); \
JERRY_CONTEXT (valgrind_freya_mempool_request) = false
# define VALGRIND_FREYA_MALLOCLIKE_SPACE(p, s) \
if (!mempool_request) \
@@ -89,26 +84,18 @@ void jmem_heap_valgrind_freya_mempool_request (void)
# define VALGRIND_FREYA_FREELIKE_SPACE(p)
#endif /* JERRY_VALGRIND_FREYA */
/* Calculate heap area size, leaving space for a pointer to the free list */
#define JMEM_HEAP_AREA_SIZE (JMEM_HEAP_SIZE - JMEM_ALIGNMENT)
/**
* End of list marker.
*/
#define JMEM_HEAP_END_OF_LIST ((jmem_heap_free_t *const) ~((uint32_t) 0x0))
/**
* Free region node
*/
typedef struct
{
uint32_t next_offset; /**< Offset of next region in list */
uint32_t size; /**< Size of region */
} jmem_heap_free_t;
#if UINTPTR_MAX > UINT32_MAX
#define JMEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) ((uint8_t *) (p) - (uint8_t *) jmem_heap.area))
#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *) &jmem_heap.area[u])
#define JMEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) ((uint8_t *) (p) - JERRY_HEAP_CONTEXT (area)))
#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *) (JERRY_HEAP_CONTEXT (area) + (u)))
#else /* UINTPTR_MAX <= UINT32_MAX */
/* In this case we simply store the pointer, since it fits anyway. */
#define JMEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) (p))
#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *)(u))
#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *) (u))
#endif /* UINTPTR_MAX > UINT32_MAX */
/**
@@ -120,54 +107,13 @@ jmem_heap_get_region_end (jmem_heap_free_t *curr_p) /**< current region */
return (jmem_heap_free_t *)((uint8_t *) curr_p + curr_p->size);
} /* jmem_heap_get_region_end */
/**
* Heap structure
*/
typedef struct
{
/** First node in free region list */
jmem_heap_free_t first;
/**
* Heap area
*/
uint8_t area[JMEM_HEAP_AREA_SIZE] __attribute__ ((aligned (JMEM_ALIGNMENT)));
} jmem_heap_t;
/**
* Heap
*/
#ifndef JERRY_HEAP_SECTION_ATTR
jmem_heap_t jmem_heap;
#else /* JERRY_HEAP_SECTION_ATTR */
jmem_heap_t jmem_heap __attribute__ ((section (JERRY_HEAP_SECTION_ATTR)));
#endif /* !JERRY_HEAP_SECTION_ATTR */
/**
* Check size of heap is corresponding to configuration
*/
JERRY_STATIC_ASSERT (sizeof (jmem_heap) <= JMEM_HEAP_SIZE,
JERRY_STATIC_ASSERT (sizeof (jmem_heap_t) <= JMEM_HEAP_SIZE,
size_of_mem_heap_must_be_less_than_or_equal_to_MEM_HEAP_SIZE);
/**
* Size of allocated regions
*/
size_t jmem_heap_allocated_size;
/**
* Current limit of heap usage, that is upon being reached, causes call of "try give memory back" callbacks
*/
size_t jmem_heap_limit;
/* This is used to speed up deallocation. */
jmem_heap_free_t *jmem_heap_list_skip_p;
#ifdef JMEM_STATS
/**
* Heap's memory usage statistics
*/
static jmem_heap_stats_t jmem_heap_stats;
static void jmem_heap_stat_init (void);
static void jmem_heap_stat_alloc (size_t num);
static void jmem_heap_stat_free (size_t num);
@@ -199,22 +145,30 @@ static void jmem_heap_stat_free_iter ();
void
jmem_heap_init (void)
{
JERRY_ASSERT ((uintptr_t) jmem_heap.area % JMEM_ALIGNMENT == 0);
JERRY_STATIC_ASSERT ((1u << JMEM_HEAP_OFFSET_LOG) >= JMEM_HEAP_SIZE,
two_pow_mem_heap_offset_should_not_be_less_than_mem_heap_size);
jmem_heap_allocated_size = 0;
jmem_heap_limit = CONFIG_MEM_HEAP_DESIRED_LIMIT;
jmem_heap.first.size = 0;
jmem_heap_free_t *const region_p = (jmem_heap_free_t *) jmem_heap.area;
jmem_heap.first.next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (region_p);
region_p->size = sizeof (jmem_heap.area);
JERRY_ASSERT ((uintptr_t) JERRY_HEAP_CONTEXT (area) % JMEM_ALIGNMENT == 0);
JERRY_CONTEXT (jmem_heap_allocated_size) = 0;
JERRY_CONTEXT (jmem_heap_limit) = CONFIG_MEM_HEAP_DESIRED_LIMIT;
JERRY_CONTEXT (jmem_free_unused_memory_callback) = NULL;
jmem_heap_free_t *const region_p = (jmem_heap_free_t *) JERRY_HEAP_CONTEXT (area);
region_p->size = JMEM_HEAP_AREA_SIZE;
region_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (JMEM_HEAP_END_OF_LIST);
jmem_heap_list_skip_p = &jmem_heap.first;
JERRY_HEAP_CONTEXT (first).size = 0;
JERRY_HEAP_CONTEXT (first).next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (region_p);
VALGRIND_NOACCESS_SPACE (jmem_heap.area, JMEM_HEAP_AREA_SIZE);
JERRY_CONTEXT (jmem_heap_list_skip_p) = &JERRY_HEAP_CONTEXT (first);
#ifdef JERRY_VALGRIND_FREYA
JERRY_CONTEXT (valgrind_freya_mempool_request) = false;
#endif /* JERRY_VALGRIND_FREYA */
VALGRIND_NOACCESS_SPACE (JERRY_HEAP_CONTEXT (area), JMEM_HEAP_AREA_SIZE);
JMEM_HEAP_STAT_INIT ();
} /* jmem_heap_init */
@@ -224,8 +178,8 @@ jmem_heap_init (void)
*/
void jmem_heap_finalize (void)
{
JERRY_ASSERT (jmem_heap_allocated_size == 0);
VALGRIND_NOACCESS_SPACE (&jmem_heap, sizeof (jmem_heap));
JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_allocated_size) == 0);
VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_t));
} /* jmem_heap_finalize */
/**
@@ -244,48 +198,51 @@ void *jmem_heap_alloc_block_internal (const size_t size)
const size_t required_size = ((size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT) * JMEM_ALIGNMENT;
jmem_heap_free_t *data_space_p = NULL;
VALGRIND_DEFINED_SPACE (&jmem_heap.first, sizeof (jmem_heap_free_t));
VALGRIND_DEFINED_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));
// Fast path for 8 byte chunks, first region is guaranteed to be sufficient
if (required_size == JMEM_ALIGNMENT
&& likely (jmem_heap.first.next_offset != JMEM_HEAP_GET_OFFSET_FROM_ADDR (JMEM_HEAP_END_OF_LIST)))
&& likely (JERRY_HEAP_CONTEXT (first).next_offset != JMEM_HEAP_GET_OFFSET_FROM_ADDR (JMEM_HEAP_END_OF_LIST)))
{
data_space_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (jmem_heap.first.next_offset);
data_space_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset);
JERRY_ASSERT (jmem_is_heap_pointer (data_space_p));
VALGRIND_DEFINED_SPACE (data_space_p, sizeof (jmem_heap_free_t));
jmem_heap_allocated_size += JMEM_ALIGNMENT;
JERRY_CONTEXT (jmem_heap_allocated_size) += JMEM_ALIGNMENT;
JMEM_HEAP_STAT_ALLOC_ITER ();
if (data_space_p->size == JMEM_ALIGNMENT)
{
jmem_heap.first.next_offset = data_space_p->next_offset;
JERRY_HEAP_CONTEXT (first).next_offset = data_space_p->next_offset;
}
else
{
JERRY_ASSERT (data_space_p->size > JMEM_ALIGNMENT);
jmem_heap_free_t *const remaining_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (jmem_heap.first.next_offset) + 1;
jmem_heap_free_t *remaining_p;
remaining_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset) + 1;
VALGRIND_DEFINED_SPACE (remaining_p, sizeof (jmem_heap_free_t));
remaining_p->size = data_space_p->size - JMEM_ALIGNMENT;
remaining_p->next_offset = data_space_p->next_offset;
VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (jmem_heap_free_t));
jmem_heap.first.next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
JERRY_HEAP_CONTEXT (first).next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
}
VALGRIND_UNDEFINED_SPACE (data_space_p, sizeof (jmem_heap_free_t));
if (unlikely (data_space_p == jmem_heap_list_skip_p))
if (unlikely (data_space_p == JERRY_CONTEXT (jmem_heap_list_skip_p)))
{
jmem_heap_list_skip_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (jmem_heap.first.next_offset);
JERRY_CONTEXT (jmem_heap_list_skip_p) = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset);
}
}
// Slow path for larger regions
else
{
jmem_heap_free_t *current_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (jmem_heap.first.next_offset);
jmem_heap_free_t *prev_p = &jmem_heap.first;
jmem_heap_free_t *current_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset);
jmem_heap_free_t *prev_p = &JERRY_HEAP_CONTEXT (first);
while (current_p != JMEM_HEAP_END_OF_LIST)
{
JERRY_ASSERT (jmem_is_heap_pointer (current_p));
@@ -300,7 +257,7 @@ void *jmem_heap_alloc_block_internal (const size_t size)
{
// Region is sufficiently big, store address
data_space_p = current_p;
jmem_heap_allocated_size += required_size;
JERRY_CONTEXT (jmem_heap_allocated_size) += required_size;
// Region was larger than necessary
if (current_p->size > required_size)
@@ -328,7 +285,7 @@ void *jmem_heap_alloc_block_internal (const size_t size)
VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
}
jmem_heap_list_skip_p = prev_p;
JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p;
// Found enough space
break;
@@ -341,12 +298,12 @@ void *jmem_heap_alloc_block_internal (const size_t size)
}
}
while (jmem_heap_allocated_size >= jmem_heap_limit)
while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
{
jmem_heap_limit += CONFIG_MEM_HEAP_DESIRED_LIMIT;
JERRY_CONTEXT (jmem_heap_limit) += CONFIG_MEM_HEAP_DESIRED_LIMIT;
}
VALGRIND_NOACCESS_SPACE (&jmem_heap.first, sizeof (jmem_heap_free_t));
VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));
if (unlikely (!data_space_p))
{
@@ -382,7 +339,7 @@ jmem_heap_alloc_block (const size_t size)
jmem_run_free_unused_memory_callbacks (JMEM_FREE_UNUSED_MEMORY_SEVERITY_HIGH);
#endif /* JMEM_GC_BEFORE_EACH_ALLOC */
if (jmem_heap_allocated_size + size >= jmem_heap_limit)
if (JERRY_CONTEXT (jmem_heap_allocated_size) + size >= JERRY_CONTEXT (jmem_heap_limit))
{
jmem_run_free_unused_memory_callbacks (JMEM_FREE_UNUSED_MEMORY_SEVERITY_LOW);
}
@@ -447,7 +404,7 @@ jmem_heap_free_block (void *ptr, /**< pointer to beginning of data space of the
/* checking that ptr points to the heap */
JERRY_ASSERT (jmem_is_heap_pointer (ptr));
JERRY_ASSERT (size > 0);
JERRY_ASSERT (jmem_heap_limit >= jmem_heap_allocated_size);
JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size));
VALGRIND_FREYA_FREELIKE_SPACE (ptr);
VALGRIND_NOACCESS_SPACE (ptr, size);
@@ -457,16 +414,16 @@ jmem_heap_free_block (void *ptr, /**< pointer to beginning of data space of the
jmem_heap_free_t *prev_p;
jmem_heap_free_t *next_p;
VALGRIND_DEFINED_SPACE (&jmem_heap.first, sizeof (jmem_heap_free_t));
VALGRIND_DEFINED_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));
if (block_p > jmem_heap_list_skip_p)
if (block_p > JERRY_CONTEXT (jmem_heap_list_skip_p))
{
prev_p = jmem_heap_list_skip_p;
prev_p = JERRY_CONTEXT (jmem_heap_list_skip_p);
JMEM_HEAP_STAT_SKIP ();
}
else
{
prev_p = &jmem_heap.first;
prev_p = &JERRY_HEAP_CONTEXT (first);
JMEM_HEAP_STAT_NONSKIP ();
}
@@ -513,9 +470,9 @@ jmem_heap_free_block (void *ptr, /**< pointer to beginning of data space of the
// Update next
if (jmem_heap_get_region_end (block_p) == next_p)
{
if (unlikely (next_p == jmem_heap_list_skip_p))
if (unlikely (next_p == JERRY_CONTEXT (jmem_heap_list_skip_p)))
{
jmem_heap_list_skip_p = block_p;
JERRY_CONTEXT (jmem_heap_list_skip_p) = block_p;
}
// Can be merged
@@ -528,22 +485,22 @@ jmem_heap_free_block (void *ptr, /**< pointer to beginning of data space of the
block_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (next_p);
}
jmem_heap_list_skip_p = prev_p;
JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p;
VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
VALGRIND_NOACCESS_SPACE (block_p, size);
VALGRIND_NOACCESS_SPACE (next_p, sizeof (jmem_heap_free_t));
JERRY_ASSERT (jmem_heap_allocated_size > 0);
jmem_heap_allocated_size -= aligned_size;
JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_allocated_size) > 0);
JERRY_CONTEXT (jmem_heap_allocated_size) -= aligned_size;
while (jmem_heap_allocated_size + CONFIG_MEM_HEAP_DESIRED_LIMIT <= jmem_heap_limit)
while (JERRY_CONTEXT (jmem_heap_allocated_size) + CONFIG_MEM_HEAP_DESIRED_LIMIT <= JERRY_CONTEXT (jmem_heap_limit))
{
jmem_heap_limit -= CONFIG_MEM_HEAP_DESIRED_LIMIT;
JERRY_CONTEXT (jmem_heap_limit) -= CONFIG_MEM_HEAP_DESIRED_LIMIT;
}
VALGRIND_NOACCESS_SPACE (&jmem_heap.first, sizeof (jmem_heap_free_t));
JERRY_ASSERT (jmem_heap_limit >= jmem_heap_allocated_size);
VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));
JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size));
JMEM_HEAP_STAT_FREE (size);
} /* jmem_heap_free_block */
@@ -570,7 +527,7 @@ jmem_heap_compress_pointer (const void *pointer_p) /**< pointer to compress */
JERRY_ASSERT (jmem_is_heap_pointer (pointer_p));
uintptr_t int_ptr = (uintptr_t) pointer_p;
const uintptr_t heap_start = (uintptr_t) &jmem_heap;
const uintptr_t heap_start = (uintptr_t) &JERRY_HEAP_CONTEXT (first);
JERRY_ASSERT (int_ptr % JMEM_ALIGNMENT == 0);
@@ -595,7 +552,7 @@ jmem_heap_decompress_pointer (uintptr_t compressed_pointer) /**< pointer to deco
JERRY_ASSERT (compressed_pointer != JMEM_CP_NULL);
uintptr_t int_ptr = compressed_pointer;
const uintptr_t heap_start = (uintptr_t) &jmem_heap;
const uintptr_t heap_start = (uintptr_t) &JERRY_HEAP_CONTEXT (first);
int_ptr <<= JMEM_ALIGNMENT_LOG;
int_ptr += heap_start;
@@ -617,8 +574,8 @@ jmem_heap_decompress_pointer (uintptr_t compressed_pointer) /**< pointer to deco
bool
jmem_is_heap_pointer (const void *pointer) /**< pointer */
{
return ((uint8_t *) pointer >= jmem_heap.area
&& (uint8_t *) pointer <= ((uint8_t *) jmem_heap.area + JMEM_HEAP_AREA_SIZE));
return ((uint8_t *) pointer >= JERRY_HEAP_CONTEXT (area)
&& (uint8_t *) pointer <= (JERRY_HEAP_CONTEXT (area) + JMEM_HEAP_AREA_SIZE));
} /* jmem_is_heap_pointer */
#endif /* !JERRY_NDEBUG */
@@ -631,7 +588,7 @@ jmem_heap_get_stats (jmem_heap_stats_t *out_heap_stats_p) /**< [out] heap stats
{
JERRY_ASSERT (out_heap_stats_p != NULL);
*out_heap_stats_p = jmem_heap_stats;
*out_heap_stats_p = JERRY_CONTEXT (jmem_heap_stats);
} /* jmem_heap_get_stats */
/**
@@ -640,8 +597,8 @@ jmem_heap_get_stats (jmem_heap_stats_t *out_heap_stats_p) /**< [out] heap stats
void
jmem_heap_stats_reset_peak (void)
{
jmem_heap_stats.peak_allocated_bytes = jmem_heap_stats.allocated_bytes;
jmem_heap_stats.peak_waste_bytes = jmem_heap_stats.waste_bytes;
JERRY_CONTEXT (jmem_heap_stats).peak_allocated_bytes = JERRY_CONTEXT (jmem_heap_stats).allocated_bytes;
JERRY_CONTEXT (jmem_heap_stats).peak_waste_bytes = JERRY_CONTEXT (jmem_heap_stats).waste_bytes;
} /* jmem_heap_stats_reset_peak */
/**
@@ -650,6 +607,8 @@ jmem_heap_stats_reset_peak (void)
void
jmem_heap_stats_print (void)
{
jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats);
printf ("Heap stats:\n"
" Heap size = %zu bytes\n"
" Allocated = %zu bytes\n"
@@ -660,17 +619,17 @@ jmem_heap_stats_print (void)
" Average alloc iteration = %zu.%04zu\n"
" Average free iteration = %zu.%04zu\n"
"\n",
jmem_heap_stats.size,
jmem_heap_stats.allocated_bytes,
jmem_heap_stats.waste_bytes,
jmem_heap_stats.peak_allocated_bytes,
jmem_heap_stats.peak_waste_bytes,
jmem_heap_stats.skip_count / jmem_heap_stats.nonskip_count,
jmem_heap_stats.skip_count % jmem_heap_stats.nonskip_count * 10000 / jmem_heap_stats.nonskip_count,
jmem_heap_stats.alloc_iter_count / jmem_heap_stats.alloc_count,
jmem_heap_stats.alloc_iter_count % jmem_heap_stats.alloc_count * 10000 / jmem_heap_stats.alloc_count,
jmem_heap_stats.free_iter_count / jmem_heap_stats.free_count,
jmem_heap_stats.free_iter_count % jmem_heap_stats.free_count * 10000 / jmem_heap_stats.free_count);
heap_stats->size,
heap_stats->allocated_bytes,
heap_stats->waste_bytes,
heap_stats->peak_allocated_bytes,
heap_stats->peak_waste_bytes,
heap_stats->skip_count / heap_stats->nonskip_count,
heap_stats->skip_count % heap_stats->nonskip_count * 10000 / heap_stats->nonskip_count,
heap_stats->alloc_iter_count / heap_stats->alloc_count,
heap_stats->alloc_iter_count % heap_stats->alloc_count * 10000 / heap_stats->alloc_count,
heap_stats->free_iter_count / heap_stats->free_count,
heap_stats->free_iter_count % heap_stats->free_count * 10000 / heap_stats->free_count);
} /* jmem_heap_stats_print */
/**
@@ -679,9 +638,9 @@ jmem_heap_stats_print (void)
static void
jmem_heap_stat_init ()
{
memset (&jmem_heap_stats, 0, sizeof (jmem_heap_stats));
memset (&JERRY_CONTEXT (jmem_heap_stats), 0, sizeof (jmem_heap_stats_t));
jmem_heap_stats.size = JMEM_HEAP_AREA_SIZE;
JERRY_CONTEXT (jmem_heap_stats).size = JMEM_HEAP_AREA_SIZE;
} /* jmem_heap_stat_init */
/**
@@ -693,27 +652,28 @@ jmem_heap_stat_alloc (size_t size) /**< Size of allocated block */
const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;
const size_t waste_bytes = aligned_size - size;
jmem_heap_stats.allocated_bytes += aligned_size;
jmem_heap_stats.waste_bytes += waste_bytes;
jmem_heap_stats.alloc_count++;
jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats);
heap_stats->allocated_bytes += aligned_size;
heap_stats->waste_bytes += waste_bytes;
heap_stats->alloc_count++;
if (jmem_heap_stats.allocated_bytes > jmem_heap_stats.peak_allocated_bytes)
if (heap_stats->allocated_bytes > heap_stats->peak_allocated_bytes)
{
jmem_heap_stats.peak_allocated_bytes = jmem_heap_stats.allocated_bytes;
heap_stats->peak_allocated_bytes = heap_stats->allocated_bytes;
}
if (jmem_heap_stats.allocated_bytes > jmem_heap_stats.global_peak_allocated_bytes)
if (heap_stats->allocated_bytes > heap_stats->global_peak_allocated_bytes)
{
jmem_heap_stats.global_peak_allocated_bytes = jmem_heap_stats.allocated_bytes;
heap_stats->global_peak_allocated_bytes = heap_stats->allocated_bytes;
}
if (jmem_heap_stats.waste_bytes > jmem_heap_stats.peak_waste_bytes)
if (heap_stats->waste_bytes > heap_stats->peak_waste_bytes)
{
jmem_heap_stats.peak_waste_bytes = jmem_heap_stats.waste_bytes;
heap_stats->peak_waste_bytes = heap_stats->waste_bytes;
}
if (jmem_heap_stats.waste_bytes > jmem_heap_stats.global_peak_waste_bytes)
if (heap_stats->waste_bytes > heap_stats->global_peak_waste_bytes)
{
jmem_heap_stats.global_peak_waste_bytes = jmem_heap_stats.waste_bytes;
heap_stats->global_peak_waste_bytes = heap_stats->waste_bytes;
}
} /* jmem_heap_stat_alloc */
@@ -726,9 +686,11 @@ jmem_heap_stat_free (size_t size) /**< Size of freed block */
const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;
const size_t waste_bytes = aligned_size - size;
jmem_heap_stats.free_count++;
jmem_heap_stats.allocated_bytes -= aligned_size;
jmem_heap_stats.waste_bytes -= waste_bytes;
jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats);
heap_stats->free_count++;
heap_stats->allocated_bytes -= aligned_size;
heap_stats->waste_bytes -= waste_bytes;
} /* jmem_heap_stat_free */
/**
@@ -737,7 +699,7 @@ jmem_heap_stat_free (size_t size) /**< Size of freed block */
static void
jmem_heap_stat_skip ()
{
jmem_heap_stats.skip_count++;
JERRY_CONTEXT (jmem_heap_stats).skip_count++;
} /* jmem_heap_stat_skip */
/**
@@ -746,7 +708,7 @@ jmem_heap_stat_skip ()
static void
jmem_heap_stat_nonskip ()
{
jmem_heap_stats.nonskip_count++;
JERRY_CONTEXT (jmem_heap_stats).nonskip_count++;
} /* jmem_heap_stat_nonskip */
/**
@@ -755,7 +717,7 @@ jmem_heap_stat_nonskip ()
static void
jmem_heap_stat_alloc_iter ()
{
jmem_heap_stats.alloc_iter_count++;
JERRY_CONTEXT (jmem_heap_stats).alloc_iter_count++;
} /* jmem_heap_stat_alloc_iter */
/**
@@ -764,7 +726,7 @@ jmem_heap_stat_alloc_iter ()
static void
jmem_heap_stat_free_iter ()
{
jmem_heap_stats.free_iter_count++;
JERRY_CONTEXT (jmem_heap_stats).free_iter_count++;
} /* jmem_heap_stat_free_iter */
#endif /* JMEM_STATS */
+48 -58
@@ -18,11 +18,11 @@
* Memory pool manager implementation
*/
#include "jrt.h"
#include "jrt-libc-includes.h"
#include "jcontext.h"
#include "jmem-allocator.h"
#include "jmem-heap.h"
#include "jmem-poolman.h"
#include "jrt-libc-includes.h"
#define JMEM_ALLOCATOR_INTERNAL
#include "jmem-allocator-internal.h"
@@ -34,26 +34,8 @@
* @{
*/
/**
* Node for free chunk list
*/
typedef struct jmem_pools_chunk
{
struct jmem_pools_chunk *next_p; /**< pointer to next pool chunk */
} jmem_pools_chunk_t;
/**
* List of free pool chunks
*/
jmem_pools_chunk_t *jmem_free_chunk_p;
#ifdef JMEM_STATS
/**
* Pools' memory usage statistics
*/
jmem_pools_stats_t jmem_pools_stats;
static void jmem_pools_stat_init (void);
static void jmem_pools_stat_free_pool (void);
static void jmem_pools_stat_new_alloc (void);
@@ -107,7 +89,7 @@ jmem_pools_init (void)
JERRY_STATIC_ASSERT (sizeof (jmem_pools_chunk_t) <= JMEM_POOL_CHUNK_SIZE,
size_of_mem_pools_chunk_t_must_be_less_than_or_equal_to_MEM_POOL_CHUNK_SIZE);
jmem_free_chunk_p = NULL;
JERRY_CONTEXT (jmem_free_chunk_p) = NULL;
JMEM_POOLS_STAT_INIT ();
} /* jmem_pools_init */
@@ -120,7 +102,7 @@ jmem_pools_finalize (void)
{
jmem_pools_collect_empty ();
JERRY_ASSERT (jmem_free_chunk_p == NULL);
JERRY_ASSERT (JERRY_CONTEXT (jmem_free_chunk_p) == NULL);
} /* jmem_pools_finalize */
/**
@@ -136,15 +118,15 @@ jmem_pools_alloc (void)
jmem_run_free_unused_memory_callbacks (JMEM_FREE_UNUSED_MEMORY_SEVERITY_HIGH);
#endif /* JMEM_GC_BEFORE_EACH_ALLOC */
if (jmem_free_chunk_p != NULL)
if (JERRY_CONTEXT (jmem_free_chunk_p) != NULL)
{
const jmem_pools_chunk_t *const chunk_p = jmem_free_chunk_p;
const jmem_pools_chunk_t *const chunk_p = JERRY_CONTEXT (jmem_free_chunk_p);
JMEM_POOLS_STAT_REUSE ();
VALGRIND_DEFINED_SPACE (chunk_p, JMEM_POOL_CHUNK_SIZE);
jmem_free_chunk_p = chunk_p->next_p;
JERRY_CONTEXT (jmem_free_chunk_p) = chunk_p->next_p;
VALGRIND_UNDEFINED_SPACE (chunk_p, JMEM_POOL_CHUNK_SIZE);
@@ -167,8 +149,8 @@ jmem_pools_free (void *chunk_p) /**< pointer to the chunk */
VALGRIND_DEFINED_SPACE (chunk_to_free_p, JMEM_POOL_CHUNK_SIZE);
chunk_to_free_p->next_p = jmem_free_chunk_p;
jmem_free_chunk_p = chunk_to_free_p;
chunk_to_free_p->next_p = JERRY_CONTEXT (jmem_free_chunk_p);
JERRY_CONTEXT (jmem_free_chunk_p) = chunk_to_free_p;
VALGRIND_NOACCESS_SPACE (chunk_to_free_p, JMEM_POOL_CHUNK_SIZE);
@@ -181,15 +163,15 @@ jmem_pools_free (void *chunk_p) /**< pointer to the chunk */
void
jmem_pools_collect_empty ()
{
while (jmem_free_chunk_p)
while (JERRY_CONTEXT (jmem_free_chunk_p))
{
VALGRIND_DEFINED_SPACE (jmem_free_chunk_p, sizeof (jmem_pools_chunk_t));
jmem_pools_chunk_t *const next_p = jmem_free_chunk_p->next_p;
VALGRIND_NOACCESS_SPACE (jmem_free_chunk_p, sizeof (jmem_pools_chunk_t));
VALGRIND_DEFINED_SPACE (JERRY_CONTEXT (jmem_free_chunk_p), sizeof (jmem_pools_chunk_t));
jmem_pools_chunk_t *const next_p = JERRY_CONTEXT (jmem_free_chunk_p)->next_p;
VALGRIND_NOACCESS_SPACE (JERRY_CONTEXT (jmem_free_chunk_p), sizeof (jmem_pools_chunk_t));
jmem_heap_free_block (jmem_free_chunk_p, JMEM_POOL_CHUNK_SIZE);
jmem_heap_free_block (JERRY_CONTEXT (jmem_free_chunk_p), JMEM_POOL_CHUNK_SIZE);
JMEM_POOLS_STAT_DEALLOC ();
jmem_free_chunk_p = next_p;
JERRY_CONTEXT (jmem_free_chunk_p) = next_p;
}
} /* jmem_pools_collect_empty */
@@ -202,7 +184,7 @@ jmem_pools_get_stats (jmem_pools_stats_t *out_pools_stats_p) /**< [out] pools' s
{
JERRY_ASSERT (out_pools_stats_p != NULL);
*out_pools_stats_p = jmem_pools_stats;
*out_pools_stats_p = JERRY_CONTEXT (jmem_pools_stats);
} /* jmem_pools_get_stats */
/**
@@ -211,7 +193,7 @@ jmem_pools_get_stats (jmem_pools_stats_t *out_pools_stats_p) /**< [out] pools' s
void
jmem_pools_stats_reset_peak (void)
{
jmem_pools_stats.peak_pools_count = jmem_pools_stats.pools_count;
JERRY_CONTEXT (jmem_pools_stats).peak_pools_count = JERRY_CONTEXT (jmem_pools_stats.pools_count);
} /* jmem_pools_stats_reset_peak */
/**
@@ -220,6 +202,8 @@ jmem_pools_stats_reset_peak (void)
void
jmem_pools_stats_print (void)
{
jmem_pools_stats_t *pools_stats = &JERRY_CONTEXT (jmem_pools_stats);
printf ("Pools stats:\n"
" Chunk size: %zu\n"
" Pool chunks: %zu\n"
@@ -227,11 +211,11 @@ jmem_pools_stats_print (void)
" Free chunks: %zu\n"
" Pool reuse ratio: %zu.%04zu\n",
JMEM_POOL_CHUNK_SIZE,
jmem_pools_stats.pools_count,
jmem_pools_stats.peak_pools_count,
jmem_pools_stats.free_chunks,
jmem_pools_stats.reused_count / jmem_pools_stats.new_alloc_count,
jmem_pools_stats.reused_count % jmem_pools_stats.new_alloc_count * 10000 / jmem_pools_stats.new_alloc_count);
pools_stats->pools_count,
pools_stats->peak_pools_count,
pools_stats->free_chunks,
pools_stats->reused_count / pools_stats->new_alloc_count,
pools_stats->reused_count % pools_stats->new_alloc_count * 10000 / pools_stats->new_alloc_count);
} /* jmem_pools_stats_print */
/**
@@ -240,7 +224,7 @@ jmem_pools_stats_print (void)
static void
jmem_pools_stat_init (void)
{
memset (&jmem_pools_stats, 0, sizeof (jmem_pools_stats));
memset (&JERRY_CONTEXT (jmem_pools_stats), 0, sizeof (jmem_pools_stats_t));
} /* jmem_pools_stat_init */
/**
@@ -249,16 +233,18 @@ jmem_pools_stat_init (void)
static void
jmem_pools_stat_new_alloc (void)
{
jmem_pools_stats.pools_count++;
jmem_pools_stats.new_alloc_count++;
jmem_pools_stats_t *pools_stats = &JERRY_CONTEXT (jmem_pools_stats);
if (jmem_pools_stats.pools_count > jmem_pools_stats.peak_pools_count)
pools_stats->pools_count++;
pools_stats->new_alloc_count++;
if (pools_stats->pools_count > pools_stats->peak_pools_count)
{
jmem_pools_stats.peak_pools_count = jmem_pools_stats.pools_count;
pools_stats->peak_pools_count = pools_stats->pools_count;
}
if (jmem_pools_stats.pools_count > jmem_pools_stats.global_peak_pools_count)
if (pools_stats->pools_count > pools_stats->global_peak_pools_count)
{
jmem_pools_stats.global_peak_pools_count = jmem_pools_stats.pools_count;
pools_stats->global_peak_pools_count = pools_stats->pools_count;
}
} /* jmem_pools_stat_new_alloc */
@@ -269,17 +255,19 @@ jmem_pools_stat_new_alloc (void)
static void
jmem_pools_stat_reuse (void)
{
jmem_pools_stats.pools_count++;
jmem_pools_stats.free_chunks--;
jmem_pools_stats.reused_count++;
jmem_pools_stats_t *pools_stats = &JERRY_CONTEXT (jmem_pools_stats);
if (jmem_pools_stats.pools_count > jmem_pools_stats.peak_pools_count)
pools_stats->pools_count++;
pools_stats->free_chunks--;
pools_stats->reused_count++;
if (pools_stats->pools_count > pools_stats->peak_pools_count)
{
jmem_pools_stats.peak_pools_count = jmem_pools_stats.pools_count;
pools_stats->peak_pools_count = pools_stats->pools_count;
}
if (jmem_pools_stats.pools_count > jmem_pools_stats.global_peak_pools_count)
if (pools_stats->pools_count > pools_stats->global_peak_pools_count)
{
jmem_pools_stats.global_peak_pools_count = jmem_pools_stats.pools_count;
pools_stats->global_peak_pools_count = pools_stats->pools_count;
}
} /* jmem_pools_stat_reuse */
@@ -290,10 +278,12 @@ jmem_pools_stat_reuse (void)
static void
jmem_pools_stat_free_pool (void)
{
JERRY_ASSERT (jmem_pools_stats.pools_count > 0);
jmem_pools_stats_t *pools_stats = &JERRY_CONTEXT (jmem_pools_stats);
jmem_pools_stats.pools_count--;
jmem_pools_stats.free_chunks++;
JERRY_ASSERT (pools_stats->pools_count > 0);
pools_stats->pools_count--;
pools_stats->free_chunks++;
} /* jmem_pools_stat_free_pool */
/**
@@ -302,7 +292,7 @@ jmem_pools_stat_free_pool (void)
static void
jmem_pools_stat_dealloc (void)
{
jmem_pools_stats.free_chunks--;
JERRY_CONTEXT (jmem_pools_stats).free_chunks--;
} /* jmem_pools_stat_dealloc */
#endif /* JMEM_STATS */