module std.experimental.allocator.region;

import std.experimental.allocator.common;
import std.experimental.allocator.null_allocator;
import std.typecons : Flag, Yes, No;

/**
A $(D Region) allocator allocates memory straight from one contiguous chunk.
There is no deallocation, and once the region is full, allocation requests
return $(D null). Therefore, $(D Region)s are often used (a) in conjunction
with more sophisticated allocators, or (b) for batch-style very fast
allocations that deallocate everything at once.

The region stores only three pointers, corresponding to the current position
in the store and its limits. One allocation entails rounding up the allocation
size for alignment purposes, bumping the current pointer, and comparing it
against the limit.

If $(D ParentAllocator) is different from $(D NullAllocator), $(D Region)
deallocates the chunk of memory during destruction.

The $(D minAlign) parameter establishes alignment. If $(D minAlign > 1), the
sizes of all allocation requests are rounded up to a multiple of
$(D minAlign). Applications aiming at maximum speed may want to choose
$(D minAlign = 1) and control alignment externally.
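
A minimal usage sketch, assuming a $(D Mallocator)-backed chunk:

----
import std.experimental.allocator.mallocator;
// Carve a 64 KB chunk out of malloc and bump-allocate from it.
auto r = Region!Mallocator(1024 * 64);
auto b = r.allocate(42);
assert(b.length == 42);
----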
*/
struct Region(ParentAllocator = NullAllocator,
    uint minAlign = platformAlignment,
    Flag!"defineDeallocate" defineDeallocate = No.defineDeallocate)
{
    //import std.exception : enforce;

    static assert(minAlign.isGoodStaticAlignment);
    static assert(ParentAllocator.alignment >= minAlign);

    // state {
    /**
    The _parent allocator. Depending on whether $(D ParentAllocator) holds
    state or not, this is a member variable or an alias for
    $(D ParentAllocator.it).
    */
    static if (stateSize!ParentAllocator)
    {
        ParentAllocator parent;
    }
    else
    {
        alias parent = ParentAllocator.it;
    }
    private void* _current, _end, _begin;
    // }

    /**
    Constructs a region backed by a user-provided store. Assumes $(D store)
    is aligned at $(D minAlign). Also assumes the memory was allocated with
    $(D ParentAllocator) (if different from $(D NullAllocator)).

    Params:
    store = User-provided store backing up the region. $(D store) must be
    aligned at $(D minAlign) (enforced with $(D assert)). If
    $(D ParentAllocator) is different from $(D NullAllocator), memory is
    assumed to have been allocated with $(D ParentAllocator).
    n = Bytes to allocate using $(D ParentAllocator). This constructor is
    only defined if $(D ParentAllocator) is different from
    $(D NullAllocator). If $(D parent.allocate(n)) returns $(D null), the
    region will be initialized as empty (correctly initialized but unable
    to allocate).
    */
    this(void[] store)
    {
        assert(store.ptr.alignedAt(minAlign));
        _current = store.ptr;
        _end = _current + store.length;
        _begin = _current;
    }

    /// Ditto
    static if (!is(ParentAllocator == NullAllocator))
    this(size_t n)
    {
        this(parent.allocate(n));
    }

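    /*
    A minimal sketch of the chunk cleanup promised in the documentation
    above (assumes the region uniquely owns its chunk; see the postblit
    note below): return the chunk to the parent allocator on destruction.
    */
    static if (!is(ParentAllocator == NullAllocator))
    ~this()
    {
        parent.deallocate(_begin[0 .. _end - _begin]);
    }
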
    /*
    The postblit of $(D Region) is disabled because such objects should not
    be copied around naively.
    */
    //@disable this(this);

    /**
    Alignment offered.
    */
    alias alignment = minAlign;

    /**
    Allocates $(D n) bytes of memory. The shortest path involves an
    alignment adjustment (if $(D alignment > 1)), an increment, and a
    comparison.

    Params:
    n = number of bytes to allocate

    Returns:
    A properly-aligned buffer of size $(D n), or $(D null) if the request
    could not be satisfied.
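
    A short sketch, assuming the default $(D platformAlignment) and a
    $(D Mallocator)-backed chunk: the returned slice has the requested
    length, but the region advances by the alignment-rounded size.

    ----
    import std.experimental.allocator.mallocator;
    auto r = Region!Mallocator(4096);
    const before = r.available;
    auto b = r.allocate(10);
    assert(b.length == 10);
    // 10 rounded up to a multiple of r.alignment
    assert(before - r.available ==
        (10 + r.alignment - 1) / r.alignment * r.alignment);
    ----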
    */
    void[] allocate(size_t n)
    {
        auto result = _current[0 .. n];
        static if (minAlign > 1)
        {
            immutable uint slack = cast(uint) n & (alignment - 1);
            const rounded = slack
                ? n + alignment - slack
                : n;
            assert(rounded >= n);
        }
        else
        {
            alias rounded = n;
        }
        _current += rounded;
        if (_current <= _end) return result;
        // Slow path, backtrack
        _current -= rounded;
        return null;
    }

    /**
    Allocates $(D n) bytes of memory aligned at alignment $(D a).

    Params:
    n = number of bytes to allocate
    a = alignment for the allocated block

    Returns:
    Either a suitable block of $(D n) bytes aligned at $(D a), or $(D null).
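
    A sketch, assuming a $(D Mallocator)-backed chunk large enough to absorb
    the alignment adjustment:

    ----
    import std.experimental.allocator.mallocator;
    auto r = Region!Mallocator(1024 * 64);
    auto b = r.alignedAllocate(100, 4096);
    assert(b.length == 100);
    assert(cast(size_t) b.ptr % 4096 == 0);
    ----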
    */
    void[] alignedAllocate(size_t n, uint a)
    {
        assert(a.isPowerOf2);
        // Just bump the pointer to the next good allocation
        auto save = _current;
        _current = _current.alignUpTo(a);
        auto result = allocate(n);
        if (result.ptr)
        {
            assert(result.length == n);
            return result;
        }
        // Failed, rollback
        _current = save;
        return null;
    }

    /// Allocates and returns all memory available to this region.
    void[] allocateAll()
    {
        auto result = _current[0 .. available];
        _current = _end;
        return result;
    }

    /**
    Expands an allocated block in place. Expansion will succeed only if the
    block is the last allocated.
    */
    bool expand(ref void[] b, size_t delta)
    {
        assert(owns(b) || b.ptr is null);
        assert(b.ptr + b.length <= _current || b.ptr is null);
        if (!b.ptr)
        {
            b = allocate(delta);
            return b.length == delta;
        }
        auto newLength = b.length + delta;
        if (_current < b.ptr + b.length + alignment)
        {
            // This was the last allocation! Allocate some more and we're done.
            if (this.goodAllocSize(b.length) == this.goodAllocSize(newLength)
                || allocate(delta).length == delta)
            {
                b = b.ptr[0 .. newLength];
                assert(_current < b.ptr + b.length + alignment);
                return true;
            }
        }
        return false;
    }

    /**
    Deallocates $(D b). This works only if $(D b) was obtained as the last
    call to $(D allocate); otherwise (i.e. another allocation has occurred
    since) it does nothing. These semantics are tricky, and therefore
    $(D deallocate) is defined only if $(D Region) is instantiated with
    $(D Yes.defineDeallocate) as the third template argument.

    Params:
    b = Block previously obtained by a call to $(D allocate) against this
    allocator ($(D null) is allowed).
    */
    static if (defineDeallocate == Yes.defineDeallocate)
    void deallocate(void[] b)
    {
        assert(owns(b) || b.ptr is null);
        assert(b.ptr + b.length <= _current || b.ptr is null);
        if (_current < b.ptr + b.length + alignment)
        {
            assert(b.ptr !is null);
            _current = b.ptr;
        }
    }

    /**
    Deallocates all memory allocated by this region, which can be
    subsequently reused for new allocations.
    */
    void deallocateAll()
    {
        _current = _begin;
    }

    /**
    Queries whether $(D b) has been allocated with this region.

    Params:
    b = Arbitrary block of memory ($(D null) is allowed; $(D owns(null))
    returns $(D false)).

    Returns:
    $(D true) if $(D b) has been allocated with this region, $(D false)
    otherwise.
    */
    bool owns(void[] b) const
    {
        return b.ptr >= _begin && b.ptr + b.length <= _end;
    }

    /// Returns $(D true) if no memory has been allocated in this region.
    bool empty() const
    {
        return _current == _begin;
    }

    /// Nonstandard property that returns bytes available for allocation.
    size_t available() const
    {
        return _end - _current;
    }
}

///
unittest
{
    import std.experimental.allocator.mallocator;
    import std.experimental.allocator.allocator_list;
    import std.algorithm : max;
    // Create a scalable list of regions. Each gets at least 1MB at a time by
    // using malloc.
    auto batchAllocator = AllocatorList!(
        (size_t n) => Region!Mallocator(max(n, 1024 * 1024))
    )();
    auto b = batchAllocator.allocate(101);
    assert(b.length == 101);
    // This will cause a second allocation
    b = batchAllocator.allocate(2 * 1024 * 1024);
    assert(b.length == 2 * 1024 * 1024);
    // Destructor will free the memory
}

unittest
{
    import std.experimental.allocator.mallocator;
    // Create a 64 KB region allocated with malloc
    auto reg = Region!(Mallocator)(1024 * 64);
    auto b = reg.allocate(101);
    assert(b.length == 101);
    // Destructor will free the memory
}

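// A sketch of the in-place primitives, assuming a malloc-backed region
// instantiated with Yes.defineDeallocate: expand succeeds only on the most
// recent allocation, and deallocate frees only the most recent allocation.
unittest
{
    import std.experimental.allocator.mallocator;
    auto r = Region!(Mallocator, platformAlignment, Yes.defineDeallocate)(4096);
    auto a = r.allocate(64);
    auto b = r.allocate(64);
    assert(r.expand(b, 16));   // b is the last block: grows in place
    assert(!r.expand(a, 16));  // a is not: expansion refused
    const avail = r.available;
    r.deallocate(b);           // rolls the region back to b's start
    assert(r.available > avail);
}
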
/**
$(D InSituRegion) is a convenient region that carries its storage within
itself (in the form of a statically-sized array).

The first template argument is the size of the region and the second is the
needed alignment. Depending on the alignment requested and platform details,
the actual available storage may be smaller than the compile-time parameter.
To make sure that at least $(D n) bytes are available in the region, use
$(D InSituRegion!(n + a - 1, a)).
*/
struct InSituRegion(size_t size, size_t minAlign = platformAlignment,
    Flag!"defineDeallocate" defineDeallocate = No.defineDeallocate)
{
    import std.algorithm : max;
    import std.conv : to;

    static assert(minAlign.isGoodStaticAlignment);
    static assert(size >= minAlign);

    @disable this(this);

    // state {
    Region!(NullAllocator, minAlign, defineDeallocate) _impl;
    union
    {
        private ubyte[size] _store = void;
        private double _forAlignmentOnly1 = void;
    }
    // }

    /**
    An alias for $(D minAlign), which must be a valid alignment (nonzero
    power of 2). The start of the region and all allocation requests will be
    rounded up to a multiple of the alignment.

    ----
    InSituRegion!(4096) a1;
    assert(a1.alignment == platformAlignment);
    InSituRegion!(4096, 64) a2;
    assert(a2.alignment == 64);
    ----
    */
    alias alignment = minAlign;

    private void lazyInit()
    {
        assert(!_impl._current);
        static if (alignment > double.alignof)
            _impl._current = _store.ptr.alignUpTo(alignment);
        else
            _impl._current = _store.ptr;
        _impl._end = _store.ptr + _store.length;
        _impl._begin = _impl._current;
        assert(_impl._current.alignedAt(alignment));
    }

    /**
    Allocates $(D n) bytes and returns them, or $(D null) if the region
    cannot accommodate the request. For efficiency reasons, if $(D n == 0)
    the function returns an empty non-null slice.
    */
    void[] allocate(size_t n)
    {
        // Fast path
    entry:
        auto result = _impl.allocate(n);
        if (result.length == n) return result;
        // Slow path
        if (_impl._current) return null; // no more room
        lazyInit;
        assert(_impl._current);
        goto entry;
    }

    /**
    As above, but the memory allocated is aligned at $(D a) bytes.
    */
    void[] alignedAllocate(size_t n, uint a)
    {
        // Fast path
    entry:
        auto result = _impl.alignedAllocate(n, a);
        if (result.length == n) return result;
        // Slow path
        if (_impl._current) return null; // no more room
        lazyInit;
        assert(_impl._current);
        goto entry;
    }

    /**
    Deallocates $(D b). This works only if $(D b) was obtained as the last
    call to $(D allocate); otherwise (i.e. another allocation has occurred
    since) it does nothing. These semantics are tricky, and therefore
    $(D deallocate) is defined only if $(D InSituRegion) is instantiated
    with $(D Yes.defineDeallocate) as the third template argument.

    Params:
    b = Block previously obtained by a call to $(D allocate) against this
    allocator ($(D null) is allowed).
    */
    static if (defineDeallocate == Yes.defineDeallocate)
    void deallocate(void[] b)
    {
        if (!_impl._current) lazyInit;
        return _impl.deallocate(b);
    }

    /**
    Returns $(D true) if and only if $(D b) is the result of a successful
    allocation. For efficiency reasons, if $(D b is null) the function
    returns $(D false).
    */
    bool owns(void[] b)
    {
        if (!_impl._current) lazyInit;
        return _impl.owns(b);
    }

    /**
    Expands an allocated block in place. Expansion will succeed only if the
    block is the last allocated.
    */
    bool expand(ref void[] b, size_t delta)
    {
        if (!_impl._current) lazyInit;
        return _impl.expand(b, delta);
    }

    /**
    Deallocates all memory allocated with this allocator.
    */
    void deallocateAll()
    {
        // We don't care to lazily init the region
        _impl.deallocateAll;
    }

    /**
    Allocates all memory available with this allocator.
    */
    void[] allocateAll()
    {
        if (!_impl._current) lazyInit;
        return _impl.allocateAll;
    }

    /**
    Nonstandard function that returns the bytes available for allocation.
    */
    size_t available()
    {
        if (!_impl._current) lazyInit;
        return _impl.available;
    }
}

///
unittest
{
    // 128KB region, allocated at 16-byte alignment
    InSituRegion!(128 * 1024, 16) r1;
    auto a1 = r1.allocate(101);
    assert(a1.length == 101);

    // 128KB region, with fallback to the garbage collector.
    import std.experimental.allocator.fallback_allocator;
    import std.experimental.allocator.free_list;
    import std.experimental.allocator.gc_allocator;
    import std.experimental.allocator.heap_block;
    FallbackAllocator!(InSituRegion!(128 * 1024), GCAllocator) r2;
    auto a2 = r2.allocate(102);
    assert(a2.length == 102);

    // Reap with GC fallback.
    InSituRegion!(128 * 1024, 8) tmp3;
    FallbackAllocator!(HeapBlock!(64, 8), GCAllocator) r3;
    r3.primary = HeapBlock!(64, 8)(tmp3.allocateAll());
    auto a3 = r3.allocate(103);
    assert(a3.length == 103);

    // Reap/GC with a freelist for small objects up to 16 bytes.
    InSituRegion!(128 * 1024, 64) tmp4;
    FreeList!(FallbackAllocator!(HeapBlock!(64, 64), GCAllocator), 0, 16) r4;
    r4.parent.primary = HeapBlock!(64, 64)(tmp4.allocateAll());
    auto a4 = r4.allocate(104);
    assert(a4.length == 104);
}

unittest
{
    InSituRegion!(4096, 1) r1;
    auto a = r1.allocate(2001);
    assert(a.length == 2001);
    import std.conv : text;
    assert(r1.available == 2095, text(r1.available));

    InSituRegion!(65536, 1024*4) r2;
    assert(r2.available <= 65536);
    a = r2.allocate(2001);
    assert(a.length == 2001);
}

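// A sketch of the sizing rule from the documentation above: to guarantee at
// least n usable bytes at alignment a, over-allocate by a - 1 bytes at
// compile time.
unittest
{
    enum n = 4096, a = 64;
    InSituRegion!(n + a - 1, a) r;
    assert(r.available >= n);
}
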
private extern(C) void* sbrk(long);
private extern(C) int brk(shared void*);

/**
Allocator backed by $(D $(LUCKY sbrk)) for Posix systems. Because $(D sbrk)
is not thread-safe
$(WEB lifecs.likai.org/2010/02/sbrk-is-not-thread-safe.html, by design),
$(D SbrkRegion) uses a mutex internally. This implies that uncontrolled calls
to $(D brk) and $(D sbrk) may adversely affect the workings of
$(D SbrkRegion).
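
A minimal usage sketch through the shared instance (the program break is
process-global, hence the single $(D it) instance):

----
auto b = SbrkRegion!().it.allocate(4096);
assert(b.length == 4096);
----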
*/
version(Posix) struct SbrkRegion(uint minAlign = platformAlignment)
{
    import core.sys.posix.pthread;
    static shared pthread_mutex_t sbrkMutex;

    static assert(minAlign.isGoodStaticAlignment);
    static assert(size_t.sizeof == (void*).sizeof);
    private shared void* _brkInitial, _brkCurrent;

    /**
    Instance shared by all callers.
    */
    static shared SbrkRegion it;

    /**
    Standard allocator primitives.
    */
    enum uint alignment = minAlign;

    /// Ditto
    void[] allocate(size_t bytes) shared
    {
        static if (minAlign > 1)
            const rounded = bytes.roundUpToMultipleOf(alignment);
        else
            alias rounded = bytes;
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex)
            == 0 || assert(0);
        // Assume sbrk returns the old break. Most online documentation
        // confirms that, except for
        // http://www.inf.udec.cl/~leo/Malloc_tutorial.pdf, which claims
        // the returned value is not portable.
        auto p = sbrk(rounded);
        if (p == cast(void*) -1)
        {
            return null;
        }
        if (!_brkInitial)
        {
            _brkInitial = cast(shared) p;
            assert(cast(size_t) _brkInitial % minAlign == 0,
                "Too large alignment chosen for " ~ typeof(this).stringof);
        }
        _brkCurrent = cast(shared) (p + rounded);
        return p[0 .. bytes];
    }

    /// Ditto
    void[] alignedAllocate(size_t bytes, uint a) shared
    {
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex)
            == 0 || assert(0);
        if (!_brkInitial)
        {
            // This is one extra call, but it'll happen only once.
            _brkInitial = cast(shared) sbrk(0);
            assert(cast(size_t) _brkInitial % minAlign == 0,
                "Too large alignment chosen for " ~ typeof(this).stringof);
            (_brkInitial != cast(void*) -1) || assert(0);
            _brkCurrent = _brkInitial;
        }
        immutable size_t delta = cast(shared void*) roundUpToMultipleOf(
            cast(size_t) _brkCurrent, a) - _brkCurrent;
        // Still must make sure the total size is aligned to the allocator's
        // alignment.
        immutable rounded = (bytes + delta).roundUpToMultipleOf(alignment);

        auto p = sbrk(rounded);
        if (p == cast(void*) -1)
        {
            return null;
        }
        _brkCurrent = cast(shared) (p + rounded);
        return p[delta .. delta + bytes];
    }

    /**
    The $(D expand) method may only succeed if the argument is the last
    block allocated. In that case, $(D expand) attempts to push the break
    pointer to the right.
    */
    bool expand(ref void[] b, size_t delta) shared
    {
        if (b is null) return (b = allocate(delta)) !is null;
        assert(_brkInitial && _brkCurrent); // otherwise where did b come from?
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex)
            == 0 || assert(0);
        if (_brkCurrent != b.ptr + b.length) return false;
        // Great, can expand the last block
        static if (minAlign > 1)
            const rounded = delta.roundUpToMultipleOf(alignment);
        else
            alias rounded = delta;
        auto p = sbrk(rounded);
        if (p == cast(void*) -1)
        {
            return false;
        }
        _brkCurrent = cast(shared) (p + rounded);
        b = b.ptr[0 .. b.length + delta];
        return true;
    }

    /// Ditto
    bool owns(void[] b) shared
    {
        // No need to lock here.
        assert(!_brkCurrent || b.ptr + b.length <= _brkCurrent);
        return _brkInitial && b.ptr >= _brkInitial;
    }

    /**
    The $(D deallocate) method only works (and returns $(D true)) on systems
    that support reducing the break address (i.e. accept calls to $(D sbrk)
    with negative offsets). OSX does not accept such calls. In addition, the
    argument must be the last block allocated.
    */
    bool deallocate(void[] b) shared
    {
        static if (minAlign > 1)
            const rounded = b.length.roundUpToMultipleOf(alignment);
        else
            const rounded = b.length;
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex)
            == 0 || assert(0);
        if (_brkCurrent != b.ptr + b.length) return false;
        assert(b.ptr >= _brkInitial);
        if (sbrk(-rounded) == cast(void*) -1)
            return false;
        _brkCurrent = cast(shared) b.ptr;
        return true;
    }

    /**
    The $(D deallocateAll) method only works (and returns $(D true)) on
    systems that support reducing the break address (i.e. accept calls to
    $(D sbrk) with negative offsets). OSX does not accept such calls.
    */
    bool deallocateAll() shared
    {
        pthread_mutex_lock(cast(pthread_mutex_t*) &sbrkMutex) == 0
            || assert(0);
        scope(exit) pthread_mutex_unlock(cast(pthread_mutex_t*) &sbrkMutex)
            == 0 || assert(0);
        return !_brkInitial || brk(_brkInitial) == 0;
    }

    /// Standard allocator API.
    bool empty()
    {
        // Also works when they're both null.
        return _brkCurrent == _brkInitial;
    }

    /// Ditto
    enum bool zeroesAllocations = true;
}

version(Posix) unittest
{
    // Let's test the assumption that sbrk(n) returns the old address
    auto p1 = sbrk(0);
    auto p2 = sbrk(4096);
    assert(p1 == p2);
    auto p3 = sbrk(0);
    assert(p3 == p2 + 4096);
    // Try to reset brk, but don't make a fuss if it doesn't work
    sbrk(-4096);
}

version(Posix) unittest
{
    alias alloc = SbrkRegion!(8).it;
    auto a = alloc.alignedAllocate(2001, 4096);
    assert(a.length == 2001);
    auto b = alloc.allocate(2001);
    assert(b.length == 2001);
    assert(alloc.owns(a));
    assert(alloc.owns(b));
    // reducing the brk does not work on OSX
    version(OSX) {} else
    {
        assert(alloc.deallocate(b));
        assert(alloc.deallocateAll);
    }
}
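
// A sketch exercising expand through the break pointer, assuming no other
// code in the process calls sbrk between these two statements.
version(Posix) unittest
{
    alias alloc = SbrkRegion!(8).it;
    auto b = alloc.allocate(64);
    assert(b.length == 64);
    // b is the last block, so the break can be pushed to the right.
    assert(alloc.expand(b, 32));
    assert(b.length == 96);
}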