Moved docs above functions outside of mixin

Alexandru Jercaianu 2018-02-11 18:42:27 +02:00
parent 42612b6897
commit f0e7988d87

@@ -185,6 +185,7 @@ struct AscendingPageAllocator
 {
     import std.typecons : Ternary;
 
+    // Docs for mixin functions
     version (StdDdoc)
     {
         /**
@@ -197,35 +198,6 @@ struct AscendingPageAllocator
         */
         this(size_t n) nothrow @nogc;
 
-        /**
-        Rounds the allocation size to the next multiple of the page size.
-        The allocation only reserves a range of virtual pages but the actual
-        physical memory is allocated on demand, when accessing the memory.
-
-        Params:
-        n = Bytes to allocate
-
-        Returns:
-        `null` on failure or if the requested size exceeds the remaining capacity.
-        */
-        void[] allocate(size_t n) nothrow @nogc;
-
-        /**
-        Rounds the allocation size to the next multiple of the page size.
-        The allocation only reserves a range of virtual pages but the actual
-        physical memory is allocated on demand, when accessing the memory.
-        The allocated memory is aligned to the specified alignment `a`.
-
-        Params:
-        n = Bytes to allocate
-        a = Alignment
-
-        Returns:
-        `null` on failure or if the requested size exceeds the remaining capacity.
-        */
-        void[] alignedAllocate(size_t n, uint a) nothrow @nogc;
-
         /**
         Rounds the requested size to the next multiple of the page size.
         */
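
For illustration, a minimal sketch of the rounding contract documented above. The module path and the meaning of the constructor argument (total bytes of virtual address space to reserve) are assumptions, not spelled out in this diff:

    import std.experimental.allocator.building_blocks.ascending_page_allocator
        : AscendingPageAllocator;

    void example() nothrow @nogc
    {
        // Assumption: the constructor reserves `n` bytes of virtual address
        // space, rounded up to whole pages.
        auto a = AscendingPageAllocator(1024 * 4096);
        void[] b = a.allocate(100); // a whole page is reserved behind the scenes
        assert(b.length == 100);    // but the returned slice keeps the requested size
        a.deallocate(b);
    }                               // ~this unmaps the reserved range on scope exit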
@@ -240,14 +212,6 @@ struct AscendingPageAllocator
         */
         void deallocate(void[] b) nothrow @nogc;
 
-        /**
-        If the passed buffer is not the last allocation, then `delta` can be
-        at most the number of bytes left on the last page.
-        Otherwise, we can expand the last allocation until the end of the virtual
-        address range.
-        */
-        bool expand(ref void[] b, size_t delta) nothrow @nogc;
-
         /**
         Returns `Ternary.yes` if the passed buffer is inside the range of virtual addresses.
         Does not guarantee that the passed buffer is still valid.
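
A short sketch of the `expand` contract described above, assuming the 4096-byte page size that `alignment` uses later in this file:

    auto a = AscendingPageAllocator(16 * 4096);
    void[] b = a.allocate(100);      // first allocation, alone on its page
    void[] c = a.allocate(100);      // the most recent allocation
    assert(a.expand(b, 4096 - 100)); // `b` may only grow into the rest of its page
    assert(!a.expand(b, 1));         // no bytes left on that page
    assert(a.expand(c, 2 * 4096));   // the last allocation may grow across fresh pages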
@@ -264,21 +228,9 @@ struct AscendingPageAllocator
         Returns the available size for further allocations in bytes.
         */
         size_t getAvailableSize() nothrow @nogc;
 
-        /**
-        Unmaps the whole virtual address range on destruction.
-        */
-        ~this() nothrow @nogc;
-
-        /**
-        Returns `Ternary.yes` if the allocator does not contain any alive objects
-        and `Ternary.no` otherwise.
-        */
-        Ternary empty() nothrow @nogc;
     }
-    else
-    {
-    private:
+
+private:
     size_t pageSize;
     size_t numPages;
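
A sketch of the `empty`/destructor behaviour documented above:

    import std.typecons : Ternary;

    auto a = AscendingPageAllocator(4096);
    assert(a.empty == Ternary.yes); // no live allocations yet
    void[] b = a.allocate(10);
    assert(a.empty == Ternary.no);
    a.deallocate(b);
    assert(a.empty == Ternary.yes); // everything returned
    // leaving the scope runs ~this, which unmaps the whole virtual range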
@@ -296,12 +248,23 @@ struct AscendingPageAllocator
     void* readWriteLimit;
     enum extraAllocPages = 1000;
 
 public:
     enum uint alignment = 4096;
 
     // Inject common function implementations
     mixin AscendingPageAllocatorImpl!false;
 
+    /**
+    Rounds the allocation size to the next multiple of the page size.
+    The allocation only reserves a range of virtual pages but the actual
+    physical memory is allocated on demand, when accessing the memory.
+
+    Params:
+    n = Bytes to allocate
+
+    Returns:
+    `null` on failure or if the requested size exceeds the remaining capacity.
+    */
     void[] allocate(size_t n) nothrow @nogc
     {
         import std.algorithm.comparison : min;
@@ -336,6 +299,20 @@ struct AscendingPageAllocator
         return cast(void[]) result[0 .. n];
     }
 
+    /**
+    Rounds the allocation size to the next multiple of the page size.
+    The allocation only reserves a range of virtual pages but the actual
+    physical memory is allocated on demand, when accessing the memory.
+    The allocated memory is aligned to the specified alignment `a`.
+
+    Params:
+    n = Bytes to allocate
+    a = Alignment
+
+    Returns:
+    `null` on failure or if the requested size exceeds the remaining capacity.
+    */
     void[] alignedAllocate(size_t n, uint a) nothrow @nogc
     {
         void* alignedStart = cast(void*) roundUpToMultipleOf(cast(size_t) offset, a);
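
A sketch of the alignment guarantee stated above, with an alignment larger than one page (the values are illustrative):

    auto a = AscendingPageAllocator(16 * 4096);
    void[] b = a.alignedAllocate(100, 8192);
    assert(b.length == 100);
    assert(cast(size_t) b.ptr % 8192 == 0); // start address honours the requested alignment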
@@ -355,6 +332,12 @@ struct AscendingPageAllocator
         return result;
     }
 
+    /**
+    If the passed buffer is not the last allocation, then `delta` can be
+    at most the number of bytes left on the last page.
+    Otherwise, we can expand the last allocation until the end of the virtual
+    address range.
+    */
     bool expand(ref void[] b, size_t delta) nothrow @nogc
     {
         import std.algorithm.comparison : min;
@@ -407,17 +390,23 @@ struct AscendingPageAllocator
         return true;
     }
 
+    /**
+    Returns `Ternary.yes` if the allocator does not contain any alive objects
+    and `Ternary.no` otherwise.
+    */
     Ternary empty() nothrow @nogc
     {
         return Ternary(pagesUsed == 0);
     }
 
+    /**
+    Unmaps the whole virtual address range on destruction.
+    */
     ~this() nothrow @nogc
     {
         if (data)
             deallocateAll();
     }
-    }
 }
 
 ///
@@ -452,6 +441,7 @@ shared struct SharedAscendingPageAllocator
     import std.typecons : Ternary;
     import core.internal.spinlock : SpinLock;
 
+    // Docs for mixin functions
    version (StdDdoc)
    {
        /**
@@ -464,35 +454,6 @@ shared struct SharedAscendingPageAllocator
         */
         this(size_t n) nothrow @nogc;
 
-        /**
-        Rounds the allocation size to the next multiple of the page size.
-        The allocation only reserves a range of virtual pages but the actual
-        physical memory is allocated on demand, when accessing the memory.
-
-        Params:
-        n = Bytes to allocate
-
-        Returns:
-        `null` on failure or if the requested size exceeds the remaining capacity.
-        */
-        void[] allocate(size_t n) nothrow @nogc;
-
-        /**
-        Rounds the allocation size to the next multiple of the page size.
-        The allocation only reserves a range of virtual pages but the actual
-        physical memory is allocated on demand, when accessing the memory.
-        The allocated memory is aligned to the specified alignment `a`.
-
-        Params:
-        n = Bytes to allocate
-        a = Alignment
-
-        Returns:
-        `null` on failure or if the requested size exceeds the remaining capacity.
-        */
-        void[] alignedAllocate(size_t n, uint a) nothrow @nogc;
-
         /**
         Rounds the requested size to the next multiple of the page size.
         */
@@ -507,14 +468,6 @@ shared struct SharedAscendingPageAllocator
         */
         void deallocate(void[] b) nothrow @nogc;
 
-        /**
-        If the passed buffer is not the last allocation, then `delta` can be
-        at most the number of bytes left on the last page.
-        Otherwise, we can expand the last allocation until the end of the virtual
-        address range.
-        */
-        bool expand(ref void[] b, size_t delta) nothrow @nogc;
-
         /**
         Returns `Ternary.yes` if the passed buffer is inside the range of virtual addresses.
         Does not guarantee that the passed buffer is still valid.
@@ -532,9 +485,8 @@ shared struct SharedAscendingPageAllocator
         */
         size_t getAvailableSize() nothrow @nogc;
     }
-    else
-    {
-    private:
+
+private:
     size_t pageSize;
     size_t numPages;
@@ -550,17 +502,42 @@ shared struct SharedAscendingPageAllocator
     enum extraAllocPages = 1000;
     SpinLock lock;
 
 public:
     enum uint alignment = 4096;
 
     // Inject common function implementations
     mixin AscendingPageAllocatorImpl!true;
 
+    /**
+    Rounds the allocation size to the next multiple of the page size.
+    The allocation only reserves a range of virtual pages but the actual
+    physical memory is allocated on demand, when accessing the memory.
+
+    Params:
+    n = Bytes to allocate
+
+    Returns:
+    `null` on failure or if the requested size exceeds the remaining capacity.
+    */
     void[] allocate(size_t n) nothrow @nogc
     {
         return allocateImpl(n, 1);
     }
 
+    /**
+    Rounds the allocation size to the next multiple of the page size.
+    The allocation only reserves a range of virtual pages but the actual
+    physical memory is allocated on demand, when accessing the memory.
+    The allocated memory is aligned to the specified alignment `a`.
+
+    Params:
+    n = Bytes to allocate
+    a = Alignment
+
+    Returns:
+    `null` on failure or if the requested size exceeds the remaining capacity.
+    */
     void[] alignedAllocate(size_t n, uint a) nothrow @nogc
     {
         // For regular `allocate` calls, `a` will be set to 1
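
Here, plain `allocate` funnels into the same locked code path as `alignedAllocate`, passing alignment 1. A reduced, hypothetical sketch of that locking pattern (this is not the actual `allocateImpl`, whose body is not shown in this hunk):

    shared struct BumpCursor
    {
        import core.internal.spinlock : SpinLock;

        private SpinLock lock;
        private size_t offset;
        private size_t capacity;

        // Returns the start of the reserved block, or size_t.max on failure.
        size_t reserve(size_t n, uint a) nothrow @nogc
        {
            lock.lock();
            scope(exit) lock.unlock();
            const start = (offset + a - 1) / a * a; // round the cursor up to `a`
            if (start + n > capacity)
                return size_t.max;                  // out of reserved address space
            offset = start + n;                     // the only shared mutation, done under the lock
            return start;
        }
    }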
@@ -614,6 +591,12 @@ shared struct SharedAscendingPageAllocator
         return cast(void[]) localResult[0 .. n];
     }
 
+    /**
+    If the passed buffer is not the last allocation, then `delta` can be
+    at most the number of bytes left on the last page.
+    Otherwise, we can expand the last allocation until the end of the virtual
+    address range.
+    */
     bool expand(ref void[] b, size_t delta) nothrow @nogc
     {
         import std.algorithm.comparison : min;
@@ -668,7 +651,6 @@ shared struct SharedAscendingPageAllocator
         b = cast(void[]) b.ptr[0 .. b.length + delta];
         return true;
     }
-    }
 }
 
 ///
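
A concurrent usage sketch of `SharedAscendingPageAllocator`; the thread scaffolding is illustrative and not part of this commit:

    import core.thread : ThreadGroup;

    void main()
    {
        auto a = SharedAscendingPageAllocator(1024 * 4096);
        auto group = new ThreadGroup;
        foreach (i; 0 .. 4)
            group.create({
                void[] b = a.allocate(100); // serialized internally by the SpinLock
                assert(b.length == 100);
                a.deallocate(b);
            });
        group.joinAll();
    }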