Templatize _d_arraysetcapacity hook (#21143)

Albert24GG 2025-04-15 13:07:47 +03:00 committed by GitHub
parent 513293b0d8
commit 03c8f2723f
5 changed files with 194 additions and 116 deletions


@@ -9,6 +9,143 @@
*/
module core.internal.array.capacity;
import core.attribute : weak;

// for now, all GC array functions are not exposed via core.memory.
extern (C)
{
    size_t gc_reserveArrayCapacity(void[] slice, size_t request, bool atomic) nothrow pure;
    bool gc_shrinkArrayUsed(void[] slice, size_t existingUsed, bool atomic) nothrow pure;
}

/**
Set the array capacity.

If the array capacity isn't currently large enough
to hold the requested capacity (in number of elements), then the array is
resized/reallocated to the appropriate size.

Pass in a requested capacity of 0 to get the current capacity.

Params:
    T = the type of the elements in the array (this should be unqualified)
    newcapacity = requested new capacity
    p = pointer to array to set. Its `length` is left unchanged.
    isshared = true if the underlying data is shared

Returns: the number of elements that can actually be stored once the resizing is done
*/
size_t _d_arraysetcapacityPureNothrow(T)(size_t newcapacity, void[]* p, bool isshared) pure nothrow @trusted
do
{
    alias PureNothrowType = size_t function(size_t, void[]*, bool) pure nothrow @trusted;
    return (cast(PureNothrowType) &_d_arraysetcapacity!T)(newcapacity, p, isshared);
}
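
The wrapper above works by casting the hook's address to a function-pointer type that claims `pure nothrow`, which lets `pure nothrow` callers such as `capacity` and `reserve` in object.d use it. A minimal, self-contained sketch of the same trick (the `bump`/`callBumpAsPure` names are illustrative and not part of this commit):

int callCount; // mutable global state makes `bump` impure

int bump(int x) { callCount += x; return callCount; }

// Fake `pure nothrow` by casting the function pointer, mirroring what
// _d_arraysetcapacityPureNothrow does for _d_arraysetcapacity!T.
int callBumpAsPure(int x) pure nothrow @trusted
{
    alias PureNothrowType = int function(int) pure nothrow @trusted;
    return (cast(PureNothrowType) &bump)(x);
}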
size_t _d_arraysetcapacity(T)(size_t newcapacity, void[]* p, bool isshared) @trusted
in
{
    assert(!(*p).length || (*p).ptr);
}
do
{
    import core.exception : onOutOfMemoryError;
    import core.stdc.string : memcpy, memset;
    import core.internal.array.utils: __typeAttrs;
    import core.internal.lifetime : __doPostblit;
    import core.memory : GC;

    alias BlkAttr = GC.BlkAttr;

    auto size = T.sizeof;

    version (D_InlineAsm_X86)
    {
        size_t reqsize = void;

        asm nothrow pure
        {
            mov EAX, newcapacity;
            mul EAX, size;
            mov reqsize, EAX;
            jnc Lcontinue;
        }
    }
    else version (D_InlineAsm_X86_64)
    {
        size_t reqsize = void;

        asm nothrow pure
        {
            mov RAX, newcapacity;
            mul RAX, size;
            mov reqsize, RAX;
            jnc Lcontinue;
        }
    }
    else
    {
        import core.checkedint : mulu;

        bool overflow = false;
        size_t reqsize = mulu(size, newcapacity, overflow);
        if (!overflow)
            goto Lcontinue;
    }

Loverflow:
    onOutOfMemoryError();
    assert(0);

Lcontinue:
    // step 1, see if we can ensure the capacity is valid in-place
    auto datasize = (*p).length * size;

    auto curCapacity = gc_reserveArrayCapacity((*p).ptr[0 .. datasize], reqsize, isshared);
    if (curCapacity != 0) // in-place worked!
        return curCapacity / size;

    if (reqsize <= datasize) // requested size is less than array size, the current array satisfies
                             // the request. But this is not an appendable GC array, so return 0.
        return 0;

    // step 2, if reserving in-place doesn't work, allocate a new array with at
    // least the requested allocated size.
    auto attrs = __typeAttrs!T((*p).ptr) | BlkAttr.APPENDABLE;

    // use this static enum to avoid recomputing TypeInfo for every call.
    static enum ti = typeid(T);
    auto ptr = GC.malloc(reqsize, attrs, ti);
    if (ptr is null)
        goto Loverflow;

    // copy the data over.
    // note that malloc will have initialized the data we did not request to 0.
    memcpy(ptr, (*p).ptr, datasize);

    // handle postblit
    __doPostblit!T(cast(T[])ptr[0 .. datasize]);

    if (!(attrs & BlkAttr.NO_SCAN))
    {
        // need to memset the newly requested data, except for the data that
        // malloc returned that we didn't request.
        void* endptr = ptr + reqsize;
        void* begptr = ptr + datasize;

        // sanity check
        assert(endptr >= begptr);
        memset(begptr, 0, endptr - begptr);
    }

    *p = ptr[0 .. (*p).length];

    // set up the correct length. Note that we need to do this here, because
    // the GC malloc will automatically set the used size to what we requested.
    gc_shrinkArrayUsed(ptr[0 .. datasize], reqsize, isshared);

    curCapacity = gc_reserveArrayCapacity(ptr[0 .. datasize], 0, isshared);
    assert(curCapacity);
    return curCapacity / size;
}
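
For reference, a sketch of the contract the templated hook exposes, assuming it is instantiated directly from core.internal.array.capacity (ordinary user code goes through `capacity`/`reserve` instead; the concrete numbers are only illustrative):

unittest
{
    import core.internal.array.capacity : _d_arraysetcapacity;

    int[] arr = new int[](4);
    auto pa = cast(void[]*) &arr;

    // A requested capacity of 0 only queries: the result is the current
    // capacity of the appendable GC block, counted in elements.
    const cur = _d_arraysetcapacity!int(0, pa, false);
    assert(cur >= 4);

    // Asking for more may reallocate, but the length is never touched.
    const grown = _d_arraysetcapacity!int(100, pa, false);
    assert(grown >= 100);
    assert(arr.length == 4);
}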
// HACK: `nothrow` and `pure` are faked.
private extern (C) void[] _d_arraysetlengthT(const TypeInfo ti, size_t newlength, void[]* p) nothrow pure;
private extern (C) void[] _d_arraysetlengthiT(const TypeInfo ti, size_t newlength, void[]* p) nothrow pure;


@@ -156,3 +156,28 @@ void[] __arrayAlloc(T)(size_t arrSize) @trusted
        return ptr[0 .. arrSize];
    return null;
}

uint __typeAttrs(T)(void *copyAttrsFrom = null)
{
    import core.internal.traits : hasIndirections, hasElaborateDestructor;
    import core.memory : GC;

    alias BlkAttr = GC.BlkAttr;

    if (copyAttrsFrom)
    {
        // try to copy attrs from the given block
        auto info = GC.query(copyAttrsFrom);
        if (info.base)
            return info.attr;
    }

    uint attrs = 0;

    static if (!hasIndirections!T)
        attrs |= BlkAttr.NO_SCAN;

    static if (is(T == struct) && hasElaborateDestructor!T)
        attrs |= BlkAttr.FINALIZE;

    return attrs;
}
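
A hedged illustration of how the attribute selection above plays out, assuming the helper stays reachable as core.internal.array.utils.__typeAttrs (the module path the first hunk imports); the struct names are made up for the example:

unittest
{
    import core.internal.array.utils : __typeAttrs;
    import core.memory : GC;
    alias BlkAttr = GC.BlkAttr;

    static struct Plain { int x; }            // no indirections, no destructor
    static struct WithDtor { ~this() {} }     // elaborate destructor

    // Pointer-free element types need no GC scanning.
    assert(__typeAttrs!int() & BlkAttr.NO_SCAN);
    assert(__typeAttrs!Plain() & BlkAttr.NO_SCAN);

    // Structs with a destructor must be finalized by the GC.
    assert(__typeAttrs!WithDtor() & BlkAttr.FINALIZE);

    // Types containing pointers must be scanned.
    assert(!(__typeAttrs!(int*)() & BlkAttr.NO_SCAN));
}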


@@ -202,3 +202,22 @@ void swap(T)(ref T lhs, ref T rhs)
    moveEmplace(rhs, lhs);
    moveEmplace(tmp, rhs);
}

void __doPostblit(T)(T[] arr)
{
    // infer static postblit type, run postblit if any
    static if (__traits(hasPostblit, T))
    {
        static if (__traits(isStaticArray, T) && is(T : E[], E))
            __doPostblit(cast(E[]) arr);
        else static if (!__traits(compiles, arr[0].__xpostblit))
        {
            alias Unqual_T = Unqual!T;
            foreach (ref elem; (() @trusted => cast(Unqual_T[]) arr)())
                elem.__xpostblit();
        }
        else
            foreach (ref elem; arr)
                elem.__xpostblit();
    }
}
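
The helper matters when element data is moved with a raw memcpy, as _d_arraysetcapacity does: the bytes are copied but postblits are not run, so they have to be invoked afterwards. A small sketch of that effect (the `Counted` type is hypothetical; the import path follows the first hunk's `core.internal.lifetime : __doPostblit`):

unittest
{
    import core.internal.lifetime : __doPostblit;
    import core.stdc.string : memcpy;

    static struct Counted
    {
        static int copies;
        int payload;
        this(this) { ++copies; }   // postblit counts copies
    }

    Counted[3] src;
    foreach (i, ref e; src)
        e.payload = cast(int) i;

    Counted.copies = 0;            // ignore anything that ran during setup
    Counted[3] dst = void;

    // A raw byte copy does not run postblits...
    memcpy(&dst, &src, src.sizeof);
    assert(Counted.copies == 0);

    // ...so the runtime runs them explicitly on the copied elements.
    __doPostblit!Counted(dst[]);
    assert(Counted.copies == 3);
}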


@@ -3901,9 +3901,11 @@ private size_t getArrayHash(const scope TypeInfo element, const scope void* ptr,
    assert(s == "abc");
}

// HACK: This is a lie. `_d_arraysetcapacity` is neither `nothrow` nor `pure`, but this lie is
// necessary for now to prevent breaking code.
private extern (C) size_t _d_arraysetcapacity(const TypeInfo ti, size_t newcapacity, void[]* arrptr) pure nothrow;

import core.internal.array.capacity : _d_arraysetcapacityPureNothrow;
import core.internal.traits: Unqual;

/**
(Property) Gets the current _capacity of a slice. The _capacity is the size
@@ -3918,7 +3920,10 @@ Note: The _capacity of a slice may be impacted by operations on other slices.
*/
@property size_t capacity(T)(T[] arr) pure nothrow @trusted
{
    return _d_arraysetcapacity(typeid(T[]), 0, cast(void[]*)&arr);
    const isshared = is(T == shared);
    alias Unqual_T = Unqual!T;

    // The postblit of T may be impure, so we need to use the `pure nothrow` wrapper
    return _d_arraysetcapacityPureNothrow!Unqual_T(0, cast(void[]*)&arr, isshared);
}
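
User-visible behaviour is unchanged; the documented semantics of `.capacity` still hold (a quick sketch consistent with the property's documentation):

unittest
{
    // A freshly GC-allocated array sits in an appendable block.
    int[] arr = new int[](10);
    assert(arr.capacity >= 10);

    // A slice of non-GC memory (here a static array) reports no capacity.
    int[4] buf;
    int[] view = buf[];
    assert(view.capacity == 0);
}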
///
@@ -3957,7 +3962,12 @@ size_t reserve(T)(ref T[] arr, size_t newcapacity) pure nothrow @trusted
    if (__ctfe)
        return newcapacity;
    else
        return _d_arraysetcapacity(typeid(T[]), newcapacity, cast(void[]*)&arr);
    {
        const isshared = is(T == shared);
        alias Unqual_T = Unqual!T;

        // The postblit of T may be impure, so we need to use the `pure nothrow` wrapper
        return _d_arraysetcapacityPureNothrow!Unqual_T(newcapacity, cast(void[]*)&arr, isshared);
    }
}
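
Likewise for `reserve`: the return value is the resulting capacity in elements, and appends within that capacity should not reallocate (a sketch of the documented behaviour, not a test from this commit):

unittest
{
    int[] arr;
    const cap = arr.reserve(1000);
    assert(cap >= 1000);
    assert(arr.length == 0);      // reserve never changes the length

    // Appends within the reserved capacity reuse the same block.
    const before = arr.ptr;
    foreach (i; 0 .. 1000)
        arr ~= i;
    assert(arr.ptr is before);
}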
///


@@ -313,119 +313,6 @@ void __doPostblit(void *ptr, size_t len, const TypeInfo ti)
    }
}

/**
Set the array capacity.

If the array capacity isn't currently large enough
to hold the requested capacity (in number of elements), then the array is
resized/reallocated to the appropriate size.

Pass in a requested capacity of 0 to get the current capacity.

Params:
    ti = type info of element type
    newcapacity = requested new capacity
    p = pointer to array to set. Its `length` is left unchanged.

Returns: the number of elements that can actually be stored once the resizing is done
*/
extern(C) size_t _d_arraysetcapacity(const TypeInfo ti, size_t newcapacity, void[]* p) @weak
in
{
    assert(ti);
    assert(!(*p).length || (*p).ptr);
}
do
{
    auto isshared = typeid(ti) is typeid(TypeInfo_Shared);
    auto tinext = unqualify(ti.next);
    auto size = tinext.tsize;

    version (D_InlineAsm_X86)
    {
        size_t reqsize = void;

        asm
        {
            mov EAX, newcapacity;
            mul EAX, size;
            mov reqsize, EAX;
            jnc Lcontinue;
        }
    }
    else version (D_InlineAsm_X86_64)
    {
        size_t reqsize = void;

        asm
        {
            mov RAX, newcapacity;
            mul RAX, size;
            mov reqsize, RAX;
            jnc Lcontinue;
        }
    }
    else
    {
        bool overflow = false;
        size_t reqsize = mulu(size, newcapacity, overflow);
        if (!overflow)
            goto Lcontinue;
    }

Loverflow:
    onOutOfMemoryError();
    assert(0);

Lcontinue:
    // step 1, see if we can ensure the capacity is valid in-place
    auto datasize = (*p).length * size;

    auto curCapacity = gc_reserveArrayCapacity((*p).ptr[0 .. datasize], reqsize, isshared);
    if (curCapacity != 0)
        // in-place worked!
        return curCapacity / size;

    if (reqsize <= datasize)
        // requested size is less than array size, the current array satisfies
        // the request. But this is not an appendable GC array, so return 0.
        return 0;

    // step 2, if reserving in-place doesn't work, allocate a new array with at
    // least the requested allocated size.
    auto attrs = __typeAttrs(tinext, (*p).ptr) | BlkAttr.APPENDABLE;
    auto ptr = GC.malloc(reqsize, attrs, tinext);
    if (ptr is null)
        goto Loverflow;

    // copy the data over.
    // note that malloc will have initialized the data we did not request to 0.
    memcpy(ptr, (*p).ptr, datasize);

    // handle postblit
    __doPostblit(ptr, datasize, tinext);

    if (!(attrs & BlkAttr.NO_SCAN))
    {
        // need to memset the newly requested data, except for the data that
        // malloc returned that we didn't request.
        void *endptr = ptr + reqsize;
        void *begptr = ptr + datasize;

        // sanity check
        assert(endptr >= begptr);
        memset(begptr, 0, endptr - begptr);
    }

    *p = ptr[0 .. (*p).length];

    // set up the correct length. Note that we need to do this here, because
    // the GC malloc will automatically set the used size to what we requested.
    gc_shrinkArrayUsed(ptr[0 .. datasize], reqsize, isshared);

    curCapacity = gc_reserveArrayCapacity(ptr[0 .. datasize], 0, isshared);
    assert(curCapacity);
    return curCapacity / size;
}
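
In short, the TypeInfo-driven entry point above is deleted in favour of the template from the first hunk; side by side, the signatures change as follows (collected from the hunks, with sharedness moving from the `TypeInfo` to an explicit flag):

// before: runtime call, element type and sharedness recovered from `ti`
extern (C) size_t _d_arraysetcapacity(const TypeInfo ti, size_t newcapacity, void[]* p);

// after: instantiated per element type, sharedness passed by the caller
size_t _d_arraysetcapacity(T)(size_t newcapacity, void[]* p, bool isshared);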
/**
Allocate an array with the garbage collector.