Add Valgrind GC integration (#15304)

* rt.minfo: Avoid calling realloc with size 0

This is not portable, and memcheck complains about this.

* Add C Valgrind API

Add the BSD-licensed C header files which provide the API for
controlling Valgrind from within programs executed under it.

The files are from Valgrind v3.21.0.

* Add some D bindings for the Valgrind C API

We will use these in the GC implementation to tell Valgrind which
memory operations are OK or not.

* Silence Valgrind warnings caused by GC marking

Allow the conservative GC to scan memory, whether it has been
initialized by the application or not.

* Add test suite for Valgrind

* Unify VALGRIND with MEMSTOMP

The two share the same goal: mark memory which should not be accessed
any more.

* Mask "invalid" access to free lists

The GC stores free lists in the cells of the objects on the list.

We would like to allow the GC to work with these lists, but still
raise a warning if application code attempts to access them.

* Distinguish between writable and unwritable invalidation

Freshly allocated memory should be writable but not readable.

Explicitly deallocated or GC-ed memory should be neither readable nor
writable.

* Add use-after-free test

* Invalidate freshly-allocated memory

* Integrate VALGRIND with SENTINEL

* Fix reporting of arrays of structs with destructors

* Re-invalidate reused blocks during malloc/calloc

* Add Changelog entry

* .pre-commit-config.yaml: Exempt vendored files

* etc.valgrind.valgrind: Propagate return values of the vbits functions

* etc.valgrind.valgrind: Add documentation for functions

Copy/adapt the documentation in memcheck.h.

* druntime/test/valgrind: Add no_use_after_gc test

* ci/cirrusci.sh: Install Valgrind on Linux

Run the Druntime Valgrind integration tests.

libc6-dbg is needed to allow Valgrind to redirect certain functions.

* etc.valgrind.valgrind: Add @nogc

Although the GC is the primary user, this is not a restriction of
these Valgrind API wrappers.

* druntime/test/valgrind: Fix no_use_after_gc test with -release

Trick the optimizer to pretend we're doing something with the result
of those invalid memory accesses.
This commit is contained in:
Vladimir Panteleev 2023-06-15 08:01:42 +00:00 committed by GitHub
parent 2772261adb
commit 7cdae6e3bb
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
19 changed files with 7854 additions and 30 deletions

View file

@ -7,12 +7,14 @@ repos:
(?x)^(
^compiler/test/runnable/extra-files/.*$|
^compiler/test/compilable/extra-files/.*$|
^druntime/src/etc/valgrind/.*\.h$|
)$
- id: end-of-file-fixer
exclude: >
(?x)^(
^compiler/test/runnable/extra-files/.*$|
^compiler/test/compilable/extra-files/.*$|
^druntime/src/etc/valgrind/.*\.h$|
)$
- id: check-merge-conflict
- id: check-added-large-files

View file

@ -0,0 +1,42 @@
Integration with the Valgrind `memcheck` tool has been added to the garbage collector
The garbage collector gained a compile-time option to enable integration with Valgrind's `memcheck` tool.
When it is enabled, the GC will communicate with Valgrind and inform it which memory access operations are valid and which are not.
The integration allows catching memory errors in D programs which mix safe (GC) and unsafe (manual) memory management, for example:
---
import core.memory;
void main()
{
auto arr = new int[3];
GC.free(arr.ptr);
arr[1] = 42; // use after free
}
---
To use it, obtain the DMD source code, then include the garbage collector and lifetime implementation into your program's compilation and compile with the `-debug=VALGRIND` option:
```
git clone -b v2.105.0 --depth=1 https://github.com/dlang/dmd
dmd -g -debug=VALGRIND program.d -Idmd/druntime/src dmd/druntime/src/{core/internal/gc/impl/conservative/gc,rt/lifetime,etc/valgrind/valgrind}.d
valgrind --tool=memcheck ./program
```
The option is compatible with other GC debugging build options, such as `MEMSTOMP` and `SENTINEL`.
Dub users can try the following equivalent recipe:
```
git clone -b v2.105.0 --depth=1 https://github.com/dlang/dmd
cat >> dub.sdl <<EOF
debugVersions "VALGRIND"
sourceFiles "dmd/druntime/src/core/internal/gc/impl/conservative/gc.d"
sourceFiles "dmd/druntime/src/rt/lifetime.d"
sourceFiles "dmd/druntime/src/etc/valgrind/valgrind.d"
importPaths "dmd/druntime/src"
EOF
dub build
valgrind --tool=memcheck ./program
```

View file

@ -16,10 +16,10 @@ if [ -z ${HOST_DMD+x} ] ; then echo "Variable 'HOST_DMD' needs to be set."; exit
if [ "$OS_NAME" == "linux" ]; then
export DEBIAN_FRONTEND=noninteractive
packages="git-core make g++ gdb gnupg curl libcurl4 tzdata zip unzip xz-utils llvm"
packages="git-core make g++ gdb gnupg curl libcurl4 tzdata zip unzip xz-utils llvm valgrind libc6-dbg"
if [ "$MODEL" == "32" ]; then
dpkg --add-architecture i386
packages="$packages g++-multilib libcurl4:i386"
packages="$packages g++-multilib libcurl4:i386 libc6-dbg:i386"
fi
if [ "${HOST_DMD:0:4}" == "gdmd" ]; then
# ci/run.sh uses `sudo add-apt-repository ...` to add a PPA repo

View file

@ -546,4 +546,5 @@ COPY=\
$(IMPDIR)\core\thread\osthread.d \
$(IMPDIR)\core\thread\package.d \
\
$(IMPDIR)\etc\valgrind\valgrind.d \
$(IMPDIR)\etc\linux\memoryerror.d

View file

@ -581,4 +581,5 @@ SRCS=\
src\rt\util\typeinfo.d \
src\rt\util\utility.d \
\
src\etc\valgrind\valgrind.d \
src\etc\linux\memoryerror.d

View file

@ -126,7 +126,7 @@ SRCS:=$(subst \,/,$(SRCS))
# NOTE: a pre-compiled minit.obj has been provided in dmd for Win32 and
# minit.asm is not used by dmd for Linux
OBJS= $(ROOT)/errno_c.o $(ROOT)/threadasm.o
OBJS= $(ROOT)/errno_c.o $(ROOT)/threadasm.o $(ROOT)/valgrind.o
# use timelimit to avoid deadlocks if available
TIMELIMIT:=$(if $(shell which timelimit 2>/dev/null || true),timelimit -t 10 ,)
@ -367,6 +367,10 @@ $(ROOT)/threadasm.o : src/core/threadasm.S
@mkdir -p $(dir $@)
$(CC) -c $(CFLAGS) $< -o$@
$(ROOT)/valgrind.o : src/etc/valgrind/valgrind.c src/etc/valgrind/valgrind.h src/etc/valgrind/memcheck.h
@mkdir -p `dirname $@`
$(CC) -c $(CFLAGS) $< -o$@
######################## Create a shared library ##############################
$(DRUNTIMESO) $(DRUNTIMESOLIB) dll: DFLAGS+=-version=Shared -fPIC
@ -392,7 +396,7 @@ ifeq ($(HAS_ADDITIONAL_TESTS),1)
ADDITIONAL_TESTS:=test/init_fini test/exceptions test/coverage test/profile test/cycles test/allocations test/typeinfo \
test/aa test/cpuid test/gc test/hash test/lifetime \
test/thread test/unittest test/imports test/betterc test/stdcpp test/config \
test/traits
test/traits test/valgrind
ADDITIONAL_TESTS+=$(if $(SHARED),test/shared,)
endif

View file

@ -26,6 +26,7 @@ module core.internal.gc.impl.conservative.gc;
//debug = INVARIANT; // enable invariants
//debug = PROFILE_API; // profile API calls for config.profile > 1
//debug = GC_RECURSIVE_LOCK; // check for recursive locking on the same thread
//debug = VALGRIND; // Valgrind memcheck integration
/***************************************************/
version = COLLECT_PARALLEL; // parallel scanning
@ -52,6 +53,8 @@ version (GNU) import gcc.builtins;
debug (PRINTF_TO_FILE) import core.stdc.stdio : sprintf, fprintf, fopen, fflush, FILE;
else import core.stdc.stdio : sprintf, printf; // needed to output profiling results
debug (VALGRIND) import etc.valgrind.valgrind;
import core.time;
alias currTime = MonoTime.currTime;
@ -480,6 +483,8 @@ class ConservativeGC : GC
auto p = runLocked!(mallocNoSync, mallocTime, numMallocs)(size, bits, localAllocSize, ti);
invalidate(p[0 .. localAllocSize], 0xF0, true);
if (!(bits & BlkAttr.NO_SCAN))
{
memset(p + size, 0, localAllocSize - size);
@ -567,6 +572,9 @@ class ConservativeGC : GC
auto p = runLocked!(mallocNoSync, mallocTime, numMallocs)(size, bits, localAllocSize, ti);
debug (VALGRIND) makeMemUndefined(p[0..size]);
invalidate((p + size)[0 .. localAllocSize - size], 0xF0, true);
memset(p, 0, size);
if (!(bits & BlkAttr.NO_SCAN))
{
@ -688,7 +696,7 @@ class ConservativeGC : GC
else if (newsz < psz)
{
// Shrink in place
debug (MEMSTOMP) memset(p + size, 0xF2, psize - size);
invalidate((p + size)[0 .. psize - size], 0xF2, false);
lpool.freePages(pagenum + newsz, psz - newsz);
lpool.mergeFreePageOffsets!(false, true)(pagenum + newsz, psz - newsz);
lpool.bPageOffsets[pagenum] = cast(uint) newsz;
@ -704,7 +712,7 @@ class ConservativeGC : GC
if (freesz < newPages)
return doMalloc(); // free range too small
debug (MEMSTOMP) memset(p + psize, 0xF0, size - psize);
invalidate((p + psize)[0 .. size - psize], 0xF0, true);
debug (PRINTF) printFreeInfo(pool);
memset(&lpool.pagetable[pagenum + psz], Bins.B_PAGEPLUS, newPages);
lpool.bPageOffsets[pagenum] = cast(uint) newsz;
@ -795,7 +803,7 @@ class ConservativeGC : GC
if (freesz < minsz)
return 0;
size_t sz = freesz > maxsz ? maxsz : freesz;
debug (MEMSTOMP) memset(pool.baseAddr + (pagenum + psz) * PAGESIZE, 0xF0, sz * PAGESIZE);
invalidate((pool.baseAddr + (pagenum + psz) * PAGESIZE)[0 .. sz * PAGESIZE], 0xF0, true);
memset(lpool.pagetable + pagenum + psz, Bins.B_PAGEPLUS, sz);
lpool.bPageOffsets[pagenum] = cast(uint) (psz + sz);
for (auto offset = psz; offset < psz + sz; offset++)
@ -911,7 +919,7 @@ class ConservativeGC : GC
size_t npages = lpool.bPageOffsets[pagenum];
auto size = npages * PAGESIZE;
ssize = sentinel_size(q, size);
debug (MEMSTOMP) memset(p, 0xF2, size);
invalidate(p[0 .. size], 0xF2, false);
lpool.freePages(pagenum, npages);
lpool.mergeFreePageOffsets!(true, true)(pagenum, npages);
}
@ -925,13 +933,13 @@ class ConservativeGC : GC
auto size = binsize[bin];
ssize = sentinel_size(q, size);
debug (MEMSTOMP) memset(p, 0xF2, size);
invalidate(p[0 .. size], 0xF2, false);
// in case the page hasn't been recovered yet, don't add the object to the free list
if (!gcx.recoverPool[bin] || pool.binPageChain[pagenum] == Pool.PageRecovered)
{
list.next = gcx.bucket[bin];
list.pool = pool;
undefinedWrite(list.next, gcx.bucket[bin]);
undefinedWrite(list.pool, pool);
gcx.bucket[bin] = list;
}
pool.freebits.set(biti);
@ -1965,8 +1973,8 @@ struct Gcx
assert(p !is null);
L_hasBin:
// Return next item from free list
bucket[bin] = (cast(List*)p).next;
auto pool = (cast(List*)p).pool;
bucket[bin] = undefinedRead((cast(List*)p).next);
auto pool = undefinedRead((cast(List*)p).pool);
auto biti = (p - pool.baseAddr) >> pool.shiftBy;
assert(pool.freebits.test(biti));
@ -1976,7 +1984,7 @@ struct Gcx
if (bits)
pool.setBits(biti, bits);
//debug(PRINTF) printf("\tmalloc => %p\n", p);
debug (MEMSTOMP) memset(p, 0xF0, alloc_size);
invalidate(p[0 .. alloc_size], 0xF0, true);
if (ConservativeGC.isPrecise)
{
@ -2059,7 +2067,7 @@ struct Gcx
auto p = pool.baseAddr + pn * PAGESIZE;
debug(PRINTF) printf("Got large alloc: %p, pt = %d, np = %d\n", p, pool.pagetable[pn], npages);
debug (MEMSTOMP) memset(p, 0xF1, size);
invalidate(p[0 .. size], 0xF1, true);
alloc_size = npages * PAGESIZE;
//debug(PRINTF) printf("\tp = %p\n", p);
@ -2241,6 +2249,7 @@ struct Gcx
immutable ncap = _cap ? 2 * _cap : initSize / RANGE.sizeof;
auto p = cast(RANGE*)os_mem_map(ncap * RANGE.sizeof);
if (p is null) onOutOfMemoryErrorNoGC();
debug (VALGRIND) makeMemUndefined(p[0..ncap]);
if (_p !is null)
{
p[0 .. _length] = _p[0 .. _length];
@ -2295,7 +2304,8 @@ struct Gcx
for (;;)
{
auto p = *cast(void**)(rng.pbot);
auto p = undefinedRead(*cast(void**)(rng.pbot));
debug (VALGRIND) makeMemDefined((&p)[0 .. 1]);
debug(MARK_PRINTF) printf("\tmark %p: %p\n", rng.pbot, p);
@ -2525,6 +2535,7 @@ struct Gcx
for (auto p = cast(void**)pbot; cast(void*)p < ptop; p++)
{
auto ptr = *p;
debug (VALGRIND) makeMemDefined((&ptr)[0 .. 1]);
if (cast(size_t)(ptr - minAddr) < memSize)
toscanRoots.push(ptr);
}
@ -2650,7 +2661,7 @@ struct Gcx
pool.freepages += npages;
numFree += npages;
debug (MEMSTOMP) memset(p, 0xF3, npages * PAGESIZE);
invalidate(p[0 .. npages * PAGESIZE], 0xF3, false);
// Don't need to update searchStart here because
// pn is guaranteed to be greater than last time
// we updated it.
@ -2765,7 +2776,7 @@ struct Gcx
debug(COLLECT_PRINTF) printf("\tcollecting %p\n", p);
leakDetector.log_free(q, sentinel_size(q, size));
debug (MEMSTOMP) memset(p, 0xF3, size);
invalidate(p[0 .. size], 0xF3, false);
}
}
}
@ -2830,11 +2841,11 @@ struct Gcx
if (!core.bitop.bt(freebitsdata, u / 16))
continue;
auto elem = cast(List *)(p + u);
elem.pool = &pool.base;
*bucketTail = elem;
undefinedWrite(elem.pool, &pool.base);
undefinedWrite(*bucketTail, elem);
bucketTail = &elem.next;
}
*bucketTail = null;
undefinedWrite(*bucketTail, null);
assert(bucket[bin] !is null);
return true;
}
@ -3572,6 +3583,7 @@ struct Pool
//debug(PRINTF) printf("Pool::Pool(%u)\n", npages);
poolsize = npages * PAGESIZE;
baseAddr = cast(byte *)os_mem_map(poolsize);
version (VALGRIND) makeMemNoAccess(baseAddr[0..poolsize]);
// Some of the code depends on page alignment of memory pools
assert((cast(size_t)baseAddr & (PAGESIZE - 1)) == 0);
@ -4275,7 +4287,7 @@ struct LargeObjectPool
for (; pn + n < npages; ++n)
if (pagetable[pn + n] != Bins.B_PAGEPLUS)
break;
debug (MEMSTOMP) memset(baseAddr + pn * PAGESIZE, 0xF3, n * PAGESIZE);
invalidate((baseAddr + pn * PAGESIZE)[0 .. n * PAGESIZE], 0xF3, false);
freePages(pn, n);
mergeFreePageOffsets!(true, true)(pn, n);
}
@ -4394,7 +4406,7 @@ struct SmallObjectPool
debug(COLLECT_PRINTF) printf("\tcollecting %p\n", p);
//log_free(sentinel_add(p));
debug (MEMSTOMP) memset(p, 0xF3, size);
invalidate(p[0 .. size], 0xF3, false);
}
if (freeBits)
@ -4431,11 +4443,11 @@ struct SmallObjectPool
void* ptop = p + PAGESIZE - 2 * size + 1;
for (; p < ptop; p += size)
{
(cast(List *)p).next = cast(List *)(p + size);
(cast(List *)p).pool = &base;
undefinedWrite((cast(List *)p).next, cast(List *)(p + size));
undefinedWrite((cast(List *)p).pool, &base);
}
(cast(List *)p).next = null;
(cast(List *)p).pool = &base;
undefinedWrite((cast(List *)p).next, null);
undefinedWrite((cast(List *)p).pool, &base);
return first;
}
}
@ -4823,13 +4835,22 @@ debug (SENTINEL)
{
assert(size <= uint.max);
*sentinel_psize(p) = cast(uint)size;
*sentinel_pre(p) = SENTINEL_PRE;
*sentinel_post(p) = SENTINEL_POST;
debug (VALGRIND)
{
makeMemNoAccess(sentinel_pre(p)[0..1]);
makeMemNoAccess(sentinel_post(p)[0..1]);
}
else
{
*sentinel_pre(p) = SENTINEL_PRE;
*sentinel_post(p) = SENTINEL_POST;
}
}
void sentinel_Invariant(const void *p) nothrow @nogc
{
debug (VALGRIND) {} else
debug
{
assert(*sentinel_pre(p) == SENTINEL_PRE);
@ -5063,3 +5084,53 @@ unittest
printf("unexpected pointers %p and %p\n", p.ptr, q.ptr);
}
}
/* ============================ MEMSTOMP =============================== */
/// Mark the specified memory region as uninitialized -
/// reading from this region is an error.
/// If writable is false, writing to it is also an error.
///
/// Under -debug=MEMSTOMP the region is overwritten with `pattern` so that
/// stale data is easy to recognize in a debugger.
/// Under -debug=VALGRIND the region is reported to memcheck as either
/// "undefined" (writable == true, e.g. freshly allocated memory) or
/// fully inaccessible (writable == false, e.g. freed memory).
pragma(inline, true)
void invalidate(void[] mem, ubyte pattern, bool writable) nothrow @nogc
{
    debug (MEMSTOMP) memset(mem.ptr, pattern, mem.length);
    debug (VALGRIND)
    {
        if (writable)
            makeMemUndefined(mem);
        else
            makeMemNoAccess(mem);
    }
}
/// Read memory that should otherwise be marked as unreadable
/// (e.g. free lists overlapped with unallocated heap objects).
///
/// Under -debug=VALGRIND, address-error reporting is temporarily disabled
/// for the one-element slice covering `var`, so the GC itself may read a
/// location that application code must not touch; reporting is re-enabled
/// immediately after the read. Without VALGRIND this is a plain read.
pragma(inline, true)
T undefinedRead(T)(ref T var) nothrow
{
    debug (VALGRIND)
    {
        auto varArr = (&var)[0..1];
        disableAddrReportingInRange(varArr);
        T result = var;
        enableAddrReportingInRange(varArr);
        return result;
    }
    else
        return var;
}
/// Write memory that should otherwise be marked as unwritable.
///
/// Mirror of `undefinedRead`: address-error reporting is suspended for the
/// slice covering `var` while the GC stores `value` (e.g. linking free-list
/// nodes inside otherwise inaccessible cells), then restored. Without
/// -debug=VALGRIND this is a plain assignment.
pragma(inline, true)
void undefinedWrite(T)(ref T var, T value) nothrow
{
    debug (VALGRIND)
    {
        auto varArr = (&var)[0..1];
        disableAddrReportingInRange(varArr);
        var = value;
        enableAddrReportingInRange(varArr);
    }
    else
        var = value;
}

View file

@ -0,0 +1,309 @@
/*
----------------------------------------------------------------
Notice that the following BSD-style license applies to this one
file (memcheck.h) only. The rest of Valgrind is licensed under the
terms of the GNU General Public License, version 2, unless
otherwise indicated. See the COPYING file in the source
distribution for details.
----------------------------------------------------------------
This file is part of MemCheck, a heavyweight Valgrind tool for
detecting memory errors.
Copyright (C) 2000-2017 Julian Seward. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. The origin of this software must not be misrepresented; you must
not claim that you wrote the original software. If you use this
software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
4. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------
Notice that the above BSD-style license applies to this one file
(memcheck.h) only. The entire rest of Valgrind is licensed under
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
----------------------------------------------------------------
*/
#ifndef __MEMCHECK_H
#define __MEMCHECK_H
/* This file is for inclusion into client (your!) code.
You can use these macros to manipulate and query memory permissions
inside your own programs.
See comment near the top of valgrind.h on how to use them.
*/
#include "valgrind.h"
/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE ORDER OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end. */
typedef
enum {
VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
VG_USERREQ__MAKE_MEM_UNDEFINED,
VG_USERREQ__MAKE_MEM_DEFINED,
VG_USERREQ__DISCARD,
VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
VG_USERREQ__CHECK_MEM_IS_DEFINED,
VG_USERREQ__DO_LEAK_CHECK,
VG_USERREQ__COUNT_LEAKS,
VG_USERREQ__GET_VBITS,
VG_USERREQ__SET_VBITS,
VG_USERREQ__CREATE_BLOCK,
VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
/* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
VG_USERREQ__COUNT_LEAK_BLOCKS,
VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE,
VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE,
/* This is just for memcheck's internal use - don't use it */
_VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
= VG_USERREQ_TOOL_BASE('M','C') + 256
} Vg_MemCheckClientRequest;
/* Client-code macros to manipulate the state of memory. */
/* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_NOACCESS, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similarly, mark memory at _qzz_addr as addressable but undefined
for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_UNDEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similarly, mark memory at _qzz_addr as addressable and defined
for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_DEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is
not altered: bytes which are addressable are marked as defined,
but those which are not addressable are left unchanged. */
#define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Create a block-description handle. The description is an ascii
string which is included in any messages pertaining to addresses
within the specified memory range. Has no other effect on the
properties of the memory range. */
#define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__CREATE_BLOCK, \
(_qzz_addr), (_qzz_len), (_qzz_desc), \
0, 0)
/* Discard a block-description-handle. Returns 1 for an
invalid handle, 0 for a valid handle. */
#define VALGRIND_DISCARD(_qzz_blkindex) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__DISCARD, \
0, (_qzz_blkindex), 0, 0, 0)
/* Client-code macros to check the state of memory. */
/* Check that memory at _qzz_addr is addressable for _qzz_len bytes.
If suitable addressibility is not established, Valgrind prints an
error message and returns the address of the first offending byte.
Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Check that memory at _qzz_addr is addressable and defined for
_qzz_len bytes. If suitable addressibility and definedness are not
established, Valgrind prints an error message and returns the
address of the first offending byte. Otherwise it returns zero. */
#define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__CHECK_MEM_IS_DEFINED, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
/* Use this macro to force the definedness and addressibility of an
lvalue to be checked. If suitable addressibility and definedness
are not established, Valgrind prints an error message and returns
the address of the first offending byte. Otherwise it returns
zero. */
#define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue) \
VALGRIND_CHECK_MEM_IS_DEFINED( \
(volatile unsigned char *)&(__lvalue), \
(unsigned long)(sizeof (__lvalue)))
/* Do a full memory leak check (like --leak-check=full) mid-execution. */
#define VALGRIND_DO_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 0, 0, 0, 0)
/* Same as VALGRIND_DO_LEAK_CHECK but only showing the entries for
which there was an increase in leaked bytes or leaked nr of blocks
since the previous leak search. */
#define VALGRIND_DO_ADDED_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 1, 0, 0, 0)
/* Same as VALGRIND_DO_ADDED_LEAK_CHECK but showing entries with
increased or decreased leaked bytes/blocks since previous leak
search. */
#define VALGRIND_DO_CHANGED_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 2, 0, 0, 0)
/* Same as VALGRIND_DO_LEAK_CHECK but only showing new entries
i.e. loss records that were not there in the previous leak
search. */
#define VALGRIND_DO_NEW_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
0, 3, 0, 0, 0)
/* Do a summary memory leak check (like --leak-check=summary) mid-execution. */
#define VALGRIND_DO_QUICK_LEAK_CHECK \
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \
1, 0, 0, 0, 0)
/* Return number of leaked, dubious, reachable and suppressed bytes found by
all previous leak checks. They must be lvalues. */
#define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed) \
/* For safety on 64-bit platforms we assign the results to private
unsigned long variables, then assign these to the lvalues the user
specified, which works no matter what type 'leaked', 'dubious', etc
are. We also initialise '_qzz_leaked', etc because
VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
defined. */ \
{ \
unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__COUNT_LEAKS, \
&_qzz_leaked, &_qzz_dubious, \
&_qzz_reachable, &_qzz_suppressed, 0); \
leaked = _qzz_leaked; \
dubious = _qzz_dubious; \
reachable = _qzz_reachable; \
suppressed = _qzz_suppressed; \
}
/* Return number of leaked, dubious, reachable and suppressed bytes found by
all previous leak checks. They must be lvalues. */
#define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \
/* For safety on 64-bit platforms we assign the results to private
unsigned long variables, then assign these to the lvalues the user
specified, which works no matter what type 'leaked', 'dubious', etc
are. We also initialise '_qzz_leaked', etc because
VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as
defined. */ \
{ \
unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \
unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \
VALGRIND_DO_CLIENT_REQUEST_STMT( \
VG_USERREQ__COUNT_LEAK_BLOCKS, \
&_qzz_leaked, &_qzz_dubious, \
&_qzz_reachable, &_qzz_suppressed, 0); \
leaked = _qzz_leaked; \
dubious = _qzz_dubious; \
reachable = _qzz_reachable; \
suppressed = _qzz_suppressed; \
}
/* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it
into the provided zzvbits array. Return values:
0 if not running on valgrind
1 success
2 [previously indicated unaligned arrays; these are now allowed]
3 if any parts of zzsrc/zzvbits are not addressable.
The metadata is not copied in cases 0, 2 or 3 so it should be
impossible to segfault your system by using this call.
*/
#define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__GET_VBITS, \
(const char*)(zza), \
(char*)(zzvbits), \
(zznbytes), 0, 0)
/* Set the validity data for addresses [zza..zza+zznbytes-1], copying it
from the provided zzvbits array. Return values:
0 if not running on valgrind
1 success
2 [previously indicated unaligned arrays; these are now allowed]
3 if any parts of zza/zzvbits are not addressable.
The metadata is not copied in cases 0, 2 or 3 so it should be
impossible to segfault your system by using this call.
*/
#define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes) \
(unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \
VG_USERREQ__SET_VBITS, \
(const char*)(zza), \
(const char*)(zzvbits), \
(zznbytes), 0, 0 )
/* Disable and re-enable reporting of addressing errors in the
specified address range. */
#define VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
#define VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \
VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \
VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE, \
(_qzz_addr), (_qzz_len), 0, 0, 0)
#endif

View file

@ -0,0 +1,38 @@
#include "valgrind.h" /*<valgrind/valgrind.h>*/
#include "memcheck.h" /*<valgrind/memcheck.h>*/
#include <stddef.h> /* for size_t */
/* Thin extern(C)-callable shims around Valgrind's client-request macros,
   used by the D wrapper module etc.valgrind.valgrind. Each function simply
   forwards its arguments to the corresponding macro from
   valgrind.h / memcheck.h. */

/* Mark [addr, addr+len) as unaddressable. */
void _d_valgrind_make_mem_noaccess(const void* addr, size_t len)
{
    VALGRIND_MAKE_MEM_NOACCESS(addr, len);
}

/* Mark [addr, addr+len) as addressable but undefined. */
void _d_valgrind_make_mem_undefined(const void* addr, size_t len)
{
    VALGRIND_MAKE_MEM_UNDEFINED(addr, len);
}

/* Mark [addr, addr+len) as addressable and defined. */
void _d_valgrind_make_mem_defined(const void* addr, size_t len)
{
    VALGRIND_MAKE_MEM_DEFINED(addr, len);
}

/* Copy the validity data for [addr, addr+len) into bits.
   Returns the VALGRIND_GET_VBITS status code
   (0 = not running under valgrind, 1 = success). */
unsigned _d_valgrind_get_vbits(const void* addr, char* bits, size_t len)
{
    return VALGRIND_GET_VBITS(addr, bits, len);
}

/* Set the validity data for [addr, addr+len) from bits.
   Returns the VALGRIND_SET_VBITS status code. */
unsigned _d_valgrind_set_vbits(const void* addr, char* bits, size_t len)
{
    return VALGRIND_SET_VBITS(addr, bits, len);
}

/* Suppress reporting of addressing errors in [addr, addr+len). */
void _d_valgrind_disable_addr_reporting_in_range(const void* addr, size_t len)
{
    VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(addr, len);
}

/* Re-enable reporting of addressing errors in [addr, addr+len). */
void _d_valgrind_enable_addr_reporting_in_range(const void* addr, size_t len)
{
    VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(addr, len);
}

View file

@ -0,0 +1,85 @@
/// D wrapper for the Valgrind client API.
/// Note that you must include this file into your program's compilation
/// and compile with `-debug=VALGRIND` to access the declarations below.
module etc.valgrind.valgrind;

// The StdDdoc branch exists only so the documentation generator sees the
// declarations; the real implementations further below are compiled in
// only when building with -debug=VALGRIND.
version (StdDdoc)
{
    /// Mark the memory covered by `mem` as unaddressable.
    void makeMemNoAccess (const(void)[] mem) nothrow @nogc;

    /// Similarly, mark memory covered by `mem` as addressable but undefined.
    void makeMemUndefined(const(void)[] mem) nothrow @nogc;

    /// Similarly, mark memory covered by `mem` as addressable and defined.
    void makeMemDefined  (const(void)[] mem) nothrow @nogc;

    /// Get the validity data for the address range covered by `mem` and copy it
    /// into the provided `bits` array.
    /// Returns:
    /// - 0 if not running on valgrind
    /// - 1 success
    /// - 2 [previously indicated unaligned arrays; these are now allowed]
    /// - 3 if any parts of `mem`/`bits` are not addressable.
    /// The metadata is not copied in cases 0, 2 or 3 so it should be
    /// impossible to segfault your system by using this call.
    uint getVBits(const(void)[] mem, ubyte[] bits) nothrow @nogc;

    /// Set the validity data for the address range covered by `mem`, copying it
    /// from the provided `bits` array.
    /// Returns:
    /// - 0 if not running on valgrind
    /// - 1 success
    /// - 2 [previously indicated unaligned arrays; these are now allowed]
    /// - 3 if any parts of `mem`/`bits` are not addressable.
    /// The metadata is not copied in cases 0, 2 or 3 so it should be
    /// impossible to segfault your system by using this call.
    uint setVBits(const(void)[] mem, ubyte[] bits) nothrow @nogc;

    /// Disable and re-enable reporting of addressing errors in the
    /// address range covered by `mem`.
    void disableAddrReportingInRange(const(void)[] mem) nothrow @nogc;

    /// ditto
    void enableAddrReportingInRange(const(void)[] mem) nothrow @nogc;
}
else:

debug(VALGRIND):

// Implemented in etc/valgrind/valgrind.c, which expands the C
// client-request macros from the vendored valgrind.h / memcheck.h headers.
private extern(C) nothrow @nogc
{
    void _d_valgrind_make_mem_noaccess (const(void)* addr, size_t len);
    void _d_valgrind_make_mem_undefined(const(void)* addr, size_t len);
    void _d_valgrind_make_mem_defined  (const(void)* addr, size_t len);
    uint _d_valgrind_get_vbits(const(void)* addr, ubyte* bits, size_t len);
    uint _d_valgrind_set_vbits(const(void)* addr, ubyte* bits, size_t len);
    void _d_valgrind_disable_addr_reporting_in_range(const(void)* addr, size_t len);
    void _d_valgrind_enable_addr_reporting_in_range (const(void)* addr, size_t len);
}

// Slice-based convenience wrappers: forward (ptr, length) to the C shims.
void makeMemNoAccess (const(void)[] mem) nothrow @nogc { _d_valgrind_make_mem_noaccess (mem.ptr, mem.length); }
void makeMemUndefined(const(void)[] mem) nothrow @nogc { _d_valgrind_make_mem_undefined(mem.ptr, mem.length); }
void makeMemDefined  (const(void)[] mem) nothrow @nogc { _d_valgrind_make_mem_defined  (mem.ptr, mem.length); }

uint getVBits(const(void)[] mem, ubyte[] bits) nothrow @nogc
{
    // One byte of validity data per byte of mem (enforced here;
    // see VALGRIND_GET_VBITS in memcheck.h).
    assert(mem.length == bits.length);
    return _d_valgrind_get_vbits(mem.ptr, bits.ptr, mem.length);
}

uint setVBits(const(void)[] mem, ubyte[] bits) nothrow @nogc
{
    // One byte of validity data per byte of mem (enforced here;
    // see VALGRIND_SET_VBITS in memcheck.h).
    assert(mem.length == bits.length);
    return _d_valgrind_set_vbits(mem.ptr, bits.ptr, mem.length);
}

void disableAddrReportingInRange(const(void)[] mem) nothrow @nogc
{
    _d_valgrind_disable_addr_reporting_in_range(mem.ptr, mem.length);
}

void enableAddrReportingInRange(const(void)[] mem) nothrow @nogc
{
    _d_valgrind_enable_addr_reporting_in_range(mem.ptr, mem.length);
}

File diff suppressed because it is too large Load diff

View file

@ -1307,12 +1307,19 @@ int hasArrayFinalizerInSegment(void* p, size_t size, in void[] segment) nothrow
return cast(size_t)(cast(void*)si.xdtor - segment.ptr) < segment.length;
}
debug (VALGRIND) import etc.valgrind.valgrind;
// called by the GC
void finalize_array2(void* p, size_t size) nothrow
{
debug(PRINTF) printf("rt_finalize_array2(p = %p)\n", p);
TypeInfo_Struct si = void;
debug (VALGRIND)
{
auto block = p[0..size];
disableAddrReportingInRange(block);
}
if (size <= 256)
{
si = *cast(TypeInfo_Struct*)(p + size - size_t.sizeof);
@ -1329,6 +1336,7 @@ void finalize_array2(void* p, size_t size) nothrow
size = *cast(size_t*)p;
p += LARGEPREFIX;
}
debug (VALGRIND) enableAddrReportingInRange(block);
try
{

View file

@ -268,7 +268,9 @@ struct ModuleGroup
}
}
// trim space to what is needed.
edges[i] = (cast(int*)realloc(edge, int.sizeof * nEdges))[0 .. nEdges];
edges[i] = nEdges > 0
? (cast(int*)realloc(edge, int.sizeof * nEdges))[0 .. nEdges]
: null;
}
}

View file

@ -0,0 +1,34 @@
include ../common.mak

# "ok_*" programs must run cleanly under Valgrind; "no_*" programs contain
# deliberate memory errors that Valgrind is expected to detect.
TESTS := ok_append no_use_after_free no_oob no_oob_sentinel no_use_after_gc

# The GC, lifetime, and Valgrind-wrapper sources are compiled into each test
# so that -debug=VALGRIND takes effect inside the GC implementation.
GC_SRC = \
	../../src/core/internal/gc/impl/conservative/gc.d \
	../../src/etc/valgrind/valgrind.d \
	../../src/rt/lifetime.d

.PHONY: all clean

all: $(addprefix $(ROOT)/,$(addsuffix .done,$(TESTS)))

# Positive tests: Valgrind must exit 0 (no errors reported).
$(ROOT)/ok_%.done: $(ROOT)/ok_%
	@echo Testing ok_$*
	$(QUIET)if ! command -v valgrind >/dev/null; then \
		echo valgrind not installed, skipping; \
	else \
		$(TIMELIMIT)valgrind --quiet --tool=memcheck --error-exitcode=8 $(ROOT)/ok_$* $(RUN_ARGS); \
	fi
	$(QUIET)touch $@

# Negative tests: Valgrind must FAIL (non-zero exit via --error-exitcode),
# hence the negated subshell.
# Fix: the subshell must be terminated with ';' before 'fi' — the previous
# version joined ') \' directly to 'fi', which is a shell syntax error.
$(ROOT)/no_%.done: $(ROOT)/no_%
	@echo Testing no_$*
	$(QUIET)if ! command -v valgrind >/dev/null; then \
		echo valgrind not installed, skipping; \
	else \
		( ! $(TIMELIMIT)valgrind --quiet --tool=memcheck --error-exitcode=8 $(ROOT)/no_$* $(RUN_ARGS) ); \
	fi
	$(QUIET)touch $@

$(ROOT)/%: $(SRC)/%.d $(GC_SRC)
	$(QUIET)$(DMD) -debug=VALGRIND -debug=SENTINEL $(DFLAGS) -of$@ $< $(GC_SRC)

clean:
	rm -rf $(ROOT)

View file

@ -0,0 +1,7 @@
// Out-of-bounds access (within the same block).
// Deliberate bug: `.ptr` indexing bypasses D's array bounds checks, so the
// write past the end of the 4-element array must be caught by Valgrind.
void main()
{
    auto arr = new ubyte[4];
    arr.ptr[5] = 2; // index 5 is past the end of the 4-element array
}

View file

@ -0,0 +1,7 @@
// Out-of-bounds access (SENTINEL).
// Deliberate bug: writes one byte BEFORE the allocation, into the region
// guarded by the GC's SENTINEL debug feature (the Makefile builds these
// tests with -debug=SENTINEL); Valgrind is expected to flag it.
void main()
{
    auto arr = new ubyte[4];
    arr.ptr[-1] = 42; // underflow: one byte before the start of the block
}

View file

@ -0,0 +1,10 @@
// Explicit use-after-free.
// Deliberate bug: the block is manually freed via GC.free, after which the
// GC marks it inaccessible, so the read must be reported by Valgrind.
import core.memory;

int main()
{
    auto p = cast(int*)GC.malloc(int.sizeof * 3);
    GC.free(p);
    return *p; // invalid read of explicitly freed memory
}

View file

@ -0,0 +1,26 @@
// Use-after-GC (escaping heap reference).
// Deliberate bug: two GC-allocated objects reference each other, so
// whichever destructor runs second reads memory the GC may already have
// invalidated; Valgrind is expected to flag that read.
struct S
{
    S* other;
    ~this()
    {
        // Dereferencing other GC-allocated values in a destructor is not allowed,
        // as the deallocation/destruction order is undefined,
        // and here even forms a loop.
        int dummy = other.other !is &this;
        result += dummy; // fold the read into a global so -release cannot elide it
    }
}

__gshared int result; // Trick the optimizer

int main()
{
    auto a = new S;
    auto b = new S;
    a.other = b;
    b.other = a;
    return result;
}

View file

@ -0,0 +1,12 @@
// Simply generate some garbage.
// This program should not trigger any Valgrind warnings.
void main()
{
    // Repeatedly build up and discard strings so the GC allocates and
    // (eventually) collects; all of it must stay silent under memcheck.
    foreach (iteration; 0 .. 100)
    {
        string buffer;
        foreach (step; 0 .. 1000)
        {
            buffer ~= 'x';
        }
    }
}