
import changes from `dev` branch of rmottola/Arctic-Fox:

- Bug 1189709 - Reduce scope of MessageChannel window neutering. r=jimm (a73623101e)
- Bug 1202051 - Use a PersistentRooted to automate tracing of unwrappedException_; r=sfink (32964b4bcb)
- missing part of Bug 1135236 - Remove unused print callbacks in profiler backend. (a6427e4a23)
- Bug 1164785 - Append line number to systrace scopedTrace object name. r=BenWa (be4fb076b0)
- missing parts of Bug 779291: Implement SPS stackwalk (fefa7c961c)
- Bug 1186709 - Remove MOZ_IMPLICIT from security/sandbox/chromium. r=bobowen (7c1419cd3a)
- Bug 1274253. Properly test the cpuid bits. (039f594ab3)
- Bug 1168291 - Install mozcrt.lib instead of mozglue.lib in the SDK. r=mshal (00d4309281)
- Bug 1198334 (follow-up) - Fix SM(e) bustage (which doesn't show up on try pushes, grr). r=bustage. (4431457ede)
- Bug 1194560 - Add tools/power/rapl, a RAPL-reading program for power profiling. r=erahm,glandium. (47b61fd39c)
- Bug 1147243 - Build memory/jemalloc in unified mode; r=glandium (81173f8bc2)
- Bug 1201738 - Update jemalloc4 to 594c759 + two pending patches. r=njn (66f4f3fe49)
- Bug 1135583: Prevent the inclusion of Char16.h in VS2015's fallible.obj. r=glandium (3e5ac84efb)
- bug 1171122 - Swap some XP_MACOSX for XP_DARWIN in mozalloc. r=glandium (6d03543291)
- Bug 1170177 - Disable our own abort() method with MOZ_ASAN. r=froydnj (cdc43fcb8c)
- Bug 1120793 - Remove obsolete _Throw wrapping. r=froydnj (a5c53780ec)
- Bug 1189967 - Avoid conflicting declarations for our raise wrappers on Windows. r=nfroyd (e0a606ef14)
- Bug 1203476 - Fix an Android-only warning in mozalloc_abort.cpp. r=glandium. (806b791d54)
- pointer style (97a2b4ffea)
- Bug 1147353 - Odin: simplify the masked index bounds check test. r=sfink, r=luke (fa6007c8dd)
roytam1 committed 5 months ago (commit 850741b596, branch pull/8/head)
49 files changed:

  config/moz.build | 3
  configure.in | 68
  ipc/glue/MessageChannel.cpp | 14
  ipc/glue/WindowsMessageLoop.cpp | 9
  js/src/asmjs/AsmJSValidate.cpp | 19
  js/src/jit/RegisterSets.h | 22
  js/src/jscntxt.cpp | 6
  js/src/jscntxt.h | 2
  memory/fallible/moz.build | 5
  memory/jemalloc/moz.build | 10
  memory/jemalloc/src/ChangeLog | 19
  memory/jemalloc/src/VERSION | 2
  memory/jemalloc/src/configure | 41
  memory/jemalloc/src/configure.ac | 22
  memory/jemalloc/src/include/jemalloc/internal/arena.h | 111
  memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in | 28
  memory/jemalloc/src/include/jemalloc/internal/private_symbols.txt | 2
  memory/jemalloc/src/include/jemalloc/internal/prof.h | 10
  memory/jemalloc/src/include/jemalloc/internal/tcache.h | 18
  memory/jemalloc/src/include/jemalloc/jemalloc_protos.h.in | 2
  memory/jemalloc/src/src/arena.c | 66
  memory/jemalloc/src/src/huge.c | 14
  memory/jemalloc/src/src/jemalloc.c | 29
  memory/jemalloc/src/src/prof.c | 2
  memory/jemalloc/src/src/tcache.c | 8
  memory/jemalloc/src/test/integration/chunk.c | 4
  memory/jemalloc/src/test/unit/prof_reset.c | 49
  memory/jemalloc/src/test/unit/size_classes.c | 2
  memory/jemalloc/src/test/unit/tsd.c | 6
  memory/jemalloc/upstream.info | 4
  memory/mozalloc/moz.build | 12
  memory/mozalloc/mozalloc.cpp | 4
  memory/mozalloc/mozalloc_abort.cpp | 17
  memory/mozalloc/msvc_raise_wrappers.cpp | 13
  memory/mozalloc/msvc_raise_wrappers.h | 22
  memory/mozalloc/msvc_throw_wrapper.cpp | 30
  memory/mozalloc/msvc_throw_wrapper.h | 18
  memory/mozalloc/throw_msvc.h | 2
  mozglue/build/SSE.cpp | 2
  mozglue/build/moz.build | 3
  mozglue/crt/Makefile.in | 8
  security/sandbox/chromium/base/memory/ref_counted.h | 4
  security/sandbox/chromium/base/strings/string_piece.h | 4
  security/sandbox/chromium/sandbox/linux/bpf_dsl/bpf_dsl.cc | 4
  toolkit/toolkit.mozbuild | 1
  tools/power/moz.build | 12
  tools/power/rapl.cpp | 772
  tools/profiler/public/GeckoProfiler.h | 7
  tools/profiler/public/GeckoProfilerImpl.h | 40

config/moz.build | 3

@@ -45,3 +45,6 @@ PYTHON_UNIT_TESTS += [
if CONFIG['GNU_CC'] and CONFIG['MOZ_OPTIMIZE']:
CFLAGS += ['-O3']
# XXX: We should fix these warnings.
ALLOW_COMPILER_WARNINGS = True

configure.in | 68

@@ -94,7 +94,6 @@ dnl ==============================================================
_topsrcdir=`cd \`dirname $0\`; pwd`
_objdir=`pwd`
dnl TODO Don't exempt L10N builds once bug 842760 is resolved.
if test "$_topsrcdir" = "$_objdir" -a "${with_l10n_base+set}" != set; then
echo " ***"
@@ -612,66 +611,30 @@ See https://developer.mozilla.org/en/Windows_Build_Prerequisites.])
unset _MSVC_VER_FILTER
AC_CACHE_CHECK(for std::_Throw, ac_cv_have_std__Throw,
AC_CACHE_CHECK(for overridable _RAISE,
ac_cv_have__RAISE,
[
AC_LANG_SAVE
AC_LANG_CPLUSPLUS
_SAVE_CXXFLAGS="$CXXFLAGS"
CXXFLAGS="${CXXFLAGS} -D_HAS_EXCEPTIONS=0"
AC_TRY_COMPILE([#include <exception>],
[std::_Throw(std::exception()); return 0;],
ac_cv_have_std__Throw="yes",
ac_cv_have_std__Throw="no")
AC_TRY_COMPILE([#include <xstddef>
#undef _RAISE
#define _RAISE(x) externallyDefinedFunction((x).what())
#include <vector>
],
[std::vector<int> v; return v.at(1);],
ac_cv_have__RAISE="no",
ac_cv_have__RAISE="yes")
CXXFLAGS="$_SAVE_CXXFLAGS"
AC_LANG_RESTORE
])
if test "$ac_cv_have_std__Throw" = "yes"; then
AC_CACHE_CHECK(for |class __declspec(dllimport) exception| bug,
ac_cv_have_dllimport_exception_bug,
[
AC_LANG_SAVE
AC_LANG_CPLUSPLUS
_SAVE_CXXFLAGS="$CXXFLAGS"
CXXFLAGS="${CXXFLAGS} -D_HAS_EXCEPTIONS=0"
AC_TRY_LINK([#include <vector>],
[std::vector<int> v; return v.at(1);],
ac_cv_have_dllimport_exception_bug="no",
ac_cv_have_dllimport_exception_bug="yes")
CXXFLAGS="$_SAVE_CXXFLAGS"
AC_LANG_RESTORE
])
if test "$ac_cv_have_dllimport_exception_bug" = "no"; then
WRAP_STL_INCLUDES=1
MOZ_MSVC_STL_WRAP_Throw=1
AC_DEFINE(MOZ_MSVC_STL_WRAP_Throw)
fi
if test "$ac_cv_have__RAISE" = "yes"; then
WRAP_STL_INCLUDES=1
MOZ_MSVC_STL_WRAP_RAISE=1
AC_DEFINE(MOZ_MSVC_STL_WRAP_RAISE)
else
AC_CACHE_CHECK(for overridable _RAISE,
ac_cv_have__RAISE,
[
AC_LANG_SAVE
AC_LANG_CPLUSPLUS
_SAVE_CXXFLAGS="$CXXFLAGS"
CXXFLAGS="${CXXFLAGS} -D_HAS_EXCEPTIONS=0"
AC_TRY_COMPILE([#include <xstddef>
#undef _RAISE
#define _RAISE(x) externallyDefinedFunction((x).what())
#include <vector>
],
[std::vector<int> v; return v.at(1);],
ac_cv_have__RAISE="no",
ac_cv_have__RAISE="yes")
CXXFLAGS="$_SAVE_CXXFLAGS"
AC_LANG_RESTORE
])
if test "$ac_cv_have__RAISE" = "yes"; then
WRAP_STL_INCLUDES=1
MOZ_MSVC_STL_WRAP_RAISE=1
AC_DEFINE(MOZ_MSVC_STL_WRAP_RAISE)
else
AC_MSG_ERROR([Gecko exception wrapping doesn't understand your MSVC/SDK. Please file a bug describing this error and your build configuration.])
fi
AC_MSG_ERROR([Gecko exception wrapping doesn't understand your MSVC/SDK. Please file a bug describing this error and your build configuration.])
fi
if test "$WRAP_STL_INCLUDES" = "1"; then
@@ -788,7 +751,6 @@ AC_SUBST(INTEL_CXX)
AC_SUBST(STL_FLAGS)
AC_SUBST(WRAP_STL_INCLUDES)
AC_SUBST(MOZ_MSVC_STL_WRAP_Throw)
AC_SUBST(MOZ_MSVC_STL_WRAP_RAISE)
dnl ========================================================

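The probe above compiles a tiny program with _RAISE redefined before <vector> is pulled in; if that compiles, the STL's error-reporting macro is overridable and Gecko can wrap it. A minimal sketch of the technique the check enables, for MSVC builds with -D_HAS_EXCEPTIONS=0 (mozilla_HandleRaise is an illustrative stand-in for the real wrapper in msvc_raise_wrappers.h):

    // With _HAS_EXCEPTIONS=0, MSVC's STL reports errors through the
    // _RAISE(x) macro instead of throwing, so the macro can be
    // redirected before any container header is included.
    #include <xstddef>
    #undef _RAISE
    #define _RAISE(x) mozilla_HandleRaise((x).what())
    #include <vector>

    // Stand-in handler: e.g. print the message and abort instead of unwinding.
    extern "C" void mozilla_HandleRaise(const char* msg);

    int second(std::vector<int>& v)
    {
        return v.at(1); // out of range -> _RAISE -> mozilla_HandleRaise
    }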
ipc/glue/MessageChannel.cpp | 14

@@ -992,7 +992,6 @@ MessageChannel::Call(Message* aMsg, Message* aReply)
#ifdef OS_WIN
SyncStackFrame frame(this, true);
NeuteredWindowRegion neuteredRgn(mFlags & REQUIRE_DEFERRED_MESSAGE_PROTECTION);
#endif
// This must come before MonitorAutoLock, as its destructor acquires the
@@ -1038,9 +1037,16 @@ MessageChannel::Call(Message* aMsg, Message* aReply)
}
#ifdef OS_WIN
/* We should pump messages at this point to ensure that the IPC peer
does not become deadlocked on a pending inter-thread SendMessage() */
neuteredRgn.PumpOnce();
// We need to limit the scope of neuteredRgn to this spot in the code.
// Window neutering can't be enabled during some plugin calls because
// we then risk the neutered window procedure being subclassed by a
// plugin.
{
NeuteredWindowRegion neuteredRgn(mFlags & REQUIRE_DEFERRED_MESSAGE_PROTECTION);
/* We should pump messages at this point to ensure that the IPC peer
does not become deadlocked on a pending inter-thread SendMessage() */
neuteredRgn.PumpOnce();
}
#endif
// Now might be the time to process a message deferred because of race

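NeuteredWindowRegion is an RAII guard, so the fix above is purely a matter of scope: declaring the guard inside a nested block bounds exactly where window neutering is active, and the destructor runs before any plugin code gets a chance to subclass the neutered window procedure. The shape of the pattern, with illustrative names:

    struct ScopedNeuter
    {
        ScopedNeuter()  { /* start neutering windows */ }
        ~ScopedNeuter() { /* stop neutering windows  */ }
    };

    void Call()
    {
        // ... setup that must not run neutered ...
        {
            ScopedNeuter guard;   // neutering active only inside this block
            // pump pending messages here
        }                         // destructor ends neutering immediately
        // ... plugin-sensitive work continues un-neutered ...
    }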
ipc/glue/WindowsMessageLoop.cpp | 9

@@ -894,10 +894,10 @@ StopNeutering()
}
NeuteredWindowRegion::NeuteredWindowRegion(bool aDoNeuter MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
: mNeuteredByThis(!gWindowHook)
: mNeuteredByThis(!gWindowHook && aDoNeuter)
{
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
if (aDoNeuter && mNeuteredByThis) {
if (mNeuteredByThis) {
StartNeutering();
}
}
@@ -912,6 +912,11 @@ NeuteredWindowRegion::~NeuteredWindowRegion()
void
NeuteredWindowRegion::PumpOnce()
{
if (!gWindowHook) {
// This should be a no-op if nothing has been neutered.
return;
}
MSG msg = {0};
// Pump any COM messages so that we don't hang due to STA marshaling.
if (gCOMWindow && ::PeekMessageW(&msg, gCOMWindow, 0, 0, PM_REMOVE)) {

js/src/asmjs/AsmJSValidate.cpp | 19

@@ -5558,20 +5558,13 @@ FoldMaskedArrayIndex(FunctionBuilder& f, ParseNode** indexExpr, int32_t* mask,
uint32_t mask2;
if (IsLiteralOrConstInt(f, maskNode, &mask2)) {
// Flag the access to skip the bounds check if the mask ensures that an 'out of
// bounds' access can not occur based on the current heap length constraint.
if (mask2 == 0) {
// Flag the access to skip the bounds check if the mask ensures that an
// 'out of bounds' access can not occur based on the current heap length
// constraint. The unsigned maximum of a masked index is the mask
// itself, so check that the mask is not negative and compare the mask
// to the known minimum heap length.
if (int32_t(mask2) >= 0 && mask2 < f.m().minHeapLength())
*needsBoundsCheck = NO_BOUNDS_CHECK;
} else {
uint32_t minHeap = f.m().minHeapLength();
uint32_t minHeapZeroes = CountLeadingZeroes32(minHeap - 1);
uint32_t maskZeroes = CountLeadingZeroes32(mask2);
if ((minHeapZeroes < maskZeroes) ||
(IsPowerOfTwo(minHeap) && minHeapZeroes == maskZeroes))
{
*needsBoundsCheck = NO_BOUNDS_CHECK;
}
}
*mask &= mask2;
*indexExpr = indexNode;
return true;

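The simplification rests on one arithmetic fact: for a non-negative mask m, (i & m) <= m for every index i, so the largest masked index is the mask itself and a single comparison against the minimum heap length suffices. A self-contained check of that reasoning (the concrete mask and heap-length values are made up):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uint32_t minHeapLength = 0x10000; // hypothetical heap constraint
        const uint32_t mask = 0xFFF;            // as in an (i & 0xFFF) access

        // int32_t(mask) >= 0 rejects masks with the sign bit set; a masked
        // index can never exceed the mask itself, let alone the heap end.
        assert(int32_t(mask) >= 0 && mask < minHeapLength);
        for (uint32_t i = 0; i < 0x100000; i += 0x1111)
            assert((i & mask) <= mask);
        return 0;
    }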
js/src/jit/RegisterSets.h | 22

@@ -492,16 +492,16 @@ class RegisterSet {
MOZ_CONSTEXPR GeneralRegisterSet gprs() const {
return gpr_;
}
GeneralRegisterSet &gprs() {
GeneralRegisterSet& gprs() {
return gpr_;
}
MOZ_CONSTEXPR FloatRegisterSet fpus() const {
return fpu_;
}
FloatRegisterSet &fpus() {
FloatRegisterSet& fpus() {
return fpu_;
}
bool operator ==(const RegisterSet &other) const {
bool operator ==(const RegisterSet& other) const {
return other.gpr_ == gpr_ && other.fpu_ == fpu_;
}
@@ -824,13 +824,13 @@ class SpecializedRegSet<Accessors, RegisterSet> : public Accessors
GeneralRegisterSet gprs() const {
return this->Parent::set_.gprs();
}
GeneralRegisterSet &gprs() {
GeneralRegisterSet& gprs() {
return this->Parent::set_.gprs();
}
FloatRegisterSet fpus() const {
return this->Parent::set_.fpus();
}
FloatRegisterSet &fpus() {
FloatRegisterSet& fpus() {
return this->Parent::set_.fpus();
}
@@ -937,7 +937,7 @@ class CommonRegSet : public SpecializedRegSet<Accessors, Set>
RegSet set() const {
return this->Parent::set_;
}
RegSet &set() {
RegSet& set() {
return this->Parent::set_;
}
@@ -1129,7 +1129,7 @@ class TypedRegisterBackwardIterator
{ }
explicit TypedRegisterBackwardIterator(LiveSet<TypedRegisterSet<T>> regset) : regset_(regset)
{ }
TypedRegisterBackwardIterator(const TypedRegisterBackwardIterator &other)
TypedRegisterBackwardIterator(const TypedRegisterBackwardIterator& other)
: regset_(other.regset_)
{ }
@@ -1161,7 +1161,7 @@ class TypedRegisterForwardIterator
{ }
explicit TypedRegisterForwardIterator(LiveSet<TypedRegisterSet<T>> regset) : regset_(regset)
{ }
TypedRegisterForwardIterator(const TypedRegisterForwardIterator &other) : regset_(other.regset_)
TypedRegisterForwardIterator(const TypedRegisterForwardIterator& other) : regset_(other.regset_)
{ }
bool more() const {
@@ -1200,13 +1200,13 @@ class AnyRegisterIterator
AnyRegisterIterator(GeneralRegisterSet genset, FloatRegisterSet floatset)
: geniter_(genset), floatiter_(floatset)
{ }
explicit AnyRegisterIterator(const RegisterSet &set)
explicit AnyRegisterIterator(const RegisterSet& set)
: geniter_(set.gpr_), floatiter_(set.fpu_)
{ }
explicit AnyRegisterIterator(const LiveSet<RegisterSet> &set)
explicit AnyRegisterIterator(const LiveSet<RegisterSet>& set)
: geniter_(set.gprs()), floatiter_(set.fpus())
{ }
AnyRegisterIterator(const AnyRegisterIterator &other)
AnyRegisterIterator(const AnyRegisterIterator& other)
: geniter_(other.geniter_), floatiter_(other.floatiter_)
{ }
bool more() const {

js/src/jscntxt.cpp | 6

@@ -953,7 +953,7 @@ ExclusiveContext::recoverFromOutOfMemory()
JSContext::JSContext(JSRuntime* rt)
: ExclusiveContext(rt, &rt->mainThread, Context_JS),
throwing(false),
unwrappedException_(UndefinedValue()),
unwrappedException_(this),
options_(),
overRecursed_(false),
propagatingForcedReturn_(false),
@@ -1142,10 +1142,6 @@ JSContext::mark(JSTracer* trc)
{
/* Stack frames and slots are traced by StackSpace::mark. */
/* Mark other roots-by-definition in the JSContext. */
if (isExceptionPending())
TraceRoot(trc, &unwrappedException_, "unwrapped exception");
TraceCycleDetectionSet(trc, cycleDetectorSet);
if (compartment_)

js/src/jscntxt.h | 2

@@ -305,7 +305,7 @@ struct JSContext : public js::ExclusiveContext,
private:
/* Exception state -- the exception member is a GC root by definition. */
bool throwing; /* is there a pending exception? */
js::Value unwrappedException_; /* most-recently-thrown exception */
JS::PersistentRooted<JS::Value> unwrappedException_; /* most-recently-thrown exception */
/* Per-context options. */
JS::ContextOptions options_;

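JS::PersistentRooted registers itself with the runtime on construction and unregisters in its destructor, so the GC traces the slot for the member's whole lifetime; that is what lets the manual TraceRoot call disappear from JSContext::mark above. A minimal usage sketch against the JSAPI of this era (error handling omitted):

    #include "jsapi.h"

    void Example(JSContext* cx)
    {
        // Traced automatically by the GC until it goes out of scope;
        // no manual TraceRoot call is needed.
        JS::PersistentRooted<JS::Value> val(cx);
        val = JS::Int32Value(42);
    }   // destructor unregisters the root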
memory/fallible/moz.build | 5

@@ -28,3 +28,8 @@ if CONFIG['_MSC_VER']:
CXXFLAGS += [
'-Zl',
]
if CONFIG['_MSC_VER'] >= '1900':
# This further prevents the CRT name from getting into the .obj file,
# by avoiding pulling in a bunch of string code that uses the CRT.
DEFINES['mozilla_Char16_h'] = True

memory/jemalloc/moz.build | 10

@@ -4,7 +4,7 @@
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
SOURCES += [
UNIFIED_SOURCES += [
'src/src/arena.c',
'src/src/atomic.c',
'src/src/base.c',
@@ -13,7 +13,6 @@ SOURCES += [
'src/src/chunk_dss.c',
'src/src/chunk_mmap.c',
'src/src/ckh.c',
'src/src/ctl.c',
'src/src/extent.c',
'src/src/hash.c',
'src/src/huge.c',
@@ -32,10 +31,15 @@ SOURCES += [
# 'src/src/valgrind.c',
]
SOURCES += [
# This file cannot be built in unified mode because of symbol clash on arena_purge.
'src/src/ctl.c',
]
# Only OSX needs the zone allocation implementation,
# but only if replace-malloc is not enabled.
if CONFIG['OS_TARGET'] == 'Darwin' and not CONFIG['MOZ_REPLACE_MALLOC']:
SOURCES += [
UNIFIED_SOURCES += [
'src/src/zone.c',
]

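Unified builds concatenate batches of sources into single generated files before compiling, which is why two files that each define a file-local (static) symbol of the same name cannot share a batch; arena.c and ctl.c each define their own static arena_purge. A sketch of the failure mode, with illustrative file contents:

    // What a generated unified file effectively contains:
    //
    //     #include "arena.c"  /* defines its own static arena_purge() */
    //     #include "ctl.c"    /* defines another static arena_purge() */
    //
    // Merged into one translation unit the two definitions collide, so
    // ctl.c is kept out of UNIFIED_SOURCES and compiled on its own.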
memory/jemalloc/src/ChangeLog | 19

@@ -4,6 +4,25 @@ brevity. Much more detail can be found in the git revision history:
https://github.com/jemalloc/jemalloc
* 4.0.1 (XXX)
Bug fixes:
- Fix arenas_cache_cleanup() and arena_get_hard() to handle
allocation/deallocation within the application's thread-specific data
cleanup functions even after arenas_cache is torn down.
- Don't bitshift by negative amounts when encoding/decoding run sizes in chunk
header maps. This affected systems with page sizes greater than 8 KiB.
- Rename index_t to szind_t to avoid an existing type on Solaris.
- Add JEMALLOC_CXX_THROW to the memalign() function prototype, in order to
match glibc and avoid compilation errors when including both
jemalloc/jemalloc.h and malloc.h in C++ code.
- Fix chunk purge hook calls for in-place huge shrinking reallocation to
specify the old chunk size rather than the new chunk size. This bug caused
no correctness issues for the default chunk purge function, but was
visible to custom functions set via the "arena.<i>.chunk_hooks" mallctl.
- Fix TLS configuration such that it is enabled by default for platforms on
which it works correctly.
* 4.0.0 (August 17, 2015)
This version contains many speed and space optimizations, both minor and

memory/jemalloc/src/VERSION | 2

@@ -1 +1 @@
4.0.0-0-g6e98caf8f064482b9ab292ef3638dea67420bbc2
4.0.0-12-ged4883285e111b426e5769b24dad164ebacaa5b9

memory/jemalloc/src/configure (vendored) | 41

@@ -728,6 +728,7 @@ infodir
docdir
oldincludedir
includedir
runstatedir
localstatedir
sharedstatedir
sysconfdir
@@ -831,6 +832,7 @@ datadir='${datarootdir}'
sysconfdir='${prefix}/etc'
sharedstatedir='${prefix}/com'
localstatedir='${prefix}/var'
runstatedir='${localstatedir}/run'
includedir='${prefix}/include'
oldincludedir='/usr/include'
docdir='${datarootdir}/doc/${PACKAGE}'
@@ -1083,6 +1085,15 @@ do
| -silent | --silent | --silen | --sile | --sil)
silent=yes ;;
-runstatedir | --runstatedir | --runstatedi | --runstated \
| --runstate | --runstat | --runsta | --runst | --runs \
| --run | --ru | --r)
ac_prev=runstatedir ;;
-runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
| --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
| --run=* | --ru=* | --r=*)
runstatedir=$ac_optarg ;;
-sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
ac_prev=sbindir ;;
-sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
@@ -1220,7 +1231,7 @@ fi
for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \
datadir sysconfdir sharedstatedir localstatedir includedir \
oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
libdir localedir mandir
libdir localedir mandir runstatedir
do
eval ac_val=\$$ac_var
# Remove trailing slashes.
@@ -1373,6 +1384,7 @@ Fine tuning of the installation directories:
--sysconfdir=DIR read-only single-machine data [PREFIX/etc]
--sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com]
--localstatedir=DIR modifiable single-machine data [PREFIX/var]
--runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run]
--libdir=DIR object code libraries [EPREFIX/lib]
--includedir=DIR C header files [PREFIX/include]
--oldincludedir=DIR C header files for non-gcc [/usr/include]
@@ -6964,7 +6976,6 @@ else
fi
set -x
if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
rm -f "${objroot}VERSION"
@@ -6992,8 +7003,6 @@ $as_echo "Missing VERSION file, and unable to generate it; creating bogus VERSIO
cp ${srcroot}VERSION ${objroot}VERSION
fi
fi
set +x
jemalloc_version=`cat "${objroot}VERSION"`
jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $1}'`
jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print $2}'`
@@ -7283,15 +7292,18 @@ else
fi
if test "x${enable_tls}" = "x" -a "x${force_tls}" = "x1" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing TLS to avoid allocator/threading bootstrap issues" >&5
if test "x${enable_tls}" = "x" ; then
if test "x${force_tls}" = "x1" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing TLS to avoid allocator/threading bootstrap issues" >&5
$as_echo "Forcing TLS to avoid allocator/threading bootstrap issues" >&6; }
enable_tls="1"
fi
if test "x${enable_tls}" = "x" -a "x${force_tls}" = "x0" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no TLS to avoid allocator/threading bootstrap issues" >&5
enable_tls="1"
elif test "x${force_tls}" = "x0" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: Forcing no TLS to avoid allocator/threading bootstrap issues" >&5
$as_echo "Forcing no TLS to avoid allocator/threading bootstrap issues" >&6; }
enable_tls="0"
enable_tls="0"
else
enable_tls="1"
fi
fi
if test "x${enable_tls}" = "x1" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for TLS" >&5
@@ -7327,12 +7339,17 @@ else
fi
if test "x${enable_tls}" = "x1" ; then
if test "x${force_tls}" = "x0" ; then
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: TLS enabled despite being marked unusable on this platform" >&5
$as_echo "$as_me: WARNING: TLS enabled despite being marked unusable on this platform" >&2;}
fi
cat >>confdefs.h <<_ACEOF
#define JEMALLOC_TLS
_ACEOF
elif test "x${force_tls}" = "x1" ; then
as_fn_error $? "Failed to configure TLS, which is mandatory for correct function" "$LINENO" 5
{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: TLS disabled despite being marked critical on this platform" >&5
$as_echo "$as_me: WARNING: TLS disabled despite being marked critical on this platform" >&2;}
fi

memory/jemalloc/src/configure.ac | 22

@@ -1272,13 +1272,16 @@ fi
,
enable_tls=""
)
if test "x${enable_tls}" = "x" -a "x${force_tls}" = "x1" ; then
AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues])
enable_tls="1"
fi
if test "x${enable_tls}" = "x" -a "x${force_tls}" = "x0" ; then
AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues])
enable_tls="0"
if test "x${enable_tls}" = "x" ; then
if test "x${force_tls}" = "x1" ; then
AC_MSG_RESULT([Forcing TLS to avoid allocator/threading bootstrap issues])
enable_tls="1"
elif test "x${force_tls}" = "x0" ; then
AC_MSG_RESULT([Forcing no TLS to avoid allocator/threading bootstrap issues])
enable_tls="0"
else
enable_tls="1"
fi
fi
if test "x${enable_tls}" = "x1" ; then
AC_MSG_CHECKING([for TLS])
@@ -1298,9 +1301,12 @@ else
fi
AC_SUBST([enable_tls])
if test "x${enable_tls}" = "x1" ; then
if test "x${force_tls}" = "x0" ; then
AC_MSG_WARN([TLS enabled despite being marked unusable on this platform])
fi
AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ])
elif test "x${force_tls}" = "x1" ; then
AC_MSG_ERROR([Failed to configure TLS, which is mandatory for correct function])
AC_MSG_WARN([TLS disabled despite being marked critical on this platform])
fi
dnl ============================================================================

memory/jemalloc/src/include/jemalloc/internal/arena.h | 111

@@ -39,7 +39,7 @@ typedef struct arena_s arena_t;
#ifdef JEMALLOC_ARENA_STRUCTS_A
struct arena_run_s {
/* Index of bin this run is associated with. */
index_t binind;
szind_t binind;
/* Number of free regions in run. */
unsigned nfree;
@@ -448,7 +448,7 @@ bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
void arena_maybe_purge(arena_t *arena);
void arena_purge_all(arena_t *arena);
void arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin,
index_t binind, uint64_t prof_accumbytes);
szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
#ifdef JEMALLOC_JET
@@ -519,17 +519,19 @@ arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbitsp_read(size_t *mapbitsp);
size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_size_decode(size_t mapbits);
size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
size_t pageind);
size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
index_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
size_t arena_mapbits_size_encode(size_t size);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
@@ -539,21 +541,21 @@ void arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind,
void arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind,
size_t size, size_t flags);
void arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
index_t binind);
szind_t binind);
void arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind,
size_t runind, index_t binind, size_t flags);
size_t runind, szind_t binind, size_t flags);
void arena_metadata_allocated_add(arena_t *arena, size_t size);
void arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t arena_metadata_allocated_get(arena_t *arena);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
index_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
index_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
unsigned arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
prof_tctx_t *arena_prof_tctx_get(const void *ptr);
void arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, bool zero,
tcache_t *tcache);
arena_t *arena_aalloc(const void *ptr);
@@ -652,6 +654,22 @@ arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_size_decode(size_t mapbits)
{
size_t size;
#if CHUNK_MAP_SIZE_SHIFT > 0
size = (mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
size = mapbits & CHUNK_MAP_SIZE_MASK;
#else
size = (mapbits & CHUNK_MAP_SIZE_MASK) << -CHUNK_MAP_SIZE_SHIFT;
#endif
return (size);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
{
@@ -659,7 +677,7 @@ arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT);
return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -670,7 +688,7 @@ arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
mapbits = arena_mapbits_get(chunk, pageind);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) ==
(CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED));
return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT);
return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -684,11 +702,11 @@ arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
return (mapbits >> CHUNK_MAP_RUNIND_SHIFT);
}
JEMALLOC_ALWAYS_INLINE index_t
JEMALLOC_ALWAYS_INLINE szind_t
arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
index_t binind;
szind_t binind;
mapbits = arena_mapbits_get(chunk, pageind);
binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
@@ -754,6 +772,23 @@ arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits)
*mapbitsp = mapbits;
}
JEMALLOC_ALWAYS_INLINE size_t
arena_mapbits_size_encode(size_t size)
{
size_t mapbits;
#if CHUNK_MAP_SIZE_SHIFT > 0
mapbits = size << CHUNK_MAP_SIZE_SHIFT;
#elif CHUNK_MAP_SIZE_SHIFT == 0
mapbits = size;
#else
mapbits = size >> -CHUNK_MAP_SIZE_SHIFT;
#endif
assert((mapbits & ~CHUNK_MAP_SIZE_MASK) == 0);
return (mapbits);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
@@ -761,11 +796,10 @@ arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert(((size << CHUNK_MAP_SIZE_SHIFT) & ~CHUNK_MAP_SIZE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
arena_mapbitsp_write(mapbitsp, (size << CHUNK_MAP_SIZE_SHIFT) |
arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
CHUNK_MAP_BININD_INVALID | flags);
}
@@ -777,10 +811,9 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert((size & PAGE_MASK) == 0);
assert(((size << CHUNK_MAP_SIZE_SHIFT) & ~CHUNK_MAP_SIZE_MASK) == 0);
assert((mapbits & (CHUNK_MAP_LARGE|CHUNK_MAP_ALLOCATED)) == 0);
arena_mapbitsp_write(mapbitsp, (size << CHUNK_MAP_SIZE_SHIFT) | (mapbits
& ~CHUNK_MAP_SIZE_MASK));
arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
(mapbits & ~CHUNK_MAP_SIZE_MASK));
}
JEMALLOC_ALWAYS_INLINE void
@@ -799,18 +832,17 @@ arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert(((size << CHUNK_MAP_SIZE_SHIFT) & ~CHUNK_MAP_SIZE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
assert((flags & CHUNK_MAP_DECOMMITTED) == 0 || (flags &
(CHUNK_MAP_DIRTY|CHUNK_MAP_UNZEROED)) == 0);
arena_mapbitsp_write(mapbitsp, (size << CHUNK_MAP_SIZE_SHIFT) |
arena_mapbitsp_write(mapbitsp, arena_mapbits_size_encode(size) |
CHUNK_MAP_BININD_INVALID | flags | CHUNK_MAP_LARGE |
CHUNK_MAP_ALLOCATED);
}
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
index_t binind)
szind_t binind)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
@@ -824,7 +856,7 @@ arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
index_t binind, size_t flags)
szind_t binind, size_t flags)
{
size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
@@ -901,10 +933,10 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
}
}
JEMALLOC_ALWAYS_INLINE index_t
JEMALLOC_ALWAYS_INLINE szind_t
arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
{
index_t binind;
szind_t binind;
binind = (mapbits & CHUNK_MAP_BININD_MASK) >> CHUNK_MAP_BININD_SHIFT;
@@ -916,7 +948,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
size_t rpages_ind;
arena_run_t *run;
arena_bin_t *bin;
index_t run_binind, actual_binind;
szind_t run_binind, actual_binind;
arena_bin_info_t *bin_info;
arena_chunk_map_misc_t *miscelm;
void *rpages;
@@ -950,10 +982,10 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
# endif /* JEMALLOC_ARENA_INLINE_A */
# ifdef JEMALLOC_ARENA_INLINE_B
JEMALLOC_INLINE index_t
JEMALLOC_INLINE szind_t
arena_bin_index(arena_t *arena, arena_bin_t *bin)
{
index_t binind = bin - arena->bins;
szind_t binind = bin - arena->bins;
assert(binind < NBINS);
return (binind);
}
@@ -1060,7 +1092,7 @@ arena_prof_tctx_get(const void *ptr)
}
JEMALLOC_INLINE void
arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
arena_chunk_t *chunk;
@@ -1070,12 +1102,25 @@ arena_prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) {
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
if (unlikely(arena_mapbits_large_get(chunk, pageind) != 0)) {
arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
pageind);
if (unlikely(usize > SMALL_MAXCLASS || tctx >
(prof_tctx_t *)(uintptr_t)1U)) {
arena_chunk_map_misc_t *elm;
assert(arena_mapbits_large_get(chunk, pageind) != 0);
elm = arena_miscelm_get(chunk, pageind);
atomic_write_p(&elm->prof_tctx_pun, tctx);
} else {
/*
* tctx must always be initialized for large runs.
* Assert that the surrounding conditional logic is
* equivalent to checking whether ptr refers to a large
* run.
*/
assert(arena_mapbits_large_get(chunk, pageind) == 0);
}
} else
huge_prof_tctx_set(ptr, tctx);
@@ -1131,7 +1176,7 @@ arena_salloc(const void *ptr, bool demote)
size_t ret;
arena_chunk_t *chunk;
size_t pageind;
index_t binind;
szind_t binind;
assert(ptr != NULL);
@@ -1190,7 +1235,7 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
if (likely((mapbits & CHUNK_MAP_LARGE) == 0)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
index_t binind = arena_ptr_small_binind_get(ptr,
szind_t binind = arena_ptr_small_binind_get(ptr,
mapbits);
tcache_dalloc_small(tsd, tcache, ptr, binind);
} else {
@@ -1242,7 +1287,7 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
if (likely(size <= SMALL_MAXCLASS)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
index_t binind = size2index(size);
szind_t binind = size2index(size);
tcache_dalloc_small(tsd, tcache, ptr, binind);
} else {
size_t pageind = ((uintptr_t)ptr -

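The new encode/decode helpers exist because CHUNK_MAP_SIZE_SHIFT may be zero or negative once pages are larger than 8 KiB, and shifting by a negative amount is undefined behavior in C; the preprocessor picks the correct shift direction at compile time instead. A standalone round-trip of the same pattern (the shift and mask values here are illustrative, not jemalloc's):

    #include <cassert>
    #include <cstddef>

    #define MAP_SIZE_SHIFT (-1)             /* e.g. with 16 KiB pages */
    #define MAP_SIZE_MASK  ((size_t)~0xFFF) /* illustrative mask      */

    static size_t size_encode(size_t size)
    {
    #if MAP_SIZE_SHIFT > 0
        return size << MAP_SIZE_SHIFT;
    #elif MAP_SIZE_SHIFT == 0
        return size;
    #else
        return size >> -MAP_SIZE_SHIFT;     /* negate, shift the other way */
    #endif
    }

    static size_t size_decode(size_t mapbits)
    {
    #if MAP_SIZE_SHIFT > 0
        return (mapbits & MAP_SIZE_MASK) >> MAP_SIZE_SHIFT;
    #elif MAP_SIZE_SHIFT == 0
        return mapbits & MAP_SIZE_MASK;
    #else
        return (mapbits & MAP_SIZE_MASK) << -MAP_SIZE_SHIFT;
    #endif
    }

    int main()
    {
        size_t size = 0x4000;               /* one 16 KiB page */
        assert(size_decode(size_encode(size)) == size);
        return 0;
    }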
memory/jemalloc/src/include/jemalloc/internal/jemalloc_internal.h.in | 28

@@ -184,7 +184,7 @@ static const bool config_cache_oblivious =
#include "jemalloc/internal/jemalloc_internal_macros.h"
/* Size class index type. */
typedef unsigned index_t;
typedef unsigned szind_t;
/*
* Flags bits:
@@ -511,12 +511,12 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/huge.h"
#ifndef JEMALLOC_ENABLE_INLINE
index_t size2index_compute(size_t size);
index_t size2index_lookup(size_t size);
index_t size2index(size_t size);
size_t index2size_compute(index_t index);
size_t index2size_lookup(index_t index);
size_t index2size(index_t index);
szind_t size2index_compute(size_t size);
szind_t size2index_lookup(size_t size);
szind_t size2index(size_t size);
size_t index2size_compute(szind_t index);
size_t index2size_lookup(szind_t index);
size_t index2size(szind_t index);
size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
@@ -527,7 +527,7 @@ arena_t *arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE index_t
JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{
@@ -558,7 +558,7 @@ size2index_compute(size_t size)
}
}
JEMALLOC_ALWAYS_INLINE index_t
JEMALLOC_ALWAYS_INLINE szind_t
size2index_lookup(size_t size)
{
@@ -571,7 +571,7 @@ size2index_lookup(size_t size)
}
}
JEMALLOC_ALWAYS_INLINE index_t
JEMALLOC_ALWAYS_INLINE szind_t
size2index(size_t size)
{
@@ -582,7 +582,7 @@ size2index(size_t size)
}
JEMALLOC_INLINE size_t
index2size_compute(index_t index)
index2size_compute(szind_t index)
{
#if (NTBINS > 0)
@@ -609,7 +609,7 @@ index2size_compute(index_t index)
}
JEMALLOC_ALWAYS_INLINE size_t
index2size_lookup(index_t index)
index2size_lookup(szind_t index)
{
size_t ret = (size_t)index2size_tab[index];
assert(ret == index2size_compute(index));
@@ -617,7 +617,7 @@ index2size_lookup(index_t index)
}
JEMALLOC_ALWAYS_INLINE size_t
index2size(index_t index)
index2size(szind_t index)
{
assert(index < NSIZES);
@@ -976,7 +976,7 @@ u2rz(size_t usize)
size_t ret;
if (usize <= SMALL_MAXCLASS) {
index_t binind = size2index(usize);
szind_t binind = size2index(usize);
ret = arena_bin_info[binind].redzone_size;
} else
ret = 0;

memory/jemalloc/src/include/jemalloc/internal/private_symbols.txt | 2

@@ -50,6 +50,8 @@ arena_mapbits_large_size_get
arena_mapbitsp_get
arena_mapbitsp_read
arena_mapbitsp_write
arena_mapbits_size_decode
arena_mapbits_size_encode
arena_mapbits_small_runind_get
arena_mapbits_small_set
arena_mapbits_unallocated_set

memory/jemalloc/src/include/jemalloc/internal/prof.h | 10

@@ -332,7 +332,7 @@ bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool update);
prof_tctx_t *prof_tctx_get(const void *ptr);
void prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
void prof_malloc_sample_object(const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
@@ -402,13 +402,13 @@ prof_tctx_get(const void *ptr)
}
JEMALLOC_ALWAYS_INLINE void
prof_tctx_set(const void *ptr, prof_tctx_t *tctx)
prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
arena_prof_tctx_set(ptr, tctx);
arena_prof_tctx_set(ptr, usize, tctx);
}
JEMALLOC_ALWAYS_INLINE bool
@@ -473,7 +473,7 @@ prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_malloc_sample_object(ptr, usize, tctx);
else
prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}
JEMALLOC_ALWAYS_INLINE void
@@ -503,7 +503,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_malloc_sample_object(ptr, usize, tctx);
else
prof_tctx_set(ptr, (prof_tctx_t *)(uintptr_t)1U);
prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}
JEMALLOC_ALWAYS_INLINE void

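Throughout these prof paths, (prof_tctx_t *)(uintptr_t)1U is a sentinel meaning "allocation not sampled"; real contexts are heap pointers that compare numerically greater. Passing usize down lets arena_prof_tctx_set tell small from large allocations without re-reading the chunk map. The sentinel convention, in a simplified form:

    #include <stdint.h>

    typedef struct prof_tctx_s prof_tctx_t; /* opaque, as in jemalloc */

    /* Stored in place of a real context when sampling did not fire. */
    static prof_tctx_t* const TCTX_UNSAMPLED = (prof_tctx_t*)(uintptr_t)1U;

    static bool is_sampled(const prof_tctx_t* tctx)
    {
        /* Real contexts are heap pointers, numerically above the sentinel. */
        return (uintptr_t)tctx > (uintptr_t)1U;
    }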
memory/jemalloc/src/include/jemalloc/internal/tcache.h | 18

@@ -77,7 +77,7 @@ struct tcache_s {
ql_elm(tcache_t) link; /* Used for aggregating stats. */
uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */
unsigned ev_cnt; /* Event count since incremental GC. */
index_t next_gc_bin; /* Next bin to GC. */
szind_t next_gc_bin; /* Next bin to GC. */
tcache_bin_t tbins[1]; /* Dynamically sized. */
/*
* The pointer stacks associated with tbins follow as a contiguous
@@ -126,10 +126,10 @@ extern tcaches_t *tcaches;
size_t tcache_salloc(const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, index_t binind);
tcache_bin_t *tbin, szind_t binind);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
index_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache);
void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
@@ -161,7 +161,7 @@ void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, bool zero);
void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr,
index_t binind);
szind_t binind);
void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr,
size_t size);
tcache_t *tcaches_get(tsd_t *tsd, unsigned ind);
@@ -267,7 +267,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
bool zero)
{
void *ret;
index_t binind;
szind_t binind;
size_t usize;
tcache_bin_t *tbin;
@@ -312,7 +312,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
bool zero)
{
void *ret;
index_t binind;
szind_t binind;
size_t usize;
tcache_bin_t *tbin;
@@ -360,7 +360,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind)
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind)
{
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
@@ -386,7 +386,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, index_t binind)
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size)
{
index_t binind;
szind_t binind;
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;

memory/jemalloc/src/include/jemalloc/jemalloc_protos.h.in | 2

@@ -56,7 +56,7 @@ JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_usable_size(
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *@je_@memalign(size_t alignment, size_t size)
JEMALLOC_ATTR(malloc);
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC

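JEMALLOC_CXX_THROW is appended so that je_memalign's prototype agrees with glibc's memalign, which <malloc.h> declares with __THROW; in C++, redeclaring a function with a different exception specification fails to compile when both headers are included. A sketch of a matching declaration (JE_CXX_THROW is an illustrative stand-in, using the pre-C++17 throw() spelling of this era):

    #include <cstddef>

    // glibc's __THROW expands to throw() (later noexcept) under C++, so a
    // redeclaration of memalign must carry the same specification.
    #if defined(__GLIBC__) && defined(__cplusplus)
    # define JE_CXX_THROW throw()
    #else
    # define JE_CXX_THROW
    #endif

    extern "C" void* memalign(std::size_t alignment, std::size_t size) JE_CXX_THROW;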
memory/jemalloc/src/src/arena.c | 66

@@ -39,7 +39,7 @@ JEMALLOC_INLINE_C arena_chunk_map_misc_t *
arena_miscelm_key_create(size_t size)
{
return ((arena_chunk_map_misc_t *)((size << CHUNK_MAP_SIZE_SHIFT) |
return ((arena_chunk_map_misc_t *)(arena_mapbits_size_encode(size) |
CHUNK_MAP_KEY));
}
@@ -58,8 +58,7 @@ arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm)
assert(arena_miscelm_is_key(miscelm));
return (((uintptr_t)miscelm & CHUNK_MAP_SIZE_MASK) >>
CHUNK_MAP_SIZE_SHIFT);
return (arena_mapbits_size_decode((uintptr_t)miscelm));
}
JEMALLOC_INLINE_C size_t
@@ -73,7 +72,7 @@ arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
pageind = arena_miscelm_to_pageind(miscelm);
mapbits = arena_mapbits_get(chunk, pageind);
return ((mapbits & CHUNK_MAP_SIZE_MASK) >> CHUNK_MAP_SIZE_SHIFT);
return (arena_mapbits_size_decode(mapbits));
}
JEMALLOC_INLINE_C int
@@ -315,7 +314,7 @@ arena_run_reg_dalloc(arena_run_t *run, void *ptr)
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t mapbits = arena_mapbits_get(chunk, pageind);
index_t binind = arena_ptr_small_binind_get(ptr, mapbits);
szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
arena_bin_info_t *bin_info = &arena_bin_info[binind];
unsigned regind = arena_run_regind(run, bin_info, ptr);
@@ -508,7 +507,7 @@ arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
index_t binind)
szind_t binind)
{
arena_chunk_t *chunk;
arena_chunk_map_misc_t *miscelm;
@@ -780,7 +779,7 @@ arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
index_t index = size2index(usize) - nlclasses - NBINS;
szind_t index = size2index(usize) - nlclasses - NBINS;
cassert(config_stats);
@@ -793,7 +792,7 @@ arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
index_t index = size2index(usize) - nlclasses - NBINS;
szind_t index = size2index(usize) - nlclasses - NBINS;
cassert(config_stats);
@@ -806,7 +805,7 @@ arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
index_t index = size2index(usize) - nlclasses - NBINS;
szind_t index = size2index(usize) - nlclasses - NBINS;
cassert(config_stats);
@@ -819,7 +818,7 @@ arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
index_t index = size2index(usize) - nlclasses - NBINS;
szind_t index = size2index(usize) - nlclasses - NBINS;
cassert(config_stats);
@@ -1125,7 +1124,7 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
}
static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind)
arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
arena_run_t *run = arena_run_first_best_fit(arena, size);
if (run != NULL) {
@@ -1136,7 +1135,7 @@ arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind)
}
static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, index_t binind)
arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
{
arena_chunk_t *chunk;
arena_run_t *run;
@@ -1749,15 +1748,6 @@
arena_maybe_purge(arena);
}
static void
arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk,
arena_run_t *run)
{
bool committed = arena_run_decommit(arena