
Merge remote-tracking branch 'origin/master' into custom

custom
roytam1 committed 3 months ago
commit 02a8a3cf55
  1. layout/style/nsRuleNode.cpp (4 lines changed)
  2. mfbt/SegmentedVector.h (36 lines changed)
  3. xpcom/base/nsCycleCollector.cpp (290 lines changed)

layout/style/nsRuleNode.cpp (4 lines changed)

@@ -4640,8 +4640,8 @@ struct LengthPercentNumberCalcOps : public css::NumbersAlreadyNormalizedOps
   MergeMultiplicativeR(nsCSSUnit aCalcFunction,
                        result_type aValue1, float aValue2)
   {
-    MOZ_ASSERT(aCalcFunction == eCSSUnit_Calc_TimesR ||
-               aCalcFunction == eCSSUnit_Divided,
+    MOZ_ASSERT(aCalcFunction == eCSSUnit_Calc_Times_R ||
+               aCalcFunction == eCSSUnit_Calc_Divided,
                "unexpected unit");
     result_type result;
     if (aCalcFunction == eCSSUnit_Calc_Divided) {

mfbt/SegmentedVector.h (36 lines changed)

@@ -120,9 +120,9 @@ class SegmentedVector : private AllocPolicy
? (IdealSegmentSize - kSingleElementSegmentSize) / sizeof(T) + 1
: 1;
public:
typedef SegmentImpl<kSegmentCapacity> Segment;
public:
// The |aIdealSegmentSize| is only for sanity checking. If it's specified, we
// check that the actual segment size is as close as possible to it. This
// serves as a sanity check for SegmentedVectorCapacity's capacity
@@ -259,7 +259,6 @@ public:
// than we want to pop.
MOZ_ASSERT(last);
MOZ_ASSERT(last == mSegments.getLast());
MOZ_ASSERT(aNumElements != 0);
MOZ_ASSERT(aNumElements < last->Length());
for (uint32_t i = 0; i < aNumElements; ++i) {
last->PopLast();
@@ -274,6 +273,10 @@ public:
// f(elem);
// }
//
// Note, adding new entries to the SegmentedVector while using iterators
// is supported, but removing is not!
// If an iterator has entered Done() state, adding more entries to the
// vector doesn't affect it.
class IterImpl
{
friend class SegmentedVector;
@@ -281,10 +284,14 @@ public:
Segment* mSegment;
size_t mIndex;
explicit IterImpl(SegmentedVector* aVector)
: mSegment(aVector->mSegments.getFirst())
, mIndex(0)
{}
explicit IterImpl(SegmentedVector* aVector, bool aFromFirst)
: mSegment(aFromFirst ? aVector->mSegments.getFirst() :
aVector->mSegments.getLast())
, mIndex(aFromFirst ? 0 :
(mSegment ? mSegment->Length() - 1 : 0))
{
MOZ_ASSERT_IF(mSegment, mSegment->Length() > 0);
}
public:
bool Done() const { return !mSegment; }
@@ -310,10 +317,23 @@ public:
mIndex = 0;
}
}
};
IterImpl Iter() { return IterImpl(this); }
void Prev()
{
MOZ_ASSERT(!Done());
if (mIndex == 0) {
mSegment = mSegment->getPrevious();
if (mSegment) {
mIndex = mSegment->Length() - 1;
}
} else {
--mIndex;
}
}
};
IterImpl Iter() { return IterImpl(this, true); }
IterImpl IterFromLast() { return IterImpl(this, false); }
// Measure the memory consumption of the vector excluding |this|. Note that
// it only measures the vector itself. If the vector elements contain
// pointers to other memory blocks, those blocks must be measured separately
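
For readers unfamiliar with the API, here is a minimal usage sketch (my own illustration, not part of the patch) of forward iteration via Iter() and of the reverse iteration this change adds via IterFromLast()/Prev(). It assumes the patched SegmentedVector with its default segment size and allocation policy:

    #include "mozilla/SegmentedVector.h"

    int SumBothWays()
    {
      mozilla::SegmentedVector<int> v;  // default IdealSegmentSize, MallocAllocPolicy
      for (int i = 1; i <= 5; ++i) {
        if (!v.Append(i)) {             // Append is fallible with the default policy
          return -1;
        }
      }

      int sum = 0;

      // Forward iteration, unchanged by this patch.
      for (auto iter = v.Iter(); !iter.Done(); iter.Next()) {
        sum += iter.Get();
      }

      // Reverse iteration via the new IterFromLast()/Prev() pair. Done() becomes
      // true once Prev() steps off the first element of the first segment.
      for (auto iter = v.IterFromLast(); !iter.Done(); iter.Prev()) {
        sum += iter.Get();
      }

      return sum;  // 2 * (1 + 2 + 3 + 4 + 5) = 30
    }

As the new comment above notes, appending to the vector while iterating is allowed (an iterator that is already Done() is unaffected), but removing entries during iteration is not.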

xpcom/base/nsCycleCollector.cpp (290 lines changed)

@@ -159,6 +159,7 @@
/* This must occur *after* base/process_util.h to avoid typedefs conflicts. */
#include "mozilla/LinkedList.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Move.h"
#include "mozilla/SegmentedVector.h"
#include "nsCycleCollectionParticipant.h"
@@ -961,14 +962,43 @@ CanonicalizeParticipant(void** aParti, nsCycleCollectionParticipant** aCp)
struct nsPurpleBufferEntry
{
union
nsPurpleBufferEntry(void* aObject, nsCycleCollectingAutoRefCnt* aRefCnt,
nsCycleCollectionParticipant* aParticipant)
: mObject(aObject)
, mRefCnt(aRefCnt)
, mParticipant(aParticipant)
{}
nsPurpleBufferEntry(nsPurpleBufferEntry&& aOther)
: mObject(nullptr)
, mRefCnt(nullptr)
, mParticipant(nullptr)
{
void* mObject; // when low bit unset
nsPurpleBufferEntry* mNextInFreeList; // when low bit set
};
Swap(aOther);
}
nsCycleCollectingAutoRefCnt* mRefCnt;
void Swap(nsPurpleBufferEntry& aOther) {
std::swap(mObject, aOther.mObject);
std::swap(mRefCnt, aOther.mRefCnt);
std::swap(mParticipant, aOther.mParticipant);
}
void Clear() {
mRefCnt->RemoveFromPurpleBuffer();
mRefCnt = nullptr;
mObject = nullptr;
mParticipant = nullptr;
}
~nsPurpleBufferEntry() {
if (mRefCnt) {
mRefCnt->RemoveFromPurpleBuffer();
}
}
void* mObject;
nsCycleCollectingAutoRefCnt* mRefCnt;
nsCycleCollectionParticipant* mParticipant; // nullptr for nsISupports
};
@@ -977,118 +1007,113 @@ class nsCycleCollector;
struct nsPurpleBuffer
{
private:
struct PurpleBlock
{
PurpleBlock* mNext;
// Try to match the size of a jemalloc bucket, to minimize slop bytes.
// - On 32-bit platforms sizeof(nsPurpleBufferEntry) is 12, so mEntries
// is 16,380 bytes, which leaves 4 bytes for mNext.
// - On 64-bit platforms sizeof(nsPurpleBufferEntry) is 24, so mEntries
// is 32,544 bytes, which leaves 8 bytes for mNext.
nsPurpleBufferEntry mEntries[1365];
PurpleBlock() : mNext(nullptr)
{
// Ensure PurpleBlock is the right size (see above).
static_assert(
sizeof(PurpleBlock) == 16384 || // 32-bit
sizeof(PurpleBlock) == 32768, // 64-bit
"ill-sized nsPurpleBuffer::PurpleBlock"
);
InitNextPointers();
}
// Put all the entries in the block on the free list.
void InitNextPointers()
{
for (uint32_t i = 1; i < ArrayLength(mEntries); ++i) {
mEntries[i - 1].mNextInFreeList =
(nsPurpleBufferEntry*)(uintptr_t(mEntries + i) | 1);
}
mEntries[ArrayLength(mEntries) - 1].mNextInFreeList =
(nsPurpleBufferEntry*)1;
}
template<class PurpleVisitor>
void VisitEntries(nsPurpleBuffer& aBuffer, PurpleVisitor& aVisitor)
{
nsPurpleBufferEntry* eEnd = ArrayEnd(mEntries);
for (nsPurpleBufferEntry* e = mEntries; e != eEnd; ++e) {
MOZ_ASSERT(e->mObject, "There should be no null mObject when we iterate over the purple buffer");
if (!(uintptr_t(e->mObject) & uintptr_t(1)) && e->mObject) {
aVisitor.Visit(aBuffer, e);
}
}
}
};
// This class wraps a linked list of the elements in the purple
// buffer.
uint32_t mCount;
PurpleBlock mFirstBlock;
nsPurpleBufferEntry* mFreeList;
// Try to match the size of a jemalloc bucket, to minimize slop bytes.
// - On 32-bit platforms sizeof(nsPurpleBufferEntry) is 12, so mEntries'
// Segment is 16,372 bytes.
// - On 64-bit platforms sizeof(nsPurpleBufferEntry) is 24, so mEntries'
// Segment is 32,760 bytes.
static const uint32_t kEntriesPerSegment = 1365;
static const size_t kSegmentSize =
sizeof(nsPurpleBufferEntry) * kEntriesPerSegment;
typedef
SegmentedVector<nsPurpleBufferEntry, kSegmentSize, InfallibleAllocPolicy>
PurpleBufferVector;
PurpleBufferVector mEntries;
public:
nsPurpleBuffer()
: mCount(0)
{
InitBlocks();
static_assert(
sizeof(PurpleBufferVector::Segment) == 16372 || // 32-bit
sizeof(PurpleBufferVector::Segment) == 32760 || // 64-bit
sizeof(PurpleBufferVector::Segment) == 32744, // 64-bit Windows
"ill-sized nsPurpleBuffer::mEntries");
}
~nsPurpleBuffer()
{
FreeBlocks();
}
{}
// This method compacts mEntries.
template<class PurpleVisitor>
void VisitEntries(PurpleVisitor& aVisitor)
{
for (PurpleBlock* b = &mFirstBlock; b; b = b->mNext) {
b->VisitEntries(*this, aVisitor);
void VisitEntries(PurpleVisitor& aVisitor) {
if (mEntries.IsEmpty()) {
return;
}
}
void InitBlocks()
{
mCount = 0;
mFreeList = mFirstBlock.mEntries;
}
uint32_t oldLength = mEntries.Length();
uint32_t newLength = 0;
auto revIter = mEntries.IterFromLast();
auto iter = mEntries.Iter();
// After iteration this points to the first empty entry.
auto firstEmptyIter = mEntries.Iter();
auto iterFromLastEntry = mEntries.IterFromLast();
for (; !iter.Done(); iter.Next()) {
nsPurpleBufferEntry& e = iter.Get();
if (e.mObject) {
aVisitor.Visit(*this, &e);
}
void FreeBlocks()
{
if (mCount > 0) {
UnmarkRemainingPurple(&mFirstBlock);
}
PurpleBlock* b = mFirstBlock.mNext;
while (b) {
if (mCount > 0) {
UnmarkRemainingPurple(b);
// Visit call above may have cleared the entry, or the entry was empty
// already.
if (!e.mObject) {
// Try to find a non-empty entry from the end of the vector.
for (; !revIter.Done(); revIter.Prev()) {
nsPurpleBufferEntry& otherEntry = revIter.Get();
if (&e == &otherEntry) {
break;
}
if (otherEntry.mObject) {
aVisitor.Visit(*this, &otherEntry);
// Visit may have cleared otherEntry.
if (otherEntry.mObject) {
e.Swap(otherEntry);
revIter.Prev(); // We've swapped this now empty entry.
break;
}
}
}
}
// Entry is non-empty even after the Visit call, ensure it is kept
// in mEntries.
if (e.mObject) {
firstEmptyIter.Next();
++newLength;
}
if (&e == &revIter.Get()) {
break;
}
PurpleBlock* next = b->mNext;
delete b;
b = next;
}
mFirstBlock.mNext = nullptr;
}
struct UnmarkRemainingPurpleVisitor
{
void
Visit(nsPurpleBuffer& aBuffer, nsPurpleBufferEntry* aEntry)
{
if (aEntry->mRefCnt) {
aEntry->mRefCnt->RemoveFromPurpleBuffer();
aEntry->mRefCnt = nullptr;
// There were some empty entries.
if (oldLength != newLength) {
// While visiting entries, some new ones were possibly added. This can
// happen during CanSkip. Move all such new entries to be after other
// entries. Note, we don't call Visit on newly added entries!
if (&iterFromLastEntry.Get() != &mEntries.GetLast()) {
iterFromLastEntry.Next(); // Now pointing to the first added entry.
auto& iterForNewEntries = iterFromLastEntry;
while (!iterForNewEntries.Done()) {
MOZ_ASSERT(!firstEmptyIter.Done());
MOZ_ASSERT(!firstEmptyIter.Get().mObject);
firstEmptyIter.Get().Swap(iterForNewEntries.Get());
firstEmptyIter.Next();
iterForNewEntries.Next();
++newLength; // We keep all the new entries.
}
}
aEntry->mObject = nullptr;
--aBuffer.mCount;
mEntries.PopLastN(oldLength - newLength);
}
};
}
void UnmarkRemainingPurple(PurpleBlock* aBlock)
{
UnmarkRemainingPurpleVisitor visitor;
aBlock->VisitEntries(*this, visitor);
void FreeBlocks() {
mCount = 0;
mEntries.Clear();
}
void SelectPointers(CCGraphBuilder& aBuilder);
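
To make the compaction in the new VisitEntries() easier to follow, here is a simplified, self-contained sketch of the same compact-while-visiting idea (my own illustration, not code from the patch). It stands in std::vector<int> for the SegmentedVector, treats 0 as an empty entry, and omits the extra handling of entries appended during visiting (the CanSkip case):

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Stand-in for aVisitor.Visit(): it may clear the entry it is shown.
    static void Visit(int& aEntry)
    {
      if (aEntry % 2 != 0) {
        aEntry = 0;  // e.g. the visitor decides to drop odd values
      }
    }

    static void VisitAndCompact(std::vector<int>& aEntries)
    {
      if (aEntries.empty()) {
        return;
      }
      size_t kept = 0;
      size_t back = aEntries.size() - 1;
      for (size_t front = 0; front <= back; ++front) {
        if (aEntries[front]) {
          Visit(aEntries[front]);
        }
        if (!aEntries[front]) {
          // The slot is (or just became) empty: refill it with a live entry
          // taken from the tail, visiting that entry along the way.
          while (back > front) {
            if (aEntries[back]) {
              Visit(aEntries[back]);
              if (aEntries[back]) {
                std::swap(aEntries[front], aEntries[back]);
                --back;
                break;
              }
            }
            --back;
          }
        }
        if (aEntries[front]) {
          ++kept;  // this entry survived its visit and stays in the buffer
        }
      }
      aEntries.resize(kept);  // drop the empty tail, as PopLastN() does above
    }

The real method has one extra wrinkle, handled after the main loop: Visit() (e.g. CanSkip) may append new entries to mEntries while we iterate. Those are never visited in this pass; they are swapped forward into the empty slots and always kept before the tail is popped.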
@@ -1105,73 +1130,27 @@ public:
bool aAsyncSnowWhiteFreeing,
CC_ForgetSkippableCallback aCb);
MOZ_ALWAYS_INLINE nsPurpleBufferEntry* NewEntry()
{
if (MOZ_UNLIKELY(!mFreeList)) {
PurpleBlock* b = new PurpleBlock;
mFreeList = b->mEntries;
// Add the new block as the second block in the list.
b->mNext = mFirstBlock.mNext;
mFirstBlock.mNext = b;
}
nsPurpleBufferEntry* e = mFreeList;
mFreeList = (nsPurpleBufferEntry*)
(uintptr_t(mFreeList->mNextInFreeList) & ~uintptr_t(1));
return e;
}
MOZ_ALWAYS_INLINE void Put(void* aObject, nsCycleCollectionParticipant* aCp,
nsCycleCollectingAutoRefCnt* aRefCnt)
{
nsPurpleBufferEntry* e = NewEntry();
nsCycleCollectingAutoRefCnt* aRefCnt) {
nsPurpleBufferEntry entry(aObject, aRefCnt, aCp);
Unused << mEntries.Append(Move(entry));
MOZ_ASSERT(!entry.mRefCnt, "CC: PurpleBufferEntry:Put() Move failed!");
++mCount;
e->mObject = aObject;
e->mRefCnt = aRefCnt;
e->mParticipant = aCp;
}
void Remove(nsPurpleBufferEntry* aEntry)
{
void Remove(nsPurpleBufferEntry* aEntry) {
MOZ_ASSERT(mCount != 0, "must have entries");
if (aEntry->mRefCnt) {
aEntry->mRefCnt->RemoveFromPurpleBuffer();
aEntry->mRefCnt = nullptr;
}
aEntry->mNextInFreeList =
(nsPurpleBufferEntry*)(uintptr_t(mFreeList) | uintptr_t(1));
mFreeList = aEntry;
--mCount;
aEntry->Clear();
}
uint32_t Count() const
{
uint32_t Count() const {
return mCount;
}
size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const
{
size_t n = 0;
// Don't measure mFirstBlock because it's within |this|.
const PurpleBlock* block = mFirstBlock.mNext;
while (block) {
n += aMallocSizeOf(block);
block = block->mNext;
}
// mFreeList is deliberately not measured because it points into
// the purple buffer, which is within mFirstBlock and thus within |this|.
//
// We also don't measure the things pointed to by mEntries[] because
// those pointers are non-owning.
return n;
size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
return mEntries.SizeOfExcludingThis(aMallocSizeOf);
}
};
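
The new Put() depends on nsPurpleBufferEntry's Swap()-based move constructor: moving the local entry into the SegmentedVector leaves the local object with null members, so its destructor does not call RemoveFromPurpleBuffer() a second time. Here is a minimal sketch of that pattern (the FakeRefCnt and Entry types are hypothetical stand-ins of my own, and std::move is used in place of mozilla::Move):

    #include <utility>
    #include "mozilla/Assertions.h"
    #include "mozilla/SegmentedVector.h"
    #include "mozilla/Unused.h"

    struct FakeRefCnt
    {
      bool mInPurpleBuffer = true;
      void RemoveFromPurpleBuffer() { mInPurpleBuffer = false; }
    };

    struct Entry
    {
      Entry(void* aObject, FakeRefCnt* aRefCnt)
        : mObject(aObject), mRefCnt(aRefCnt) {}

      // Move constructor in the style of nsPurpleBufferEntry: start out empty,
      // then swap, leaving |aOther| with null members.
      Entry(Entry&& aOther) : mObject(nullptr), mRefCnt(nullptr)
      {
        std::swap(mObject, aOther.mObject);
        std::swap(mRefCnt, aOther.mRefCnt);
      }

      // Only an entry that still holds a refcnt pointer deregisters it.
      ~Entry()
      {
        if (mRefCnt) {
          mRefCnt->RemoveFromPurpleBuffer();
        }
      }

      void* mObject;
      FakeRefCnt* mRefCnt;
    };

    void PutLike(mozilla::SegmentedVector<Entry>& aEntries,
                 void* aObject, FakeRefCnt* aRefCnt)
    {
      Entry entry(aObject, aRefCnt);
      mozilla::Unused << aEntries.Append(std::move(entry));
      // The vector's copy now owns the registration; the moved-from |entry|
      // is empty, so only one RemoveFromPurpleBuffer() ever runs for it.
      MOZ_ASSERT(!entry.mRefCnt);
    }

Remove() correspondingly no longer unlinks anything: it just calls Clear() on the entry, leaving a null-mObject hole that the next VisitEntries() pass compacts away.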
@@ -1203,16 +1182,13 @@ private:
};
void
nsPurpleBuffer::SelectPointers(CCGraphBuilder& aBuilder)
{
nsPurpleBuffer::SelectPointers(CCGraphBuilder& aBuilder) {
SelectPointersVisitor visitor(aBuilder);
VisitEntries(visitor);
NS_ASSERTION(mCount == 0, "AddPurpleRoot failed");
if (mCount == 0) {
FreeBlocks();
InitBlocks();
mFirstBlock.InitNextPointers();
}
}
