Skip to content

Commit 2529f0a

Browse files
igchor authored and byrnedj committed
Initial multi-tier support implementation (rebased with NUMA and cs part 2)
fix for compressed ptr (upstream) -> compress from false to true
1 parent c3a4db9 commit 2529f0a

File tree

8 files changed

+412
-198
lines changed

8 files changed

+412
-198
lines changed

cachelib/allocator/CacheAllocator-inl.h

Lines changed: 303 additions & 155 deletions
Large diffs are not rendered by default.

cachelib/allocator/CacheAllocator.h

Lines changed: 79 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -806,7 +806,7 @@ class CacheAllocator : public CacheBase {
806806
// @param config new config for the pool
807807
//
808808
// @throw std::invalid_argument if the poolId is invalid
809-
void overridePoolConfig(PoolId pid, const MMConfig& config);
809+
void overridePoolConfig(TierId tid, PoolId pid, const MMConfig& config);
810810

811811
// update an existing pool's rebalance strategy
812812
//
@@ -847,8 +847,9 @@ class CacheAllocator : public CacheBase {
847847
// @return true if the operation succeeded. false if the size of the pool is
848848
// smaller than _bytes_
849849
// @throw std::invalid_argument if the poolId is invalid.
850+
// TODO: should call shrinkPool for specific tier?
850851
bool shrinkPool(PoolId pid, size_t bytes) {
851-
return allocator_->shrinkPool(pid, bytes);
852+
return allocator_[currentTier()]->shrinkPool(pid, bytes);
852853
}
853854

854855
// grow an existing pool by _bytes_. This will fail if there is no
@@ -857,8 +858,9 @@ class CacheAllocator : public CacheBase {
857858
// @return true if the pool was grown. false if the necessary number of
858859
// bytes were not available.
859860
// @throw std::invalid_argument if the poolId is invalid.
861+
// TODO: should call growPool for specific tier?
860862
bool growPool(PoolId pid, size_t bytes) {
861-
return allocator_->growPool(pid, bytes);
863+
return allocator_[currentTier()]->growPool(pid, bytes);
862864
}
863865

864866
// move bytes from one pool to another. The source pool should be at least
@@ -871,7 +873,7 @@ class CacheAllocator : public CacheBase {
871873
// correct size to do the transfer.
872874
// @throw std::invalid_argument if src or dest is invalid pool
873875
bool resizePools(PoolId src, PoolId dest, size_t bytes) override {
874-
return allocator_->resizePools(src, dest, bytes);
876+
return allocator_[currentTier()]->resizePools(src, dest, bytes);
875877
}
876878

877879
// Add a new compact cache with given name and size
@@ -1076,12 +1078,13 @@ class CacheAllocator : public CacheBase {
10761078
// @throw std::invalid_argument if the memory does not belong to this
10771079
// cache allocator
10781080
AllocInfo getAllocInfo(const void* memory) const {
1079-
return allocator_->getAllocInfo(memory);
1081+
return allocator_[getTierId(memory)]->getAllocInfo(memory);
10801082
}
10811083

10821084
// return the ids for the set of existing pools in this cache.
10831085
std::set<PoolId> getPoolIds() const override final {
1084-
return allocator_->getPoolIds();
1086+
// all tiers have the same pool ids. TODO: deduplicate
1087+
return allocator_[0]->getPoolIds();
10851088
}
10861089

10871090
// return a list of pool ids that are backing compact caches. This includes
@@ -1093,27 +1096,28 @@ class CacheAllocator : public CacheBase {
10931096

10941097
// return the pool with specified id.
10951098
const MemoryPool& getPool(PoolId pid) const override final {
1096-
return allocator_->getPool(pid);
1099+
return allocator_[currentTier()]->getPool(pid);
10971100
}
10981101

10991102
// calculate the number of slabs to be advised/reclaimed in each pool
11001103
PoolAdviseReclaimData calcNumSlabsToAdviseReclaim() override final {
11011104
auto regularPoolIds = getRegularPoolIds();
1102-
return allocator_->calcNumSlabsToAdviseReclaim(regularPoolIds);
1105+
return allocator_[currentTier()]->calcNumSlabsToAdviseReclaim(regularPoolIds);
11031106
}
11041107

11051108
// update number of slabs to advise in the cache
11061109
void updateNumSlabsToAdvise(int32_t numSlabsToAdvise) override final {
1107-
allocator_->updateNumSlabsToAdvise(numSlabsToAdvise);
1110+
allocator_[currentTier()]->updateNumSlabsToAdvise(numSlabsToAdvise);
11081111
}
11091112

11101113
// returns a valid PoolId corresponding to the name or kInvalidPoolId if the
11111114
// name is not a recognized pool
11121115
PoolId getPoolId(folly::StringPiece name) const noexcept;
11131116

11141117
// returns the pool's name by its poolId.
1115-
std::string getPoolName(PoolId poolId) const override {
1116-
return allocator_->getPoolName(poolId);
1118+
std::string getPoolName(PoolId poolId) const {
1119+
// all tiers have the same pool names.
1120+
return allocator_[0]->getPoolName(poolId);
11171121
}
11181122

11191123
// get stats related to all kinds of slab release events.
@@ -1391,19 +1395,27 @@ class CacheAllocator : public CacheBase {
13911395

13921396
using MMContainerPtr = std::unique_ptr<MMContainer>;
13931397
using MMContainers =
1394-
std::array<std::array<MMContainerPtr, MemoryAllocator::kMaxClasses>,
1395-
MemoryPoolManager::kMaxPools>;
1398+
std::vector<std::array<std::array<MMContainerPtr, MemoryAllocator::kMaxClasses>,
1399+
MemoryPoolManager::kMaxPools>>;
13961400

13971401
void createMMContainers(const PoolId pid, MMConfig config);
13981402

1403+
TierId getTierId(const Item& item) const;
1404+
TierId getTierId(const void* ptr) const;
1405+
13991406
// acquire the MMContainer corresponding to the the Item's class and pool.
14001407
//
14011408
// @return pointer to the MMContainer.
14021409
// @throw std::invalid_argument if the Item does not point to a valid
14031410
// allocation from the memory allocator.
14041411
MMContainer& getMMContainer(const Item& item) const noexcept;
14051412

1406-
MMContainer& getMMContainer(PoolId pid, ClassId cid) const noexcept;
1413+
MMContainer& getMMContainer(TierId tid, PoolId pid, ClassId cid) const noexcept;
1414+
1415+
// Get stats of the specified pid and cid.
1416+
// If such mmcontainer is not valid (pool id or cid out of bound)
1417+
// or the mmcontainer is not initialized, return an empty stat.
1418+
MMContainerStat getMMContainerStat(TierId tid, PoolId pid, ClassId cid) const noexcept;
14071419

14081420
// create a new cache allocation. The allocation can be initialized
14091421
// appropriately and made accessible through insert or insertOrReplace.
@@ -1435,6 +1447,17 @@ class CacheAllocator : public CacheBase {
14351447
uint32_t creationTime,
14361448
uint32_t expiryTime);
14371449

1450+
// create a new cache allocation on specific memory tier.
1451+
// For description see allocateInternal.
1452+
//
1453+
// @param tid id of a memory tier
1454+
WriteHandle allocateInternalTier(TierId tid,
1455+
PoolId id,
1456+
Key key,
1457+
uint32_t size,
1458+
uint32_t creationTime,
1459+
uint32_t expiryTime);
1460+
14381461
// Allocate a chained item
14391462
//
14401463
// The resulting chained item does not have a parent item and
@@ -1525,6 +1548,15 @@ class CacheAllocator : public CacheBase {
15251548
// not exist.
15261549
FOLLY_ALWAYS_INLINE WriteHandle findFastImpl(Key key, AccessMode mode);
15271550

1551+
// Moves a regular item to a different memory tier.
1552+
//
1553+
// @param oldItem Reference to the item being moved
1554+
// @param newItemHdl Reference to the handle of the new item being moved into
1555+
//
1556+
// @return true If the move was completed, and the containers were updated
1557+
// successfully.
1558+
bool moveRegularItemOnEviction(Item& oldItem, WriteHandle& newItemHdl);
1559+
15281560
// Moves a regular item to a different slab. This should only be used during
15291561
// slab release after the item's exclusive bit has been set. The user supplied
15301562
// callback is responsible for copying the contents and fixing the semantics
@@ -1679,23 +1711,26 @@ class CacheAllocator : public CacheBase {
16791711
// Implementation to find a suitable eviction from the container. The
16801712
// two parameters together identify a single container.
16811713
//
1714+
// @param tid the id of the tier to look for evictions inside
16821715
// @param pid the id of the pool to look for evictions inside
16831716
// @param cid the id of the class to look for evictions inside
16841717
// @return An evicted item or nullptr if there is no suitable candidate found
16851718
// within the configured number of attempts.
1686-
Item* findEviction(PoolId pid, ClassId cid);
1719+
Item* findEviction(TierId tid, PoolId pid, ClassId cid);
16871720

16881721
// Get next eviction candidate from MMContainer, remove from AccessContainer,
16891722
// MMContainer and insert into NVMCache if enabled.
16901723
//
1724+
// @param tid the id of the tier to look for evictions inside
16911725
// @param pid the id of the pool to look for evictions inside
16921726
// @param cid the id of the class to look for evictions inside
16931727
// @param searchTries number of search attempts so far.
16941728
//
16951729
// @return pair of [candidate, toRecycle]. Pair of null if reached the end of
16961730
// the eviction queue or no suitable candidate found
16971731
// within the configured number of attempts
1698-
std::pair<Item*, Item*> getNextCandidate(PoolId pid,
1732+
std::pair<Item*, Item*> getNextCandidate(TierId tid,
1733+
PoolId pid,
16991734
ClassId cid,
17001735
unsigned int& searchTries);
17011736

@@ -1713,7 +1748,7 @@ class CacheAllocator : public CacheBase {
17131748
const typename Item::PtrCompressor& compressor);
17141749

17151750
unsigned int reclaimSlabs(PoolId id, size_t numSlabs) final {
1716-
return allocator_->reclaimSlabsAndGrow(id, numSlabs);
1751+
return allocator_[currentTier()]->reclaimSlabsAndGrow(id, numSlabs);
17171752
}
17181753

17191754
FOLLY_ALWAYS_INLINE EventTracker* getEventTracker() const {
@@ -1772,7 +1807,7 @@ class CacheAllocator : public CacheBase {
17721807
const void* hint = nullptr) final;
17731808

17741809
// @param releaseContext slab release context
1775-
void releaseSlabImpl(const SlabReleaseContext& releaseContext);
1810+
void releaseSlabImpl(TierId tid, const SlabReleaseContext& releaseContext);
17761811

17771812
// @return true when successfully marked as moving,
17781813
// false when this item has already been freed
@@ -1834,7 +1869,7 @@ class CacheAllocator : public CacheBase {
18341869
// primitives. So we consciously exempt ourselves here from TSAN data race
18351870
// detection.
18361871
folly::annotate_ignore_thread_sanitizer_guard g(__FILE__, __LINE__);
1837-
auto slabsSkipped = allocator_->forEachAllocation(std::forward<Fn>(f));
1872+
auto slabsSkipped = allocator_[currentTier()]->forEachAllocation(std::forward<Fn>(f));
18381873
stats().numReaperSkippedSlabs.add(slabsSkipped);
18391874
}
18401875

@@ -1878,10 +1913,10 @@ class CacheAllocator : public CacheBase {
18781913
std::unique_ptr<T>& worker,
18791914
std::chrono::seconds timeout = std::chrono::seconds{0});
18801915

1881-
ShmSegmentOpts createShmCacheOpts();
1882-
std::unique_ptr<MemoryAllocator> createNewMemoryAllocator();
1883-
std::unique_ptr<MemoryAllocator> restoreMemoryAllocator();
1884-
std::unique_ptr<CCacheManager> restoreCCacheManager();
1916+
ShmSegmentOpts createShmCacheOpts(TierId tid);
1917+
std::unique_ptr<MemoryAllocator> createNewMemoryAllocator(TierId tid);
1918+
std::unique_ptr<MemoryAllocator> restoreMemoryAllocator(TierId tid);
1919+
std::unique_ptr<CCacheManager> restoreCCacheManager(TierId tid);
18851920

18861921
PoolIds filterCompactCachePools(const PoolIds& poolIds) const;
18871922

@@ -1901,7 +1936,7 @@ class CacheAllocator : public CacheBase {
19011936
}
19021937

19031938
typename Item::PtrCompressor createPtrCompressor() const {
1904-
return allocator_->createPtrCompressor<Item>();
1939+
return allocator_[0 /* TODO */]->createPtrCompressor<Item>();
19051940
}
19061941

19071942
// helper utility to throttle and optionally log.
@@ -1924,9 +1959,14 @@ class CacheAllocator : public CacheBase {
19241959

19251960
// @param type the type of initialization
19261961
// @return nullptr if the type is invalid
1927-
// @return pointer to memory allocator
1962+
// @return vector of pointers to memory allocator
19281963
// @throw std::runtime_error if type is invalid
1929-
std::unique_ptr<MemoryAllocator> initAllocator(InitMemType type);
1964+
std::vector<std::unique_ptr<MemoryAllocator>> initAllocator(InitMemType type);
1965+
1966+
std::vector<std::unique_ptr<MemoryAllocator>> createPrivateAllocator();
1967+
std::vector<std::unique_ptr<MemoryAllocator>> createAllocators();
1968+
std::vector<std::unique_ptr<MemoryAllocator>> restoreAllocators();
1969+
19301970
// @param type the type of initialization
19311971
// @return nullptr if the type is invalid
19321972
// @return pointer to access container
@@ -1984,6 +2024,17 @@ class CacheAllocator : public CacheBase {
19842024

19852025
// BEGIN private members
19862026

2027+
TierId currentTier() const {
2028+
// TODO: every function which calls this method should be refactored.
2029+
// We should go case by case and either make such function work on
2030+
// all tiers or expose separate parameter to describe the tier ID.
2031+
return 0;
2032+
}
2033+
2034+
unsigned getNumTiers() const {
2035+
return config_.memoryTierConfigs.size();
2036+
}
2037+
19872038
// Whether the memory allocator for this cache allocator was created on shared
19882039
// memory. The hash table, chained item hash table etc is also created on
19892040
// shared memory except for temporary shared memory mode when they're created
@@ -2009,9 +2060,10 @@ class CacheAllocator : public CacheBase {
20092060
const MMConfig mmConfig_{};
20102061

20112062
// the memory allocator for allocating out of the available memory.
2012-
std::unique_ptr<MemoryAllocator> allocator_;
2063+
std::vector<std::unique_ptr<MemoryAllocator>> allocator_;
20132064

20142065
// compact cache allocator manager
2066+
// TODO: per tier?
20152067
std::unique_ptr<CCacheManager> compactCacheManager_;
20162068

20172069
// compact cache instances reside here when user "add" or "attach" compact

cachelib/allocator/PoolOptimizer.cpp

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,8 @@ void PoolOptimizer::optimizeRegularPoolSizes() {
5151

5252
void PoolOptimizer::optimizeCompactCacheSizes() {
5353
try {
54+
// TODO: should optimizer look at each tier individually?
55+
// If yes, then resizePools should be per-tier
5456
auto strategy = cache_.getPoolOptimizeStrategy();
5557
if (!strategy) {
5658
strategy = strategy_;

cachelib/allocator/memory/MemoryAllocator.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -646,6 +646,13 @@ class MemoryAllocator {
646646
memoryPoolManager_.updateNumSlabsToAdvise(numSlabs);
647647
}
648648

649+
// returns ture if ptr points to memory which is managed by this
650+
// allocator
651+
bool isMemoryInAllocator(const void *ptr) {
652+
return ptr && ptr >= slabAllocator_.getSlabMemoryBegin()
653+
&& ptr < slabAllocator_.getSlabMemoryEnd();
654+
}
655+
649656
private:
650657
// @param memory pointer to the memory.
651658
// @return the MemoryPool corresponding to the memory.

cachelib/allocator/memory/SlabAllocator.h

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -322,6 +322,17 @@ class SlabAllocator {
322322
return PtrCompressor<PtrType, SlabAllocator>(*this);
323323
}
324324

325+
// returns starting address of memory we own.
326+
const Slab* getSlabMemoryBegin() const noexcept {
327+
return reinterpret_cast<Slab*>(memoryStart_);
328+
}
329+
330+
// returns first byte after the end of memory region we own.
331+
const Slab* getSlabMemoryEnd() const noexcept {
332+
return reinterpret_cast<Slab*>(reinterpret_cast<uint8_t*>(memoryStart_) +
333+
memorySize_);
334+
}
335+
325336
private:
326337
// null Slab* presentation. With 4M Slab size, a valid slab index would never
327338
// reach 2^16 - 1;
@@ -339,12 +350,6 @@ class SlabAllocator {
339350
// @throw std::invalid_argument if the state is invalid.
340351
void checkState() const;
341352

342-
// returns first byte after the end of memory region we own.
343-
const Slab* getSlabMemoryEnd() const noexcept {
344-
return reinterpret_cast<Slab*>(reinterpret_cast<uint8_t*>(memoryStart_) +
345-
memorySize_);
346-
}
347-
348353
// returns true if we have slabbed all the memory that is available to us.
349354
// false otherwise.
350355
bool allMemorySlabbed() const noexcept {

cachelib/allocator/tests/AllocatorResizeTest.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -966,23 +966,23 @@ class AllocatorResizeTest : public AllocatorTest<AllocatorT> {
966966
for (i = 1; i <= numItersToMaxAdviseAway + 1; i++) {
967967
alloc.memMonitor_->adviseAwaySlabs();
968968
std::this_thread::sleep_for(std::chrono::seconds{2});
969-
ASSERT_EQ(alloc.allocator_->getAdvisedMemorySize(), i * perIterAdvSize);
969+
ASSERT_EQ(alloc.allocator_[0 /* TODO - extend test */]->getAdvisedMemorySize(), i * perIterAdvSize);
970970
}
971971
i--;
972972
// This should fail
973973
alloc.memMonitor_->adviseAwaySlabs();
974974
std::this_thread::sleep_for(std::chrono::seconds{2});
975-
auto totalAdvisedAwayMemory = alloc.allocator_->getAdvisedMemorySize();
975+
auto totalAdvisedAwayMemory = alloc.allocator_[0 /* TODO - extend test */]->getAdvisedMemorySize();
976976
ASSERT_EQ(totalAdvisedAwayMemory, i * perIterAdvSize);
977977

978978
// Try to reclaim back
979979
for (i = 1; i <= numItersToMaxAdviseAway + 1; i++) {
980980
alloc.memMonitor_->reclaimSlabs();
981981
std::this_thread::sleep_for(std::chrono::seconds{2});
982-
ASSERT_EQ(alloc.allocator_->getAdvisedMemorySize(),
982+
ASSERT_EQ(alloc.allocator_[0 /* TODO - extend test */]->getAdvisedMemorySize(),
983983
totalAdvisedAwayMemory - i * perIterAdvSize);
984984
}
985-
totalAdvisedAwayMemory = alloc.allocator_->getAdvisedMemorySize();
985+
totalAdvisedAwayMemory = alloc.allocator_[0 /* TODO - extend test */]->getAdvisedMemorySize();
986986
ASSERT_EQ(totalAdvisedAwayMemory, 0);
987987
}
988988
}

cachelib/allocator/tests/BaseAllocatorTest.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -4257,13 +4257,13 @@ class BaseAllocatorTest : public AllocatorTest<AllocatorT> {
42574257
// Had a bug: D4799860 where we allocated the wrong size for chained item
42584258
{
42594259
const auto parentAllocInfo =
4260-
alloc.allocator_->getAllocInfo(itemHandle->getMemory());
4260+
alloc.allocator_[0 /* TODO - extend test */]->getAllocInfo(itemHandle->getMemory());
42614261
const auto child1AllocInfo =
4262-
alloc.allocator_->getAllocInfo(chainedItemHandle->getMemory());
4262+
alloc.allocator_[0 /* TODO - extend test */]->getAllocInfo(chainedItemHandle->getMemory());
42634263
const auto child2AllocInfo =
4264-
alloc.allocator_->getAllocInfo(chainedItemHandle2->getMemory());
4264+
alloc.allocator_[0 /* TODO - extend test */]->getAllocInfo(chainedItemHandle2->getMemory());
42654265
const auto child3AllocInfo =
4266-
alloc.allocator_->getAllocInfo(chainedItemHandle3->getMemory());
4266+
alloc.allocator_[0 /* TODO - extend test */]->getAllocInfo(chainedItemHandle3->getMemory());
42674267

42684268
const auto parentCid = parentAllocInfo.classId;
42694269
const auto child1Cid = child1AllocInfo.classId;

cachelib/allocator/tests/TestBase-inl.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -312,7 +312,7 @@ void AllocatorTest<AllocatorT>::testShmIsRemoved(
312312
ASSERT_FALSE(AllocatorT::ShmManager::segmentExists(
313313
config.getCacheDir(), detail::kShmHashTableName, config.usePosixShm));
314314
ASSERT_FALSE(AllocatorT::ShmManager::segmentExists(
315-
config.getCacheDir(), detail::kShmCacheName, config.usePosixShm));
315+
config.getCacheDir(), detail::kShmCacheName + std::to_string(0), config.usePosixShm));
316316
ASSERT_FALSE(AllocatorT::ShmManager::segmentExists(
317317
config.getCacheDir(), detail::kShmChainedItemHashTableName,
318318
config.usePosixShm));
@@ -326,7 +326,7 @@ void AllocatorTest<AllocatorT>::testShmIsNotRemoved(
326326
ASSERT_TRUE(AllocatorT::ShmManager::segmentExists(
327327
config.getCacheDir(), detail::kShmHashTableName, config.usePosixShm));
328328
ASSERT_TRUE(AllocatorT::ShmManager::segmentExists(
329-
config.getCacheDir(), detail::kShmCacheName, config.usePosixShm));
329+
config.getCacheDir(), detail::kShmCacheName + std::to_string(0), config.usePosixShm));
330330
ASSERT_TRUE(AllocatorT::ShmManager::segmentExists(
331331
config.getCacheDir(), detail::kShmChainedItemHashTableName,
332332
config.usePosixShm));

0 commit comments

Comments
 (0)