44 void append(
const char *aBuf,
int aSize)
override;
45 void vappendf(
const char *fmt, va_list ap)
override;
77 firstSlice(aFirstSlice),
78 lastSlice(firstSlice),
121 Must(firstSlice < 0 || lastSlice >= 0);
146 const int64_t copySize = std::min(writingDebt, pageSize - sliceOffset);
147 memcpy(
static_cast<char*
>(PagePointer(page)) + sliceOffset,
buf +
bufWritten,
150 debugs(20, 7,
"copied " << slice.
size <<
'+' << copySize <<
" bytes of " <<
151 entry <<
" from " << sliceOffset <<
" in " << page);
153 slice.
size += copySize;
184 if (diskMaxSize == -1) {
186 "is unlimited but mem-cache maximum object size is " <<
187 memMaxSize / 1024.0 <<
" KB");
188 }
else if (diskMaxSize > memMaxSize) {
190 "is too large for mem-cache: " <<
191 diskMaxSize / 1024.0 <<
" KB > " <<
192 memMaxSize / 1024.0 <<
" KB");
239 if (entryLimit > 0) {
246 const unsigned int slotsFree =
248 if (slotsFree <=
static_cast<unsigned int>(slotLimit)) {
249 const int usedSlots = slotLimit -
static_cast<int>(slotsFree);
251 usedSlots, (100.0 * usedSlots / slotLimit));
254 if (slotLimit < 100) {
336 debugs(20, 3,
"failed for " << *e);
363 }
catch (
const std::exception &ex) {
364 debugs(20, 2,
"error starting to update entry " << *updatedE <<
": " << ex.what());
376 debugs(20, 7,
"stale hdr_sz: " << staleHdrSz);
379 Must(staleHdrSz > 0);
388 debugs(20, 7,
"fresh hdr_sz: " << freshHdrSz <<
" diff: " << (freshHdrSz - staleHdrSz));
394 Must(headersInLastSlice > 0);
395 Must(slice.
size >= headersInLastSlice);
398 char *page =
static_cast<char*
>(PagePointer(extra.page));
399 debugs(20, 5,
"appending same-slice payload: " << payloadInLastSlice);
400 writer.
append(page + headersInLastSlice, payloadInLastSlice);
448 const bool copied =
copyFromShm(entry, index, anchor);
481 debugs(20, 7,
"mem-loading entry " << index <<
" from " << anchor.
start);
487 bool wasEof = anchor.
complete() && sid < 0;
488 int64_t sliceOffset = 0;
490 SBuf httpHeaderParsingBuffer;
497 debugs(20, 8,
"entry " << index <<
" slice " << sid <<
" eof " <<
498 wasEof <<
" wasSize " << wasSize <<
" <= " <<
505 assert(prefixSize <= wasSize);
509 char *page =
static_cast<char*
>(PagePointer(extra.page));
515 debugs(20, 8,
"entry " << index <<
" copied slice " << sid <<
516 " from " << extra.page <<
'+' << prefixSize);
522 if (reply.parseTerminatedPrefix(httpHeaderParsingBuffer.
c_str(), httpHeaderParsingBuffer.
length()))
523 httpHeaderParsingBuffer =
SBuf();
529 if (slice.
next >= 0) {
532 if (wasSize >= slice.
size) {
533 sliceOffset += wasSize;
536 }
else if (wasSize >= slice.
size) {
584 assert(offAfter >= 0 && offBefore <= offAfter &&
585 static_cast<size_t>(offAfter - offBefore) == buf.
length);
593 debugs(20, 5,
"already loaded from mem-cache: " << e);
598 debugs(20, 5,
"already written to mem-cache: " << e);
603 debugs(20, 5,
"avoid heavy optional work during shutdown: " << e);
611 debugs(20, 5,
"yield to entry publisher: " << e);
616 debugs(20, 7,
"Not memory cachable: " << e);
630 const int64_t ramSize =
max(loadedSize, expectedSize);
632 debugs(20, 5,
"Too big max(" <<
633 loadedSize <<
", " << expectedSize <<
"): " << e);
638 debugs(20, 5,
"not contiguous");
643 debugs(20, 5,
"No map to mem-cache " << e);
648 debugs(20, 5,
"Not mem-caching ENTRY_SPECIAL " << e);
662 debugs(20, 5,
"No room in mem-cache map to index " << e);
688 debugs(20, 5,
"postponing copying " << e <<
" for lack of news: " <<
706 if (anchor.
start < 0)
711 debugs(20, 7,
"mem-cached available " << eSize <<
" bytes of " << e);
725 static_cast<char*
>(PagePointer(page)) + sliceOffset);
730 debugs(20, 2,
"Failed to mem-cache " << (bufSize - sliceOffset) <<
733 throw TexcHere(
"data_hdr.copy failure");
736 debugs(20, 7,
"mem-cached " << copied <<
" bytes of " << e <<
739 slice.
size += copied;
750 if (sliceOffset < 0) {
755 extras->items[sliceOffset].page = page;
756 anchor.
start = sliceOffset;
763 if (slice.
size >= sliceCapacity) {
764 if (slice.
next >= 0) {
765 sliceOffset = slice.
next;
771 extras->items[sliceOffset].page = page;
772 debugs(20, 7,
"entry " << fileNo <<
" new slice: " << sliceOffset);
798 const auto slotId = slot.
number - 1;
799 debugs(20, 5,
"got a previously free slot: " << slotId);
802 debugs(20, 5,
"and got a previously free page: " << page);
806 debugs(20, 3,
"but there is no free page, returning " << slotId);
819 const auto slotId = slot.
number - 1;
821 debugs(20, 5,
"got previously busy " << slotId <<
" and " << page);
829 throw TexcHere(
"ran out of mem-cache slots");
836 debugs(20, 9,
"slice " << sliceId <<
" freed " << pageId);
840 slotId.
number = sliceId + 1;
859 debugs(20, 7,
"entry " << e);
885 }
catch (
const std::exception &x) {
886 debugs(20, 2,
"mem-caching error writing entry " << e <<
": " << x.what());
922 }
else if (
const auto key = e.
publicKey()) {
1015 fatal(
"memory_cache_shared is on, but no support for shared memory detected");
1018 " a single worker is running");
#define Assure(condition)
#define Here()
source code location of the caller
static const auto MapLabel
shared memory segment path to use for MemStore maps
static const char * ExtrasLabel
shared memory segment path to use for IDs of shared pages with slice data
static const char * SpaceLabel
shared memory segment path to use for the free slices index
Ipc::StoreMap MemStoreMap
#define DefineRunnerRegistrator(ClassName)
std::ostream & CurrentException(std::ostream &os)
prints active (i.e., thrown but not yet handled) exception
#define TexcHere(msg)
legacy convenience macro; it is not difficult to type Here() now
static void Broadcast(const StoreEntry &e, const bool includingThisWorker=false)
notify other workers about changes in entry state (e.g., new data)
static std::ostream & Extra(std::ostream &)
void packHeadersUsingSlowPacker(Packable &p) const
same as packHeadersUsingFastPacker() but assumes that p cannot quickly process small additions
Shared memory page identifier, address, or handler.
uint32_t number
page number within the segment
bool set() const
true if and only if both critical components have been initialized
PageStack construction and SharedMemorySize calculation parameters.
PageCount capacity
the maximum number of pages
size_t pageSize
page size, used to calculate shared memory size
bool createFull
whether a newly created PageStack should be prefilled with PageIds
static PoolId IdForMemStoreSpace()
stack of free cache_mem slot positions
bool pop(PageId &page)
sets value and returns true unless no free page numbers are found
void push(PageId &page)
makes value available as a free page number to future pop() callers
void useConfig() override
static bool Enabled()
Whether shared memory support is available.
approximate stats of a set of ReadWriteLocks
void dump(StoreEntry &e) const
std::atomic< StoreMapSliceId > start
where the chain of StoreEntry slices begins [app]
struct Ipc::StoreMapAnchor::Basics basics
std::atomic< uint8_t > writerHalted
whether StoreMap::abortWriting() was called for a read-locked entry
void set(const StoreEntry &anEntry, const cache_key *aKey=nullptr)
store StoreEntry key and basics for an inode slot
void exportInto(StoreEntry &) const
load StoreEntry basics that were previously stored with set()
std::atomic< StoreMapSliceId > next
ID of the next entry slice.
std::atomic< Size > size
slice contents size
sfileno fileNo
StoreMap::fileNos[name], for convenience/speed.
StoreMapSliceId splicingPoint
the last slice in the chain still containing metadata/headers
StoreMapAnchor * anchor
StoreMap::anchors[fileNo], for convenience/speed.
Aggregates information required for updating entry metadata and headers.
Edition fresh
new anchor and the updated chain prefix
Edition stale
old anchor and chain
StoreEntry * entry
the store entry being updated
aggregates anchor and slice owners for Init() caller convenience
Anchor * openForWriting(const cache_key *const key, sfileno &fileno)
const Slice & readableSlice(const AnchorId anchorId, const SliceId sliceId) const
readable slice within an entry chain opened by openForReading()
bool openForUpdating(Update &update, sfileno fileNoHint)
finds and locks the Update entry for an exclusive metadata update
Anchor & writeableEntry(const AnchorId anchorId)
writeable anchor for the entry created by openForWriting()
const Anchor & readableEntry(const AnchorId anchorId) const
readable anchor for the entry created by openForReading()
int entryCount() const
number of writeable and readable entries
static Owner * Init(const SBuf &path, const int slotLimit)
initialize shared memory
void closeForWriting(const sfileno fileno)
successfully finish creating or updating the entry at fileno pos
StoreMapCleaner * cleaner
notified before a readable entry is freed
void abortUpdating(Update &update)
undoes partial update, unlocks, and cleans up
SliceId sliceContaining(const sfileno fileno, const uint64_t nth) const
const Anchor * openForReading(const cache_key *const key, sfileno &fileno)
opens entry (identified by key) for reading, increments read level
bool freeEntry(const sfileno)
void closeForReading(const sfileno fileno)
closes open entry after reading, decrements read level
void abortWriting(const sfileno fileno)
stop writing the entry, freeing its slot for others to use if possible
void startAppending(const sfileno fileno)
restrict opened for writing entry to appending operations; allow reads
void prepFreeSlice(const SliceId sliceId)
prepare a chain-unaffiliated slice for being added to an entry chain
void closeForUpdating(Update &update)
makes updated info available to others, unlocks, and cleans up
bool purgeOne()
either finds and frees an entry with at least 1 slice or returns false
void updateStats(ReadWriteLockStats &stats) const
adds approximate current stats to the supplied ones
void freeEntryByKey(const cache_key *const key)
Slice & writeableSlice(const AnchorId anchorId, const SliceId sliceId)
writeable slice within an entry chain created by openForWriting()
int sliceLimit() const
maximum number of slices possible
int entryLimit() const
maximum entryCount() possible
State of an entry with regards to the [shared] memory caching.
int32_t index
entry position inside the memory cache
Store::IoStatus io
current I/O state
int64_t offset
bytes written/read to/from the memory cache so far
int64_t expectedReplySize() const
const HttpReply & freshestReply() const
MemCache memCache
current [shared] memory caching state for the entry
int64_t endOffset() const
const HttpReply & baseReply() const
HttpReply & adjustableBaseReply()
bool isContiguous() const
void create() override
called when the runner should create a new memory segment
Ipc::Mem::Owner< Ipc::Mem::PageStack > * spaceOwner
free slices Owner
MemStoreMap::Owner * mapOwner
primary map Owner
void useConfig() override
void finalizeConfig() override
void claimMemoryNeeds() override
Ipc::Mem::Owner< MemStoreMapExtras > * extrasOwner
PageIds Owner.
Ipc::Mem::PageId * slot
local slot variable, waiting to be filled
Ipc::Mem::PageId * page
local page variable, waiting to be filled
bool updateAnchored(StoreEntry &) override
void anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
anchors StoreEntry to an already locked map entry
void updateHeaders(StoreEntry *e) override
make stored metadata and HTTP headers the same as in the given entry
sfileno lastWritingSlice
the last allocated slice for writing a store entry (during copyToShm)
uint64_t currentCount() const override
the total number of objects stored right now
Ipc::Mem::Pointer< Extras > extras
IDs of pages with slice data.
Ipc::Mem::PageId pageForSlice(Ipc::StoreMapSliceId sliceId)
safely returns a previously allocated memory page for the given entry slice
bool anchorToCache(StoreEntry &) override
bool updateAnchoredWith(StoreEntry &, const sfileno, const Ipc::StoreMapAnchor &)
updates Transients entry after its anchor has been located
void copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor, Ipc::StoreMap::Slice &slice)
copies at most one slice worth of local memory to shared memory
void disconnect(StoreEntry &e)
called when the entry is about to forget its association with mem cache
uint64_t minSize() const override
the minimum size the store will shrink to via normal housekeeping
bool shouldCache(StoreEntry &e) const
whether we should cache the entry
void evictCached(StoreEntry &) override
void copyFromShmSlice(StoreEntry &, const StoreIOBuffer &)
imports one shared memory slice into local memory
Ipc::Mem::Pointer< Ipc::Mem::PageStack > freeSlots
unused map slot IDs
SlotAndPage waitingFor
a cache for a single "hot" free slot and page
void completeWriting(StoreEntry &e)
all data has been received; there will be no more write() calls
void stat(StoreEntry &e) const override
uint64_t maxSize() const override
StoreEntry * get(const cache_key *) override
void copyToShm(StoreEntry &e)
copies all local data to shared memory
int64_t maxObjectSize() const override
the maximum size of a storable object; -1 if unlimited
Ipc::StoreMap::Slice & nextAppendableSlice(const sfileno entryIndex, sfileno &sliceOffset)
static int64_t EntryLimit()
calculates maximum number of entries we need to store and map
sfileno reserveSapForWriting(Ipc::Mem::PageId &page)
finds a slot and a free page to fill or throws
void evictIfFound(const cache_key *) override
void reference(StoreEntry &e) override
somebody needs this entry (many cache replacement policies need to know)
static bool Enabled()
whether Squid is correctly configured to use a shared memory cache
void updateHeadersOrThrow(Ipc::StoreMapUpdate &update)
bool copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
copies the entire entry from shared to local memory
bool dereference(StoreEntry &e) override
void write(StoreEntry &e)
copy non-shared entry data of the being-cached entry to our cache
uint64_t currentSize() const override
current size
void maintain() override
perform regular periodic maintenance; TODO: move to UFSSwapDir::Maintain
bool startCaching(StoreEntry &e)
locks map anchor and preps to store the entry in shared memory
void noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId) override
adjust slice-linked state before a locked Readable slice is erased
MemStoreMap * map
index of mem-cached entries
void getStats(StoreInfoStats &stats) const override
collect statistics
const char * rawContent() const
SBuf & vappendf(const char *fmt, va_list vargs)
size_type length() const
Returns the number of bytes stored in SBuf.
SBuf & append(const SBuf &S)
void copyToShm()
copies the entire buffer to shared memory
StoreEntry * entry
the entry being updated
const char * buf
content being appended now
Ipc::StoreMapSliceId firstSlice
int bufWritten
buf bytes appended so far
uint64_t totalWritten
cumulative number of bytes appended so far
Ipc::StoreMapSliceId lastSlice
the slot keeping the last byte of the appended content (at least)
void append(const char *aBuf, int aSize) override
Appends a c-string to existing packed data.
void vappendf(const char *fmt, va_list ap) override
ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice=-1)
void copyToShmSlice(Ipc::StoreMap::Slice &slice)
copies at most one slice worth of buffer to shared memory
Store::DiskConfig cacheSwap
struct SquidConfig::@88 Store
YesNoNone memShared
whether the memory cache is shared among workers
bool hasMemStore() const
whether there is a corresponding locked shared memory table entry
bool memoryCachable()
whether checkCachable() passes and the entry can be cached in memory
bool hasDisk(const sdirno dirn=-1, const sfileno filen=-1) const
bool hasParsedReplyHeader() const
whether this entry has access to [deserialized] [HTTP] response headers
const cache_key * publicKey() const
void memOutDecision(const bool willCacheInRam)
void storeWriterDone()
called when a store writer ends its work (successfully or not)
store_status_t store_status
void setMemStatus(mem_status_t)
bool shared
whether memory cache is shared among workers
double capacity
the size limit
double count
number of cached objects
double size
bytes currently in use
High-level store statistics used by mgr:info action. Used inside PODs!
Mem mem
all cache_dirs stats
int64_t maxObjectSize() const override
the maximum size of a storable object; -1 if unlimited
an std::runtime_error with thrower location info
void configure(bool beSet)
enables or disables the option; updating to 'configured' state
ssize_t copy(StoreIOBuffer const &) const
bool write(StoreIOBuffer const &)
A const & max(A const &lhs, A const &rhs)
A const & min(A const &lhs, A const &rhs)
#define debugs(SECTION, LEVEL, CONTENT)
#define EBIT_SET(flag, bit)
#define EBIT_TEST(flag, bit)
void fatal(const char *message)
size_t PageLevel()
approximate total number of shared memory pages used now
bool GetPage(const PageId::Purpose purpose, PageId &page)
sets page ID and returns true unless no free pages are found
size_t PagesAvailable()
approximate total number of shared memory pages we can allocate now
size_t PageSize()
returns page size in bytes; all pages are assumed to be the same size
void NotePageNeed(const int purpose, const int count)
claim the need for a number of pages for a given purpose
void PutPage(PageId &page)
makes identified page available as a free page to future GetPage() callers
size_t PageLimit()
the total number of shared memory pages that can be in use at any time
double doublePercent(const double, const double)
Controller & Root()
safely access controller singleton
SBuf ToSBuf(Args &&... args)
slowly stream-prints all arguments into a freshly allocated SBuf
unsigned char cache_key
Store key.
void storeAppendPrintf(StoreEntry *e, const char *fmt,...)
std::atomic< uint64_t > swap_file_sz