Squid Web Cache master
Loading...
Searching...
No Matches
MemStore.cc
Go to the documentation of this file.
1/*
2 * Copyright (C) 1996-2025 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9/* DEBUG: section 20 Memory Cache */
10
#include "squid.h"
#include "base/TextException.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "sbuf/SBuf.h"
#include "sbuf/Stream.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "store/forward.h"
#include "StoreStats.h"
#include "tools.h"
27
/// shared memory segment path to use for MemStore maps
static const auto MapLabel = "cache_mem_map";
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
/// shared memory segment path to use for IDs of shared pages with slice data
static const char *ExtrasLabel = "cache_mem_ex";
// TODO: sync with Rock::SwapDir::*Path()
35
/// Packs the entry's response headers into shared memory, allocating new
/// map slices and pages as needed. Used by the header-update code path.
class ShmWriter: public Packable
{
public:
    ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice = -1);

    /* Packable API */
    void append(const char *aBuf, int aSize) override;
    void vappendf(const char *fmt, va_list ap) override;

public:
    StoreEntry *entry; ///< the entry being updated

    /// the slice keeping the first byte of the appended content (at least)
    /// either set via the constructor parameter or allocated on first append
    Ipc::StoreMapSliceId firstSlice;

    /// the slice keeping the last byte of the appended content (at least)
    Ipc::StoreMapSliceId lastSlice;

    uint64_t totalWritten; ///< cumulative number of bytes appended so far

protected:
    void copyToShm();
    void copyToShmSlice(Ipc::StoreMap::Slice &slice);

private:
    MemStore &store; ///< the store providing shared memory slices/pages
    const sfileno fileNo; ///< the map entry being written

    /* set by (and only valid during) append calls */
    const char *buf; ///< content being appended now
    int bufSize; ///< buf size
    int bufWritten; ///< buf bytes appended so far
};
72
73/* ShmWriter */
74
75ShmWriter::ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice):
76 entry(anEntry),
77 firstSlice(aFirstSlice),
78 lastSlice(firstSlice),
79 totalWritten(0),
80 store(aStore),
81 fileNo(aFileNo),
82 buf(nullptr),
83 bufSize(0),
84 bufWritten(0)
85{
86 Must(entry);
87}
88
89void
90ShmWriter::append(const char *aBuf, int aBufSize)
91{
92 Must(!buf);
93 buf = aBuf;
94 bufSize = aBufSize;
95 if (bufSize) {
96 Must(buf);
97 bufWritten = 0;
98 copyToShm();
99 }
100 buf = nullptr;
101 bufSize = 0;
102 bufWritten = 0;
103}
104
/// Packable API: formats into a temporary SBuf and appends the result
void
ShmWriter::vappendf(const char *fmt, va_list ap)
{
    SBuf vaBuf;
    // work on a copy so the caller may reuse ap after we return
    va_list apCopy;
    va_copy(apCopy, ap);
    vaBuf.vappendf(fmt, apCopy);
    va_end(apCopy);
    append(vaBuf.rawContent(), vaBuf.length());
}
115
117void
119{
120 Must(bufSize > 0); // do not use up shared memory pages for nothing
121 Must(firstSlice < 0 || lastSlice >= 0);
122
123 // fill, skip slices that are already full
124 while (bufWritten < bufSize) {
126 if (firstSlice < 0)
128 copyToShmSlice(slice);
129 }
130
131 debugs(20, 7, "stored " << bufWritten << '/' << totalWritten << " header bytes of " << *entry);
132}
133
/// copies at most one slice worth of buffer to shared memory
void
ShmWriter::copyToShmSlice(Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = store.pageForSlice(lastSlice);
    debugs(20, 7, "entry " << *entry << " slice " << lastSlice << " has " <<
           page);

    Must(bufWritten <= bufSize);
    const int64_t writingDebt = bufSize - bufWritten;
    const int64_t pageSize = Ipc::Mem::PageSize();
    // start after whatever this (possibly partially filled) slice holds
    const int64_t sliceOffset = totalWritten % pageSize;
    const int64_t copySize = std::min(writingDebt, pageSize - sliceOffset);
    memcpy(static_cast<char*>(PagePointer(page)) + sliceOffset, buf + bufWritten,
           copySize);

    debugs(20, 7, "copied " << slice.size << '+' << copySize << " bytes of " <<
           entry << " from " << sliceOffset << " in " << page);

    slice.size += copySize;
    bufWritten += copySize;
    totalWritten += copySize;
    // fresh anchor.basics.swap_file_sz is already set [to the stale value]

    // either we wrote everything or we filled the entire slice
    Must(bufWritten == bufSize || sliceOffset + copySize == pageSize);
}
161
162/* MemStore */
163
164MemStore::MemStore(): map(nullptr), lastWritingSlice(-1)
165{
166}
167
169{
170 delete map;
171}
172
/// attaches to the shared memory segments created by MemStoreRr::create()
void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no shared memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
    extras = shm_old(MemStoreMapExtras)(ExtrasLabel);

    Must(!map);
    map = new MemStoreMap(SBuf(MapLabel));
    map->cleaner = this; // we want noteFreeMapSlice() callbacks
}
203
204void
206{
207 const size_t pageSize = Ipc::Mem::PageSize();
208
209 stats.mem.shared = true;
210
211 // In SMP mode, only the first worker reports shared memory stats to avoid
212 // adding up same-cache positive stats (reported by multiple worker
213 // processes) when Coordinator aggregates worker-reported stats.
214 // See also: Store::Disk::doReportStat().
215 if (UsingSmp() && KidIdentifier != 1)
216 return;
217
218 stats.mem.capacity =
220 stats.mem.size =
222 stats.mem.count = currentCount();
223}
224
/// Store API: appends a human-readable cache report to the given entry
void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int entryLimit = map->entryLimit();
        const int slotLimit = map->sliceLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
        if (entryLimit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / entryLimit));
        }

        storeAppendPrintf(&e, "Maximum slots:   %9d\n", slotLimit);
        if (slotLimit > 0) {
            // slotsFree may transiently exceed slotLimit while other workers
            // allocate; skip the "used" report in that case
            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cacheSlot);
            if (slotsFree <= static_cast<unsigned int>(slotLimit)) {
                const int usedSlots = slotLimit - static_cast<int>(slotsFree);
                storeAppendPrintf(&e, "Used slots:      %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / slotLimit));
            }

            if (slotLimit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}
262
263void
267
uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}
273
/// Store API: configured shared memory cache capacity (in bytes)
uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}
279
/// Store API: number of shared memory bytes currently in use
uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cacheSlot) *
           Ipc::Mem::PageSize();
}
/// Store API: number of entries currently cached in shared memory
uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}
292
/// Store API: the larger objects cannot be mem-cached
int64_t
MemStore::maxObjectSize() const
{
    // cannot exceed the total cache size either
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}
299void
303
304bool
306{
307 // no need to keep e in the global store_table for us; we have our own map
308 return false;
309}
310
313{
314 if (!map)
315 return nullptr;
316
317 sfileno index;
318 const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
319 if (!slot)
320 return nullptr;
321
322 // create a brand new store entry and initialize it with stored info
323 StoreEntry *e = new StoreEntry();
324
325 try {
326 // XXX: We do not know the URLs yet, only the key, but we need to parse and
327 // store the response for the Root().find() callers to be happy because they
328 // expect IN_MEMORY entries to already have the response headers and body.
329 e->createMemObject();
330
331 anchorEntry(*e, index, *slot);
332
333 // TODO: make copyFromShm() throw on all failures, simplifying this code
334 if (copyFromShm(*e, index, *slot))
335 return e;
336 debugs(20, 3, "failed for " << *e);
337 } catch (...) {
338 // see store_client::parseHttpHeadersFromDisk() for problems this may log
339 debugs(20, DBG_IMPORTANT, "ERROR: Cannot load a cache hit from shared memory" <<
340 Debug::Extra << "exception: " << CurrentException <<
341 Debug::Extra << "cache_mem entry: " << *e);
342 }
343
344 map->freeEntry(index); // do not let others into the same trap
345 destroyStoreEntry(static_cast<hash_link *>(e));
346 return nullptr;
347}
348
/// Store API: replaces the cached entry's headers with updatedE's headers
void
MemStore::updateHeaders(StoreEntry *updatedE)
{
    if (!map)
        return;

    Ipc::StoreMapUpdate update(updatedE);
    assert(updatedE);
    assert(updatedE->mem_obj);
    if (!map->openForUpdating(update, updatedE->mem_obj->memCache.index))
        return;

    try {
        updateHeadersOrThrow(update);
    } catch (const std::exception &ex) {
        debugs(20, 2, "error starting to update entry " << *updatedE << ": " << ex.what());
        map->abortUpdating(update); // undo the partial update
    }
}
368
/// Updates mem-cached entry metadata and headers, throwing on failures.
/// The caller is responsible for aborting the update on exceptions.
void
MemStore::updateHeadersOrThrow(Ipc::StoreMapUpdate &update)
{
    // our +/- hdr_sz math below does not work if the chains differ [in size]
    Must(update.stale.anchor->basics.swap_file_sz == update.fresh.anchor->basics.swap_file_sz);

    const uint64_t staleHdrSz = update.entry->mem().baseReply().hdr_sz;
    debugs(20, 7, "stale hdr_sz: " << staleHdrSz);

    /* we will need to copy same-slice payload after the stored headers later */
    Must(staleHdrSz > 0);
    update.stale.splicingPoint = map->sliceContaining(update.stale.fileNo, staleHdrSz);
    Must(update.stale.splicingPoint >= 0);
    Must(update.stale.anchor->basics.swap_file_sz >= staleHdrSz);

    Must(update.stale.anchor);
    ShmWriter writer(*this, update.entry, update.fresh.fileNo);
    update.entry->mem().freshestReply().packHeadersUsingSlowPacker(writer);
    const uint64_t freshHdrSz = writer.totalWritten;
    debugs(20, 7, "fresh hdr_sz: " << freshHdrSz << " diff: " << (freshHdrSz - staleHdrSz));

    /* copy same-slice payload remaining after the stored headers */
    const Ipc::StoreMapSlice &slice = map->readableSlice(update.stale.fileNo, update.stale.splicingPoint);
    const Ipc::StoreMapSlice::Size sliceCapacity = Ipc::Mem::PageSize();
    const Ipc::StoreMapSlice::Size headersInLastSlice = staleHdrSz % sliceCapacity;
    Must(headersInLastSlice > 0); // or sliceContaining() would have stopped earlier
    Must(slice.size >= headersInLastSlice);
    const Ipc::StoreMapSlice::Size payloadInLastSlice = slice.size - headersInLastSlice;
    const MemStoreMapExtras::Item &extra = extras->items[update.stale.splicingPoint];
    char *page = static_cast<char*>(PagePointer(extra.page));
    debugs(20, 5, "appending same-slice payload: " << payloadInLastSlice);
    writer.append(page + headersInLastSlice, payloadInLastSlice);
    update.fresh.splicingPoint = writer.lastSlice;

    // adjust the swapped size for the header-size difference
    update.fresh.anchor->basics.swap_file_sz -= staleHdrSz;
    update.fresh.anchor->basics.swap_file_sz += freshHdrSz;

    map->closeForUpdating(update);
}
408
409bool
411{
412 Assure(!entry.hasMemStore());
413 Assure(entry.mem().memCache.io != Store::ioDone);
414
415 if (!map)
416 return false;
417
418 sfileno index;
419 const Ipc::StoreMapAnchor *const slot = map->openForReading(
420 reinterpret_cast<cache_key*>(entry.key), index);
421 if (!slot)
422 return false;
423
424 anchorEntry(entry, index, *slot);
425 if (!updateAnchoredWith(entry, index, *slot))
426 throw TextException("updateAnchoredWith() failure", Here());
427 return true;
428}
429
430bool
432{
433 if (!map)
434 return false;
435
436 assert(entry.mem_obj);
437 assert(entry.hasMemStore());
438 const sfileno index = entry.mem_obj->memCache.index;
439 const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
440 return updateAnchoredWith(entry, index, anchor);
441}
442
444bool
446{
447 entry.swap_file_sz = anchor.basics.swap_file_sz;
448 const bool copied = copyFromShm(entry, index, anchor);
449 return copied;
450}
451
453void
455{
456 assert(!e.hasDisk()); // no conflict with disk entry basics
457 anchor.exportInto(e);
458
459 assert(e.mem_obj);
460 if (anchor.complete()) {
464 } else {
466 assert(e.mem_obj->object_sz < 0);
468 }
469
471
473 mc.index = index;
474 mc.io = Store::ioReading;
475}
476
/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
    assert(e.mem_obj);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;

    SBuf httpHeaderParsingBuffer;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        debugs(20, 8, "entry " << index << " slice " << sid << " eof " <<
               wasEof << " wasSize " << wasSize << " <= " <<
               anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
               " mem.endOffset " << e.mem_obj->endOffset());

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMapExtras::Item &extra = extras->items[sid];

            char *page = static_cast<char*>(PagePointer(extra.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);

            copyFromShmSlice(e, sliceBuf);
            debugs(20, 8, "entry " << index << " copied slice " << sid <<
                   " from " << extra.page << '+' << prefixSize);

            // parse headers if needed; they might span multiple slices!
            if (!e.hasParsedReplyHeader()) {
                httpHeaderParsingBuffer.append(sliceBuf.data, sliceBuf.length);
                auto &reply = e.mem().adjustableBaseReply();
                if (reply.parseTerminatedPrefix(httpHeaderParsingBuffer.c_str(), httpHeaderParsingBuffer.length()))
                    httpHeaderParsingBuffer = SBuf(); // we do not need these bytes anymore
            }
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    if (anchor.writerHalted) {
        debugs(20, 5, "mem-loaded aborted " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return false;
    }

    debugs(20, 5, "mem-loaded all " << e.mem_obj->endOffset() << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);

    if (!e.hasParsedReplyHeader())
        throw TextException(ToSBuf("truncated mem-cached headers; accumulated: ", httpHeaderParsingBuffer.length()), Here());

    // from StoreEntry::complete()
    e.mem_obj->object_sz = e.mem_obj->endOffset();
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);

    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // we read the entire response into the local memory; no more need to lock
    disconnect(e);
    return true;
}
572
/// imports one shared memory slice into local memory
void
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
}
587
/// whether we should cache the entry in shared memory
bool
MemStore::shouldCache(StoreEntry &e) const
{
    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already loaded from mem-cache: " << e);
        return false;
    }

    if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
        debugs(20, 5, "already written to mem-cache: " << e);
        return false;
    }

    if (shutting_down) {
        debugs(20, 5, "avoid heavy optional work during shutdown: " << e);
        return false;
    }

    // To avoid SMP workers releasing each other caching attempts, restrict disk
    // caching to StoreEntry publisher. This check goes before memoryCachable()
    // that may incorrectly release() publisher's entry via checkCachable().
    if (Store::Root().transientsReader(e)) {
        debugs(20, 5, "yield to entry publisher: " << e);
        return false;
    }

    if (!e.memoryCachable()) {
        debugs(20, 7, "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);

    if (!e.mem_obj->vary_headers.isEmpty()) {
        // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
        debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
        return false;
    }

    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t ramSize = max(loadedSize, expectedSize);
    if (ramSize > maxObjectSize()) {
        debugs(20, 5, "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!e.mem_obj->isContiguous()) {
        debugs(20, 5, "not contiguous");
        return false;
    }

    if (!map) {
        debugs(20, 5, "No map to mem-cache " << e);
        return false;
    }

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e);
        return false;
    }

    return true;
}
654
656bool
658{
659 sfileno index = 0;
660 Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
661 if (!slot) {
662 debugs(20, 5, "No room in mem-cache map to index " << e);
663 return false;
664 }
665
666 assert(e.mem_obj);
667 e.mem_obj->memCache.index = index;
669 slot->set(e);
670 // Do not allow others to feed off an unknown-size entry because we will
671 // stop swapping it out if it grows too large.
672 if (e.mem_obj->expectedReplySize() >= 0)
673 map->startAppending(index);
674 e.memOutDecision(true);
675 return true;
676}
677
/// copies all local data to shared memory
void
MemStore::copyToShm(StoreEntry &e)
{
    assert(map);
    assert(e.mem_obj);
    Must(!EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT));

    const int64_t eSize = e.mem_obj->endOffset();
    if (e.mem_obj->memCache.offset >= eSize) {
        debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
               e.mem_obj->memCache.offset << " >= " << eSize);
        return; // nothing to do (yet)
    }

    // throw if an accepted unknown-size entry grew too big or max-size changed
    Must(eSize <= maxObjectSize());

    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);
    lastWritingSlice = anchor.start;

    // fill, skip slices that are already full
    // Optimize: remember lastWritingSlice in e.mem_obj
    while (e.mem_obj->memCache.offset < eSize) {
        Ipc::StoreMap::Slice &slice = nextAppendableSlice(
                e.mem_obj->memCache.index, lastWritingSlice);
        if (anchor.start < 0)
            anchor.start = lastWritingSlice;
        copyToShmSlice(e, anchor, slice);
    }

    debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
}
713
/// copies at most one slice worth of local memory to shared memory
void
MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor, Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = pageForSlice(lastWritingSlice);
    debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
           page);

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize;
    StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
                              static_cast<char*>(PagePointer(page)) + sliceOffset);

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
               " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
               " in " << page);
        throw TexcHere("data_hdr.copy failure");
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << e.mem_obj->memCache.offset << " in " << page);

    slice.size += copied;
    e.mem_obj->memCache.offset += copied;
    anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
}
743
748{
749 // allocate the very first slot for the entry if needed
750 if (sliceOffset < 0) {
751 Ipc::StoreMapAnchor &anchor = map->writeableEntry(fileNo);
752 Must(anchor.start < 0);
753 Ipc::Mem::PageId page;
754 sliceOffset = reserveSapForWriting(page); // throws
755 extras->items[sliceOffset].page = page;
756 anchor.start = sliceOffset;
757 }
758
759 const size_t sliceCapacity = Ipc::Mem::PageSize();
760 do {
761 Ipc::StoreMap::Slice &slice = map->writeableSlice(fileNo, sliceOffset);
762
763 if (slice.size >= sliceCapacity) {
764 if (slice.next >= 0) {
765 sliceOffset = slice.next;
766 continue;
767 }
768
769 Ipc::Mem::PageId page;
770 slice.next = sliceOffset = reserveSapForWriting(page);
771 extras->items[sliceOffset].page = page;
772 debugs(20, 7, "entry " << fileNo << " new slice: " << sliceOffset);
773 continue; // to get and return the slice at the new sliceOffset
774 }
775
776 return slice;
777 } while (true);
778 /* not reached */
779}
780
784{
785 Must(extras);
786 Must(sliceId >= 0);
787 Ipc::Mem::PageId page = extras->items[sliceId].page;
788 Must(page);
789 return page;
790}
791
795{
796 Ipc::Mem::PageId slot;
797 if (freeSlots->pop(slot)) {
798 const auto slotId = slot.number - 1;
799 debugs(20, 5, "got a previously free slot: " << slotId);
800
802 debugs(20, 5, "and got a previously free page: " << page);
803 map->prepFreeSlice(slotId);
804 return slotId;
805 } else {
806 debugs(20, 3, "but there is no free page, returning " << slotId);
807 freeSlots->push(slot);
808 }
809 }
810
811 // catch free slots delivered to noteFreeMapSlice()
813 waitingFor.slot = &slot;
814 waitingFor.page = &page;
815 if (map->purgeOne()) {
816 assert(!waitingFor); // noteFreeMapSlice() should have cleared it
817 assert(slot.set());
818 assert(page.set());
819 const auto slotId = slot.number - 1;
820 map->prepFreeSlice(slotId);
821 debugs(20, 5, "got previously busy " << slotId << " and " << page);
822 return slotId;
823 }
824 assert(waitingFor.slot == &slot && waitingFor.page == &page);
825 waitingFor.slot = nullptr;
826 waitingFor.page = nullptr;
827
828 debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
829 throw TexcHere("ran out of mem-cache slots");
830}
831
832void
834{
835 Ipc::Mem::PageId &pageId = extras->items[sliceId].page;
836 debugs(20, 9, "slice " << sliceId << " freed " << pageId);
837 assert(pageId);
838 Ipc::Mem::PageId slotId;
840 slotId.number = sliceId + 1;
841 if (!waitingFor) {
842 // must zero pageId before we give slice (and pageId extras!) to others
843 Ipc::Mem::PutPage(pageId);
844 freeSlots->push(slotId);
845 } else {
846 *waitingFor.slot = slotId;
847 *waitingFor.page = pageId;
848 waitingFor.slot = nullptr;
849 waitingFor.page = nullptr;
850 pageId = Ipc::Mem::PageId();
851 }
852}
853
/// Store API: caches e (or its latest portion) in shared memory if possible
void
MemStore::write(StoreEntry &e)
{
    assert(e.mem_obj);

    debugs(20, 7, "entry " << e);

    switch (e.mem_obj->memCache.io) {
    case Store::ioUndecided:
        if (!shouldCache(e) || !startCaching(e)) {
            e.mem_obj->memCache.io = Store::ioDone;
            e.memOutDecision(false);
            return;
        }
        break;

    case Store::ioDone:
    case Store::ioReading:
        return; // we should not write in all of the above cases

    case Store::ioWriting:
        break; // already decided to write and still writing
    }

    try {
        copyToShm(e);
        if (e.store_status == STORE_OK) // done receiving new content
            completeWriting(e);
        else
            CollapsedForwarding::Broadcast(e);
        return;
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
        // fall through to the error handling code
    }

    disconnect(e);
}
892
/// finalizes the writing of a fully received entry and unlocks the map slot
void
MemStore::completeWriting(StoreEntry &e)
{
    assert(e.mem_obj);
    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    assert(map);

    debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e);

    e.mem_obj->memCache.index = -1;
    e.mem_obj->memCache.io = Store::ioDone;
    map->closeForWriting(index);

    CollapsedForwarding::Broadcast(e);
    e.storeWriterDone();
}
910
911void
913{
914 debugs(47, 5, e);
915 if (e.hasMemStore()) {
918 if (!e.locked()) {
919 disconnect(e);
921 }
922 } else if (const auto key = e.publicKey()) {
923 // the entry may have been loaded and then disconnected from the cache
924 evictIfFound(key);
925 if (!e.locked())
927 }
928}
929
930void
932{
933 if (map)
934 map->freeEntryByKey(key);
935}
936
937void
939{
940 assert(e.mem_obj);
941 MemObject &mem_obj = *e.mem_obj;
942 if (e.hasMemStore()) {
943 if (mem_obj.memCache.io == Store::ioWriting) {
944 map->abortWriting(mem_obj.memCache.index);
945 mem_obj.memCache.index = -1;
946 mem_obj.memCache.io = Store::ioDone;
948 e.storeWriterDone();
949 } else {
952 mem_obj.memCache.index = -1;
953 mem_obj.memCache.io = Store::ioDone;
954 }
955 }
956}
957
958bool
960{
961 return Config.memShared && Config.memMaxSize > 0;
962}
963
/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Requested())
        return 0;

    // every entry consumes at least one page
    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
    return entryLimit;
}
975
980{
981public:
982 /* RegisteredRunner API */
983 MemStoreRr(): spaceOwner(nullptr), mapOwner(nullptr), extrasOwner(nullptr) {}
984 void finalizeConfig() override;
985 void claimMemoryNeeds() override;
986 void useConfig() override;
987 ~MemStoreRr() override;
988
989protected:
990 /* Ipc::Mem::RegisteredRunner API */
991 void create() override;
992
993private:
997};
998
1000
1001void
1006
1007void
1009{
1010 // decide whether to use a shared memory cache if the user did not specify
1011 if (!Config.memShared.configured()) {
1013 Config.memMaxSize > 0);
1014 } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
1015 fatal("memory_cache_shared is on, but no support for shared memory detected");
1016 } else if (Config.memShared && !UsingSmp()) {
1017 debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
1018 " a single worker is running");
1019 }
1020
1022 debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small (" <<
1023 (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
1024 (Ipc::Mem::PageSize() / 1024.0) << " KB");
1025 }
1026}
1027
1028void
1034
/// RegisteredRunner API: creates the shared memory segments (master process)
void
MemStoreRr::create()
{
    if (!MemStore::Enabled())
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    assert(entryLimit > 0);

    Ipc::Mem::PageStack::Config spaceConfig;
    spaceConfig.poolId = Ipc::Mem::PageStack::IdForMemStoreSpace();
    spaceConfig.pageSize = 0; // the pages are stored in Ipc::Mem::Pages
    spaceConfig.capacity = entryLimit;
    spaceConfig.createFull = true; // all pages are initially available
    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, spaceConfig);
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(SBuf(MapLabel), entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(MemStoreMapExtras)(ExtrasLabel, entryLimit);
}
1056
1058{
1059 delete extrasOwner;
1060 delete mapOwner;
1061 delete spaceOwner;
1062}
1063
#define Assure(condition)
Definition Assure.h:35
#define Here()
source code location of the caller
Definition Here.h:15
static const auto MapLabel
shared memory segment path to use for MemStore maps
Definition MemStore.cc:29
static const char * ExtrasLabel
shared memory segment path to use for IDs of shared pages with slice data
Definition MemStore.cc:33
static const char * SpaceLabel
shared memory segment path to use for the free slices index
Definition MemStore.cc:31
Ipc::StoreMap MemStoreMap
Definition MemStore.h:23
#define shm_new(Class)
Definition Pointer.h:200
#define shm_old(Class)
Definition Pointer.h:201
#define DefineRunnerRegistrator(ClassName)
class SquidConfig Config
FREE destroyStoreEntry
std::ostream & CurrentException(std::ostream &os)
prints active (i.e., thrown but not yet handled) exception
#define TexcHere(msg)
legacy convenience macro; it is not difficult to type Here() now
#define Must(condition)
#define assert(EX)
Definition assert.h:17
static void Broadcast(const StoreEntry &e, const bool includingThisWorker=false)
notify other workers about changes in entry state (e.g., new data)
static std::ostream & Extra(std::ostream &)
Definition debug.cc:1316
void packHeadersUsingSlowPacker(Packable &p) const
same as packHeadersUsingFastPacker() but assumes that p cannot quickly process small additions
Definition HttpReply.cc:95
Shared memory page identifier, address, or handler.
Definition Page.h:24
PoolId pool
Definition Page.h:39
uint32_t number
page number within the segment
Definition Page.h:42
bool set() const
true if and only if both critical components have been initialized
Definition Page.h:29
PageStack construction and SharedMemorySize calculation parameters.
Definition PageStack.h:123
PageCount capacity
the maximum number of pages
Definition PageStack.h:127
size_t pageSize
page size, used to calculate shared memory size
Definition PageStack.h:126
bool createFull
whether a newly created PageStack should be prefilled with PageIds
Definition PageStack.h:130
static PoolId IdForMemStoreSpace()
stack of free cache_mem slot positions
Definition PageStack.h:167
bool pop(PageId &page)
sets value and returns true unless no free page numbers are found
Definition PageStack.cc:442
void push(PageId &page)
makes value available as a free page number to future pop() callers
Definition PageStack.cc:465
void useConfig() override
Definition Segment.cc:375
static bool Enabled()
Whether shared memory support is available.
Definition Segment.cc:320
approximate stats of a set of ReadWriteLocks
void dump(StoreEntry &e) const
std::atomic< StoreMapSliceId > start
where the chain of StoreEntry slices begins [app]
Definition StoreMap.h:111
struct Ipc::StoreMapAnchor::Basics basics
bool complete() const
Definition StoreMap.h:77
std::atomic< uint8_t > writerHalted
whether StoreMap::abortWriting() was called for a read-locked entry
Definition StoreMap.h:83
void set(const StoreEntry &anEntry, const cache_key *aKey=nullptr)
store StoreEntry key and basics for an inode slot
Definition StoreMap.cc:959
void exportInto(StoreEntry &) const
load StoreEntry basics that were previously stored with set()
Definition StoreMap.cc:979
std::atomic< StoreMapSliceId > next
ID of the next entry slice.
Definition StoreMap.h:49
std::atomic< Size > size
slice contents size
Definition StoreMap.h:48
sfileno fileNo
StoreMap::fileNos[name], for convenience/speed.
Definition StoreMap.h:194
StoreMapSliceId splicingPoint
the last slice in the chain still containing metadata/headers
Definition StoreMap.h:198
StoreMapAnchor * anchor
StoreMap::anchors[fileNo], for convenience/speed.
Definition StoreMap.h:193
Aggregates information required for updating entry metadata and headers.
Definition StoreMap.h:182
Edition fresh
new anchor and the updated chain prefix
Definition StoreMap.h:209
Edition stale
old anchor and chain
Definition StoreMap.h:208
StoreEntry * entry
the store entry being updated
Definition StoreMap.h:207
aggregates anchor and slice owners for Init() caller convenience
Definition StoreMap.h:233
Anchor * openForWriting(const cache_key *const key, sfileno &fileno)
Definition StoreMap.cc:141
const Slice & readableSlice(const AnchorId anchorId, const SliceId sliceId) const
readable slice within an entry chain opened by openForReading()
Definition StoreMap.cc:230
bool openForUpdating(Update &update, sfileno fileNoHint)
finds and locks the Update entry for an exclusive metadata update
Definition StoreMap.cc:523
Anchor & writeableEntry(const AnchorId anchorId)
writeable anchor for the entry created by openForWriting()
Definition StoreMap.cc:238
const Anchor & readableEntry(const AnchorId anchorId) const
readable anchor for the entry created by openForReading()
Definition StoreMap.cc:245
int entryCount() const
number of writeable and readable entries
Definition StoreMap.cc:739
static Owner * Init(const SBuf &path, const int slotLimit)
initialize shared memory
Definition StoreMap.cc:43
void closeForWriting(const sfileno fileno)
successfully finish creating or updating the entry at fileno pos
Definition StoreMap.cc:201
StoreMapCleaner * cleaner
notified before a readable entry is freed
Definition StoreMap.h:361
void abortUpdating(Update &update)
undoes partial update, unlocks, and cleans up
Definition StoreMap.cc:269
SliceId sliceContaining(const sfileno fileno, const uint64_t nth) const
Definition StoreMap.cc:421
const Anchor * openForReading(const cache_key *const key, sfileno &fileno)
opens entry (identified by key) for reading, increments read level
Definition StoreMap.cc:440
bool freeEntry(const sfileno)
Definition StoreMap.cc:313
void closeForReading(const sfileno fileno)
closes open entry after reading, decrements read level
Definition StoreMap.cc:497
void abortWriting(const sfileno fileno)
stop writing the entry, freeing its slot for others to use if possible
Definition StoreMap.cc:252
void startAppending(const sfileno fileno)
restrict opened for writing entry to appending operations; allow reads
Definition StoreMap.cc:192
void prepFreeSlice(const SliceId sliceId)
prepare a chain-unaffiliated slice for being added to an entry chain
Definition StoreMap.cc:413
void closeForUpdating(Update &update)
makes updated info available to others, unlocks, and cleans up
Definition StoreMap.cc:605
bool purgeOne()
either finds and frees an entry with at least 1 slice or returns false
Definition StoreMap.cc:702
void updateStats(ReadWriteLockStats &stats) const
adds approximate current stats to the supplied ones
Definition StoreMap.cc:751
void freeEntryByKey(const cache_key *const key)
Definition StoreMap.cc:331
Slice & writeableSlice(const AnchorId anchorId, const SliceId sliceId)
writeable slice within an entry chain created by openForWriting()
Definition StoreMap.cc:222
int sliceLimit() const
maximum number of slices possible
Definition StoreMap.cc:745
int entryLimit() const
maximum entryCount() possible
Definition StoreMap.cc:733
State of an entry with regards to the [shared] memory caching.
Definition MemObject.h:196
int32_t index
entry position inside the memory cache
Definition MemObject.h:198
Store::IoStatus io
current I/O state
Definition MemObject.h:201
int64_t offset
bytes written/read to/from the memory cache so far
Definition MemObject.h:199
int64_t expectedReplySize() const
Definition MemObject.cc:238
SBuf vary_headers
Definition MemObject.h:221
mem_hdr data_hdr
Definition MemObject.h:148
const HttpReply & freshestReply() const
Definition MemObject.h:68
MemCache memCache
current [shared] memory caching state for the entry
Definition MemObject.h:203
int64_t endOffset() const
Definition MemObject.cc:214
const HttpReply & baseReply() const
Definition MemObject.h:60
HttpReply & adjustableBaseReply()
Definition MemObject.cc:121
bool isContiguous() const
Definition MemObject.cc:406
int64_t object_sz
Definition MemObject.h:215
void create() override
called when the runner should create a new memory segment
Definition MemStore.cc:1036
Ipc::Mem::Owner< Ipc::Mem::PageStack > * spaceOwner
free slices Owner
Definition MemStore.cc:994
MemStoreMap::Owner * mapOwner
primary map Owner
Definition MemStore.cc:995
void useConfig() override
Definition MemStore.cc:1029
void finalizeConfig() override
Definition MemStore.cc:1008
~MemStoreRr() override
Definition MemStore.cc:1057
void claimMemoryNeeds() override
Definition MemStore.cc:1002
Ipc::Mem::Owner< MemStoreMapExtras > * extrasOwner
PageIds Owner.
Definition MemStore.cc:996
Ipc::Mem::PageId * slot
local slot variable, waiting to be filled
Definition MemStore.h:114
Ipc::Mem::PageId * page
local page variable, waiting to be filled
Definition MemStore.h:115
bool updateAnchored(StoreEntry &) override
Definition MemStore.cc:431
void anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
anchors StoreEntry to an already locked map entry
Definition MemStore.cc:454
void updateHeaders(StoreEntry *e) override
make stored metadata and HTTP headers the same as in the given entry
Definition MemStore.cc:350
sfileno lastWritingSlice
the last allocate slice for writing a store entry (during copyToShm)
Definition MemStore.h:106
uint64_t currentCount() const override
the total number of objects stored right now
Definition MemStore.cc:288
Ipc::Mem::Pointer< Extras > extras
IDs of pages with slice data.
Definition MemStore.h:103
Ipc::Mem::PageId pageForSlice(Ipc::StoreMapSliceId sliceId)
safely returns a previously allocated memory page for the given entry slice
Definition MemStore.cc:783
bool anchorToCache(StoreEntry &) override
Definition MemStore.cc:410
bool updateAnchoredWith(StoreEntry &, const sfileno, const Ipc::StoreMapAnchor &)
updates Transients entry after its anchor has been located
Definition MemStore.cc:445
void copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor, Ipc::StoreMap::Slice &slice)
copies at most one slice worth of local memory to shared memory
Definition MemStore.cc:716
void disconnect(StoreEntry &e)
called when the entry is about to forget its association with mem cache
Definition MemStore.cc:938
uint64_t minSize() const override
the minimum size the store will shrink to via normal housekeeping
Definition MemStore.cc:269
bool shouldCache(StoreEntry &e) const
whether we should cache the entry
Definition MemStore.cc:590
void evictCached(StoreEntry &) override
Definition MemStore.cc:912
void copyFromShmSlice(StoreEntry &, const StoreIOBuffer &)
imports one shared memory slice into local memory
Definition MemStore.cc:575
Ipc::Mem::Pointer< Ipc::Mem::PageStack > freeSlots
unused map slot IDs
Definition MemStore.h:99
SlotAndPage waitingFor
a cache for a single "hot" free slot and page
Definition MemStore.h:117
void completeWriting(StoreEntry &e)
all data has been received; there will be no more write() calls
Definition MemStore.cc:894
void stat(StoreEntry &e) const override
Definition MemStore.cc:226
uint64_t maxSize() const override
Definition MemStore.cc:275
StoreEntry * get(const cache_key *) override
Definition MemStore.cc:312
void copyToShm(StoreEntry &e)
copies all local data to shared memory
Definition MemStore.cc:680
int64_t maxObjectSize() const override
the maximum size of a storable object; -1 if unlimited
Definition MemStore.cc:294
Ipc::StoreMap::Slice & nextAppendableSlice(const sfileno entryIndex, sfileno &sliceOffset)
Definition MemStore.cc:747
static int64_t EntryLimit()
calculates maximum number of entries we need to store and map
Definition MemStore.cc:966
sfileno reserveSapForWriting(Ipc::Mem::PageId &page)
finds a slot and a free page to fill or throws
Definition MemStore.cc:794
void init() override
Definition MemStore.cc:174
void evictIfFound(const cache_key *) override
Definition MemStore.cc:931
void reference(StoreEntry &e) override
somebody needs this entry (many cache replacement policies need to know)
Definition MemStore.cc:300
static bool Enabled()
whether Squid is correctly configured to use a shared memory cache
Definition MemStore.h:68
void updateHeadersOrThrow(Ipc::StoreMapUpdate &update)
Definition MemStore.cc:370
static bool Requested()
Definition MemStore.cc:959
bool copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
copies the entire entry from shared to local memory
Definition MemStore.cc:479
bool dereference(StoreEntry &e) override
Definition MemStore.cc:305
~MemStore() override
Definition MemStore.cc:168
void write(StoreEntry &e)
copy non-shared entry data of the being-cached entry to our cache
Definition MemStore.cc:855
uint64_t currentSize() const override
current size
Definition MemStore.cc:281
void maintain() override
perform regular periodic maintenance; TODO: move to UFSSwapDir::Maintain
Definition MemStore.cc:264
bool startCaching(StoreEntry &e)
locks map anchor and preps to store the entry in shared memory
Definition MemStore.cc:657
void noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId) override
adjust slice-linked state before a locked Readable slice is erased
Definition MemStore.cc:833
MemStoreMap * map
index of mem-cached entries
Definition MemStore.h:100
void getStats(StoreInfoStats &stats) const override
collect statistics
Definition MemStore.cc:205
Definition SBuf.h:94
const char * rawContent() const
Definition SBuf.cc:509
SBuf & vappendf(const char *fmt, va_list vargs)
Definition SBuf.cc:239
const char * c_str()
Definition SBuf.cc:516
size_type length() const
Returns the number of bytes stored in SBuf.
Definition SBuf.h:419
bool isEmpty() const
Definition SBuf.h:435
SBuf & append(const SBuf &S)
Definition SBuf.cc:185
void copyToShm()
copies the entire buffer to shared memory
Definition MemStore.cc:118
StoreEntry * entry
the entry being updated
Definition MemStore.cc:48
const char * buf
content being appended now
Definition MemStore.cc:68
Ipc::StoreMapSliceId firstSlice
Definition MemStore.cc:52
int bufWritten
buf bytes appended so far
Definition MemStore.cc:70
uint64_t totalWritten
cumulative number of bytes appended so far
Definition MemStore.cc:57
MemStore & store
Definition MemStore.cc:64
Ipc::StoreMapSliceId lastSlice
the slot keeping the last byte of the appended content (at least)
Definition MemStore.cc:55
void append(const char *aBuf, int aSize) override
Appends a c-string to existing packed data.
Definition MemStore.cc:90
void vappendf(const char *fmt, va_list ap) override
Definition MemStore.cc:106
ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice=-1)
Definition MemStore.cc:75
const sfileno fileNo
Definition MemStore.cc:65
void copyToShmSlice(Ipc::StoreMap::Slice &slice)
copies at most one slice worth of buffer to shared memory
Definition MemStore.cc:136
int bufSize
buf size
Definition MemStore.cc:69
size_t memMaxSize
Definition SquidConfig.h:91
Store::DiskConfig cacheSwap
size_t maxInMemObjSize
struct SquidConfig::@88 Store
YesNoNone memShared
whether the memory cache is shared among workers
Definition SquidConfig.h:89
mem_status_t mem_status
Definition Store.h:239
uint16_t flags
Definition Store.h:231
MemObject & mem()
Definition Store.h:47
int locked() const
Definition Store.h:145
bool hasMemStore() const
whether there is a corresponding locked shared memory table entry
Definition Store.h:212
bool memoryCachable()
checkCachable() and can be cached in memory
Definition store.cc:1276
bool hasDisk(const sdirno dirn=-1, const sfileno filen=-1) const
Definition store.cc:1929
bool hasParsedReplyHeader() const
whether this entry has access to [deserialized] [HTTP] response headers
Definition store.cc:231
const cache_key * publicKey() const
Definition Store.h:112
void memOutDecision(const bool willCacheInRam)
Definition store.cc:1791
void storeWriterDone()
called when a store writer ends its work (successfully or not)
Definition store.cc:1808
MemObject * mem_obj
Definition Store.h:220
store_status_t store_status
Definition Store.h:243
void createMemObject()
Definition store.cc:1575
uint64_t swap_file_sz
Definition Store.h:229
void setMemStatus(mem_status_t)
Definition store.cc:1524
void destroyMemObject()
Definition store.cc:386
bool shared
whether memory cache is shared among workers
Definition StoreStats.h:42
double capacity
the size limit
Definition StoreStats.h:22
double count
number of cached objects
Definition StoreStats.h:21
double size
bytes currently in use
Definition StoreStats.h:20
High-level store statistics used by mgr:info action. Used inside PODs!
Definition StoreStats.h:14
Mem mem
all cache_dirs stats
Definition StoreStats.h:48
int64_t maxObjectSize() const override
the maximum size of a storable object; -1 if unlimited
an std::runtime_error with thrower location info
void configure(bool beSet)
enables or disables the option; updating to 'configured' state
Definition YesNoNone.h:53
bool configured() const
Definition YesNoNone.h:67
ssize_t copy(StoreIOBuffer const &) const
Definition stmem.cc:187
bool write(StoreIOBuffer const &)
Definition stmem.cc:303
A const & max(A const &lhs, A const &rhs)
A const & min(A const &lhs, A const &rhs)
#define DBG_IMPORTANT
Definition Stream.h:38
#define debugs(SECTION, LEVEL, CONTENT)
Definition Stream.h:192
#define EBIT_SET(flag, bit)
Definition defines.h:65
#define EBIT_TEST(flag, bit)
Definition defines.h:67
@ ENTRY_VALIDATED
Definition enums.h:108
@ ENTRY_SPECIAL
Definition enums.h:79
@ ENTRY_FWD_HDR_WAIT
Definition enums.h:106
@ NOT_IN_MEMORY
Definition enums.h:30
@ IN_MEMORY
Definition enums.h:31
@ STORE_PENDING
Definition enums.h:46
@ STORE_OK
Definition enums.h:45
void fatal(const char *message)
Definition fatal.cc:28
int shutting_down
int KidIdentifier
size_t PageLevel()
approximate total number of shared memory pages used now
Definition Pages.cc:80
bool GetPage(const PageId::Purpose purpose, PageId &page)
sets page ID and returns true unless no free pages are found
Definition Pages.cc:34
size_t PagesAvailable()
approximate total number of shared memory pages we can allocate now
Definition Pages.h:47
size_t PageSize()
returns page size in bytes; all pages are assumed to be the same size
Definition Pages.cc:28
void NotePageNeed(const int purpose, const int count)
claim the need for a number of pages for a given purpose
Definition Pages.cc:72
void PutPage(PageId &page)
makes identified page available as a free page to future GetPage() callers
Definition Pages.cc:41
size_t PageLimit()
the total number of shared memory pages that can be in use at any time
Definition Pages.cc:55
int32_t StoreMapSliceId
Definition StoreMap.h:24
double doublePercent(const double, const double)
Definition SquidMath.cc:25
Controller & Root()
safely access controller singleton
@ ioReading
Definition forward.h:40
@ ioWriting
Definition forward.h:40
@ ioUndecided
Definition forward.h:40
@ ioDone
Definition forward.h:40
SBuf ToSBuf(Args &&... args)
slowly stream-prints all arguments into a freshly allocated SBuf
Definition Stream.h:63
unsigned char cache_key
Store key.
Definition forward.h:29
signed_int32_t sfileno
Definition forward.h:22
void storeAppendPrintf(StoreEntry *e, const char *fmt,...)
Definition store.cc:855
std::atomic< uint64_t > swap_file_sz
Definition StoreMap.h:105
bool UsingSmp()
Whether there should be more than one worker process running.
Definition tools.cc:697
#define PRId64
Definition types.h:104