Squid Web Cache master
RockSwapDir.cc
/*
 * Copyright (C) 1996-2025 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 47    Store Directory Routines */

#include "squid.h"
#include "base/IoManip.h"
#include "cache_cf.h"
#include "CollapsedForwarding.h"
#include "compat/socket.h"
#include "compat/unistd.h"
#include "ConfigOption.h"
#include "DiskIO/DiskIOModule.h"
#include "DiskIO/DiskIOStrategy.h"
#include "DiskIO/ReadRequest.h"
#include "DiskIO/WriteRequest.h"
#include "fs/rock/RockHeaderUpdater.h"
#include "fs/rock/RockIoRequests.h"
#include "fs/rock/RockIoState.h"
#include "fs/rock/RockSwapDir.h"
#include "globals.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "Parsing.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "tools.h"

#include <cstdlib>
#include <iomanip>
#include <limits>

#if HAVE_SYS_STAT_H
#include <sys/stat.h>
#endif

Rock::SwapDir::SwapDir(): ::SwapDir("rock"),
    slotSize(HeaderSize), filePath(nullptr), map(nullptr), io(nullptr),
    waitingForPage(nullptr)
{
}

Rock::SwapDir::~SwapDir()
{
    delete io;
    delete map;
    safe_free(filePath);
}

// called when Squid core needs a StoreEntry with a given key
StoreEntry *
Rock::SwapDir::get(const cache_key *key)
{
    if (!map || !theFile || !theFile->canRead())
        return nullptr;

    sfileno filen;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, filen);
    if (!slot)
        return nullptr;

    // create a brand new store entry and initialize it with stored basics
    StoreEntry *e = new StoreEntry();
    e->createMemObject();
    anchorEntry(*e, filen, *slot);
    trackReferences(*e);
    return e;
}

bool
Rock::SwapDir::anchorToCache(StoreEntry &entry)
{
    Assure(!entry.hasDisk());

    if (!map || !theFile || !theFile->canRead())
        return false;

    sfileno filen;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
            reinterpret_cast<cache_key*>(entry.key), filen);
    if (!slot)
        return false;

    anchorEntry(entry, filen, *slot);
    return true;
}

bool
Rock::SwapDir::updateAnchored(StoreEntry &entry)
{
    if (!map || !theFile || !theFile->canRead())
        return false;

    assert(entry.hasDisk(index));

    const auto &anchor = map->readableEntry(entry.swap_filen);
    entry.swap_file_sz = anchor.basics.swap_file_sz;
    return true;
}

void
Rock::SwapDir::anchorEntry(StoreEntry &e, const sfileno filen, const Ipc::StoreMapAnchor &anchor)
{
    anchor.exportInto(e);

    const bool complete = anchor.complete();
    e.store_status = complete ? STORE_OK : STORE_PENDING;
    // SWAPOUT_WRITING: even though another worker writes?
    e.attachToDisk(index, filen, complete ? SWAPOUT_DONE : SWAPOUT_WRITING);

    e.ping_status = PING_NONE;

    EBIT_SET(e.flags, ENTRY_VALIDATED);
}

void Rock::SwapDir::disconnect(StoreEntry &e)
{
    assert(e.hasDisk(index));

    ignoreReferences(e);

    // do not rely on e.swap_status here because there is an async delay
    // before it switches from SWAPOUT_WRITING to SWAPOUT_DONE.

    // since e has swap_filen, its slot is locked for reading and/or writing
    // but it is difficult to know whether THIS worker is reading or writing e,
    // especially since we may switch from writing to reading. This code relies
    // on Rock::IoState::writeableAnchor_ being set when we locked for writing.
    if (e.mem_obj && e.mem_obj->swapout.sio != nullptr &&
            dynamic_cast<IoState&>(*e.mem_obj->swapout.sio).writeableAnchor_) {
        map->abortWriting(e.swap_filen);
        e.detachFromDisk();
        dynamic_cast<IoState&>(*e.mem_obj->swapout.sio).writeableAnchor_ = nullptr;
        Store::Root().stopSharing(e); // broadcasts after the change
        e.storeWriterDone();
    } else {
        map->closeForReading(e.swap_filen);
        e.detachFromDisk();
    }
}

uint64_t
Rock::SwapDir::currentSize() const
{
    const uint64_t spaceSize = !freeSlots ?
                               maxSize() : (slotSize * freeSlots->size());
    // everything that is not free is in use
    return maxSize() - spaceSize;
}
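
// Example of the math above (illustrative numbers): a 100 MB db with
// 16 KB slots and 1000 free slots reports 104857600 - 16384*1000 =
// 88473600 bytes in use.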

uint64_t
Rock::SwapDir::currentCount() const
{
    return map ? map->entryCount() : 0;
}

bool
Rock::SwapDir::doReportStat() const
{
    return ::SwapDir::doReportStat() && (!UsingSmp() || IamDiskProcess());
}

void
Rock::SwapDir::finalizeSwapoutSuccess(const StoreEntry &e)
{
    // nothing to do; handleWriteCompletionSuccess() did everything for us
    assert(!e.mem_obj ||
           !e.mem_obj->swapout.sio ||
           !dynamic_cast<IoState&>(*e.mem_obj->swapout.sio).writeableAnchor_);
}

void
Rock::SwapDir::finalizeSwapoutFailure(StoreEntry &entry)
{
    debugs(47, 5, entry);
    disconnect(entry); // calls abortWriting() to free the disk entry
}

int64_t
Rock::SwapDir::slotLimitAbsolute() const
{
    // the max value is an invalid one; all values must be below the limit
    assert(std::numeric_limits<Ipc::StoreMapSliceId>::max() ==
           std::numeric_limits<SlotId>::max());
    return std::numeric_limits<SlotId>::max();
}

int64_t
Rock::SwapDir::slotLimitActual() const
{
    const int64_t sWanted = (maxSize() - HeaderSize)/slotSize;
    const int64_t sLimitLo = map ? map->sliceLimit() : 0; // dynamic shrinking unsupported
    const int64_t sLimitHi = slotLimitAbsolute();
    return min(max(sLimitLo, sWanted), sLimitHi);
}

int64_t
Rock::SwapDir::entryLimitActual() const
{
    return min(slotLimitActual(), entryLimitAbsolute());
}

// TODO: encapsulate as a tool
void
Rock::SwapDir::create()
{
    assert(path);
    assert(filePath);

    if (UsingSmp() && !IamDiskProcess()) {
        debugs (47,3, "disker will create in " << path);
        return;
    }

    debugs (47,3, "creating in " << path);

    struct stat dir_sb;
    if (::stat(path, &dir_sb) == 0) {
        struct stat file_sb;
        if (::stat(filePath, &file_sb) == 0) {
            debugs (47, DBG_IMPORTANT, "Skipping existing Rock db: " << filePath);
            return;
        }
        // else the db file is not there or is not accessible, and we will try
        // to create it later below, generating a detailed error on failures.
    } else { // path does not exist or is inaccessible
        // If path exists but is not accessible, mkdir() below will fail, and
        // the admin should see the error and act accordingly, so there is
        // no need to distinguish ENOENT from other possible stat() errors.
        debugs (47, DBG_IMPORTANT, "Creating Rock db directory: " << path);
        const int res = mkdir(path, 0700);
        if (res != 0)
            createError("mkdir");
    }

    debugs (47, DBG_IMPORTANT, "Creating Rock db: " << filePath);
    const auto swap = xopen(filePath, O_WRONLY|O_CREAT|O_TRUNC|O_BINARY, 0600);
    if (swap < 0)
        createError("create");

#if SLOWLY_FILL_WITH_ZEROS
    char block[1024];
    Must(maxSize() % sizeof(block) == 0);
    memset(block, '\0', sizeof(block));

    for (off_t offset = 0; offset < maxSize(); offset += sizeof(block)) {
        if (xwrite(swap, block, sizeof(block)) != sizeof(block))
            createError("write");
    }
#else
    if (ftruncate(swap, maxSize()) != 0)
        createError("truncate");

    char header[HeaderSize];
    memset(header, '\0', sizeof(header));
    if (xwrite(swap, header, sizeof(header)) != sizeof(header))
        createError("write");
#endif

    xclose(swap);
}
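
// The freshly created db file spans maxSize() bytes: a zero-filled
// HeaderSize-byte header written above, with the rest of the slot area left
// sparse by ftruncate() (unless SLOWLY_FILL_WITH_ZEROS is enabled).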

// report Rock DB creation error and exit
void
Rock::SwapDir::createError(const char *const msg)
{
    int xerrno = errno; // XXX: where does errno come from?
    debugs(47, DBG_CRITICAL, "ERROR: Failed to initialize Rock Store db in " <<
           filePath << "; " << msg << " error: " << xstrerr(xerrno));
    fatal("Rock Store db creation error");
}

void
Rock::SwapDir::init()
{
    debugs(47,2, MYNAME);

    // XXX: SwapDirs aren't refcounted. We make IORequestor calls, which
    // are refcounted. We up our count once to avoid implicit delete's.
    lock();

    freeSlots = shm_old(Ipc::Mem::PageStack)(freeSlotsPath());

    Must(!map);
    map = new DirMap(inodeMapPath());
    map->cleaner = this;

    const char *ioModule = needsDiskStrand() ? "IpcIo" : "Blocking";
    if (DiskIOModule *m = DiskIOModule::Find(ioModule)) {
        debugs(47,2, "Using DiskIO module: " << ioModule);
        io = m->createStrategy();
        io->init();
    } else {
        debugs(47, DBG_CRITICAL, "FATAL: Rock store is missing DiskIO module: " <<
               ioModule);
        fatal("Rock Store missing a required DiskIO module");
    }

    theFile = io->newFile(filePath);
    theFile->configure(fileConfig);
    theFile->open(O_RDWR, 0644, this);
}

bool
Rock::SwapDir::needsDiskStrand() const
{
    const bool wontEvenWorkWithoutDisker = Config.workers > 1;
    const bool wouldWorkBetterWithDisker = DiskIOModule::Find("IpcIo");
    return InDaemonMode() && (wontEvenWorkWithoutDisker ||
                              wouldWorkBetterWithDisker);
}
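
// Note: init() above picks the IpcIo DiskIO module whenever a disker process
// is warranted (e.g., SMP mode with multiple workers) and the synchronous
// Blocking module otherwise.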

void
Rock::SwapDir::parse(int anIndex, char *aPath)
{
    index = anIndex;

    path = xstrdup(aPath);

    // cache store is located at path/db
    String fname(path);
    fname.append("/rock");
    filePath = xstrdup(fname.termedBuf());

    parseSize(false);
    parseOptions(0);

    // Current openForWriting() code overwrites the old slot if needed
    // and possible, so proactively removing old slots is probably useless.
    assert(!repl); // repl = createRemovalPolicy(Config.replPolicy);

    validateOptions();
}
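
// For reference, a squid.conf line handled by parse() above has this shape
// (path and values are illustrative):
//   cache_dir rock /var/cache/squid/rock 4096 slot-size=16384 swap-timeout=300 max-swap-rate=200
// where 4096 is the db size in megabytes (see parseSize() below) and the
// named options are consumed by the parse*Option() methods further below.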

void
Rock::SwapDir::reconfigure()
{
    parseSize(true);
    parseOptions(1);
    // TODO: can we reconfigure the replacement policy (repl)?
    validateOptions();
}

/// parses anonymous cache_dir size option
void
Rock::SwapDir::parseSize(const bool reconfig)
{
    const int i = GetInteger();
    if (i < 0)
        fatal("negative Rock cache_dir size value");
    const uint64_t new_max_size =
        static_cast<uint64_t>(i) << 20; // MBytes to Bytes
    if (!reconfig)
        max_size = new_max_size;
    else if (new_max_size != max_size) {
        debugs(3, DBG_IMPORTANT, "WARNING: cache_dir '" << path << "' size "
               "cannot be changed dynamically, value left unchanged (" <<
               (max_size >> 20) << " MB)");
    }
}

ConfigOption *
Rock::SwapDir::getOptionTree() const
{
    ConfigOption *copt = ::SwapDir::getOptionTree();
    ConfigOptionVector *vector = dynamic_cast<ConfigOptionVector*>(copt);
    if (vector) {
        // if copt is actually a ConfigOptionVector
        vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseSizeOption, &SwapDir::dumpSizeOption));
        vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseTimeOption, &SwapDir::dumpTimeOption));
        vector->options.push_back(new ConfigOptionAdapter<SwapDir>(*const_cast<SwapDir *>(this), &SwapDir::parseRateOption, &SwapDir::dumpRateOption));
    } else {
        // we don't know how to handle copt, as it's not a ConfigOptionVector.
        // free it (and return nullptr)
        delete copt;
        copt = nullptr;
    }
    return copt;
}

bool
Rock::SwapDir::allowOptionReconfigure(const char *const option) const
{
    return strcmp(option, "slot-size") != 0 &&
           ::SwapDir::allowOptionReconfigure(option);
}

/// parses time-specific options; mimics ::SwapDir::optionObjectSizeParse()
bool
Rock::SwapDir::parseTimeOption(char const *option, const char *value, int reconfig)
{
    // TODO: ::SwapDir or, better, Config should provide time-parsing routines,
    // including time unit handling. Same for size and rate.

    time_msec_t *storedTime;
    if (strcmp(option, "swap-timeout") == 0)
        storedTime = &fileConfig.ioTimeout;
    else
        return false;

    if (!value) {
        self_destruct();
        return false;
    }

    // TODO: handle time units and detect parsing errors better
    const int64_t parsedValue = strtoll(value, nullptr, 10);
    if (parsedValue < 0) {
        debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must not be negative but is: " << parsedValue);
        self_destruct();
        return false;
    }

    const time_msec_t newTime = static_cast<time_msec_t>(parsedValue);

    if (!reconfig)
        *storedTime = newTime;
    else if (*storedTime != newTime) {
        debugs(3, DBG_IMPORTANT, "WARNING: cache_dir " << path << ' ' << option
               << " cannot be changed dynamically, value left unchanged: " <<
               *storedTime);
    }

    return true;
}

/// reports time-specific options; mimics ::SwapDir::optionObjectSizeDump()
void
Rock::SwapDir::dumpTimeOption(StoreEntry *e) const
{
    if (fileConfig.ioTimeout)
        storeAppendPrintf(e, " swap-timeout=%" PRId64,
                          static_cast<int64_t>(fileConfig.ioTimeout));
}

/// parses rate-specific options; mimics ::SwapDir::optionObjectSizeParse()
bool
Rock::SwapDir::parseRateOption(char const *option, const char *value, int isaReconfig)
{
    int *storedRate;
    if (strcmp(option, "max-swap-rate") == 0)
        storedRate = &fileConfig.ioRate;
    else
        return false;

    if (!value) {
        self_destruct();
        return false;
    }

    // TODO: handle time units and detect parsing errors better
    const int64_t parsedValue = strtoll(value, nullptr, 10);
    if (parsedValue < 0) {
        debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must not be negative but is: " << parsedValue);
        self_destruct();
        return false;
    }

    const int newRate = static_cast<int>(parsedValue);

    if (newRate < 0) {
        debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must not be negative but is: " << newRate);
        self_destruct();
        return false;
    }

    if (!isaReconfig)
        *storedRate = newRate;
    else if (*storedRate != newRate) {
        debugs(3, DBG_IMPORTANT, "WARNING: cache_dir " << path << ' ' << option
               << " cannot be changed dynamically, value left unchanged: " <<
               *storedRate);
    }

    return true;
}

/// reports rate-specific options; mimics ::SwapDir::optionObjectSizeDump()
void
Rock::SwapDir::dumpRateOption(StoreEntry *e) const
{
    if (fileConfig.ioRate >= 0)
        storeAppendPrintf(e, " max-swap-rate=%d", fileConfig.ioRate);
}

/// parses size-specific options; mimics ::SwapDir::optionObjectSizeParse()
bool
Rock::SwapDir::parseSizeOption(char const *option, const char *value, int reconfig)
{
    uint64_t *storedSize;
    if (strcmp(option, "slot-size") == 0)
        storedSize = &slotSize;
    else
        return false;

    if (!value) {
        self_destruct();
        return false;
    }

    // TODO: handle size units and detect parsing errors better
    const uint64_t newSize = strtoll(value, nullptr, 10);
    if (newSize <= 0) {
        debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must be positive; got: " << newSize);
        self_destruct();
        return false;
    }

    if (newSize <= sizeof(DbCellHeader)) {
        debugs(3, DBG_CRITICAL, "FATAL: cache_dir " << path << ' ' << option << " must exceed " << sizeof(DbCellHeader) << "; got: " << newSize);
        self_destruct();
        return false;
    }

    if (!reconfig)
        *storedSize = newSize;
    else if (*storedSize != newSize) {
        debugs(3, DBG_IMPORTANT, "WARNING: cache_dir " << path << ' ' << option
               << " cannot be changed dynamically, value left unchanged: " <<
               *storedSize);
    }

    return true;
}

/// reports size-specific options; mimics ::SwapDir::optionObjectSizeDump()
void
Rock::SwapDir::dumpSizeOption(StoreEntry *e) const
{
    storeAppendPrintf(e, " slot-size=%" PRId64, slotSize);
}

/// warns of configuration problems; may quit
void
Rock::SwapDir::validateOptions()
{
    if (slotSize <= 0)
        fatal("Rock store requires a positive slot-size");

    const int64_t maxSizeRoundingWaste = 1024 * 1024; // size is configured in MB
    const int64_t slotSizeRoundingWaste = slotSize;
    const int64_t maxRoundingWaste =
        max(maxSizeRoundingWaste, slotSizeRoundingWaste);

    // an entry consumes at least one slot; round up to reduce false warnings
    const int64_t blockSize = static_cast<int64_t>(slotSize);
    const int64_t maxObjSize = max(blockSize,
                                   ((maxObjectSize()+blockSize-1)/blockSize)*blockSize);

    // Does the "sfileno*max-size" limit match configured db capacity?
    const double entriesMayOccupy = entryLimitAbsolute()*static_cast<double>(maxObjSize);
    if (entriesMayOccupy + maxRoundingWaste < maxSize()) {
        const int64_t diskWasteSize = maxSize() - static_cast<int64_t>(entriesMayOccupy);
        debugs(47, DBG_CRITICAL, "WARNING: Rock cache_dir " << path << " wastes disk space due to entry limits:" <<
               "\n\tconfigured db capacity: " << maxSize() << " bytes" <<
               "\n\tconfigured db slot size: " << slotSize << " bytes" <<
               "\n\tconfigured maximum entry size: " << maxObjectSize() << " bytes" <<
               "\n\tmaximum number of cache_dir entries supported by Squid: " << entryLimitAbsolute() <<
               "\n\tdisk space all entries may use: " << entriesMayOccupy << " bytes" <<
               "\n\tdisk space wasted: " << diskWasteSize << " bytes");
    }

    // Does the "absolute slot count" limit match configured db capacity?
    const double slotsMayOccupy = slotLimitAbsolute()*static_cast<double>(slotSize);
    if (slotsMayOccupy + maxRoundingWaste < maxSize()) {
        const int64_t diskWasteSize = maxSize() - static_cast<int64_t>(slotsMayOccupy);
        debugs(47, DBG_CRITICAL, "WARNING: Rock cache_dir " << path << " wastes disk space due to slot limits:" <<
               "\n\tconfigured db capacity: " << maxSize() << " bytes" <<
               "\n\tconfigured db slot size: " << slotSize << " bytes" <<
               "\n\tmaximum number of rock cache_dir slots supported by Squid: " << slotLimitAbsolute() <<
               "\n\tdisk space all slots may use: " << slotsMayOccupy << " bytes" <<
               "\n\tdisk space wasted: " << diskWasteSize << " bytes");
    }
}
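
// Worked example for the entry-limit warning above (hypothetical numbers):
// with 16384-byte slots and a 102400-byte maximum object size, each entry
// may need up to 7 slots (114688 bytes after rounding up), so any capacity
// beyond entryLimitAbsolute()*114688 bytes cannot be filled with entries.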

bool
Rock::SwapDir::canStore(const StoreEntry &e, int64_t diskSpaceNeeded, int &load) const
{
    if (diskSpaceNeeded >= 0)
        diskSpaceNeeded += sizeof(DbCellHeader);
    if (!::SwapDir::canStore(e, diskSpaceNeeded, load))
        return false;

    if (!theFile || !theFile->canWrite())
        return false;

    if (!map)
        return false;

    // Do not start I/O transaction if there are less than 10% free pages left.
    // TODO: reserve page instead
    if (needsDiskStrand() &&
            Ipc::Mem::PageLevel() >= 0.9 * Ipc::Mem::PageLimit()) {
        debugs(47, 5, "too few shared pages for IPC I/O left");
        return false;
    }

    if (io->shedLoad())
        return false;

    load = io->load();
    return true;
}

StoreIOState::Pointer
Rock::SwapDir::createStoreIO(StoreEntry &e, StoreIOState::STIOCB * const cbIo, void * const cbData)
{
    if (!theFile || theFile->error()) {
        debugs(47,4, theFile);
        return nullptr;
    }

    sfileno filen;
    Ipc::StoreMapAnchor *const slot =
        map->openForWriting(reinterpret_cast<const cache_key *>(e.key), filen);
    if (!slot) {
        debugs(47, 5, "map->add failed");
        return nullptr;
    }

    assert(filen >= 0);
    slot->set(e);

    // XXX: We rely on our caller, storeSwapOutStart(), to set e.fileno.
    // If that does not happen, the entry will not decrement the read level!

    Rock::SwapDir::Pointer self(this);
    IoState *sio = new IoState(self, &e, cbIo, cbData);

    sio->swap_dirn = index;
    sio->swap_filen = filen;
    sio->writeableAnchor_ = slot;

    debugs(47,5, "dir " << index << " created new filen " <<
           asHex(sio->swap_filen).upperCase().minDigits(8) << " starting at " <<
           diskOffset(sio->swap_filen));

    sio->file(theFile);

    trackReferences(e);
    return sio;
}

StoreIOState::Pointer
Rock::SwapDir::createUpdateIO(const Ipc::StoreMapUpdate &update, StoreIOState::STIOCB * const cbIo, void * const data)
{
    if (!theFile || theFile->error()) {
        debugs(47,4, theFile);
        return nullptr;
    }

    Must(update.fresh);
    Must(update.fresh.fileNo >= 0);

    Rock::SwapDir::Pointer self(this);
    IoState *sio = new IoState(self, update.entry, cbIo, data);

    sio->swap_dirn = index;
    sio->swap_filen = update.fresh.fileNo;
    sio->writeableAnchor_ = update.fresh.anchor;

    debugs(47,5, "dir " << index << " updating filen " <<
           asHex(sio->swap_filen).upperCase().minDigits(8) << " starting at " <<
           diskOffset(sio->swap_filen));

    sio->file(theFile);
    return sio;
}

int64_t
Rock::SwapDir::diskOffset(const SlotId sid) const
{
    assert(sid >= 0);
    return HeaderSize + slotSize*sid;
}
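
// db layout recap: a HeaderSize-byte header starts the file and fixed-size
// slots follow, so slot N begins at byte HeaderSize + slotSize*N. Free-slot
// PageId numbers are slot IDs shifted by one (pageId.number == slotId + 1;
// see noteFreeMapSlice() and reserveSlotForWriting()).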

int64_t
Rock::SwapDir::diskOffset(Ipc::Mem::PageId &pageId) const
{
    assert(pageId);
    return diskOffset(pageId.number - 1);
}

int64_t
Rock::SwapDir::diskOffsetLimit() const
{
    assert(map);
    return diskOffset(map->sliceLimit());
}

Rock::SlotId
Rock::SwapDir::reserveSlotForWriting()
{
    Ipc::Mem::PageId pageId;

    if (freeSlots->pop(pageId)) {
        const auto slotId = pageId.number - 1;
        debugs(47, 5, "got a previously free slot: " << slotId);
        map->prepFreeSlice(slotId);
        return slotId;
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingForPage);
    waitingForPage = &pageId;
    if (map->purgeOne()) {
        assert(!waitingForPage); // noteFreeMapSlice() should have cleared it
        assert(pageId.set());
        const auto slotId = pageId.number - 1;
        debugs(47, 5, "got a previously busy slot: " << slotId);
        map->prepFreeSlice(slotId);
        return slotId;
    }
    assert(waitingForPage == &pageId);
    waitingForPage = nullptr;

    // This may happen when the number of available db slots is close to the
    // number of concurrent requests reading or writing those slots, which may
    // happen when the db is "small" compared to the request traffic OR when we
    // are rebuilding and have not loaded "many" entries or empty slots yet.
    debugs(47, 3, "cannot get a slot; entries: " << map->entryCount());
    throw TexcHere("ran out of free db slots");
}
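
// Note: map->purgeOne() evicts an entry and frees its slices synchronously,
// via our noteFreeMapSlice() callback (map->cleaner is set in init()), which
// is how a purged slot reaches the waitingForPage pointer above before
// purgeOne() returns.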

bool
Rock::SwapDir::validSlotId(const SlotId slotId) const
{
    return 0 <= slotId && slotId < slotLimitActual();
}

void
Rock::SwapDir::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
{
    Ipc::Mem::PageId pageId;
    pageId.pool = Ipc::Mem::PageStack::IdForSwapDirSpace(index);
    pageId.number = sliceId+1;
    if (waitingForPage) {
        *waitingForPage = pageId;
        waitingForPage = nullptr;
    } else {
        freeSlots->push(pageId);
    }
}

// tries to open an old entry with swap_filen for reading
StoreIOState::Pointer
Rock::SwapDir::openStoreIO(StoreEntry &e, StoreIOState::STIOCB * const cbIo, void * const cbData)
{
    if (!theFile || theFile->error()) {
        debugs(47,4, theFile);
        return nullptr;
    }

    if (!e.hasDisk()) {
        debugs(47,4, e);
        return nullptr;
    }

    // Do not start I/O transaction if there are less than 10% free pages left.
    // TODO: reserve page instead
    if (needsDiskStrand() &&
            Ipc::Mem::PageLevel() >= 0.9 * Ipc::Mem::PageLimit()) {
        debugs(47, 5, "too few shared pages for IPC I/O left");
        return nullptr;
    }

    // There are two ways an entry can get swap_filen: our get() locked it for
    // reading or our storeSwapOutStart() locked it for writing. Peeking at our
    // locked entry is safe, but no support for reading the entry we swap out.
    const Ipc::StoreMapAnchor *slot = map->peekAtReader(e.swap_filen);
    if (!slot)
        return nullptr; // we were writing after all

    Rock::SwapDir::Pointer self(this);
    IoState *sio = new IoState(self, &e, cbIo, cbData);

    sio->swap_dirn = index;
    sio->swap_filen = e.swap_filen;
    sio->readableAnchor_ = slot;
    sio->file(theFile);

    debugs(47,5, "dir " << index << " has old filen: " <<
           asHex(sio->swap_filen).upperCase().minDigits(8));

    // When StoreEntry::swap_filen for e was set by our anchorEntry(), e had a
    // public key, but it could have gone private since then (while keeping the
    // anchor lock). The stale anchor key is not (and cannot be) erased (until
    // the marked-for-deletion/release anchor/entry is unlocked and recycled).
    const auto ourAnchor = [&]() {
        if (const auto publicKey = e.publicKey())
            return slot->sameKey(publicKey);
        return true; // cannot check
    };
    assert(ourAnchor());

    // For collapsed disk hits: e.swap_file_sz and slot->basics.swap_file_sz
    // may still be zero and basics.swap_file_sz may grow.

    return sio;
}

void
Rock::SwapDir::ioCompletedNotification()
{
    if (!theFile)
        fatalf("Rock cache_dir failed to initialize db file: %s", filePath);

    if (theFile->error()) {
        int xerrno = errno; // XXX: where does errno come from
        fatalf("Rock cache_dir at %s failed to open db file: %s", filePath,
               xstrerr(xerrno));
    }

    debugs(47, 2, "Rock cache_dir[" << index << "] limits: " <<
           std::setw(12) << maxSize() << " disk bytes, " <<
           std::setw(7) << map->entryLimit() << " entries, and " <<
           std::setw(7) << map->sliceLimit() << " slots");

    if (!Rebuild::Start(*this))
        storeRebuildComplete(nullptr);
}

void
Rock::SwapDir::closeCompleted()
{
    theFile = nullptr;
}

void
Rock::SwapDir::readCompleted(const char *, int rlen, int errflag, RefCount< ::ReadRequest> r)
{
    ReadRequest *request = dynamic_cast<Rock::ReadRequest*>(r.getRaw());
    assert(request);
    IoState::Pointer sio = request->sio;
    sio->handleReadCompletion(*request, rlen, errflag);
}

void
Rock::SwapDir::writeCompleted(int errflag, size_t, RefCount< ::WriteRequest> r)
{
    // TODO: Move details into IoState::handleWriteCompletion() after figuring
    // out how to deal with map access. See readCompleted().

    Rock::WriteRequest *request = dynamic_cast<Rock::WriteRequest*>(r.getRaw());
    assert(request);
    assert(request->sio != nullptr);
    IoState &sio = *request->sio;

    // quit if somebody called IoState::close() while we were waiting
    if (!sio.stillWaiting()) {
        debugs(79, 3, "ignoring closed entry " << sio.swap_filen);
        noteFreeMapSlice(request->sidCurrent);
        return;
    }

    debugs(79, 7, "errflag=" << errflag << " rlen=" << request->len << " eof=" << request->eof);

    if (errflag != DISK_OK)
        handleWriteCompletionProblem(errflag, *request);
    else if (!sio.expectedReply(request->id))
        handleWriteCompletionProblem(DISK_ERROR, *request);
    else
        handleWriteCompletionSuccess(*request);

    if (sio.touchingStoreEntry())
        CollapsedForwarding::Broadcast(*sio.e);
}

/// code shared by writeCompleted() success handling cases
void
Rock::SwapDir::handleWriteCompletionSuccess(const WriteRequest &request)
{
    auto &sio = *(request.sio);
    sio.splicingPoint = request.sidCurrent;
    // do not increment sio.offset_ because we do it in sio->write()

    assert(sio.writeableAnchor_);
    if (sio.writeableAnchor_->start < 0) { // wrote the first slot
        Must(request.sidPrevious < 0);
        sio.writeableAnchor_->start = request.sidCurrent;
    } else {
        Must(request.sidPrevious >= 0);
        map->writeableSlice(sio.swap_filen, request.sidPrevious).next = request.sidCurrent;
    }

    // finalize the shared slice info after writing slice contents to disk;
    // the chain gets possession of the slice we were writing
    Ipc::StoreMap::Slice &slice =
        map->writeableSlice(sio.swap_filen, request.sidCurrent);
    slice.size = request.len - sizeof(DbCellHeader);
    Must(slice.next < 0);

    if (request.eof) {
        assert(sio.e);
        if (sio.touchingStoreEntry()) {
            sio.e->swap_file_sz = sio.writeableAnchor_->basics.swap_file_sz =
                                      sio.offset_;

            map->switchWritingToReading(sio.swap_filen);
            // sio.e keeps the (now read) lock on the anchor
            // storeSwapOutFileClosed() sets swap_status and calls storeWriterDone()
        }
        sio.writeableAnchor_ = nullptr;
        sio.finishedWriting(DISK_OK);
    }
}
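
// The resulting on-disk entry is a chain of slices: the anchor start names
// the first slot, each slice's next field names the following one, and the
// last written slice keeps a negative next; splicingPoint above records the
// last slice written (used later when entry headers are updated in place).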

/// code shared by writeCompleted() error handling cases
void
Rock::SwapDir::handleWriteCompletionProblem(const int errflag, const WriteRequest &request)
{
    auto &sio = *request.sio;

    noteFreeMapSlice(request.sidCurrent);

    writeError(sio);
    sio.finishedWriting(errflag);
    // and hope that Core will call disconnect() to close the map entry
}

void
Rock::SwapDir::writeError(StoreIOState &sio)
{
    // Do not abortWriting here. The entry should keep the write lock
    // instead of losing association with the store and confusing core.
    map->freeEntry(sio.swap_filen); // will mark as unusable, just in case

    if (sio.touchingStoreEntry())
        CollapsedForwarding::Broadcast(*sio.e);
    // else noop: a fresh entry update error does not affect stale entry readers

    // All callers must also call IoState callback, to propagate the error.
}

void
Rock::SwapDir::updateHeaders(StoreEntry *updatedE)
{
    if (!map)
        return;

    Ipc::StoreMapUpdate update(updatedE);
    if (!map->openForUpdating(update, updatedE->swap_filen))
        return;

    try {
        AsyncJob::Start(new HeaderUpdater(this, update));
    } catch (const std::exception &ex) {
        debugs(20, 2, "error starting to update entry " << *updatedE << ": " << ex.what());
        map->abortUpdating(update);
    }
}

bool
Rock::SwapDir::full() const
{
    return freeSlots != nullptr && !freeSlots->size();
}

// storeSwapOutFileClosed calls this method on DISK_NO_SPACE_LEFT,
// but it should not happen for us
void
Rock::SwapDir::diskFull()
{
    debugs(20, DBG_IMPORTANT, "ERROR: Squid BUG: No space left with rock cache_dir: " <<
           filePath);
}

void
Rock::SwapDir::maintain()
{
    // The Store calls this to free some db space, but there is nothing wrong
    // with a full() db, except when db has to shrink after reconfigure, and
    // we do not support shrinking yet (it would have to purge specific slots).
    // TODO: Disable maintain() requests when they are pointless.
}

void
Rock::SwapDir::reference(StoreEntry &e)
{
    debugs(47, 5, &e << ' ' << e.swap_dirn << ' ' << e.swap_filen);
    if (repl && repl->Referenced)
        repl->Referenced(repl, &e, &e.repl);
}

bool
Rock::SwapDir::dereference(StoreEntry &e)
{
    debugs(47, 5, &e << ' ' << e.swap_dirn << ' ' << e.swap_filen);
    if (repl && repl->Dereferenced)
        repl->Dereferenced(repl, &e, &e.repl);

    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

bool
Rock::SwapDir::unlinkdUseful() const
{
    // no entry-specific files to unlink
    return false;
}

void
Rock::SwapDir::evictIfFound(const cache_key *key)
{
    if (map)
        map->freeEntryByKey(key); // may not be there
}

void
Rock::SwapDir::evictCached(StoreEntry &e)
{
    debugs(47, 5, e);
    if (e.hasDisk(index)) {
        if (map->freeEntry(e.swap_filen))
            CollapsedForwarding::Broadcast(e);
        if (!e.locked())
            disconnect(e);
    } else if (const auto key = e.publicKey()) {
        evictIfFound(key);
    }
}

void
Rock::SwapDir::trackReferences(StoreEntry &e)
{
    debugs(47, 5, e);
    if (repl)
        repl->Add(repl, &e, &e.repl);
}

void
Rock::SwapDir::ignoreReferences(StoreEntry &e)
{
    debugs(47, 5, e);
    if (repl)
        repl->Remove(repl, &e, &e.repl);
}

void
Rock::SwapDir::statfs(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n");
    storeAppendPrintf(&e, "Maximum Size: %" PRIu64 " KB\n", maxSize() >> 10);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    const int entryLimit = entryLimitActual();
    const int slotLimit = slotLimitActual();
    storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
    if (map && entryLimit > 0) {
        const int entryCount = map->entryCount();
        storeAppendPrintf(&e, "Current entries: %9d %.2f%%\n",
                          entryCount, (100.0 * entryCount / entryLimit));
    }

    storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
    if (map && slotLimit > 0) {
        const unsigned int slotsFree = !freeSlots ? 0 : freeSlots->size();
        if (slotsFree <= static_cast<unsigned int>(slotLimit)) {
            const int usedSlots = slotLimit - static_cast<int>(slotsFree);
            storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                              usedSlots, (100.0 * usedSlots / slotLimit));
        }
        if (slotLimit < 100) { // XXX: otherwise too expensive to count
            Ipc::ReadWriteLockStats stats;
            map->updateStats(stats);
            stats.dump(e);
        }
    }

    storeAppendPrintf(&e, "Pending operations: %d out of %d\n",
                      store_open_disk_fd, Config.max_open_disk_fds);

    storeAppendPrintf(&e, "Flags:");

    if (flags.selected)
        storeAppendPrintf(&e, " SELECTED");

    if (flags.read_only)
        storeAppendPrintf(&e, " READ-ONLY");

    storeAppendPrintf(&e, "\n");

}

SBuf
Rock::SwapDir::inodeMapPath() const
{
    return Ipc::Mem::Segment::Name(SBuf(path), "map");
}

const char *
Rock::SwapDir::freeSlotsPath() const
{
    static String spacesPath;
    spacesPath = path;
    spacesPath.append("_spaces");
    return spacesPath.termedBuf();
}

bool
Rock::SwapDir::hasReadableEntry(const StoreEntry &e) const
{
    return map->hasReadableEntry(reinterpret_cast<const cache_key*>(e.key));
}

DefineRunnerRegistratorIn(Rock, SwapDirRr);

void Rock::SwapDirRr::create()
{
    Must(mapOwners.empty() && freeSlotsOwners.empty());
    for (size_t i = 0; i < Config.cacheSwap.n_configured; ++i) {
        if (const Rock::SwapDir *const sd = dynamic_cast<Rock::SwapDir *>(INDEXSD(i))) {
            rebuildStatsOwners.push_back(Rebuild::Stats::Init(*sd));

            const int64_t capacity = sd->slotLimitActual();

            SwapDir::DirMap::Owner *const mapOwner =
                SwapDir::DirMap::Init(sd->inodeMapPath(), capacity);
            mapOwners.push_back(mapOwner);

            // TODO: somehow remove pool id and counters from PageStack?
            Ipc::Mem::PageStack::Config config;
            config.poolId = Ipc::Mem::PageStack::IdForSwapDirSpace(i);
            config.pageSize = 0; // this is an index of slots on _disk_
            config.capacity = capacity;
            config.createFull = false; // Rebuild finds and pushes free slots
            Ipc::Mem::Owner<Ipc::Mem::PageStack> *const freeSlotsOwner =
                shm_new(Ipc::Mem::PageStack)(sd->freeSlotsPath(), config);
            freeSlotsOwners.push_back(freeSlotsOwner);
        }
    }
}

Rock::SwapDirRr::~SwapDirRr()
{
    for (size_t i = 0; i < mapOwners.size(); ++i) {
        delete rebuildStatsOwners[i];
        delete mapOwners[i];
        delete freeSlotsOwners[i];
    }
}