HashDBM constructor
HashDBM(RandomAccessFile file, {int buckets = 10007, bool flush = true, bool crc = false, bool readonly = false, bool versioned = false})
Open a new database. Optional parameters are: buckets, which sets the
number of hash buckets to use; flush, which when set to true forces
data to disk every time it is changed; crc, which enables CRC checks
on underlying records when set to true; readonly, which opens the
database with a shared lock and prevents mutations; and versioned,
which opens or creates the database in the versioned record format
(an existing plain-format file is upgraded on open). The defaults are
generally good enough.
Implementation
/// Opens a hash database backed by [file].
///
/// [buckets] sets the number of hash buckets for a newly created file
/// (an existing file's header is read from disk instead). [flush]
/// forces data to disk every time it is changed. [crc] enables CRC
/// checks on underlying records. [readonly] takes a shared lock instead
/// of an exclusive one, prevents mutations, and rejects creation of a
/// new file. [versioned] opens or creates the database in the versioned
/// record format; an existing plain-format file is upgraded on open.
///
/// Throws [DBMException] on permission or format problems. If
/// construction fails, the file lock is released before the error
/// propagates.
HashDBM(RandomAccessFile file,
    {int buckets = 10007,
    bool flush = true,
    bool crc = false,
    bool readonly = false,
    bool versioned = false})
    : _file = file,
      _flush = flush,
      _readonly = readonly,
      _header = HashHeader(buckets) {
  // Readers share the lock; writers need exclusive access.
  _file.lockSync(readonly ? FileLock.shared : FileLock.exclusive);
  try {
    _finalizer.attach(this, _file);
    final length = file.lengthSync();
    // A file large enough to hold a header is treated as existing data.
    final existing = length >= _header.length;
    if (existing) {
      _header.read(_file);
    } else if (readonly) {
      throw DBMException(
          403, 'Cannot open a new file in readonly mode');
    }
    if (_header.magic != HashHeader.MAGIC) {
      throw DBMException(500, 'HashHeader magic mismatch: ${_header.magic}');
    }
    if (!_header.validate()) {
      throw DBMException(500, 'Header CRC mismatch');
    }
    // Format version validation
    final ver = _header.version;
    if (existing) {
      if (versioned) {
        if (ver == HashHeader.VERSION_PLAIN) {
          // Upgrade plain file to versioned format
          _header.version = HashHeader.VERSION_VERSIONED;
        } else if (ver != HashHeader.VERSION_VERSIONED) {
          throw DBMException(
              500, 'Unknown format version: 0x${ver.toRadixString(16)}');
        }
      } else {
        if (ver == HashHeader.VERSION_VERSIONED) {
          throw DBMException(403,
              'File is a versioned database; open with VersionedHashDBM');
        } else if (ver != HashHeader.VERSION_PLAIN) {
          throw DBMException(
              500, 'Unknown format version: 0x${ver.toRadixString(16)}');
        }
      }
    } else if (versioned) {
      _header.version = HashHeader.VERSION_VERSIONED;
    }
    if (!readonly) {
      // Update with current time
      _header.modified = DateTime.now().millisecondsSinceEpoch;
      _header.seal();
      _header.write(_file);
    }
    // Create the memory pool
    _memoryPool = MemoryPool(_file, _header.memPoolOffset);
    // Create the record pool
    _recordPool = HashRecordPool(
        _file, _memoryPool.end + 1, _memoryPool, _header.numBuckets,
        crc: crc);
  } catch (_) {
    // Any failure above (permission, magic/CRC mismatch, unknown version)
    // would otherwise leave the OS file lock held until finalization.
    // Release it deterministically and let the original error propagate.
    _file.unlockSync();
    rethrow;
  }
}