Example #1
0
def local_intermittent_storage(
    cache_name: str, ) -> Generator[pt.DbmMapping, None, None]:
    """
    Open a temporary on-disk cache stored under /tmp.

    The cache is deleted upon a Raspberry Pi restart!

    Parameters
    -----------
    cache_name: name of the cache; maps to the file /tmp/{cache_name}.

    Examples
    ---------
    > with local_intermittent_storage('pwm') as cache:
    >     assert '1' in cache
    >     cache['1'] = str(0.5)


    Notes
    -------
    Opening the same cache in nested context managers is tricky, and should be avoided. The general rule is:

    1. Outer-scoped caches can't access keys created by inner scope caches.
    2. The latest value given to a key, regardless of which scoped cache, is the one that is finally written.

    See tests in test_utils.py for examples.

    """
    # Open the cache BEFORE the try block: if ndbm.open raises inside the
    # try, `cache` would be unbound and the finally clause would raise a
    # NameError, masking the real error.
    cache = ndbm.open(f"/tmp/{cache_name}", "c")
    try:
        yield cache  # type: ignore
    finally:
        cache.close()
    def __init__(self, path, readOnly=False):
        """Open (or create) the ndbm database backing this object.

        path must carry the '.db' extension; readOnly selects the ndbm
        open mode ('r' vs 'c').  Raises DbError for a badly named path
        or when the database cannot be opened.
        """
        super().__init__()

        self._extension = '.db'
        if not path.endswith(self._extension):
            raise DbError(
                _("Database %(db)s does not end with %(ext)s") % {
                    'db': path,
                    'ext': self._extension
                })

        # ndbm.open appends the extension itself, so keep the bare path.
        self._dbpath = path[:-len(self._extension)]
        mode = 'r' if readOnly else 'c'
        try:
            self._seen = ndbm.open(self._dbpath, mode, 0o644)
            # Probe the database once up front; some dbm backends only
            # surface corruption on first access.
            'foo%0' in self._seen
        except Exception as ex:
            raise DbError(
                _("Database %(db)s failed to load: %(errmsg)s") % {
                    'db': path,
                    'errmsg': str(ex)
                }) from ex

        # Staged entries; written out to replace _seen once applied.
        self._seen_new = {}
    def apply_changes(self):
        """Persist the staged entries in self._seen_new to the on-disk db.

        Uses a copy-then-rename sequence so a crash mid-update leaves
        either the previous or the new database intact, and keeps a
        '-old' backup of the previous version.  No-op when nothing is
        staged.
        """
        if not self._seen_new:
            # Nothing to update
            return

        def mk(arg):
            # Build a sibling database filename, e.g. "<dbpath>-new.db".
            return self._dbpath + arg + self._extension

        old, cur, new = (mk('-old'), mk(''), mk('-new'))

        # For reliability and to have backup of the database,
        # it is updated in the following steps:
        # 1. copy current version to new
        if os.path.isfile(cur):
            shutil.copy(cur, new)

        # 2. apply the changes to new
        # NOTE(review): assumes ndbm.open appends the '.db' extension so
        # this path matches mk('-new') above -- confirm on target platform.
        seen = ndbm.open(self._dbpath + '-new', 'c', 0o644)
        for (key, value) in self._seen_new.items():
            seen[key] = value
        seen.close()

        # 3. save current as old, and rename new to current
        if os.path.isfile(old):
            os.unlink(old)
        if os.path.isfile(cur):
            os.link(cur, old)
        os.rename(new, cur)
Example #4
0
def ndbm_test_db(request):
    """Pytest fixture helper: create a throwaway ndbm database seeded with
    the module-level (key1, val1) / (key2, val2) pairs and register a
    finalizer that removes every file the dbm backend created.

    Returns the base filename to pass to ndbm.open().
    """
    print("creating test ndbm file")
    # delete=False: with the default (delete=True) the temp file vanishes
    # as soon as temp_file.close() runs, racing the explicit cleanup below.
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    test_db = ndbm.open(temp_file.name, "n")
    test_db[key1] = val1
    test_db[key2] = val2
    test_db.close()

    def delete_test_ndbm():
        import glob  # local import keeps this block self-contained

        print("deleting test ndbm file")
        temp_file.close()
        # Depending on the backend, ndbm creates <name>.db, <name>.pag/.dir,
        # or writes <name> itself -- remove whatever actually appeared
        # (removing only <name>.db would fail on gdbm-backed platforms).
        for f in glob.glob("{}*".format(temp_file.name)):
            os.remove(f)

    request.addfinalizer(delete_test_ndbm)
    return temp_file.name
Example #5
0
def local_persistant_storage(
    cache_name: str, ) -> Generator[pt.DbmMapping, None, None]:
    """
    Values stored in this storage will stay around between RPi restarts, and until overwritten
    or deleted.

    Parameters
    -----------
    cache_name: name of the cache, used as the storage filename.

    Examples
    ---------
    > with local_persistant_storage('od_blank') as cache:
    >     assert '1' in cache
    >     cache['1'] = str(0.5)

    """
    from pioreactor.whoami import is_testing_env

    # Select and open the cache BEFORE the try block: if is_testing_env()
    # or ndbm.open raised inside the try, `cache` would be unbound and the
    # finally clause would raise a NameError, masking the real error.
    if is_testing_env():
        cache = ndbm.open(f".pioreactor/storage/{cache_name}", "c")
    else:
        cache = ndbm.open(f"/home/pi/.pioreactor/storage/{cache_name}",
                          "c")
    try:
        yield cache  # type: ignore
    finally:
        cache.close()
Example #6
0
def ndbm_test_db(request):
    """Create a disposable ndbm database seeded with the module-level
    key/value pairs; cleanup runs as a pytest finalizer.

    Returns the base filename for ndbm.open().
    """
    temp_file = tempfile.NamedTemporaryFile(delete=False)
    database = ndbm.open(temp_file.name, "n")
    database[key1] = val1
    database[key2] = val2
    database.close()

    def delete_test_ndbm():
        temp_file.close()
        # Remove every file the dbm backend created alongside the base name.
        leftovers = glob.glob("{}*".format(temp_file.name))
        for f in leftovers:
            print("deleting test ndbm file {}".format(f))
            os.remove(f)

    request.addfinalizer(delete_test_ndbm)
    return temp_file.name
Example #7
0
def ndbm_test_db(request):
    """Fixture helper: build a fresh ndbm test database and schedule
    removal of every backend-created file once the test finishes."""
    tmp = tempfile.NamedTemporaryFile(delete=False)
    db = ndbm.open(tmp.name, "n")
    db[key1] = val1
    db[key2] = val2
    db.close()

    def delete_test_ndbm():
        tmp.close()
        # dbm backends may create <name>.db, <name>.pag/.dir, or <name>.
        for f in glob.glob("{}*".format(tmp.name)):
            print("deleting test ndbm file {}".format(f))
            os.remove(f)

    request.addfinalizer(delete_test_ndbm)
    return tmp.name
Example #8
0
def whichdb(filename):
    """Guess which db package to use to open a db file.

    Return values:

    - None if the database file can't be read;
    - empty string if the file can be read but can't be recognized
    - the name of the dbm submodule (e.g. "ndbm" or "gnu") if recognized.

    Importing the given module may still fail, and opening the
    database using that module may still fail.
    """

    # Check for ndbm first -- this has a .pag and a .dir file
    try:
        f = io.open(filename + ".pag", "rb")
        f.close()
        # dbm linked with gdbm on OS/2 doesn't have .dir file
        if not (ndbm.library == "GNU gdbm" and sys.platform == "os2emx"):
            f = io.open(filename + ".dir", "rb")
            f.close()
        return "dbm.ndbm"
    except IOError:
        # some dbm emulations based on Berkeley DB generate a .db file
        # some do not, but they should be caught by the bsd checks
        try:
            f = io.open(filename + ".db", "rb")
            f.close()
            # guarantee we can actually open the file using dbm
            # kind of overkill, but since we are dealing with emulations
            # it seems like a prudent step
            if ndbm is not None:
                d = ndbm.open(filename)
                d.close()
                return "dbm.ndbm"
        except IOError:
            pass

    # Check for dumbdbm next -- this has a .dir and a .dat file
    try:
        # First check for presence of files
        os.stat(filename + ".dat")
        size = os.stat(filename + ".dir").st_size
        # dumbdbm files with no keys are empty
        if size == 0:
            return "dbm.dumb"
        f = io.open(filename + ".dir", "rb")
        try:
            if f.read(1) in (b"'", b'"'):
                return "dbm.dumb"
        finally:
            f.close()
    except (OSError, IOError):
        pass

    # See if the file exists, return None if not
    try:
        f = io.open(filename, "rb")
    except IOError:
        return None

    # Read the start of the file -- the magic number.  Use a context
    # manager so the handle is closed even when the read raises (the
    # original leaked the descriptor on that path).
    with f:
        s16 = f.read(16)
    s = s16[0:4]

    # Return "" if not at least 4 bytes
    if len(s) != 4:
        return ""

    # Convert to 4-byte int in native byte order -- return "" if impossible
    try:
        (magic,) = struct.unpack("=l", s)
    except struct.error:
        return ""

    # Check for GNU dbm
    if magic in (0x13579ace, 0x13579acd, 0x13579acf):
        return "dbm.gnu"

    # Later versions of Berkeley db hash file have a 12-byte pad in
    # front of the file type
    try:
        (magic,) = struct.unpack("=l", s16[-4:])
    except struct.error:
        return ""

    # Unknown
    return ""
Example #9
0
    # These two entries use the Python-3 dict-view API; the `else` branch
    # below registers the Python-2 equivalents (iteritems/iterkeys/...).
    # (The `if` header for this branch is above this excerpt.)
    x['DictionaryKeyIteratorType'] = iter(type.__dict__.keys())
    x['DictionaryValueIteratorType'] = iter(type.__dict__.values())
else:
    x['DictionaryItemIteratorType'] = type.__dict__.iteritems()
    x['DictionaryKeyIteratorType'] = type.__dict__.iterkeys()
    x['DictionaryValueIteratorType'] = type.__dict__.itervalues()
# string services (CH 7)
# `x` maps a type name to an exemplar instance of that type.
x['StructType'] = struct.Struct('c')
# _srepattern is presumably a compiled re pattern defined above -- confirm.
x['CallableIteratorType'] = _srepattern.finditer('')
x['SREMatchType'] = _srepattern.match('')
x['SREScannerType'] = _srepattern.scanner('')
x['StreamReader'] = codecs.StreamReader(_cstrI) #XXX: ... and etc
# python object persistence (CH 11)
# x['DbShelveType'] = shelve.open('foo','n')#,protocol=2) #XXX: delete foo
if HAS_ALL:
    # 'n' always creates a fresh database at _tempfile.
    x['DbmType'] = dbm.open(_tempfile,'n')
# x['DbCursorType'] = _dbcursor = anydbm.open('foo','n') #XXX: delete foo
# x['DbType'] = _dbcursor.db
# data compression and archiving (CH 12)
x['ZlibCompressType'] = zlib.compressobj()
x['ZlibDecompressType'] = zlib.decompressobj()
# file formats (CH 13)
# _cstrI/_cstrO look like StringIO-style in/out streams -- defined elsewhere.
x['CSVReaderType'] = csv.reader(_cstrI)
x['CSVWriterType'] = csv.writer(_cstrO)
x['CSVDictReaderType'] = csv.DictReader(_cstrI)
x['CSVDictWriterType'] = csv.DictWriter(_cstrO,{})
# cryptographic services (CH 14)
x['HashType'] = hashlib.md5()
x['HMACType'] = hmac.new(_in)
# generic operating system services (CH 15)
if HAS_CURSES: pass
Example #10
0
    x["DictionaryKeyIteratorType"] = iter(type.__dict__.keys())
    x["DictionaryValueIteratorType"] = iter(type.__dict__.values())
else:
    x["DictionaryItemIteratorType"] = type.__dict__.iteritems()
    x["DictionaryKeyIteratorType"] = type.__dict__.iterkeys()
    x["DictionaryValueIteratorType"] = type.__dict__.itervalues()
# string services (CH 7)
x["StructType"] = struct.Struct("c")
x["CallableIteratorType"] = _srepattern.finditer("")
x["SREMatchType"] = _srepattern.match("")
x["SREScannerType"] = _srepattern.scanner("")
x["StreamReader"] = codecs.StreamReader(_cstrI)  # XXX: ... and etc
# python object persistence (CH 11)
# x['DbShelveType'] = shelve.open('foo','n')#,protocol=2) #XXX: delete foo
if HAS_ALL:
    x["DbmType"] = dbm.open(_tempfile, "n")
# x['DbCursorType'] = _dbcursor = anydbm.open('foo','n') #XXX: delete foo
# x['DbType'] = _dbcursor.db
# data compression and archiving (CH 12)
x["ZlibCompressType"] = zlib.compressobj()
x["ZlibDecompressType"] = zlib.decompressobj()
# file formats (CH 13)
x["CSVReaderType"] = csv.reader(_cstrI)
x["CSVWriterType"] = csv.writer(_cstrO)
x["CSVDictReaderType"] = csv.DictReader(_cstrI)
x["CSVDictWriterType"] = csv.DictWriter(_cstrO, {})
# cryptographic services (CH 14)
x["HashType"] = hashlib.md5()
x["HMACType"] = hmac.new(_in)
# generic operating system services (CH 15)
if HAS_CURSES:
Example #11
0
    def __init__(self, db_filename='schedules'):
        '''Initializes the scheduler and opens the dbm file.

        db_filename is resolved relative to options.main.workdir; the
        database is created if missing (mode 'c') with 0o600 permissions.
        '''
        # (Removed a no-op `db_filename = db_filename` self-assignment.)
        db_filename = os.path.join(options.main.workdir, db_filename)
        self.db = dbm.open(db_filename, 'c', 0o600)
Example #12
0
    def __init__(self, db_filename='schedules'):
        '''Initializes the scheduler and opens the dbm file.

        db_filename is resolved relative to options.main.workdir; the
        database is created if missing (mode 'c') with 0o600 permissions.
        '''
        # (Removed a no-op `db_filename = db_filename` self-assignment.)
        db_filename = os.path.join(options.main.workdir, db_filename)
        self.db = dbm.open(db_filename, 'c', 0o600)
Example #13
0
 def __init__(self, datadir):
     """Open the on-disk block database under *datadir* and reset
     the in-memory bookkeeping state."""
     self.blockDB = dbmd.open(datadir + "/blocks", 'c')
     self.headers_map = {}
     self.currentBlock = 0
Example #14
0
 def __init__(self, datadir):
     """Open (creating if needed) the transactions database in *datadir*."""
     db_path = datadir + "/transactions"
     self.txDB = dbmd.open(db_path, 'c')
Example #15
0
def whichdb(filename):
    """Guess which db package to use to open a db file.

    Return values:

    - None if the database file can't be read;
    - empty string if the file can be read but can't be recognized
    - the name of the dbm submodule (e.g. "ndbm" or "gnu") if recognized.

    Importing the given module may still fail, and opening the
    database using that module may still fail.
    """
    # ndbm leaves a .pag/.dir pair next to the base name.
    try:
        with io.open(filename + '.pag', 'rb'):
            pass
        with io.open(filename + '.dir', 'rb'):
            pass
        return 'dbm.ndbm'
    except OSError:
        # Berkeley-DB-backed ndbm emulations create a single .db file;
        # prove it is actually openable via the dbm module.
        try:
            with io.open(filename + '.db', 'rb'):
                pass
            if ndbm is not None:
                ndbm.open(filename).close()
                return 'dbm.ndbm'
        except OSError:
            pass
    # dumbdbm keeps a .dat/.dir pair; an empty .dir means "no keys yet".
    try:
        os.stat(filename + '.dat')
        if os.stat(filename + '.dir').st_size == 0:
            return 'dbm.dumb'
        with io.open(filename + '.dir', 'rb') as dirfile:
            if dirfile.read(1) in (b"'", b'"'):
                return 'dbm.dumb'
    except OSError:
        pass
    # No recognizable sidecar files -- sniff the file's own header.
    try:
        handle = io.open(filename, 'rb')
    except OSError:
        return None
    with handle:
        header = handle.read(16)
    prefix = header[0:4]
    if len(prefix) != 4:
        # Too short to carry a magic number.
        return ''
    try:
        (magic,) = struct.unpack('=l', prefix)
    except struct.error:
        return ''
    # GNU gdbm magic numbers (== 0x13579ace / 0x13579acd / 0x13579acf).
    if magic in (324508366, 324508365, 324508367):
        return 'dbm.gnu'
    # Berkeley db hash files may pad 12 bytes before the type marker.
    try:
        (magic,) = struct.unpack('=l', header[-4:])
    except struct.error:
        return ''
    return ''
Example #16
0
def whichdb(filename):
    """Guess which db package to use to open a db file.

    Return values:

    - None if the database file can't be read;
    - empty string if the file can be read but can't be recognized
    - the name of the dbm submodule (e.g. "ndbm" or "gnu") if recognized.

    Importing the given module may still fail, and opening the
    database using that module may still fail.
    """

    def _readable(path):
        # Open purely to test readability; any OSError propagates to the
        # caller's handler.
        io.open(path, "rb").close()

    # ndbm databases come with a .pag and a .dir file
    try:
        _readable(filename + ".pag")
        _readable(filename + ".dir")
        return "dbm.ndbm"
    except OSError:
        # Berkeley-DB based ndbm emulations produce a single .db file;
        # confirm by actually opening it through the dbm module (overkill,
        # but prudent when dealing with emulations).
        try:
            _readable(filename + ".db")
            if ndbm is not None:
                ndbm.open(filename).close()
                return "dbm.ndbm"
        except OSError:
            pass

    # dumbdbm databases consist of a .dir plus a .dat file
    try:
        os.stat(filename + ".dat")
        dir_size = os.stat(filename + ".dir").st_size
        if dir_size == 0:
            # a keyless dumbdbm leaves an empty .dir
            return "dbm.dumb"
        with io.open(filename + ".dir", "rb") as fp:
            if fp.read(1) in (b"'", b'"'):
                return "dbm.dumb"
    except OSError:
        pass

    # No sidecar files recognized -- does the file itself exist?
    try:
        fp = io.open(filename, "rb")
    except OSError:
        return None

    # Sniff the leading magic number.
    with fp:
        head = fp.read(16)
    magic_bytes = head[0:4]

    if len(magic_bytes) != 4:
        # too short to carry a magic number
        return ""

    try:
        (magic,) = struct.unpack("=l", magic_bytes)
    except struct.error:
        return ""

    # GNU gdbm magic values
    if magic in (0x13579ace, 0x13579acd, 0x13579acf):
        return "dbm.gnu"

    # Later Berkeley db hash files pad 12 bytes before the type marker
    try:
        (magic,) = struct.unpack("=l", head[-4:])
    except struct.error:
        return ""

    # Unknown
    return ""
Example #17
0
    # These two entries use the Python-3 dict-view API; the `else` branch
    # below registers the Python-2 equivalents (iteritems/iterkeys/...).
    # (The `if` header for this branch is above this excerpt.)
    x['DictionaryKeyIteratorType'] = iter(type.__dict__.keys())
    x['DictionaryValueIteratorType'] = iter(type.__dict__.values())
else:
    x['DictionaryItemIteratorType'] = type.__dict__.iteritems()
    x['DictionaryKeyIteratorType'] = type.__dict__.iterkeys()
    x['DictionaryValueIteratorType'] = type.__dict__.itervalues()
# string services (CH 7)
# `x` maps a type name to an exemplar instance of that type.
x['StructType'] = struct.Struct('c')
# _srepattern is presumably a compiled re pattern defined above -- confirm.
x['CallableIteratorType'] = _srepattern.finditer('')
x['SREMatchType'] = _srepattern.match('')
x['SREScannerType'] = _srepattern.scanner('')
x['StreamReader'] = codecs.StreamReader(_cstrI)  #XXX: ... and etc
# python object persistence (CH 11)
# x['DbShelveType'] = shelve.open('foo','n')#,protocol=2) #XXX: delete foo
if HAS_ALL:
    # 'n' always creates a fresh database at _tempfile.
    x['DbmType'] = dbm.open(_tempfile, 'n')
# x['DbCursorType'] = _dbcursor = anydbm.open('foo','n') #XXX: delete foo
# x['DbType'] = _dbcursor.db
# data compression and archiving (CH 12)
x['ZlibCompressType'] = zlib.compressobj()
x['ZlibDecompressType'] = zlib.decompressobj()
# file formats (CH 13)
# _cstrI/_cstrO look like StringIO-style in/out streams -- defined elsewhere.
x['CSVReaderType'] = csv.reader(_cstrI)
x['CSVWriterType'] = csv.writer(_cstrO)
x['CSVDictReaderType'] = csv.DictReader(_cstrI)
x['CSVDictWriterType'] = csv.DictWriter(_cstrO, {})
# cryptographic services (CH 14)
x['HashType'] = hashlib.md5()
x['HMACType'] = hmac.new(_in)
# generic operating system services (CH 15)
if HAS_CURSES: pass
Example #18
0
 def __init__(self, storepath):
     """Remember *storepath* and open (or create) the dbm store there,
     owner-only permissions (0o600)."""
     self.storepath = storepath
     # 'c' creates the database when it does not exist yet.
     self.db = dbm.open(storepath, "c", 0o600)