def get_db_state(disposition=DISPOSITION_RO):
    impl = config.get_implementation()
    if impl is None:
        impl = sys.modules[__name__]

    db_filename = config.get_db_filename(impl=impl)
    lastblock_filename = config.get_lastblock_filename(impl=impl)

    firstcheck = True
    for path in [db_filename, lastblock_filename]:
        if os.path.exists(path):
            # have already created the db
            firstcheck = False
    if not firstcheck and not os.path.exists(lastblock_filename):
        log.error("FATAL: no such file or directory: %s" % lastblock_filename)

    # verify that it is well-formed if it exists
    elif os.path.exists(lastblock_filename):
        try:
            with open(lastblock_filename, "r") as f:
                int(f.read().strip())
        except Exception, e:
            log.error("FATAL: failed to parse: %s " % lastblock_filename)
            log.exception(e)
            os.abort()
Example #2
    def save( self, block_id, consensus_hash, pending_ops, backup=False ):
        """
        Write out all state to the working directory.
        Calls the implementation's 'db_save' method.
        
        Return True on success 
        Return False on error
        Raise exception if block_id represents a block 
         we've already processed.
        """
        
        if block_id < self.lastblock:
           raise Exception("Already processed up to block %s (got %s)" % (self.lastblock, block_id))
        
        # stage data to temporary files
        tmp_db_filename = (config.get_db_filename() + ".tmp")
        tmp_snapshot_filename = (config.get_snapshots_filename() + ".tmp")
        tmp_lastblock_filename = (config.get_lastblock_filename() + ".tmp")
        
        with open(tmp_snapshot_filename, 'w') as f:
            db_dict = {
               'snapshots': self.consensus_hashes
            }
            f.write(json.dumps(db_dict))
            f.flush()
        
        # put this last...
        with open(tmp_lastblock_filename, "w") as lastblock_f:
            lastblock_f.write("%s" % block_id)
            lastblock_f.flush()

        rc = self.impl.db_save( block_id, consensus_hash, pending_ops, tmp_db_filename, db_state=self.state )
        if not rc:
            # failed to save 
            log.error("Implementation failed to save at block %s to %s" % (block_id, tmp_db_filename))
            
            try:
                os.unlink( tmp_lastblock_filename )
            except:
                pass 
            
            try:
                os.unlink( tmp_snapshot_filename )
            except:
                pass 
            
            return False
       
        rc = self.commit( backup=backup )
        if not rc:
            log.error("Failed to commit data at block %s.  Rolling back." % block_id )
            
            self.rollback()
            return False 
        
        else:
            self.lastblock = block_id
            return True
    def commit(self, backup=False, startup=False):
        """
        Move all written but uncommitted data into place.
        Return True on success
        Return False on error (in which case the caller should rollback())

        It is safe to call this method repeatedly until it returns True.
        """

        if self.read_only:
            log.error("FATAL: read-only")
            os.abort()

        tmp_db_filename = config.get_db_filename(impl=self.impl) + ".tmp"
        tmp_snapshot_filename = config.get_snapshots_filename(impl=self.impl) + ".tmp"
        tmp_lastblock_filename = config.get_lastblock_filename(impl=self.impl) + ".tmp"

        if not os.path.exists(tmp_lastblock_filename) and (
            os.path.exists(tmp_db_filename) or os.path.exists(tmp_snapshot_filename)):
            # we did not successfully stage the write.
            # rollback
            log.error("Partial write detected.  Not committing.")
            return False

        # basic sanity checks: don't overwrite the db if the file is zero bytes, or if we can't load it
        if os.path.exists(tmp_db_filename):
            db_dir = os.path.dirname(tmp_db_filename)

            try:
                dirfd = os.open(db_dir, os.O_DIRECTORY)
                os.fsync(dirfd)
                os.close(dirfd)
            except Exception, e:
                log.exception(e)
                log.error("FATAL: failed to sync directory %s" % db_dir)
                traceback.print_stack()
                os.abort()

            sb = os.stat(tmp_db_filename)
            if sb.st_size == 0:
                log.error("Partial write detected: tried to overwrite with zero-sized db!  Will rollback.")
                return False

            if startup:
                # make sure we can load this
                try:
                    with open(tmp_snapshot_filename, "r") as f:
                        db_txt = f.read()

                    db_json = json.loads(db_txt)
                except:
                    log.error("Partial write detected: corrupt partially-committed db!  Will rollback.")
                    return False
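Together, save() and commit() form a staged-write protocol: everything is written to .tmp files first, with the lastblock file staged last so that its presence marks a fully-staged write, and commit() then moves the staged files into place. A sketch of the intended calling sequence, where 'engine' and 'new_blocks' are hypothetical names, not part of the library:

# Hypothetical driver loop; each entry in new_blocks is assumed to be
# a (block_id, consensus_hash, pending_ops) tuple for one block.
for block_id, consensus_hash, pending_ops in new_blocks:
    if not engine.save(block_id, consensus_hash, pending_ops):
        # save() unlinks its staged .tmp files (or rolls back a failed
        # commit) before returning False, so it is safe to stop here.
        break
    # on success, engine.lastblock has advanced to block_id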
Example #4
    def commit( self, backup=False, startup=False ):
        """
        Move all written but uncommitted data into place.
        Return True on success 
        Return False on error (in which case the caller should rollback())
        
        It is safe to call this method repeatedly until it returns True.
        """

        if self.read_only:
           log.error("FATAL: read-only")
           os.abort()

        tmp_db_filename = config.get_db_filename(impl=self.impl) + ".tmp"
        tmp_snapshot_filename = config.get_snapshots_filename(impl=self.impl) + ".tmp"
        tmp_lastblock_filename = config.get_lastblock_filename(impl=self.impl) + ".tmp"
        
        if not os.path.exists( tmp_lastblock_filename ) and (os.path.exists(tmp_db_filename) or os.path.exists(tmp_snapshot_filename)):
            # we did not successfully stage the write.
            # rollback 
            log.error("Partial write detected.  Not committing.")
            return False
           
        # basic sanity checks: don't overwrite the db if the file is zero bytes, or if we can't load it
        if os.path.exists( tmp_db_filename ):
            db_dir = os.path.dirname( tmp_db_filename )

            try:
                dirfd = os.open(db_dir, os.O_DIRECTORY)
                os.fsync(dirfd)
                os.close( dirfd )
            except Exception, e:
                log.exception(e)
                log.error("FATAL: failed to sync directory %s" % db_dir)
                traceback.print_stack()
                os.abort()

            sb = os.stat( tmp_db_filename )
            if sb.st_size == 0:
                log.error("Partial write detected: tried to overwrite with zero-sized db!  Will rollback.")
                return False

            if startup:
                # make sure we can load this 
                try:
                    with open(tmp_snapshot_filename, "r") as f:
                        db_txt = f.read()

                    db_json = json.loads(db_txt)
                except:
                    log.error("Partial write detected: corrupt partially-committed db!  Will rollback.")
                    return False
Example #5
 def rollback( self ):
     """
     Roll back a pending write: blow away temporary files.
     """
     
     tmp_db_filename = config.get_db_filename() + ".tmp"
     tmp_snapshot_filename = config.get_snapshots_filename() + ".tmp"
     tmp_lastblock_filename = config.get_lastblock_filename() + ".tmp"
     
     for f in [tmp_db_filename, tmp_snapshot_filename, tmp_lastblock_filename]:
         if os.path.exists( f ):
             
             try:
                 os.unlink( f )
             except:
                 log.error("Failed to unlink '%s'" % f )
                 pass
Example #6
    def save( self, block_id, consensus_hash, pending_ops, backup=False ):
        """
        Write out all state to the working directory.
        Calls the implementation's 'db_save' method to store any state for this block.
        Calls the implementation's 'db_continue' method at the very end, to signal
        to the implementation that all virtualchain state has been saved.  'db_continue'
        may return False, in which case indexing stops.
        
        Return True on success 
        Return False if the implementation wants to exit.
        Aborts on fatal error
        """
        
        if self.read_only:
            log.error("FATAL: read only")
            traceback.print_stack()
            os.abort()

        if block_id < self.lastblock:
            log.error("FATAL: Already processed up to block %s (got %s)" % (self.lastblock, block_id))
            traceback.print_stack()
            os.abort()

        # stage data to temporary files
        tmp_db_filename = (config.get_db_filename(impl=self.impl) + ".tmp")
        tmp_snapshot_filename = (config.get_snapshots_filename(impl=self.impl) + ".tmp")
        tmp_lastblock_filename = (config.get_lastblock_filename(impl=self.impl) + ".tmp")
        
        try:
            with open(tmp_snapshot_filename, 'w') as f:
                db_dict = {
                   'snapshots': self.consensus_hashes
                }
                f.write(json.dumps(db_dict))
                f.flush()
            
            with open(tmp_lastblock_filename, "w") as lastblock_f:
                lastblock_f.write("%s" % block_id)
                lastblock_f.flush()

        except Exception, e:
            # failure to save is fatal 
            log.exception(e)
            log.error("FATAL: Could not stage data for block %s" % block_id)
            traceback.print_stack()
            os.abort()
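Example #7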
    def save(self, block_id, consensus_hash, pending_ops, backup=False):
        """
        Write out all state to the working directory.
        Calls the implementation's 'db_save' method to store any state for this block.
        Calls the implementation's 'db_continue' method at the very end, to signal
        to the implementation that all virtualchain state has been saved.  'db_continue'
        may return False, in which case indexing stops.

        Return True on success
        Return False if the implementation wants to exit.
        Aborts on fatal error
        """

        if self.read_only:
            log.error("FATAL: read only")
            traceback.print_stack()
            os.abort()

        if block_id < self.lastblock:
            log.error("FATAL: Already processed up to block %s (got %s)" % (self.lastblock, block_id))
            traceback.print_stack()
            os.abort()

        # stage data to temporary files
        tmp_db_filename = (config.get_db_filename(impl=self.impl) + ".tmp")
        tmp_snapshot_filename = (config.get_snapshots_filename(impl=self.impl) + ".tmp")
        tmp_lastblock_filename = (config.get_lastblock_filename(impl=self.impl) + ".tmp")

        try:
            with open(tmp_snapshot_filename, 'w') as f:
                db_dict = {
                    'snapshots': self.consensus_hashes
                }
                f.write(json.dumps(db_dict))
                f.flush()

            with open(tmp_lastblock_filename, "w") as lastblock_f:
                lastblock_f.write("%s" % block_id)
                lastblock_f.flush()

        except Exception, e:
            # failure to save is fatal
            log.exception(e)
            log.error("FATAL: Could not stage data for block %s" % block_id)
            traceback.print_stack()
            os.abort()
Example #8
 def rollback( self ):
     """
     Roll back a pending write: blow away temporary files.
     """
     
     tmp_db_filename = config.get_db_filename() + ".tmp"
     tmp_snapshot_filename = config.get_snapshots_filename() + ".tmp"
     tmp_lastblock_filename = config.get_lastblock_filename() + ".tmp"
     
     for f in [tmp_db_filename, tmp_snapshot_filename, tmp_lastblock_filename]:
         if os.path.exists( f ):
             
             try:
                 os.unlink( f )
             except:
                 log.error("Failed to unlink '%s'" % f )
                 pass
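Example #9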
    def get_lastblock(self, lastblock_filename=None, impl=None):
        """
        Get the last block
        """
        if lastblock_filename is None:

            if impl is None:
                impl = self.impl

            lastblock_filename = config.get_lastblock_filename(impl=impl)

        if os.path.exists( lastblock_filename ):
            try:
                with open(lastblock_filename) as f:
                    lastblock_str = f.read().strip()
                    return int(lastblock_str)
            except Exception, e:
                log.error("Failed to read last block number at: %s" % lastblock_filename)
                return None
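Example #10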
    def __init__(self, magic_bytes, opcodes, opfields, impl=None, state=None, initial_snapshots = {}, expected_snapshots={}, backup_frequency=None, backup_max_age=None, read_only=False):
        self.consensus_hashes = initial_snapshots
        self.pending_ops = defaultdict(list)
        self.magic_bytes = magic_bytes
        self.opcodes = opcodes[:]
        self.opfields = copy.deepcopy(opfields)
        self.state = state
        self.impl = impl
        self.lastblock = self.impl.get_first_block_id() - 1
        self.pool = None
        self.rejected = {}
        self.expected_snapshots = expected_snapshots
        self.backup_frequency = backup_frequency
        self.backup_max_age = backup_max_age
        self.read_only = read_only

        firsttime = True

        consensus_snapshots_filename = config.get_snapshots_filename(impl)
        lastblock_filename = config.get_lastblock_filename(impl)

        # check whether it is the first time
        for fp in [consensus_snapshots_filename, lastblock_filename]:
            if os.path.exists(fp):
                firsttime = False

        # Attempt to load the snapshots
        if os.path.exists(consensus_snapshots_filename):
            log.debug("Consensus snapshots at %s" % consensus_snapshots_filename)

            try:
                with open(consensus_snapshots_filename, "r") as f:
                    db_dict = json.loads(f.read())
                    assert 'snapshots' in db_dict
                    self.consensus_hashes = db_dict['snapshots']

            except Exception, e:
                log.error("FATAL: Failed to read consensus snapshots at %s" % (consensus_snapshots_filename))
                log.exception(e)
                os.abort()
Example #11
    def get_lastblock( self, lastblock_filename=None, impl=None, working_dir=None ):
        """
        What was the last block processed?
        Return the number on success
        Return None on failure to read
        """

        if lastblock_filename is None:
            
            if impl is None:
                impl = self.impl

            lastblock_filename = config.get_lastblock_filename(impl=impl, working_dir=working_dir)
        
        if os.path.exists( lastblock_filename ):
           try:
              with open(lastblock_filename, 'r') as f:
                 lastblock_str = f.read().strip()
                 return int(lastblock_str)
              
           except Exception, e:
              log.error("Failed to read last block number at '%s'" % lastblock_filename )
              return None
Example #12
    def get_lastblock( self, lastblock_filename=None, impl=None ):
        """
        What was the last block processed?
        Return the number on success
        Return None on failure to read
        """

        if lastblock_filename is None:
            
            if impl is None:
                impl = self.impl

            lastblock_filename = config.get_lastblock_filename(impl=impl)
        
        if os.path.exists( lastblock_filename ):
           try:
              with open(lastblock_filename, 'r') as f:
                 lastblock_str = f.read().strip()
                 return int(lastblock_str)
              
           except Exception, e:
              log.error("Failed to read last block number at '%s'" % lastblock_filename )
              return None
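In every variant of get_lastblock(), the lastblock file holds nothing but the block height as decimal text. A quick round-trip of that format, on an illustrative path:

# Round-trip of the lastblock file format read above; the path is
# illustrative only.
path = "/tmp/example.lastblock"
with open(path, "w") as f:
    f.write("%s" % 1234)
with open(path, "r") as f:
    assert int(f.read().strip()) == 1234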
Example #13
    
    Get the range of block numbers that we need to fetch from the blockchain.

    Return None if we fail to connect to bitcoind.
    """

    start_block = config.get_first_block_id()
       
    try:
       current_block = int(bitcoind.getblockcount())
        
    except Exception, e:
       # TODO: reconnect on connection error
       log.exception(e)
       return None, None

    # check our last known file
    lastblock_file = config.get_lastblock_filename()
    
    saved_block = 0
    if os.path.isfile(lastblock_file):
         
        with open(lastblock_file, 'r') as fin:
           try:
              saved_block = fin.read()
              saved_block = int(saved_block)
           except:
              saved_block = 0
              try:
                 os.unlink(lastblock_file)
              except OSError, oe:
                 pass 
              
Example #14
    def get_backup_paths( cls, block_id, impl ):
        """
        Get the set of backup paths, given the virtualchain implementation module and block number
        """
        backup_dir = os.path.join( config.get_working_dir(impl=impl), "backups" )
        backup_paths = []
        for p in [config.get_db_filename(impl=impl), config.get_snapshots_filename(impl=impl), config.get_lastblock_filename(impl=impl)]:
            pbase = os.path.basename(p)
            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % block_id))
            backup_paths.append( backup_path )

        return backup_paths
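Example #15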
            if startup:
                # make sure we can load this
                try:
                    with open(tmp_snapshot_filename, "r") as f:
                        db_txt = f.read()

                    db_json = json.loads(db_txt)
                except:
                    log.error("Partial write detected: corrupt partially-committed db!  Will rollback.")
                    return False

        backup_time = int(time.time() * 1000000)

        listing = []
        listing.append(("lastblock", tmp_lastblock_filename, config.get_lastblock_filename(impl=self.impl)))
        listing.append(("snapshots", tmp_snapshot_filename, config.get_snapshots_filename(impl=self.impl)))
        listing.append(("db", tmp_db_filename, config.get_db_filename(impl=self.impl)))

        for i in xrange(0, len(listing)):
            file_type, tmp_filename, filename = listing[i]

            dir_path = os.path.dirname(tmp_filename)
            dirfd = None
            try:
                dirfd = os.open(dir_path, os.O_DIRECTORY)
                os.fsync(dirfd)
            except Exception, e:
                log.exception(e)
                log.error("FATAL: failed to sync directory %s" % dir_path)
                traceback.print_stack()
Example #16
    def commit(self, backup=False, startup=False):
        """
        Move all written but uncommitted data into place.
        Return True on success 
        Return False on error (in which case the caller should rollback())
        
        It is safe to call this method repeatedly until it returns True.
        """

        tmp_db_filename = config.get_db_filename() + ".tmp"
        tmp_snapshot_filename = config.get_snapshots_filename() + ".tmp"
        tmp_lastblock_filename = config.get_lastblock_filename() + ".tmp"

        if not os.path.exists(tmp_lastblock_filename) and (
                os.path.exists(tmp_db_filename)
                or os.path.exists(tmp_snapshot_filename)):
            # we did not successfully stage the write.
            # rollback
            log.error("Partial write detected.  Not committing.")
            return False

        # basic sanity checks: don't overwrite the db if the file is zero bytes, or if we can't load it
        if os.path.exists(tmp_db_filename):
            sb = os.stat(tmp_db_filename)
            if sb.st_size == 0:
                log.error(
                    "Partial write detected: tried to overwrite with zero-sized db!  Will rollback."
                )
                return False

            if startup:
                # make sure we can load this
                try:
                    with open(tmp_snapshot_filename, "r") as f:
                        db_txt = f.read()

                    db_json = json.loads(db_txt)
                except:
                    log.error(
                        "Partial write detected: corrupt partially-committed db!  Will rollback."
                    )
                    return False

        backup_time = int(time.time() * 1000000)

        for tmp_filename, filename in zip( [tmp_lastblock_filename, tmp_snapshot_filename, tmp_db_filename], \
                                           [config.get_lastblock_filename(), config.get_snapshots_filename(), config.get_db_filename()] ):

            if not os.path.exists(tmp_filename):
                continue

            # commit our new lastblock, consensus hash set, and state engine data
            try:

                # NOTE: rename fails on Windows if the destination exists
                if sys.platform == 'win32' and os.path.exists(filename):

                    try:
                        os.unlink(filename)
                    except:
                        pass

                if not backup:
                    os.rename(tmp_filename, filename)
                else:
                    shutil.copy(tmp_filename, filename)
                    os.rename(tmp_filename,
                              tmp_filename + (".%s" % backup_time))

            except Exception, e:

                log.exception(e)
                return False
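This commit loop leans on os.rename() being atomic on POSIX filesystems: a crash leaves either the old file or the new one, never a torn write. A standalone sketch of the idiom (the helper name is illustrative, not part of the library):

import os

def atomic_write(path, data):
    # Stage to a temporary file, force it to disk, then rename into place.
    tmp = path + ".tmp"
    with open(tmp, "w") as f:
        f.write(data)
        f.flush()
        os.fsync(f.fileno())
    # os.rename() is atomic on POSIX; on Windows the destination must be
    # unlinked first, which is the special case handled in the loop above.
    os.rename(tmp, path)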
Example #17
    Get the range of block numbers that we need to fetch from the blockchain.
    
    Return None if we fail to connect to bitcoind.
    """

    start_block = config.get_first_block_id()

    try:
        current_block = int(bitcoind.getblockcount())

    except Exception, e:
        log.error(e)
        return None, None

    # check our last known file
    lastblock_file = config.get_lastblock_filename()

    saved_block = 0
    if os.path.isfile(lastblock_file):

        with open(lastblock_file, 'r') as fin:
            try:
                saved_block = fin.read()
                saved_block = int(saved_block)
            except:
                saved_block = 0
                try:
                    os.unlink(lastblock_file)
                except OSError, oe:
                    pass
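Both copies of this helper (here and in Example #13) are truncated before they return, but the surrounding code makes the intent clear: resume from the saved block when one exists, otherwise from the configured first block, up to bitcoind's current height. One plausible continuation, offered only as a sketch:

# Hypothetical final step of the truncated helper above.
def resolve_block_range(start_block, saved_block, current_block):
    # Resume just past the last processed block, if one was recorded.
    if saved_block > start_block:
        start_block = saved_block + 1
    return start_block, current_block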
Example #18
    def commit( self, backup=False, startup=False ):
        """
        Move all written but uncommitted data into place.
        Return True on success 
        Return False on error (in which case the caller should rollback())
        
        It is safe to call this method repeatedly until it returns True.
        """

        tmp_db_filename = config.get_db_filename() + ".tmp"
        tmp_snapshot_filename = config.get_snapshots_filename() + ".tmp"
        tmp_lastblock_filename = config.get_lastblock_filename() + ".tmp"
        
        if not os.path.exists( tmp_lastblock_filename ) and (os.path.exists(tmp_db_filename) or os.path.exists(tmp_snapshot_filename)):
            # we did not successfully stage the write.
            # rollback 
            log.error("Partial write detected.  Not committing.")
            return False
            
        # basic sanity checks: don't overwrite the db if the file is zero bytes, or if we can't load it
        if os.path.exists( tmp_db_filename ):
            sb = os.stat( tmp_db_filename )
            if sb.st_size == 0:
                log.error("Partial write detected: tried to overwrite with zero-sized db!  Will rollback.")
                return False

            if startup:
                # make sure we can load this 
                try:
                    with open(tmp_snapshot_filename, "r") as f:
                        db_txt = f.read()

                    db_json = json.loads(db_txt)
                except:
                    log.error("Partial write detected: corrupt partially-committed db!  Will rollback.")
                    return False

        
        backup_time = int(time.time() * 1000000)

        for tmp_filename, filename in zip( [tmp_lastblock_filename, tmp_snapshot_filename, tmp_db_filename], \
                                           [config.get_lastblock_filename(), config.get_snapshots_filename(), config.get_db_filename()] ):
               
            if not os.path.exists( tmp_filename ):
                continue  

            # commit our new lastblock, consensus hash set, and state engine data
            try:
               
               # NOTE: rename fails on Windows if the destination exists 
               if sys.platform == 'win32' and os.path.exists( filename ):
                  
                  try:
                     os.unlink( filename )
                  except:
                     pass

               if not backup:
                   os.rename( tmp_filename, filename )
               else:
                   shutil.copy( tmp_filename, filename )
                   os.rename( tmp_filename, tmp_filename + (".%s" % backup_time))
                  
            except Exception, e:
               
               log.exception(e)
               return False 
Example #19
    def get_backup_paths( cls, block_id, impl, working_dir=None ):
        """
        Get the set of backup paths, given the virtualchain implementation module and block number
        """
        backup_dir = os.path.join( config.get_working_dir(impl=impl, working_dir=working_dir), "backups" )
        backup_paths = []
        for p in [config.get_db_filename(impl=impl, working_dir=working_dir), config.get_snapshots_filename(impl=impl, working_dir=working_dir), config.get_lastblock_filename(impl=impl, working_dir=working_dir)]:
            pbase = os.path.basename(p)
            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % block_id))
            backup_paths.append( backup_path )

        return backup_paths
Example #20
    def backup_restore( cls, block_id, impl ):
        """
        Restore from a backup, given the virtualchain implementation module and block number
        """
        backup_dir = os.path.join( config.get_working_dir(impl=impl), "backups" )
        backup_paths = cls.get_backup_paths( block_id, impl )
        for p in backup_paths:
            assert os.path.exists( p ), "No such backup file: %s" % p

        for p in [config.get_db_filename(impl=impl), config.get_snapshots_filename(impl=impl), config.get_lastblock_filename(impl=impl)]:
            pbase = os.path.basename(p)
            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % block_id))
            log.debug("Restoring '%s' to '%s'" % (backup_path, p))
            shutil.copy( backup_path, p )
    
        return True
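A sketch of how these two classmethods might be combined to roll a node back to an earlier block; the class name, module, and block height below are assumptions for illustration:

import os

# Hypothetical rollback to block 1000; 'StateEngine' and 'impl_module'
# stand in for the real class and virtualchain implementation module.
paths = StateEngine.get_backup_paths(1000, impl_module)
if all(os.path.exists(p) for p in paths):
    StateEngine.backup_restore(1000, impl_module)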
Example #21
    def __init__(self, magic_bytes, opcodes, opfields, impl=None, state=None, initial_snapshots={} ):
        """
        Construct a state engine client, optionally from locally-cached 
        state and the set of previously-calculated consensus 
        hashes for each block.
        
        This class will be fed a sequence of sets of transactions, grouped by block
        and ordered by block ID, that each contain an OP_RETURN.  The nulldata
        associated with the OP_RETURN will be parsed, checked, logged, and
        committed by the implementation (impl).  The implementation decides exactly
        what each of these means; this class simply feeds it the transactions
        in the order they appeared on the blockchain.
        
        This class looks for OP_RETURN data that starts with the byte sequence in magic_bytes,
        and then selects only those which start with magic_bytes + op, where op is an
        opcode byte in opcodes.  Magic bytes can be of variable length, but they should
        be specific to this virtual chain.
        
        Expected OP_RETURN data format:
        
         0     M  M+1                      len(OP_RETURN)-M-1
         |-----|--|------------------------|
          magic op payload
        
        The job of the implementation is to translate the above data, plus anything else it
        can learn from the previously-parsed transactions and from other sources, into a
        dictionary of (field: value) tuples that constitute an operation.

        @magic_bytes: the `magic` field above.
        @opcodes: the list of possible values for the `op` field.
        @opfields: a dictionary that maps each `op` to a list of field names. 
        
        The caller may supply an optional argument called 'state', which will be 
        passed into each implementation method.  It is meant to preserve implementation-
        specific state--in particular, whatever state the implementation expects to be 
        present.
        """
        
        self.consensus_hashes = initial_snapshots
        self.pending_ops = defaultdict(list)
        self.magic_bytes = magic_bytes 
        self.opcodes = opcodes[:]
        self.opfields = copy.deepcopy(opfields)
        self.state = state
        self.impl = impl
        self.lastblock = self.impl.get_first_block_id() - 1
        self.pool = None
        self.rejected = {}

        consensus_snapshots_filename = config.get_snapshots_filename()
        lastblock_filename = config.get_lastblock_filename()
        
        # if we crashed during a commit, try to finish
        rc = self.commit( startup=True )
        if not rc:
           log.error("Failed to commit partial data.  Rolling back.")
           self.rollback()
        
        # attempt to load the snapshots 
        if os.path.exists( consensus_snapshots_filename ):
           try:
              
              with open(consensus_snapshots_filename, 'r') as f:
                 
                 db_dict = json.loads(f.read())
                 
                 if 'snapshots' in db_dict:
                     self.consensus_hashes = db_dict['snapshots']
                 
           except Exception, e:
              log.error("Failed to read consensus snapshots at '%s'" % consensus_snapshots_filename )
              raise e
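The docstring's magic | op | payload layout implies a simple prefix filter over nulldata. A minimal sketch of such a filter, assuming single-byte opcodes as described (this is the shape of the check, not the library's actual parser):

def parse_nulldata(nulldata, magic_bytes, opcodes):
    # Ignore OP_RETURN payloads that don't carry our magic prefix.
    if not nulldata.startswith(magic_bytes):
        return None
    # A single opcode byte follows the magic bytes.
    op = nulldata[len(magic_bytes):len(magic_bytes) + 1]
    if op not in opcodes:
        return None
    # Everything after the opcode is the operation payload.
    return op, nulldata[len(magic_bytes) + 1:]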
Example #22
    def backup_restore( cls, block_id, impl, working_dir=None ):
        """
        Restore from a backup, given the virtualchain implementation module and block number
        """
        backup_dir = os.path.join( config.get_working_dir(impl=impl, working_dir=working_dir), "backups" )
        backup_paths = cls.get_backup_paths( block_id, impl, working_dir=working_dir )
        for p in backup_paths:
            assert os.path.exists( p ), "No such backup file: %s" % p

        for p in [config.get_db_filename(impl=impl, working_dir=working_dir), config.get_snapshots_filename(impl=impl, working_dir=working_dir), config.get_lastblock_filename(impl=impl, working_dir=working_dir)]:
            pbase = os.path.basename(p)
            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % block_id))
            log.debug("Restoring '%s' to '%s'" % (backup_path, p))
            shutil.copy( backup_path, p )
    
        return True
Example #23
    def make_backups( self, block_id ):
        """
        If we're doing backups on a regular basis, then 
        carry them out here if it is time to do so.
        This method does nothing otherwise.
        Abort on failure
        """

        # make a backup?
        if self.backup_frequency is not None:
            if (block_id % self.backup_frequency) == 0:

                backup_dir = os.path.join( config.get_working_dir(impl=self.impl), "backups" )
                if not os.path.exists(backup_dir):
                    try:
                        os.makedirs(backup_dir)
                    except Exception, e:
                        log.exception(e)
                        log.error("FATAL: failed to make backup directory '%s'" % backup_dir)
                        traceback.print_stack()
                        os.abort()
                        

                for p in [config.get_db_filename(impl=self.impl), config.get_snapshots_filename(impl=self.impl), config.get_lastblock_filename(impl=self.impl)]:
                    if os.path.exists(p):
                        try:
                            pbase = os.path.basename(p)
                            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % (block_id - 1)))

                            if not os.path.exists( backup_path ):
                                shutil.copy( p, backup_path )
                            else:
                                log.error("Will not overwrite '%s'" % backup_path)

                        except Exception, e:
                            log.exception(e)
                            log.error("FATAL: failed to back up '%s'" % p)
                            traceback.print_stack()
                            os.abort()
Example #24
    def make_backups( self, block_id, working_dir=None ):
        """
        If we're doing backups on a regular basis, then 
        carry them out here if it is time to do so.
        This method does nothing otherwise.
        Abort on failure
        """

        # make a backup?
        if self.backup_frequency is not None:
            if (block_id % self.backup_frequency) == 0:

                backup_dir = os.path.join( config.get_working_dir(impl=self.impl, working_dir=working_dir), "backups" )
                if not os.path.exists(backup_dir):
                    try:
                        os.makedirs(backup_dir)
                    except Exception, e:
                        log.exception(e)
                        log.error("FATAL: failed to make backup directory '%s'" % backup_dir)
                        traceback.print_stack()
                        os.abort()
                        

                for p in [config.get_db_filename(impl=self.impl, working_dir=working_dir), config.get_snapshots_filename(impl=self.impl, working_dir=working_dir), config.get_lastblock_filename(impl=self.impl, working_dir=working_dir)]:
                    if os.path.exists(p):
                        try:
                            pbase = os.path.basename(p)
                            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % (block_id - 1)))

                            if not os.path.exists( backup_path ):
                                shutil.copy( p, backup_path )
                            else:
                                log.error("Will not overwrite '%s'" % backup_path)

                        except Exception, e:
                            log.exception(e)
                            log.error("FATAL: failed to back up '%s'" % p)
                            traceback.print_stack()
                            os.abort()
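The backup schedule is plain modular arithmetic: a backup fires whenever block_id is a multiple of backup_frequency, and the copied files are named after the previous block (block_id - 1). A quick illustration of the trigger:

# Illustration of the trigger condition used in make_backups().
backup_frequency = 1000
assert [b for b in range(2999, 3002) if b % backup_frequency == 0] == [3000]
# the backup taken at block 3000 is saved as "*.bak.2999"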
Example #25
    def __init__(self, magic_bytes, opcodes, impl=None, state=None, op_order=None, initial_snapshots={} ):
        """
        Construct a state engine client, optionally from locally-cached 
        state and the set of previously-calculated consensus 
        hashes for each block.
        
        This class will be fed a sequence of sets of transactions, grouped by block
        and ordered by block ID, that each contain an OP_RETURN.  The nulldata
        associated with the OP_RETURN will be parsed, checked, logged, and
        committed by the implementation (impl).  The implementation decides exactly
        what each of these means; this class simply feeds it the transactions
        in order.
        
        This class looks for OP_RETURN data that starts with the byte sequence in magic_bytes,
        and then selects only those which start with magic_bytes + op, where op is an
        opcode byte in opcodes.  Magic bytes can be of variable length, but they should
        be specific to this virtual chain.
        
        Expected OP_RETURN data format:
        
         0     M  M+1                      40-M-1
         |-----|--|------------------------|
          magic op payload
        
        
        The caller may supply an optional argument called 'state', which will be 
        passed into each implementation method.  It is meant to preserve implementation-
        specific state--in particular, whatever state the implementation expects to be 
        present.
        
        The caller may also specify the order in which each type of operation is 
        processed, by passing a list of opcodes in op_order.
        """
        
        self.consensus_hashes = initial_snapshots
        self.pending_ops = defaultdict(list)
        self.magic_bytes = magic_bytes 
        self.opcodes = opcodes[:]
        self.state = state
        self.op_order = op_order
        self.impl = impl
        self.lastblock = self.impl.get_first_block_id() - 1
        self.pool = None
        self.rejected = {}

        if self.op_order is None:
            order = self.impl.get_op_processing_order()
            self.op_order = order[:] if order is not None else opcodes[:]
       
        # there's always a 'final' operation type, to be processed last
        self.op_order.append('virtualchain_final')

        consensus_snapshots_filename = config.get_snapshots_filename()
        lastblock_filename = config.get_lastblock_filename()
        
        # if we crashed during a commit, try to finish
        rc = self.commit( startup=True )
        if not rc:
           log.error("Failed to commit partial data.  Rolling back.")
           self.rollback()
        
        # attempt to load the snapshots 
        if os.path.exists( consensus_snapshots_filename ):
           try:
              
              with open(consensus_snapshots_filename, 'r') as f:
                 
                 db_dict = json.loads(f.read())
                 
                 if 'snapshots' in db_dict:
                     self.consensus_hashes = db_dict['snapshots']
                 
           except Exception, e:
              log.error("Failed to read consensus snapshots at '%s'" % consensus_snapshots_filename )
              raise e
Example #26
    def __init__(self,
                 magic_bytes,
                 opcodes,
                 opfields,
                 impl=None,
                 state=None,
                 initial_snapshots={}):
        """
        Construct a state engine client, optionally from locally-cached 
        state and the set of previously-calculated consensus 
        hashes for each block.
        
        This class will be fed a sequence of sets of transactions, grouped by block
        and ordered by block ID, that each contain an OP_RETURN.  The nulldata
        associated with the OP_RETURN will be parsed, checked, logged, and
        committed by the implementation (impl).  The implementation decides exactly
        what each of these means; this class simply feeds it the transactions
        in the order they appeared on the blockchain.
        
        This class looks for OP_RETURN data that starts with the byte sequence in magic_bytes,
        and then selects only those which start with magic_bytes + op, where op is an
        opcode byte in opcodes.  Magic bytes can be of variable length, but they should
        be specific to this virtual chain.
        
        Expected OP_RETURN data format:
        
         0     M  M+1                      len(OP_RETURN)-M-1
         |-----|--|------------------------|
          magic op payload
        
        The job of the implementation is to translate the above data, plus anything else it
        can learn from the previously-parsed transactions and from other sources, into a
        dictionary of (field: value) tuples that constitute an operation.

        @magic_bytes: the `magic` field above.
        @opcodes: the list of possible values for the `op` field.
        @opfields: a dictionary that maps each `op` to a list of field names. 
        
        The caller may supply an optional argument called 'state', which will be 
        passed into each implementation method.  It is meant to preserve implementation-
        specific state--in particular, whatever state the implementation expects to be 
        present.
        """

        self.consensus_hashes = initial_snapshots
        self.pending_ops = defaultdict(list)
        self.magic_bytes = magic_bytes
        self.opcodes = opcodes[:]
        self.opfields = copy.deepcopy(opfields)
        self.state = state
        self.impl = impl
        self.lastblock = self.impl.get_first_block_id() - 1
        self.pool = None
        self.rejected = {}

        consensus_snapshots_filename = config.get_snapshots_filename()
        lastblock_filename = config.get_lastblock_filename()

        # if we crashed during a commit, try to finish
        rc = self.commit(startup=True)
        if not rc:
            log.error("Failed to commit partial data.  Rolling back.")
            self.rollback()

        # attempt to load the snapshots
        if os.path.exists(consensus_snapshots_filename):
            try:

                with open(consensus_snapshots_filename, 'r') as f:

                    db_dict = json.loads(f.read())

                    if 'snapshots' in db_dict:
                        self.consensus_hashes = db_dict['snapshots']

            except Exception, e:
                log.error("Failed to read consensus snapshots at '%s'" %
                          consensus_snapshots_filename)
                raise e
Example #27
    def __init__(self,
                 magic_bytes,
                 opcodes,
                 impl=None,
                 state=None,
                 op_order=None,
                 initial_snapshots={}):
        """
        Construct a state engine client, optionally from locally-cached 
        state and the set of previously-calculated consensus 
        hashes for each block.
        
        This class will be fed a sequence of sets of transactions, grouped by block
        and ordered by block ID, that each contain an OP_RETURN.  The nulldata
        associated with the OP_RETURN will be parsed, checked, logged, and
        committed by the implementation (impl).  The implementation decides exactly
        what each of these means; this class simply feeds it the transactions
        in order.
        
        This class looks for OP_RETURN data that starts with the byte sequence in magic_bytes,
        and then selects only those which start with magic_bytes + op, where op is an
        opcode byte in opcodes.  Magic bytes can be of variable length, but they should
        be specific to this virtual chain.
        
        Expected OP_RETURN data format:
        
         0     M  M+1                      40-M-1
         |-----|--|------------------------|
          magic op payload
        
        
        The caller may supply an optional argument called 'state', which will be 
        passed into each implementation method.  It is meant to preserve implementation-
        specific state--in particular, whatever state the implementation expects to be 
        present.
        
        The caller may also specify the order in which each type of operation is 
        processed, by passing a list of opcodes in op_order.
        """

        self.consensus_hashes = initial_snapshots
        self.pending_ops = defaultdict(list)
        self.magic_bytes = magic_bytes
        self.opcodes = opcodes[:]
        self.state = state
        self.op_order = op_order
        self.impl = impl
        self.lastblock = self.impl.get_first_block_id() - 1
        self.pool = None
        self.rejected = {}

        if self.op_order is None:
            order = self.impl.get_op_processing_order()
            self.op_order = order[:] if order is not None else opcodes[:]

        # there's always a 'final' operation type, to be processed last
        self.op_order.append('virtualchain_final')

        consensus_snapshots_filename = config.get_snapshots_filename()
        lastblock_filename = config.get_lastblock_filename()

        # if we crashed during a commit, try to finish
        rc = self.commit(startup=True)
        if not rc:
            log.error("Failed to commit partial data.  Rolling back.")
            self.rollback()

        # attempt to load the snapshots
        if os.path.exists(consensus_snapshots_filename):
            try:

                with open(consensus_snapshots_filename, 'r') as f:

                    db_dict = json.loads(f.read())

                    if 'snapshots' in db_dict:
                        self.consensus_hashes = db_dict['snapshots']

            except Exception, e:
                log.error("Failed to read consensus snapshots at '%s'" %
                          consensus_snapshots_filename)
                raise e
Example #28
    def save(self, block_id, consensus_hash, pending_ops, backup=False):
        """
        Write out all state to the working directory.
        Calls the implementation's 'db_save' method.
        
        Return True on success 
        Return False on error
        Raise exception if block_id represents a block 
         we've already processed.
        """

        if block_id < self.lastblock:
            raise Exception("Already processed up to block %s (got %s)" %
                            (self.lastblock, block_id))

        # stage data to temporary files
        tmp_db_filename = (config.get_db_filename() + ".tmp")
        tmp_snapshot_filename = (config.get_snapshots_filename() + ".tmp")
        tmp_lastblock_filename = (config.get_lastblock_filename() + ".tmp")

        with open(tmp_snapshot_filename, 'w') as f:
            db_dict = {'snapshots': self.consensus_hashes}
            f.write(json.dumps(db_dict))
            f.flush()

        # put this last...
        with open(tmp_lastblock_filename, "w") as lastblock_f:
            lastblock_f.write("%s" % block_id)
            lastblock_f.flush()

        rc = self.impl.db_save(block_id,
                               consensus_hash,
                               pending_ops,
                               tmp_db_filename,
                               db_state=self.state)
        if not rc:
            # failed to save
            log.error("Implementation failed to save at block %s to %s" %
                      (block_id, tmp_db_filename))

            try:
                os.unlink(tmp_lastblock_filename)
            except:
                pass

            try:
                os.unlink(tmp_snapshot_filename)
            except:
                pass

            return False

        rc = self.commit(backup=backup)
        if not rc:
            log.error("Failed to commit data at block %s.  Rolling back." %
                      block_id)

            self.rollback()
            return False

        else:
            self.lastblock = block_id
            return True
Example #29
            if startup:
                # make sure we can load this 
                try:
                    with open(tmp_snapshot_filename, "r") as f:
                        db_txt = f.read()

                    db_json = json.loads(db_txt)
                except:
                    log.error("Partial write detected: corrupt partially-committed db!  Will rollback.")
                    return False

        
        backup_time = int(time.time() * 1000000)
       
        listing = []
        listing.append( ("lastblock", tmp_lastblock_filename, config.get_lastblock_filename(impl=self.impl)) )
        listing.append( ("snapshots", tmp_snapshot_filename, config.get_snapshots_filename(impl=self.impl)) )
        listing.append( ("db", tmp_db_filename, config.get_db_filename(impl=self.impl)) )

        for i in xrange(0, len(listing)):
            file_type, tmp_filename, filename = listing[i]
            
            dir_path = os.path.dirname( tmp_filename )
            dirfd = None
            try:
                dirfd = os.open(dir_path, os.O_DIRECTORY)
                os.fsync(dirfd)
            except Exception, e:
                log.exception(e)
                log.error("FATAL: failed to sync directory %s" % dir_path)
                traceback.print_stack()