    def get_backup_blocks( cls, impl ):
        """
        Get the set of block IDs that were backed up
        """
        ret = []
        backup_dir = os.path.join( config.get_working_dir(impl=impl), "backups" )
        if not os.path.exists(backup_dir):
            return []

        for name in os.listdir( backup_dir ):
            if ".bak." not in name:
                continue 

            suffix = name.split(".bak.")[-1]
            try:
                block_id = int(suffix)
            except ValueError:
                # not a well-formed backup name
                continue

            # every backup file for this block must exist
            backup_paths = cls.get_backup_paths( block_id, impl )
            for p in backup_paths:
                if not os.path.exists(p):
                    # incomplete backup; skip this block
                    block_id = None
                    break

            if block_id is not None:
                # have a complete backup at this block
                ret.append(block_id)

        return ret
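A block only counts as backed up if every file that get_backup_paths() returns for it exists on disk. A minimal standalone sketch of that rule (the helper name is illustrative, not taken from this listing):

import os

# a block id counts as backed up only when every expected file exists
def is_complete_backup(backup_paths):
    return all(os.path.exists(p) for p in backup_paths)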
Example #2
    def get_backup_blocks( cls, impl, working_dir=None ):
        """
        Get the set of block IDs that were backed up
        """
        ret = []
        backup_dir = os.path.join( config.get_working_dir(impl=impl, working_dir=working_dir), "backups" )
        if not os.path.exists(backup_dir):
            return []

        for name in os.listdir( backup_dir ):
            if ".bak." not in name:
                continue 

            suffix = name.split(".bak.")[-1]
            try:
                block_id = int(suffix)
            except ValueError:
                # not a well-formed backup name
                continue

            # every backup file for this block must exist
            backup_paths = cls.get_backup_paths( block_id, impl, working_dir=working_dir )
            for p in backup_paths:
                if not os.path.exists(p):
                    # incomplete backup; skip this block
                    block_id = None
                    break

            if block_id is not None:
                # have a complete backup at this block
                ret.append(block_id)

        return ret
Example #3
    def get_backup_paths( cls, block_id, impl, working_dir=None ):
        """
        Get the set of backup paths, given the virtualchain implementation module and block number
        """
        backup_dir = os.path.join( config.get_working_dir(impl=impl, working_dir=working_dir), "backups" )
        backup_paths = []
        for p in [config.get_db_filename(impl=impl, working_dir=working_dir),
                  config.get_snapshots_filename(impl=impl, working_dir=working_dir),
                  config.get_lastblock_filename(impl=impl, working_dir=working_dir)]:
            pbase = os.path.basename(p)
            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % block_id))
            backup_paths.append( backup_path )

        return backup_paths
Example #4
    def get_backup_paths( cls, block_id, impl ):
        """
        Get the set of backup paths, given the virtualchain implementation module and block number
        """
        backup_dir = os.path.join( config.get_working_dir(impl=impl), "backups" )
        backup_paths = []
        for p in [config.get_db_filename(impl=impl),
                  config.get_snapshots_filename(impl=impl),
                  config.get_lastblock_filename(impl=impl)]:
            pbase = os.path.basename(p)
            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % block_id))
            backup_paths.append( backup_path )

        return backup_paths
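Both variants compose backup filenames the same way: the base name of each chainstate file plus a ".bak.<block_id>" suffix, inside the backups/ directory. A self-contained sketch of that scheme (the paths and names here are made up for illustration):

import os

def example_backup_path(working_dir, filename, block_id):
    # mirror <filename> into <working_dir>/backups/<filename>.bak.<block_id>
    backup_dir = os.path.join(working_dir, "backups")
    return os.path.join(backup_dir, "%s.bak.%s" % (filename, block_id))

print(example_backup_path("/tmp/vc", "chain.db", 1234))
# /tmp/vc/backups/chain.db.bak.1234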
Example #5
def main(args):
    with open(config.get_json_path(), 'rb') as fp:
        patches = data.parse_json(fp.read())

    working_dir = config.get_working_dir()
    commit = gitcmd.get_sha1(config.get_master_branch())

    bots = args.bots
    if not bots:
        bots = config.get_buildbots()

    for bot in bots:
        q = config.get_buildbot_query(bot)
        run_bot(patches, working_dir, commit, bot, q)
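main() only needs an object with a bots attribute; an empty list makes it fall back to config.get_buildbots(). A hedged sketch of argument parsing that could produce such an object (the CLI layout is an assumption, not taken from this listing):

import argparse

def parse_args(argv=None):
    parser = argparse.ArgumentParser(description="Run queued patches through buildbots")
    # zero or more bot names; an empty list triggers the config fallback
    parser.add_argument("bots", nargs="*",
                        help="buildbot names (default: config.get_buildbots())")
    return parser.parse_args(argv)

# main(parse_args())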
Example #7
    def backup_restore( cls, block_id, impl, working_dir=None ):
        """
        Restore from a backup, given the virtualchain implementation module and block number
        """
        backup_dir = os.path.join( config.get_working_dir(impl=impl, working_dir=working_dir), "backups" )
        backup_paths = cls.get_backup_paths( block_id, impl, working_dir=working_dir )
        for p in backup_paths:
            assert os.path.exists( p ), "No such backup file: %s" % p

        for p in [config.get_db_filename(impl=impl, working_dir=working_dir),
                  config.get_snapshots_filename(impl=impl, working_dir=working_dir),
                  config.get_lastblock_filename(impl=impl, working_dir=working_dir)]:
            pbase = os.path.basename(p)
            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % block_id))
            log.debug("Restoring '%s' to '%s'" % (backup_path, p))
            shutil.copy( backup_path, p )
    
        return True
Example #8
    def backup_restore( cls, block_id, impl ):
        """
        Restore from a backup, given the virtualchain implementation module and block number
        """
        backup_dir = os.path.join( config.get_working_dir(impl=impl), "backups" )
        backup_paths = cls.get_backup_paths( block_id, impl )
        for p in backup_paths:
            assert os.path.exists( p ), "No such backup file: %s" % p

        for p in [config.get_db_filename(impl=impl),
                  config.get_snapshots_filename(impl=impl),
                  config.get_lastblock_filename(impl=impl)]:
            pbase = os.path.basename(p)
            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % block_id))
            log.debug("Restoring '%s' to '%s'" % (backup_path, p))
            shutil.copy( backup_path, p )
    
        return True
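A hedged usage sketch that combines get_backup_blocks() and backup_restore() to roll back to the newest complete backup; "engine_cls" stands in for whatever class defines these classmethods, and "impl" is the virtualchain implementation module:

def restore_latest_backup(engine_cls, impl):
    blocks = engine_cls.get_backup_blocks(impl)
    if not blocks:
        raise ValueError("no complete backups found")
    latest = max(blocks)            # newest backed-up block
    engine_cls.backup_restore(latest, impl)
    return latest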
Example #9
    def clear_old_backups( self, block_id, working_dir=None ):
        """
        If we limit how long backups are kept, then clean out backups
        older than block_id - backup_max_age (given in the constructor).

        This method does nothing otherwise.
        """
        
        if self.backup_max_age is None:
            # never delete backups
            return 

        # find old backups 
        backup_dir = os.path.join( config.get_working_dir(impl=self.impl, working_dir=working_dir), "backups" )
        if not os.path.exists(backup_dir):
            return 

        backups = os.listdir( backup_dir )
        for backup_name in backups:
            if backup_name in [".", ".."]:
                continue 

            backup_path = os.path.join(backup_dir, backup_name)
            backup_block = None 

            try:
                backup_block = int(backup_path.split(".")[-1])
            except ValueError:
                # not a backup file
                log.info("Skipping non-backup '%s'" % backup_path)
                continue

            if not backup_path.endswith( ".bak.%s" % backup_block ):
                # not a backup file
                log.info("Skipping non-backup '%s'" % backup_path)
                continue
        
            if backup_block + self.backup_max_age < block_id:
                # dead 
                log.info("Removing old backup '%s'" % backup_path)
                try:
                    os.unlink(backup_path)
                except OSError:
                    # best-effort removal
                    pass
Example #10
    def clear_old_backups( self, block_id ):
        """
        If we limit how long backups are kept, then clean out backups
        older than block_id - backup_max_age (given in the constructor).

        This method does nothing otherwise.
        """
        
        if self.backup_max_age is None:
            # never delete backups
            return 

        # find old backups 
        backup_dir = os.path.join( config.get_working_dir(impl=self.impl), "backups" )
        if not os.path.exists(backup_dir):
            return 

        backups = os.listdir( backup_dir )
        for backup_name in backups:
            if backup_name in [".", ".."]:
                continue 

            backup_path = os.path.join(backup_dir, backup_name)
            backup_block = None 

            try:
                backup_block = int(backup_path.split(".")[-1])
            except ValueError:
                # not a backup file
                log.info("Skipping non-backup '%s'" % backup_path)
                continue

            if not backup_path.endswith( ".bak.%s" % backup_block ):
                # not a backup file
                log.info("Skipping non-backup '%s'" % backup_path)
                continue
        
            if backup_block + self.backup_max_age < block_id:
                # dead 
                log.info("Removing old backup '%s'" % backup_path)
                try:
                    os.unlink(backup_path)
                except OSError:
                    # best-effort removal
                    pass
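The retention rule reduces to one comparison: a backup tagged with backup_block expires once it falls more than backup_max_age blocks behind the current block_id, and backup_max_age=None disables expiry entirely. A standalone sketch of just that rule:

def is_expired(backup_block, block_id, backup_max_age):
    if backup_max_age is None:
        return False                # never delete backups
    return backup_block + backup_max_age < block_id

assert is_expired(100, 201, 100)      # 100 + 100 < 201: expired
assert not is_expired(100, 200, 100)  # exactly at the age limit: kept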
Example #11
    def make_backups( self, block_id, working_dir=None ):
        """
        If we're making backups on a regular basis, then
        carry one out here if it is time to do so.
        This method does nothing otherwise.
        Aborts the process on failure.
        """

        # make a backup?
        if self.backup_frequency is not None:
            if (block_id % self.backup_frequency) == 0:

                backup_dir = os.path.join( config.get_working_dir(impl=self.impl, working_dir=working_dir), "backups" )
                if not os.path.exists(backup_dir):
                    try:
                        os.makedirs(backup_dir)
                    except Exception as e:
                        log.exception(e)
                        log.error("FATAL: failed to make backup directory '%s'" % backup_dir)
                        traceback.print_stack()
                        os.abort()
                        

                for p in [config.get_db_filename(impl=self.impl, working_dir=working_dir),
                          config.get_snapshots_filename(impl=self.impl, working_dir=working_dir),
                          config.get_lastblock_filename(impl=self.impl, working_dir=working_dir)]:
                    if os.path.exists(p):
                        try:
                            pbase = os.path.basename(p)
                            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % (block_id - 1)))

                            if not os.path.exists( backup_path ):
                                shutil.copy( p, backup_path )
                            else:
                                log.error("Will not overwrite '%s'" % backup_path)

                        except Exception as e:
                            log.exception(e)
                            log.error("FATAL: failed to back up '%s'" % p)
                            traceback.print_stack()
                            os.abort()
Example #12
    def make_backups( self, block_id ):
        """
        If we're making backups on a regular basis, then
        carry one out here if it is time to do so.
        This method does nothing otherwise.
        Aborts the process on failure.
        """

        # make a backup?
        if self.backup_frequency is not None:
            if (block_id % self.backup_frequency) == 0:

                backup_dir = os.path.join( config.get_working_dir(impl=self.impl), "backups" )
                if not os.path.exists(backup_dir):
                    try:
                        os.makedirs(backup_dir)
                    except Exception as e:
                        log.exception(e)
                        log.error("FATAL: failed to make backup directory '%s'" % backup_dir)
                        traceback.print_stack()
                        os.abort()
                        

                for p in [config.get_db_filename(impl=self.impl),
                          config.get_snapshots_filename(impl=self.impl),
                          config.get_lastblock_filename(impl=self.impl)]:
                    if os.path.exists(p):
                        try:
                            pbase = os.path.basename(p)
                            backup_path = os.path.join( backup_dir, pbase + (".bak.%s" % (block_id - 1)))

                            if not os.path.exists( backup_path ):
                                shutil.copy( p, backup_path )
                            else:
                                log.error("Will not overwrite '%s'" % backup_path)

                        except Exception as e:
                            log.exception(e)
                            log.error("FATAL: failed to back up '%s'" % p)
                            traceback.print_stack()
                            os.abort()
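The scheduling rule is also worth stating on its own: a backup fires only when block_id is a multiple of backup_frequency, and the copied files are tagged with block_id - 1, the last block whose state they capture. A standalone sketch:

def backup_suffix(block_id, backup_frequency):
    if backup_frequency is None or block_id % backup_frequency != 0:
        return None                 # not time to back up
    return ".bak.%s" % (block_id - 1)

assert backup_suffix(1000, 100) == ".bak.999"
assert backup_suffix(1001, 100) is None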
Example #13
    def run(self, *args, task_id=None, **kwargs):
        """
        Run the job.

        This is an override of `Task.run` which is the method
        that actually gets called by Celery each time a task
        is processed. It wraps `self.do()` to handle
        logging, exceptions, termination etc.

        Most jobs need to operate within a project's working directory
        and project `File` paths are always relative to those.
        To avoid code repetition and potential errors this method requires
        that a project id is supplied and changes into the working
        directory of that project.
        """
        project = kwargs.get("project")
        if project is None:
            raise ValueError("Project number must be provided as an argument!")

        current_dir = os.getcwd()
        working_dir = get_working_dir(project)
        ensure_dir(working_dir)

        self.begin(task_id)
        try:
            os.chdir(working_dir)
            result = self.do(*args, **kwargs)
            return self.success(result)
        except SoftTimeLimitExceeded:
            return self.terminated()
        except Ignore as exc:
            raise exc
        except Exception as exc:
            raise self.failure(exc)
        finally:
            os.chdir(current_dir)
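The try/finally around os.chdir() is the heart of this override: the job runs inside the project's working directory and the previous directory is always restored, even on error. The same discipline as a standalone context manager (a sketch, not part of the class above):

import os
from contextlib import contextmanager

@contextmanager
def in_working_dir(path):
    current = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(current)           # always restore the caller's cwd

# with in_working_dir(working_dir): relative File paths now resolve
# against the project's working directory.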
Example #14
def run_scenario(scenario,
                 config_file,
                 client_config_file,
                 interactive=False,
                 blocktime=10):
    """
    * set up the virtualchain to use mock UTXO provider and mock bitcoin blockchain
    * seed it with the initial value in the wallet
    * set the initial consensus hash
    * start the api server
    * run the scenario method
    * run the check method
    """

    virtualchain_working_dir = os.environ["VIRTUALCHAIN_WORKING_DIR"]

    spv_header_path = os.path.join(virtualchain_working_dir, "spv_headers.dat")
    virtualchain.setup_virtualchain(state_engine)

    db = state_engine.get_db_state(disposition=state_engine.DISPOSITION_RW)

    log.info("Connect to the bitcoind ")
    bitcoind = bitcoin_regtest_connect(bitcoin_regtest_opts())
    working_dir = get_working_dir()

    utxo_opts = {}

    # Start the pinger
    pinger = Pinger()
    pinger.start()

    # set up the environment
    testlib.set_utxo_opts(utxo_opts)
    testlib.set_bitcoind(bitcoind)
    testlib.set_state_engine(db)

    test_env = {
        "sync_virtualchain_upcall": lambda: sync_virtualchain_upcall(
            zonefilemanage_opts=None, need_db_refresh=False),
        "next_block_upcall": bitcoin_regtest_next_block,
        "working_dir": working_dir,
        "bitcoind": bitcoind,
        "bitcoind_opts": bitcoin_regtest_opts(),
        "spv_header_path": spv_header_path
    }

    # Sync initial utxos
    testlib.next_block(**test_env)

    # Load the scenario into the mock blockchain and mock utxo provider
    try:
        rc = scenario.scenario(scenario.wallets, **test_env)
    except Exception as e:
        log.exception(e)
        traceback.print_exc()