def get_db_state():
    """
    (required by virtualchain state engine)

    Callback to the virtual chain state engine.  Get a handle to our
    state engine implementation (i.e. our name database), loading it
    from disk on first use.

    Returns the shared BlockstoreDB instance.
    """
    global blockstore_db, blockstore_db_lock

    # Hold the lock across the whole check-and-load so two threads cannot
    # load the database concurrently.  The original acquire/release pairs
    # leaked the lock if BlockstoreDB() raised; try/finally guarantees
    # release on every path.
    blockstore_db_lock.acquire()
    try:
        if blockstore_db is not None:
            return blockstore_db

        db_filename = virtualchain.get_db_filename()
        log.info("(Re)Loading blockstore state from '%s'" % db_filename)
        blockstore_db = BlockstoreDB(db_filename)
        return blockstore_db
    finally:
        blockstore_db_lock.release()
def get_db_state():
    """
    (required by virtualchain state engine)

    Callback to the virtual chain state engine.  Get a handle to our
    state engine implementation (i.e. our name database).  The cached
    handle is dropped and rebuilt once it is older than
    REINDEX_FREQUENCY seconds.
    """
    global blockstore_db
    global last_load_time

    # force invalidation of a stale cached handle
    age = time.time() - last_load_time
    if age > REINDEX_FREQUENCY:
        blockstore_db = None

    if blockstore_db is None:
        path = virtualchain.get_db_filename()
        log.info("(Re)Loading blockstore state from '%s'" % path)
        blockstore_db = BlockstoreDB(path)
        last_load_time = time.time()

    return blockstore_db
def need_db_reload():
    """
    Do we need to instantiate/reload the database?

    Returns True when the in-RAM copy is missing, the on-disk copy is
    missing, the on-disk copy is newer than what we loaded, or the
    periodic re-check interval has elapsed.
    """
    global blockstack_db
    global last_load_time
    global last_check_time

    db_filename = virtualchain.get_db_filename()

    # stat up front (if present) so we can compare mtimes below
    stat_info = os.stat(db_filename) if os.path.exists(db_filename) else None

    if blockstack_db is None:
        # doesn't exist in RAM
        log.debug("cache consistency: DB is not in RAM")
        return True

    if not os.path.exists(db_filename):
        # doesn't exist on disk
        log.debug("cache consistency: DB does not exist on disk")
        return True

    if stat_info is not None and stat_info.st_mtime != last_load_time:
        # stale--new version exists on disk
        log.debug("cache consistency: DB was modified; in-RAM copy is stale")
        return True

    if time.time() - last_check_time > 600:
        # just for good measure--don't keep it around past the blocktime
        log.debug("cache consistency: Blocktime has passed")
        return True

    return False
def get_db_state():
    """
    (required by virtualchain state engine)

    Callback to the virtual chain state engine.  Get a handle to our
    state engine implementation (i.e. our name database), re-creating
    it after REINDEX_FREQUENCY seconds.
    """
    global blockstore_db
    global last_load_time

    # force invalidation once the cached handle is too old
    if time.time() - last_load_time > REINDEX_FREQUENCY:
        blockstore_db = None

    if blockstore_db is not None:
        # cached handle is still fresh
        return blockstore_db

    db_filename = virtualchain.get_db_filename()
    log.info("(Re)Loading blockstore state from '%s'" % db_filename)

    blockstore_db = BlockstoreDB(db_filename)
    last_load_time = time.time()
    return blockstore_db
def get_db_state():
    """
    (required by virtualchain state engine)

    Callback to the virtual chain state engine.  Get a handle to our
    state engine implementation (i.e. our name database), creating it
    on first use.

    Returns the shared BlockstoreDB instance.
    """
    global blockstore_db, blockstore_db_lock

    # try/finally ensures the lock is always released; the previous
    # acquire/release pairs leaked the lock (and deadlocked future
    # callers) if BlockstoreDB() raised.
    blockstore_db_lock.acquire()
    try:
        if blockstore_db is not None:
            return blockstore_db

        db_filename = virtualchain.get_db_filename()
        log.info("(Re)Loading blockstore state from '%s'" % db_filename)
        blockstore_db = BlockstoreDB(db_filename)
        return blockstore_db
    finally:
        blockstore_db_lock.release()
def sync_blockchain(bt_opts, last_block, expected_snapshots={}, **virtualchain_args):
    """
    synchronize state with the blockchain.
    Return True on success
    Return False if we're supposed to stop indexing
    Abort on error

    @expected_snapshots maps block IDs to expected consensus hashes; it
    is only read here, never mutated (so the shared default is safe).
    """
    # make this usable even if we haven't explicitly configured virtualchain
    impl = sys.modules[__name__]
    if virtualchain.get_implementation() is not None:
        impl = None

    log.info("Synchronizing database up to block %s" % last_block)

    db_filename = virtualchain.get_db_filename(impl=impl)

    # NOTE: this is the only place where a read-write handle should be created,
    # since this is the only place where the db should be modified.
    new_db = BlockstackDB.borrow_readwrite_instance(db_filename, last_block, expected_snapshots=expected_snapshots)
    try:
        rc = virtualchain.sync_virtualchain(bt_opts, last_block, new_db, expected_snapshots=expected_snapshots, **virtualchain_args)
    finally:
        # always give the handle back, even if indexing raised; otherwise
        # the read-write instance stays borrowed forever.
        BlockstackDB.release_readwrite_instance(new_db, last_block)

    return rc
def sync_blockchain(bc_config, last_block=None, expected_snapshots={}, **virtualchain_args):
    """
    synchronize state with the blockchain.
    Return True on success
    Return False if we're supposed to stop indexing
    Abort on error
    """
    # use this module as the virtualchain implementation unless one was
    # configured explicitly
    impl = None
    if virtualchain.get_implementation() is None:
        impl = sys.modules[__name__]

    db_filename = virtualchain.get_db_filename(impl=impl)

    # read-write handle; must be closed even if indexing fails
    new_db = TalosPolicyDB(db_filename, expected_snapshots=expected_snapshots, read_only=False)
    try:
        if last_block is None:
            last_block = _get_newest_block(bc_config)

        success = virtualchain.sync_virtualchain(
            bc_config, last_block, new_db,
            expected_snapshots=expected_snapshots)
    finally:
        new_db.close()

    return success
def get_db_state():
    """
    Return an opaque 'state' object that will be preserved across calls
    to the blockchain indexing callbacks.
    """
    # fall back to this module as the virtualchain implementation if
    # none has been configured yet
    impl = virtualchain.get_implementation()
    if impl is None:
        impl = sys.modules[__name__]

    return TalosPolicyDB(virtualchain.get_db_filename(impl=impl))
def get_db_state(disposition=DISPOSITION_RO): """ (required by virtualchain state engine) Callback to the virtual chain state engine. Get a handle to our state engine implementation (i.e. our name database). Note that in this implementation, the database handle returned will only support read-only operations by default. NO COMMITS WILL BE ALLOWED. """ # make this usable even if we haven't explicitly configured virtualchain impl = virtualchain.get_implementation() if impl is None: impl = sys.modules[__name__] db_filename = virtualchain.get_db_filename(impl=impl) lastblock_filename = virtualchain.get_lastblock_filename(impl=impl) lastblock = None firstcheck = True for path in [db_filename, lastblock_filename]: if os.path.exists(path): # have already created the db firstcheck = False if not firstcheck and not os.path.exists(lastblock_filename): # this can't ever happen log.error("FATAL: no such file or directory: %s" % lastblock_filename) os.abort() # verify that it is well-formed, if it exists elif os.path.exists(lastblock_filename): try: with open(lastblock_filename, "r") as f: lastblock = int(f.read().strip()) except Exception, e: # this can't ever happen log.error("FATAL: failed to parse: %s" % lastblock_filename) log.exception(e) os.abort()
def get_db_state( disposition=DISPOSITION_RO ): """ (required by virtualchain state engine) Callback to the virtual chain state engine. Get a handle to our state engine implementation (i.e. our name database). Note that in this implementation, the database handle returned will only support read-only operations by default. NO COMMITS WILL BE ALLOWED. """ # make this usable even if we haven't explicitly configured virtualchain impl = virtualchain.get_implementation() if impl is None: impl = sys.modules[__name__] db_filename = virtualchain.get_db_filename(impl=impl) lastblock_filename = virtualchain.get_lastblock_filename(impl=impl) lastblock = None firstcheck = True for path in [db_filename, lastblock_filename]: if os.path.exists( path ): # have already created the db firstcheck = False if not firstcheck and not os.path.exists( lastblock_filename ): # this can't ever happen log.error("FATAL: no such file or directory: %s" % lastblock_filename ) os.abort() # verify that it is well-formed, if it exists elif os.path.exists( lastblock_filename ): try: with open(lastblock_filename, "r") as f: lastblock = int( f.read().strip() ) except Exception, e: # this can't ever happen log.error("FATAL: failed to parse: %s" % lastblock_filename) log.exception(e) os.abort()
def sync_blockchain(bt_opts, last_block):
    """
    synchronize state with the blockchain.
    build up the next blockstore_db

    The new database is built outside the lock so readers are not
    blocked during re-indexing; only the handle swap is serialized.
    """
    global blockstore_db, blockstore_db_lock

    log.info("Synchronizing database up to block %s" % last_block)

    db_filename = virtualchain.get_db_filename()
    new_db = BlockstoreDB(db_filename)
    virtualchain.sync_virtualchain(bt_opts, last_block, new_db)

    # refresh: swap in the new handle under the lock.  The context
    # manager guarantees the lock is released even on error (the old
    # explicit acquire/release did not); rebinding the global also drops
    # the old db reference, so the previous 'del' was redundant.
    with blockstore_db_lock:
        blockstore_db = new_db
def sync_blockchain( bt_opts, last_block ):
    """
    synchronize state with the blockchain.
    build up the next blockstore_db

    Indexing happens on a fresh BlockstoreDB instance; the shared global
    handle is only swapped at the end, under the lock.
    """
    global blockstore_db, blockstore_db_lock

    log.info("Synchronizing database up to block %s" % last_block)

    db_filename = virtualchain.get_db_filename()
    new_db = BlockstoreDB( db_filename )
    virtualchain.sync_virtualchain( bt_opts, last_block, new_db )

    # refresh: use the lock as a context manager so it is released even
    # if the swap raises; rebinding the global drops the old reference,
    # making the former explicit 'del' unnecessary.
    with blockstore_db_lock:
        blockstore_db = new_db
def get_db_state(disposition=None):
    """
    (required by virtualchain state engine)

    Callback to the virtual chain state engine.  Get a handle to our
    state engine implementation (i.e. our name database)

    @disposition is for compatibility.  It is ignored.
    """
    global blockstack_db
    global last_load_time
    global last_check_time
    global reload_lock

    # Hold the lock for the entire check-and-reload.  try/finally is
    # essential: the original released the lock only on the success path,
    # so an exception from os.stat() or BlockstackDB() would leave it
    # held forever and deadlock every subsequent caller.
    reload_lock.acquire()
    try:
        mtime = None
        db_filename = virtualchain.get_db_filename()
        if os.path.exists(db_filename):
            sb = os.stat(db_filename)
            mtime = sb.st_mtime

        if need_db_reload():
            log.info("(Re)Loading blockstack state from '%s'" % db_filename )
            blockstack_db = BlockstackDB( db_filename )
            last_check_time = time.time()

            # record the on-disk mtime we loaded, so need_db_reload() can
            # detect a newer on-disk copy later
            if mtime is not None:
                last_load_time = mtime
        else:
            log.debug("cache consistency: Using cached blockstack state")

        return blockstack_db
    finally:
        reload_lock.release()
def clean( confirm=True ): """ Remove blockstore's db, lastblock, and snapshot files. Prompt for confirmation """ delete = False exit_status = 0 if confirm: warning = "WARNING: THIS WILL DELETE YOUR BLOCKSTORE DATABASE!\n" warning+= "Are you sure you want to proceed?\n" warning+= "Type 'YES' if so: " value = raw_input( warning ) if value != "YES": sys.exit(exit_status) else: delete = True else: delete = True if delete: print "Deleting..." db_filename = virtualchain.get_db_filename() lastblock_filename = virtualchain.get_lastblock_filename() snapshots_filename = virtualchain.get_snapshots_filename() for path in [db_filename, lastblock_filename, snapshots_filename]: try: os.unlink( path ) except: log.warning("Unable to delete '%s'" % path) exit_status = 1 sys.exit(exit_status)
def sync_blockchain( bt_opts, last_block, expected_snapshots={}, **virtualchain_args ):
    """
    synchronize state with the blockchain.
    Return True on success
    Return False if we're supposed to stop indexing
    Abort on error

    @expected_snapshots maps block IDs to expected consensus hashes; it
    is only read here, never mutated (so the shared default is safe).
    """
    # make this usable even if we haven't explicitly configured virtualchain
    impl = sys.modules[__name__]
    if virtualchain.get_implementation() is not None:
        impl = None

    log.info("Synchronizing database up to block %s" % last_block)

    db_filename = virtualchain.get_db_filename(impl=impl)

    # NOTE: this is the only place where a read-write handle should be created,
    # since this is the only place where the db should be modified.
    new_db = BlockstackDB.borrow_readwrite_instance( db_filename, last_block, expected_snapshots=expected_snapshots )
    try:
        rc = virtualchain.sync_virtualchain( bt_opts, last_block, new_db, expected_snapshots=expected_snapshots, **virtualchain_args )
    finally:
        # always release the borrowed handle, even if indexing raised;
        # otherwise the read-write instance stays borrowed forever.
        BlockstackDB.release_readwrite_instance( new_db, last_block )

    return rc
def rebuild_database( target_block_id, untrusted_db_path, working_db_path=None, resume_dir=None, start_block=None, expected_snapshots={} ):
    """
    Given a target block ID and a path to an (untrusted) db, reconstruct it in a temporary directory by
    replaying all the nameops it contains.

    Optionally check that the snapshots in @expected_snapshots match up as we verify.
    @expected_snapshots maps str(block_id) to str(consensus hash)

    Return the consensus hash calculated at the target block.
    Return None on verification failure (i.e. we got a different consensus hash than one for the same block in expected_snapshots)
    """

    # reconfigure the virtualchain to use a temporary directory,
    # so we don't interfere with this instance's primary database
    working_dir = None
    if resume_dir is None:
        working_dir = tempfile.mkdtemp( prefix='blockstack-verify-database-' )
    else:
        working_dir = resume_dir

    blockstack_state_engine.working_dir = working_dir
    virtualchain.setup_virtualchain( impl=blockstack_state_engine )

    if resume_dir is None:
        # not resuming
        start_block = virtualchain.get_first_block_id()
    else:
        # resuming: pick up from the last processed block, falling back
        # to the caller-supplied start block if none was recorded
        old_start_block = start_block
        start_block = get_lastblock()
        if start_block is None:
            start_block = old_start_block

    log.debug( "Rebuilding database from %s to %s" % (start_block, target_block_id) )

    # feed in operations, block by block, from the untrusted database
    untrusted_db = BlockstackDB( untrusted_db_path, DISPOSITION_RO )

    # working db, to build up the operations in the untrusted db block-by-block
    working_db = None
    if working_db_path is None:
        working_db_path = virtualchain.get_db_filename()

    working_db = BlockstackDB( working_db_path, DISPOSITION_RW )

    # map block ID to consensus hashes
    consensus_hashes = {}

    for block_id in xrange( start_block, target_block_id+1 ):

        untrusted_db.lastblock = block_id
        virtualchain_ops = block_to_virtualchain_ops( block_id, working_db, untrusted_db )

        # feed ops to virtualchain to reconstruct the db at this block
        consensus_hash = working_db.process_block( block_id, virtualchain_ops )
        log.debug("VERIFY CONSENSUS(%s): %s" % (block_id, consensus_hash))

        consensus_hashes[block_id] = consensus_hash

        if block_id in expected_snapshots:
            if expected_snapshots[block_id] != consensus_hash:
                # BUG FIX: this read 'expected_snashots' (typo), raising a
                # NameError on exactly the mismatch path it should report
                log.error("DATABASE IS NOT CONSISTENT AT %s: %s != %s" % (block_id, expected_snapshots[block_id], consensus_hash))
                return None

    # final consensus hash
    return consensus_hashes[ target_block_id ]
def get_working_db():
    """
    Return the working database filename, using the talosvirtualchain
    module as the virtualchain implementation.
    """
    # imported lazily to avoid a module-level import cycle
    import talosvirtualchain
    impl = talosvirtualchain
    return get_db_filename(impl=impl)