Example #1
def get_logfile_path():
   """
   Get the logfile path for our service endpoint.
   """
   working_dir = virtualchain.get_working_dir()
   logfile_filename = blockstore_state_engine.get_virtual_chain_name() + ".log"
   return os.path.join( working_dir, logfile_filename )
Example #2
def get_pidfile_path():
   """
   Get the PID file path.
   """
   working_dir = virtualchain.get_working_dir()
   pid_filename = blockstore_state_engine.get_virtual_chain_name() + ".pid"
   return os.path.join( working_dir, pid_filename )
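Both helpers follow the same pattern: join the virtualchain working directory with the virtual chain's name plus a suffix, so service files always sit next to the chain state. A minimal usage sketch, assuming the blockstore modules above are importable; the commented paths are illustrative:

# Hypothetical usage: neither helper creates the file, it only builds the path.
log_path = get_logfile_path()   # e.g. <working_dir>/<chain-name>.log
pid_path = get_pidfile_path()   # e.g. <working_dir>/<chain-name>.pid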
Example #3
def restore( snapshot_path, restore_dir, pubkeys, num_required ):
    
    global value_hashes

    config_path = os.environ.get("BLOCKSTACK_CLIENT_CONFIG")
    assert config_path

    os.makedirs(restore_dir)
    shutil.copy(config_path, os.path.join(restore_dir, os.path.basename(config_path)))

    rc = ysi.fast_sync_import( restore_dir, "file://{}".format(snapshot_path), public_keys=pubkeys, num_required=num_required )
    if not rc:
        print "failed to restore snapshot {}".format(snapshot_path)
        return False

    # database must be identical 
    db_filenames = ['ysi-server.db', 'ysi-server.snapshots', 'ysi-server.lastblock']
    src_paths = [os.path.join(virtualchain.get_working_dir(), fn) for fn in db_filenames]
    backup_paths = [os.path.join(restore_dir, fn) for fn in db_filenames]

    for src_path, backup_path in zip(src_paths, backup_paths):
        rc = os.system("cmp '{}' '{}'".format(src_path, backup_path))
        if rc != 0:
            print '{} disagrees with {}'.format(src_path, backup_path)
            return False
    
    # all zone files must be present
    for vh in value_hashes:
        zfdata = ysi.get_cached_zonefile_data(vh, zonefile_dir=os.path.join(restore_dir, 'zonefiles'))
        if zfdata is None:
            print 'Missing {} in {}'.format(vh, os.path.join(restore_dir, 'zonefiles'))
            return False

    shutil.rmtree(restore_dir)
    return True
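A hedged sketch of driving restore() from a test harness, assuming the ysi module is importable and a signed fast-sync snapshot exists; the config path, snapshot path, and key are placeholders:

# Hypothetical invocation; note restore() deletes restore_dir on success.
os.environ["BLOCKSTACK_CLIENT_CONFIG"] = "/tmp/client.ini"   # assumed test config
pubkey_hex = "02" + "ab" * 32                                # placeholder public key
if not restore("/tmp/snapshot.bsk", "/tmp/restore-test", [pubkey_hex], 1):
    print "restore check failed"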
Example #4
def restore( snapshot_path, restore_dir, pubkeys, num_required ):
    
    global value_hashes

    config_path = os.environ.get("BLOCKSTACK_CLIENT_CONFIG")
    assert config_path

    os.makedirs(restore_dir)
    shutil.copy(config_path, os.path.join(restore_dir, os.path.basename(config_path)))

    rc = blockstack.fast_sync_import( restore_dir, "file://{}".format(snapshot_path), public_keys=pubkeys, num_required=num_required )
    if not rc:
        print "failed to restore snapshot {}".format(snapshot_path)
        return False

    # database must be identical 
    db_filenames = ['blockstack-server.db', 'blockstack-server.snapshots', 'blockstack-server.lastblock']
    src_paths = [os.path.join(virtualchain.get_working_dir(), fn) for fn in db_filenames]
    backup_paths = [os.path.join(restore_dir, fn) for fn in db_filenames]

    for src_path, backup_path in zip(src_paths, backup_paths):
        rc = os.system("cmp '{}' '{}'".format(src_path, backup_path))
        if rc != 0:
            print '{} disagrees with {}'.format(src_path, backup_path)
            return False
    
    # all zone files must be present
    for vh in value_hashes:
        zfdata = blockstack.get_cached_zonefile_data(vh, zonefile_dir=os.path.join(restore_dir, 'zonefiles'))
        if zfdata is None:
            print 'Missing {} in {}'.format(vh, os.path.join(restore_dir, 'zonefiles'))
            return False

    shutil.rmtree(restore_dir)
    return True
Example #5
def run_scenario(scenario, config_file):
    """
    Run a test scenario:
    * set up the virtualchain to use our mock UTXO provider and mock bitcoin blockchain
    * seed it with the initial values in the wallet 
    * set the initial consensus hash 
    * run the scenario method
    * run the check method
    """

    # use mock bitcoind
    virtualchain.setup_virtualchain(
        blockstore_state_engine,
        bitcoind_connection_factory=mock_bitcoind.connect_mock_bitcoind)

    # set up blockstore
    # NOTE: utxo_opts encodes the mock-bitcoind options
    blockstore_opts, bitcoin_opts, utxo_opts, dht_opts = blockstore.lib.configure(
        config_file=config_file, interactive=False)

    # override multiprocessing options to ensure single-process behavior
    utxo_opts['multiprocessing_num_procs'] = 1
    utxo_opts['multiprocessing_num_blocks'] = 64

    blockstored.set_bitcoin_opts(bitcoin_opts)
    blockstored.set_utxo_opts(utxo_opts)

    db = blockstored.get_state_engine()
    bitcoind = mock_bitcoind.connect_mock_bitcoind(utxo_opts)
    sync_virtualchain_upcall = lambda: virtualchain.sync_virtualchain(
        utxo_opts, bitcoind.getblockcount(), db)
    mock_utxo = blockstore.lib.connect_utxo_provider(utxo_opts)
    working_dir = virtualchain.get_working_dir()

    # set up test environment
    testlib.set_utxo_client(mock_utxo)
    testlib.set_bitcoind(bitcoind)
    testlib.set_state_engine(db)

    test_env = {
        "sync_virtualchain_upcall": sync_virtualchain_upcall,
        "working_dir": working_dir
    }

    # sync initial utxos
    testlib.next_block(**test_env)

    # load the scenario into the mock blockchain and mock utxo provider
    try:
        scenario.scenario(scenario.wallets, **test_env)

    except Exception, e:
        log.exception(e)
        traceback.print_exc()
        log.error("Failed to run scenario '%s'" % scenario.__name__)
        return False
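The scenario argument is a module that supplies the wallets and the scenario() entry point named in the docstring. A minimal driver sketch under that assumption; the module and config paths are hypothetical:

# Hypothetical driver; assumes a module exposing `wallets` and
# `scenario(wallets, **test_env)` as described above.
import importlib
scenario_mod = importlib.import_module("scenarios.name_preorder")   # placeholder module
success = run_scenario(scenario_mod, "/tmp/blockstore-test.ini")    # placeholder config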
Example #6
def get_zonefile_dir( working_dir=None ):
    """
    Get the path to the directory to hold any zonefiles we download.
    """

    if working_dir is None:
       working_dir = virtualchain.get_working_dir()

    zonefile_dir = os.path.join( working_dir, "zonefiles" )
    return zonefile_dir
Example #7
def get_announce_filename( working_dir=None ):
   """
   Get the path to the file that stores all of the announcements.
   """

   if working_dir is None:
       working_dir = virtualchain.get_working_dir()

   announce_filepath = os.path.join( working_dir, virtualchain.get_implementation().get_virtual_chain_name() ) + ".announce"
   return announce_filepath
Example #8
def get_announce_filename( working_dir=None ):
   """
   Get the path to the file that stores all of the announcements.
   """

   if working_dir is None:
       working_dir = virtualchain.get_working_dir()

   announce_filepath = os.path.join( working_dir, virtualchain.get_implementation().get_virtual_chain_name() ) + ".announce"
   return announce_filepath
Example #9
def get_zonefile_dir( working_dir=None ):
    """
    Get the path to the directory to hold any zonefiles we download.
    """

    if working_dir is None:
       working_dir = virtualchain.get_working_dir()

    zonefile_dir = os.path.join( working_dir, "zonefiles" )
    return zonefile_dir
Example #10
def get_testset_filename( working_dir=None ):
   """
   Get the path to the file to determine whether or not we're in testset.
   """

   if working_dir is None:
       working_dir = virtualchain.get_working_dir()

   testset_filepath = os.path.join( working_dir, virtualchain.get_implementation().get_virtual_chain_name() ) + ".testset"
   return testset_filepath
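Testset mode is signaled purely by this file's existence: default_blockstack_opts() later in this section flips testset to True whenever the file is present. A sketch:

# Enable testset mode by touching the flag file; remove it to disable.
testset_flag = get_testset_filename()
open(testset_flag, "w").close()   # enable
# os.unlink(testset_flag)         # disable again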
Example #11
def run_scenario( scenario, config_file ):
    """
    Run a test scenario:
    * set up the virtualchain to use our mock UTXO provider and mock bitcoin blockchain
    * seed it with the initial values in the wallet 
    * set the initial consensus hash 
    * run the scenario method
    * run the check method
    """

    # use mock bitcoind
    virtualchain.setup_virtualchain( blockstack_state_engine, bitcoind_connection_factory=mock_bitcoind.connect_mock_bitcoind ) 

    # set up blockstack
    # NOTE: utxo_opts encodes the mock-bitcoind options 
    blockstack_opts, bitcoin_opts, utxo_opts, dht_opts = blockstack.lib.configure( config_file=config_file, interactive=False )
   
    # override multiprocessing options to ensure single-process behavior 
    utxo_opts['multiprocessing_num_procs'] = 1 
    utxo_opts['multiprocessing_num_blocks'] = 64

    blockstackd.set_bitcoin_opts( bitcoin_opts )
    blockstackd.set_utxo_opts( utxo_opts )

    db = blockstackd.get_state_engine()
    bitcoind = mock_bitcoind.connect_mock_bitcoind( utxo_opts )
    sync_virtualchain_upcall = lambda: virtualchain.sync_virtualchain( utxo_opts, bitcoind.getblockcount(), db )
    mock_utxo = blockstack.lib.connect_utxo_provider( utxo_opts )
    working_dir = virtualchain.get_working_dir()
 
    # set up test environment
    testlib.set_utxo_client( mock_utxo )
    testlib.set_utxo_opts( utxo_opts )
    testlib.set_bitcoind( bitcoind )
    testlib.set_state_engine( db )

    test_env = {
        "sync_virtualchain_upcall": sync_virtualchain_upcall,
        "working_dir": working_dir
    }

    # sync initial utxos 
    testlib.next_block( **test_env )

    # load the scenario into the mock blockchain and mock utxo provider
    try:
        scenario.scenario( scenario.wallets, **test_env )

    except Exception, e:
        log.exception(e)
        traceback.print_exc()
        log.error("Failed to run scenario '%s'" % scenario.__name__)
        return False
Example #12
def get_default_talos_config():
    import talosvirtualchain
    return {
        "bitcoind_port": 18332,
        "bitcoind_user": "******",
        "bitcoind_passwd": "talos",
        "bitcoind_server": "127.0.0.1",
        "bitcoind_p2p_port": 18444,
        "bitcoind_spv_path": os.path.join(get_working_dir(impl=talosvirtualchain), "tmp.dat")
    }
Example #13
def get_indexing_lockfile():
    """
    Return path to the indexing lockfile 
    """
    return os.path.join( virtualchain.get_working_dir(), "blockstore.indexing" )
Example #14
def get_indexing_lockfile(impl=None):
    """
    Return path to the indexing lockfile
    """
    return os.path.join( virtualchain.get_working_dir(impl=impl), "blockstack-server.indexing" )
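As with the testset flag, the lockfile appears to signal state by its mere existence; that the server touches it while indexing and unlinks it afterwards is an inference from the name, not documented behavior. A sketch:

# Assumed convention: presence of the lockfile means an index pass is running.
if os.path.exists(get_indexing_lockfile()):
    print "indexing in progress; try again later"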
Example #15
def store_announcement( announcement_hash, announcement_text, working_dir=None, force=False ):
   """
   Store a new announcement locally, atomically.
   """

   if working_dir is None:
       working_dir = virtualchain.get_working_dir()

   if not force:
       # don't store unless we haven't seen it before
       if announcement_hash in ANNOUNCEMENTS:
           return

   announce_filename = get_announce_filename( working_dir )
   announce_filename_tmp = announce_filename + ".tmp"
   announce_text = ""
   announce_cleanup_list = []

   # did we try (and fail) to store a previous announcement?  If so, merge them all
   if os.path.exists( announce_filename_tmp ):

       log.debug("Merge announcement list %s" % announce_filename_tmp )

       with open(announce_filename_tmp, "r") as f:
           announce_text += f.read()

       i = 1
       failed_path = announce_filename_tmp + (".%s" % i)
       while os.path.exists( failed_path ):

           log.debug("Merge announcement list %s" % failed_paht )
           with open(failed_path, "r") as f:
               announce_text += f.read()

           announce_cleanup_list.append( failed_path )

           i += 1
           failed_path = announce_filename_tmp + (".%s" % i)

       announce_filename_tmp = failed_path

   if os.path.exists( announce_filename ):
       with open(announce_filename, "r" ) as f:
           announce_text += f.read()

   announce_text += ("\n%s\n" % announcement_hash)

   # filter
   if not force:
       announcement_list = announce_text.split("\n")
       unseen_announcements = filter( lambda a: a not in ANNOUNCEMENTS, announcement_list )
       announce_text = "\n".join( unseen_announcements ).strip() + "\n"

   log.debug("Store announcement hash to %s" % announce_filename )

   with open(announce_filename_tmp, "w" ) as f:
       f.write( announce_text )
       f.flush()

   # NOTE: rename doesn't remove the old file on Windows
   if sys.platform == 'win32' and os.path.exists( announce_filename_tmp ):
       try:
           os.unlink( announce_filename_tmp )
       except:
           pass

   try:
       os.rename( announce_filename_tmp, announce_filename )
   except:
       log.error("Failed to save announcement %s to %s" % (announcement_hash, announce_filename ))
       raise

   # clean up
   for tmp_path in announce_cleanup_list:
       try:
           os.unlink( tmp_path )
       except:
           pass

   # put the announcement text
   announcement_text_dir = os.path.join( working_dir, "announcements" )
   if not os.path.exists( announcement_text_dir ):
       try:
           os.makedirs( announcement_text_dir )
       except:
           log.error("Failed to make directory %s" % announcement_text_dir )
           raise

   announcement_text_path = os.path.join( announcement_text_dir, "%s.txt" % announcement_hash )

   try:
       with open( announcement_text_path, "w" ) as f:
           f.write( announcement_text )

   except:
       log.error("Failed to save announcement text to %s" % announcement_text_path )
       raise

   log.debug("Stored announcement to %s" % (announcement_text_path))
Example #16
def run_scenario(scenario, config_file):
    """
    Run a test scenario:
    * set up the virtualchain to use our mock UTXO provider and mock bitcoin blockchain
    * seed it with the initial values in the wallet 
    * set the initial consensus hash 
    * run the scenario method
    * run the check method
    """

    mock_bitcoind_save_path = "/tmp/mock_bitcoind.dat"
    if os.path.exists(mock_bitcoind_save_path):
        try:
            os.unlink(mock_bitcoind_save_path)
        except:
            pass

    # use mock bitcoind
    worker_env = {
        # use mock_bitcoind to connect to bitcoind (but it has to import it in order to use it)
        "VIRTUALCHAIN_MOD_CONNECT_BLOCKCHAIN": mock_bitcoind.__file__,
        "MOCK_BITCOIND_SAVE_PATH": mock_bitcoind_save_path,
        "BLOCKSTORE_TEST": "1"
    }

    if os.environ.get("PYTHONPATH", None) is not None:
        worker_env["PYTHONPATH"] = os.environ["PYTHONPATH"]

    virtualchain.setup_virtualchain(
        blockstore_state_engine,
        bitcoind_connection_factory=mock_bitcoind.connect_mock_bitcoind,
        index_worker_env=worker_env)

    # set up blockstore
    # NOTE: utxo_opts encodes the mock-bitcoind options
    blockstore_opts, bitcoin_opts, utxo_opts, dht_opts = blockstore.lib.configure(
        config_file=config_file, interactive=False)

    # override multiprocessing options to ensure single-process behavior
    utxo_opts['multiprocessing_num_procs'] = 1
    utxo_opts['multiprocessing_num_blocks'] = 10

    # pass along extra arguments
    utxo_opts['save_file'] = mock_bitcoind_save_path

    # save headers as well
    utxo_opts['spv_headers_path'] = mock_bitcoind_save_path + ".spvheaders"
    with open(utxo_opts['spv_headers_path'], "w") as f:
        # write out "initial" headers, up to the first block
        empty_header = ("00" * 81).decode('hex')
        for i in xrange(0, blockstore.FIRST_BLOCK_MAINNET):
            f.write(empty_header)

    blockstored.set_bitcoin_opts(bitcoin_opts)
    blockstored.set_utxo_opts(utxo_opts)

    db = blockstored.get_state_engine()
    bitcoind = mock_bitcoind.connect_mock_bitcoind(utxo_opts)
    sync_virtualchain_upcall = lambda: virtualchain.sync_virtualchain(
        utxo_opts, bitcoind.getblockcount(), db)
    mock_utxo = blockstore.lib.connect_utxo_provider(utxo_opts)
    working_dir = virtualchain.get_working_dir()

    # set up test environment
    testlib.set_utxo_opts(utxo_opts)
    testlib.set_utxo_client(mock_utxo)
    testlib.set_bitcoind(bitcoind)
    testlib.set_state_engine(db)

    test_env = {
        "sync_virtualchain_upcall": sync_virtualchain_upcall,
        "working_dir": working_dir,
        "bitcoind": bitcoind,
        "bitcoind_save_path": mock_bitcoind_save_path
    }

    # sync initial utxos
    testlib.next_block(**test_env)

    try:
        os.unlink(mock_bitcoind_save_path)
    except:
        pass

    # load the scenario into the mock blockchain and mock utxo provider
    try:
        scenario.scenario(scenario.wallets, **test_env)

    except Exception, e:
        log.exception(e)
        traceback.print_exc()
        log.error("Failed to run scenario '%s'" % scenario.__name__)
        return False
Example #17
def default_blockstack_opts( config_file=None, testset=False ):
   """
   Get our default blockstack opts from a config file
   or from sane defaults.
   """

   if config_file is None:
      config_file = virtualchain.get_config_filename()

   testset_path = get_testset_filename( virtualchain.get_working_dir() )
   announce_path = get_announce_filename( virtualchain.get_working_dir() )

   parser = SafeConfigParser()
   parser.read( config_file )

   blockstack_opts = {}
   tx_broadcaster = None
   utxo_provider = None
   testset_first_block = None
   max_subsidy = 0
   contact_email = None
   announcers = "judecn.id,muneeb.id,shea256.id"
   announcements = None

   if parser.has_section('blockstack'):

      if parser.has_option('blockstack', 'tx_broadcaster'):
         tx_broadcaster = parser.get('blockstack', 'tx_broadcaster')

      if parser.has_option('blockstack', 'utxo_provider'):
         utxo_provider = parser.get('blockstack', 'utxo_provider')

      if parser.has_option('blockstack', 'testset_first_block'):
         testset_first_block = int( parser.get('blockstack', 'testset_first_block') )

      if parser.has_option('blockstack', 'max_subsidy'):
         max_subsidy = int( parser.get('blockstack', 'max_subsidy'))

      if parser.has_option('blockstack', 'email'):
         contact_email = parser.get('blockstack', 'email')

      if parser.has_option('blockstack', 'announcers'):
         # must be a CSV of blockchain IDs
         announcer_list_str = parser.get('blockstack', 'announcers')
         announcer_list = announcer_list_str.split(",")

         import scripts

         # validate each one
         valid = True
         for bid in announcer_list:
             if not scripts.is_name_valid( bid ):
                 log.error("Invalid blockchain ID '%s'" % bid)
                 valid = False

         if valid:
             announcers = ",".join(announcer_list)

   if os.path.exists( testset_path ):
       # testset file flag set
       testset = True

   if os.path.exists( announce_path ):
       # load announcement list
       with open( announce_path, "r" ) as f:
           announce_text = f.readlines()

       all_announcements = [ a.strip() for a in announce_text ]
       unseen_announcements = []

       # find announcements we haven't seen yet
       for a in all_announcements:
           if a not in ANNOUNCEMENTS:
               unseen_announcements.append( a )

       announcements = ",".join( unseen_announcements )

   blockstack_opts = {
       'tx_broadcaster': tx_broadcaster,
       'utxo_provider': utxo_provider,
       'testset': testset,
       'testset_first_block': testset_first_block,
       'max_subsidy': max_subsidy,
       'email': contact_email,
       'announcers': announcers,
       'announcements': announcements
   }

   # strip Nones
   for (k, v) in blockstack_opts.items():
      if v is None:
         del blockstack_opts[k]

   return blockstack_opts
Example #18
def fast_sync_import( working_dir, import_url, public_keys=config.FAST_SYNC_PUBLIC_KEYS, num_required=len(config.FAST_SYNC_PUBLIC_KEYS), verbose=False ):
    """
    Fast sync import.
    Verify the given fast-sync snapshot fetched from @import_url using @public_keys, and then
    uncompress it into @working_dir.

    Verify that at least `num_required` public keys in `public_keys` signed.
    NOTE: `public_keys` needs to be in the same order as the private keys that signed.
    """

    def logmsg(s):
        if verbose:
            print s
        else:
            log.debug(s)

    def logerr(s):
        if verbose:
            print >> sys.stderr, s
        else:
            log.error(s)

    if working_dir is None:
        working_dir = virtualchain.get_working_dir()

    if not os.path.exists(working_dir):
        logerr("No such directory {}".format(working_dir))
        return False

    # go get it 
    import_path = fast_sync_fetch(import_url)
    if import_path is None:
        logerr("Failed to fetch {}".format(import_url))
        return False

    # format: <signed bz2 payload> <sigb64> <sigb64 length (8 bytes hex)> ... <num signatures>
    file_size = 0
    try:
        sb = os.stat(import_path)
        file_size = sb.st_size
    except Exception as e:
        log.exception(e)
        return False

    num_signatures = 0
    ptr = file_size
    signatures = []

    with open(import_path, 'r') as f:
        info = fast_sync_inspect( f )
        if 'error' in info:
            logerr("Failed to inspect snapshot {}: {}".format(import_path, info['error']))
            return False

        signatures = info['signatures']
        ptr = info['payload_size']

        # get the hash of the file 
        hash_hex = get_file_hash(f, hashlib.sha256, fd_len=ptr)
        
        # validate signatures over the hash
        logmsg("Verify {} bytes".format(ptr))
        key_idx = 0
        num_match = 0
        for next_pubkey in public_keys:
            for sigb64 in signatures:
                valid = verify_digest( hash_hex, keylib.ECPublicKey(next_pubkey).to_hex(), sigb64, hashfunc=hashlib.sha256 ) 
                if valid:
                    num_match += 1
                    if num_match >= num_required:
                        break
                    
                    logmsg("Public key {} matches {} ({})".format(next_pubkey, sigb64, hash_hex))
                    signatures.remove(sigb64)
                
                else:
                    logmsg("Public key {} does NOT match {} ({})".format(next_pubkey, sigb64, hash_hex))

        # enough signatures?
        if num_match < num_required:
            logerr("Not enough signatures match (required {}, found {})".format(num_required, num_match))
            return False

    # decompress
    import_path = os.path.abspath(import_path)
    res = fast_sync_snapshot_decompress(import_path, working_dir)
    if 'error' in res:
        logerr("Failed to decompress {} to {}: {}".format(import_path, working_dir, res['error']))
        return False

    # restore from backup
    rc = ysi_backup_restore(working_dir, None)
    if not rc:
        logerr("Failed to instantiate ysi name database")
        return False

    # success!
    logmsg("Restored to {}".format(working_dir))
    return True
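A hedged invocation sketch for fast_sync_import(), assuming a snapshot already fetched to local disk and a single trusted signing key; both values are placeholders:

trusted_pubkey_hex = "02" + "cd" * 32   # placeholder public key
ok = fast_sync_import("/var/ysi", "file:///tmp/snapshot.bsk",
                      public_keys=[trusted_pubkey_hex], num_required=1,
                      verbose=True)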
Example #19
def default_blockstack_opts( config_file=None, virtualchain_impl=None ):
   """
   Get our default blockstack opts from a config file
   or from sane defaults.
   """

   if config_file is None:
      config_file = virtualchain.get_config_filename()

   announce_path = get_announce_filename( virtualchain.get_working_dir(impl=virtualchain_impl) )

   parser = SafeConfigParser()
   parser.read( config_file )

   blockstack_opts = {}
   contact_email = None
   announcers = "judecn.id,muneeb.id,shea256.id"
   announcements = None
   backup_frequency = 144   # once a day; 10 minute block time
   backup_max_age = 1008    # one week
   rpc_port = RPC_SERVER_PORT 
   serve_zonefiles = True
   serve_profiles = False
   serve_data = False
   zonefile_dir = os.path.join( os.path.dirname(config_file), "zonefiles")
   analytics_key = None
   zonefile_storage_drivers = "disk,dht"
   zonefile_storage_drivers_write = "disk"
   profile_storage_drivers = "disk"
   profile_storage_drivers_write = "disk"
   data_storage_drivers = "disk"
   data_storage_drivers_write = "disk"
   redirect_data = False
   data_servers = None
   server_version = None
   atlas_enabled = True
   atlas_seed_peers = "node.blockstack.org:%s" % RPC_SERVER_PORT
   atlasdb_path = os.path.join( os.path.dirname(config_file), "atlas.db" )
   atlas_blacklist = ""
   atlas_hostname = socket.gethostname()

   if parser.has_section('blockstack'):

      if parser.has_option('blockstack', 'backup_frequency'):
         backup_frequency = int( parser.get('blockstack', 'backup_frequency'))

      if parser.has_option('blockstack', 'backup_max_age'):
         backup_max_age = int( parser.get('blockstack', 'backup_max_age') )

      if parser.has_option('blockstack', 'email'):
         contact_email = parser.get('blockstack', 'email')

      if parser.has_option('blockstack', 'rpc_port'):
         rpc_port = int(parser.get('blockstack', 'rpc_port'))

      if parser.has_option('blockstack', 'serve_zonefiles'):
          serve_zonefiles = parser.get('blockstack', 'serve_zonefiles')
          if serve_zonefiles.lower() in ['1', 'yes', 'true', 'on']:
              serve_zonefiles = True
          else:
              serve_zonefiles = False

      if parser.has_option('blockstack', 'serve_profiles'):
          serve_profiles = parser.get('blockstack', 'serve_profiles')
          if serve_profiles.lower() in ['1', 'yes', 'true', 'on']:
              serve_profiles = True
          else:
              serve_profiles = False

      if parser.has_option('blockstack', 'serve_data'):
          serve_data = parser.get('blockstack', 'serve_data')
          if serve_data.lower() in ['1', 'yes', 'true', 'on']:
              serve_data = True
          else:
              serve_data = False

      if parser.has_option("blockstack", "zonefile_storage_drivers"):
          zonefile_storage_drivers = parser.get("blockstack", "zonefile_storage_drivers")

      if parser.has_option("blockstack", "zonefile_storage_drivers_write"):
          zonefile_storage_drivers_write = parser.get("blockstack", "zonefile_storage_drivers_write")

      if parser.has_option("blockstack", "profile_storage_drivers"):
          profile_storage_drivers = parser.get("blockstack", "profile_storage_drivers")

      if parser.has_option("blockstack", "profile_storage_drivers_write"):
          profile_storage_drivers_write = parser.get("blockstack", "profile_storage_drivers_write")

      if parser.has_option("blockstack", "data_storage_drivers"):
          data_storage_drivers = parser.get("blockstack", "data_storage_drivers")

      if parser.has_option("blockstack", "data_storage_drivers_write"):
          data_storage_drivers_write = parser.get("blockstack", "data_storage_drivers_write")

      if parser.has_option("blockstack", "zonefiles"):
          zonefile_dir = parser.get("blockstack", "zonefiles")
    
      if parser.has_option('blockstack', 'redirect_data'):
          redirect_data = parser.get('blockstack', 'redirect_data')
          if redirect_data.lower() in ['1', 'yes', 'true', 'on']:
              redirect_data = True
          else:
              redirect_data = False

      if parser.has_option('blockstack', 'data_servers'):
          data_servers = parser.get('blockstack', 'data_servers')

          # must be a CSV of host:port
          hostports = filter( lambda x: len(x) > 0, data_servers.split(",") )
          for hp in hostports:
              host, port = url_to_host_port( hp )
              assert host is not None and port is not None


      if parser.has_option('blockstack', 'announcers'):
         # must be a CSV of blockchain IDs
         announcer_list_str = parser.get('blockstack', 'announcers')
         announcer_list = filter( lambda x: len(x) > 0, announcer_list_str.split(",") )

         import scripts

         # validate each one
         valid = True
         for bid in announcer_list:
             if not scripts.is_name_valid( bid ):
                 log.error("Invalid blockchain ID '%s'" % bid)
                 valid = False

         if valid:
             announcers = ",".join(announcer_list)

      if parser.has_option('blockstack', 'analytics_key'):
         analytics_key = parser.get('blockstack', 'analytics_key')

      if parser.has_option('blockstack', 'server_version'):
         server_version = parser.get('blockstack', 'server_version')

      if parser.has_option('blockstack', 'atlas'):
         atlas_enabled = parser.get('blockstack', 'atlas')
         if atlas_enabled.lower() in ['true', '1', 'enabled', 'on']:
            atlas_enabled = True
         else:
            atlas_enabled = False

      if parser.has_option('blockstack', 'atlas_seeds'):
         atlas_seed_peers = parser.get('blockstack', 'atlas_seeds')
         
         # must be a CSV of host:port
         hostports = filter( lambda x: len(x) > 0, atlas_seed_peers.split(",") )
         for hp in hostports:
             host, port = url_to_host_port( hp )
             assert host is not None and port is not None

      if parser.has_option('blockstack', 'atlasdb_path'):
         atlasdb_path = parser.get('blockstack', 'atlasdb_path')

      if parser.has_option('blockstack', 'atlas_blacklist'):
         atlas_blacklist = parser.get('blockstack', 'atlas_blacklist')

         # must be a CSV of host:port
         hostports = filter( lambda x: len(x) > 0, atlas_blacklist.split(",") )
         for hp in hostports:
             host, port = url_to_host_port( hp )
             assert host is not None and port is not None

      if parser.has_option('blockstack', 'atlas_hostname'):
         atlas_hostname = parser.get('blockstack', 'atlas_hostname')
        

   if os.path.exists( announce_path ):
       # load announcement list
       with open( announce_path, "r" ) as f:
           announce_text = f.readlines()

       all_announcements = [ a.strip() for a in announce_text ]
       unseen_announcements = []

       # find announcements we haven't seen yet
       for a in all_announcements:
           if a not in ANNOUNCEMENTS:
               unseen_announcements.append( a )

       announcements = ",".join( unseen_announcements )

   if zonefile_dir is not None and not os.path.exists( zonefile_dir ):
       try:
           os.makedirs( zonefile_dir, 0700 )
       except:
           pass

   blockstack_opts = {
       'rpc_port': rpc_port,
       'email': contact_email,
       'announcers': announcers,
       'announcements': announcements,
       'backup_frequency': backup_frequency,
       'backup_max_age': backup_max_age,
       'serve_zonefiles': serve_zonefiles,
       'zonefile_storage_drivers': zonefile_storage_drivers,
       "zonefile_storage_drivers_write": zonefile_storage_drivers_write,
       'serve_profiles': serve_profiles,
       'profile_storage_drivers': profile_storage_drivers,
       "profile_storage_drivers_write": profile_storage_drivers_write,
       'serve_data': serve_data,
       'data_storage_drivers': data_storage_drivers,
       "data_storage_drivers_write": data_storage_drivers_write,
       'redirect_data': redirect_data,
       'data_servers': data_servers,
       'analytics_key': analytics_key,
       'server_version': server_version,
       'atlas': atlas_enabled,
       'atlas_seeds': atlas_seed_peers,
       'atlasdb_path': atlasdb_path,
       'atlas_blacklist': atlas_blacklist,
       'atlas_hostname': atlas_hostname,
       'zonefiles': zonefile_dir,
   }

   # strip Nones
   for (k, v) in blockstack_opts.items():
      if v is None:
         del blockstack_opts[k]

   return blockstack_opts
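Every option above is read from the [blockstack] section of an INI file via SafeConfigParser, so a test can synthesize a config programmatically. A minimal sketch with illustrative values, not recommended settings:

# Build a throwaway config exercising a few of the options parsed above.
from ConfigParser import SafeConfigParser
cp = SafeConfigParser()
cp.add_section('blockstack')
cp.set('blockstack', 'serve_zonefiles', 'true')
cp.set('blockstack', 'atlas', 'on')
cp.set('blockstack', 'backup_frequency', '144')
with open('/tmp/blockstack-server.ini', 'w') as f:
    cp.write(f)
opts = default_blockstack_opts(config_file='/tmp/blockstack-server.ini')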
Example #20
def default_blockstack_opts( config_file=None ):
   """
   Get our default blockstack opts from a config file
   or from sane defaults.
   """

   if config_file is None:
      config_file = virtualchain.get_config_filename()

   announce_path = get_announce_filename( virtualchain.get_working_dir() )

   parser = SafeConfigParser()
   parser.read( config_file )

   blockstack_opts = {}
   contact_email = None
   announcers = "judecn.id,muneeb.id,shea256.id"
   announcements = None
   backup_frequency = 1008  # once a week; 10 minute block time
   backup_max_age = 12096   # 12 weeks
   rpc_port = RPC_SERVER_PORT 
   blockchain_proxy = False
   serve_zonefiles = True
   serve_profiles = False
   zonefile_dir = None
   analytics_key = None
   zonefile_storage_drivers = "disk"
   profile_storage_drivers = ""
   server_version = None

   if parser.has_section('blockstack'):

      if parser.has_option('blockstack', 'backup_frequency'):
         backup_frequency = int( parser.get('blockstack', 'backup_frequency'))

      if parser.has_option('blockstack', 'backup_max_age'):
         backup_max_age = int( parser.get('blockstack', 'backup_max_age') )

      if parser.has_option('blockstack', 'email'):
         contact_email = parser.get('blockstack', 'email')

      if parser.has_option('blockstack', 'rpc_port'):
         rpc_port = int(parser.get('blockstack', 'rpc_port'))

      if parser.has_option('blockstack', 'blockchain_proxy'):
         blockchain_proxy = parser.get('blockstack', 'blockchain_proxy')
         if blockchain_proxy.lower() in ['1', 'yes', 'true', 'on']:
             blockchain_proxy = True
         else:
             blockchain_proxy = False

      if parser.has_option('blockstack', 'serve_zonefiles'):
          serve_zonefiles = parser.get('blockstack', 'serve_zonefiles')
          if serve_zonefiles.lower() in ['1', 'yes', 'true', 'on']:
              serve_zonefiles = True
          else:
              serve_zonefiles = False

      if parser.has_option('blockstack', 'serve_profiles'):
          serve_profiles = parser.get('blockstack', 'serve_profiles')
          if serve_profiles.lower() in ['1', 'yes', 'true', 'on']:
              serve_profiles = True
          else:
              serve_profiles = False

      if parser.has_option("blockstack", "zonefile_storage_drivers"):
          zonefile_storage_drivers = parser.get("blockstack", "zonefile_storage_drivers")

      if parser.has_option("blockstack", "profile_storage_drivers"):
          profile_storage_drivers = parser.get("blockstack", "profile_storage_drivers")

      if parser.has_option("blockstack", "zonefiles"):
          zonefile_dir = parser.get("blockstack", "zonefiles")

      if parser.has_option('blockstack', 'announcers'):
         # must be a CSV of blockchain IDs
         announcer_list_str = parser.get('blockstack', 'announcers')
         announcer_list = announcer_list_str.split(",")

         import scripts

         # validate each one
         valid = True
         for bid in announcer_list:
             if not scripts.is_name_valid( bid ):
                 log.error("Invalid blockchain ID '%s'" % bid)
                 valid = False

         if valid:
             announcers = ",".join(announcer_list)

      if parser.has_option('blockstack', 'analytics_key'):
         analytics_key = parser.get('blockstack', 'analytics_key')

      if parser.has_option('blockstack', 'server_version'):
         server_version = parser.get('blockstack', 'server_version')


   if os.path.exists( announce_path ):
       # load announcement list
       with open( announce_path, "r" ) as f:
           announce_text = f.readlines()

       all_announcements = [ a.strip() for a in announce_text ]
       unseen_announcements = []

       # find announcements we haven't seen yet
       for a in all_announcements:
           if a not in ANNOUNCEMENTS:
               unseen_announcements.append( a )

       announcements = ",".join( unseen_announcements )

   if zonefile_dir is not None and not os.path.exists( zonefile_dir ):
       try:
           os.makedirs( zonefile_dir, 0700 )
       except:
           pass

   blockstack_opts = {
       'rpc_port': rpc_port,
       'email': contact_email,
       'announcers': announcers,
       'announcements': announcements,
       'backup_frequency': backup_frequency,
       'backup_max_age': backup_max_age,
       'blockchain_proxy': blockchain_proxy,
       'serve_zonefiles': serve_zonefiles,
       'serve_profiles': serve_profiles,
       'zonefile_storage_drivers': zonefile_storage_drivers,
       'profile_storage_drivers': profile_storage_drivers,
       'zonefiles': zonefile_dir,
       'analytics_key': analytics_key,
       'server_version': server_version
   }

   # strip Nones
   for (k, v) in blockstack_opts.items():
      if v is None:
         del blockstack_opts[k]

   return blockstack_opts
Example #21
            return []

        inner.zonefile_count = 0
        return inner

    _zonefile_copy_progress = _zonefile_copy_progress()

    # make sure we have the appropriate tools
    tools = ['sqlite3']
    for tool in tools:
        rc = os.system("which {} > /dev/null".format(tool))
        if rc != 0:
            log.error("'{}' command not found".format(tool))
            return False

    working_dir = virtualchain.get_working_dir() 
    if not os.path.exists(working_dir):
        log.error("No such directory {}".format(working_dir))
        return False

    if block_number is None:
        # last backup
        all_blocks = BlockstackDB.get_backup_blocks( virtualchain_hooks )
        if len(all_blocks) == 0:
            log.error("No backups available")
            return False

        block_number = max(all_blocks)

    log.debug("Snapshot from block {}".format(block_number))
Example #22
def get_indexing_lockfile(impl=None):
    """
    Return path to the indexing lockfile
    """
    return os.path.join( virtualchain.get_working_dir(impl=impl), "blockstack-server.indexing" )
Example #23
def fast_sync_import(working_dir,
                     import_url,
                     public_keys=config.FAST_SYNC_PUBLIC_KEYS,
                     num_required=len(config.FAST_SYNC_PUBLIC_KEYS)):
    """
    Fast sync import.
    Verify the given fast-sync snapshot fetched from @import_url using @public_keys, and then
    uncompress it into @working_dir.

    Verify that at least `num_required` public keys in `public_keys` signed.
    NOTE: `public_keys` needs to be in the same order as the private keys that signed.
    """

    # make sure we have the appropriate tools
    tools = ['tar', 'bzip2', 'mv']
    for tool in tools:
        rc = os.system("which {} > /dev/null".format(tool))
        if rc != 0:
            log.error("'{}' command not found".format(tool))
            return False

    if working_dir is None:
        working_dir = virtualchain.get_working_dir()

    if not os.path.exists(working_dir):
        log.error("No such directory {}".format(working_dir))
        return False

    # go get it
    import_path = fast_sync_fetch(import_url)
    if import_path is None:
        log.error("Failed to fetch {}".format(import_url))
        return False

    # format: <signed bz2 payload> <sigb64> <sigb64 length (8 bytes hex)> ... <num signatures>
    file_size = 0
    try:
        sb = os.stat(import_path)
        file_size = sb.st_size
    except Exception as e:
        log.exception(e)
        return False

    num_signatures = 0
    ptr = file_size
    signatures = []

    with open(import_path, 'r') as f:
        info = fast_sync_inspect(f)
        if 'error' in info:
            log.error("Failed to inspect snapshot {}: {}".format(
                import_path, info['error']))
            return False

        signatures = info['signatures']
        ptr = info['payload_size']

        # get the hash of the file
        hash_hex = blockstack_client.storage.get_file_hash(f,
                                                           hashlib.sha256,
                                                           fd_len=ptr)

        # validate signatures over the hash
        log.debug("Verify {} bytes".format(ptr))
        key_idx = 0
        num_match = 0
        for next_pubkey in public_keys:
            for sigb64 in signatures:
                valid = blockstack_client.keys.verify_digest(
                    hash_hex,
                    keylib.ECPublicKey(next_pubkey).to_hex(),
                    sigb64,
                    hashfunc=hashlib.sha256)
                if valid:
                    num_match += 1
                    if num_match >= num_required:
                        break

                    log.debug("Public key {} matches {} ({})".format(
                        next_pubkey, sigb64, hash_hex))
                    signatures.remove(sigb64)

                elif os.environ.get("BLOCKSTACK_TEST") == "1":
                    log.debug("Public key {} does NOT match {} ({})".format(
                        next_pubkey, sigb64, hash_hex))

        # enough signatures?
        if num_match < num_required:
            log.error(
                "Not enough signatures match (required {}, found {})".format(
                    num_required, num_match))
            return False

    # decompress
    import_path = os.path.abspath(import_path)
    cmd = "cd '{}' && tar xf '{}'".format(working_dir, import_path)
    log.debug(cmd)
    rc = os.system(cmd)
    if rc != 0:
        log.error("Failed to decompress. Exit code {}. Command: {}".format(
            rc, cmd))
        return False

    # restore from backup
    rc = blockstack_backup_restore(working_dir, None)
    if not rc:
        log.error("Failed to instantiate blockstack name database")
        return False

    # success!
    log.debug("Restored to {}".format(working_dir))
    return True
Example #24
def store_announcement( announcement_hash, announcement_text, working_dir=None, force=False ):
   """
   Store a new announcement locally, atomically.
   """

   if working_dir is None:
       working_dir = virtualchain.get_working_dir()

   if not force:
       # don't store unless we haven't seen it before
       if announcement_hash in ANNOUNCEMENTS:
           return

   announce_filename = get_announce_filename( working_dir )
   announce_filename_tmp = announce_filename + ".tmp"
   announce_text = ""
   announce_cleanup_list = []

   # did we try (and fail) to store a previous announcement?  If so, merge them all
   if os.path.exists( announce_filename_tmp ):

       log.debug("Merge announcement list %s" % announce_filename_tmp )

       with open(announce_filename_tmp, "r") as f:
           announce_text += f.read()

       i = 1
       failed_path = announce_filename_tmp + (".%s" % i)
       while os.path.exists( failed_path ):

           log.debug("Merge announcement list %s" % failed_paht )
           with open(failed_path, "r") as f:
               announce_text += f.read()

           announce_cleanup_list.append( failed_path )

           i += 1
           failed_path = announce_filename_tmp + (".%s" % i)

       announce_filename_tmp = failed_path

   if os.path.exists( announce_filename ):
       with open(announce_filename, "r" ) as f:
           announce_text += f.read()

   announce_text += ("\n%s\n" % announcement_hash)

   # filter
   if not force:
       announcement_list = announce_text.split("\n")
       unseen_announcements = filter( lambda a: a not in ANNOUNCEMENTS, announcement_list )
       announce_text = "\n".join( unseen_announcements ).strip() + "\n"

   log.debug("Store announcement hash to %s" % announce_filename )

   with open(announce_filename_tmp, "w" ) as f:
       f.write( announce_text )
       f.flush()

   # NOTE: rename doesn't remove the old file on Windows
   if sys.platform == 'win32' and os.path.exists( announce_filename_tmp ):
       try:
           os.unlink( announce_filename_tmp )
       except:
           pass

   try:
       os.rename( announce_filename_tmp, announce_filename )
   except:
       log.error("Failed to save announcement %s to %s" % (announcement_hash, announce_filename ))
       raise

   # clean up
   for tmp_path in announce_cleanup_list:
       try:
           os.unlink( tmp_path )
       except:
           pass

   # put the announcement text
   announcement_text_dir = os.path.join( working_dir, "announcements" )
   if not os.path.exists( announcement_text_dir ):
       try:
           os.makedirs( announcement_text_dir )
       except:
           log.error("Failed to make directory %s" % announcement_text_dir )
           raise

   announcement_text_path = os.path.join( announcement_text_dir, "%s.txt" % announcement_hash )

   try:
       with open( announcement_text_path, "w" ) as f:
           f.write( announcement_text )

   except:
       log.error("Failed to save announcement text to %s" % announcement_text_path )
       raise

   log.debug("Stored announcement to %s" % (announcement_text_path))
Example #25
def get_working_path_file():
    import talosvirtualchain
    return os.path.join(get_working_dir(impl=talosvirtualchain), "tmp.dat")
Example #26
def default_blockstack_opts( config_file=None, virtualchain_impl=None ):
   """
   Get our default blockstack opts from a config file
   or from sane defaults.
   """

   if config_file is None:
      config_file = virtualchain.get_config_filename()

   announce_path = get_announce_filename( virtualchain.get_working_dir(impl=virtualchain_impl) )

   parser = SafeConfigParser()
   parser.read( config_file )

   blockstack_opts = {}
   contact_email = None
   announcers = "judecn.id,muneeb.id,shea256.id"
   announcements = None
   backup_frequency = 144   # once a day; 10 minute block time
   backup_max_age = 1008    # one week
   rpc_port = RPC_SERVER_PORT 
   serve_zonefiles = True
   serve_profiles = False
   serve_data = False
   zonefile_dir = os.path.join( os.path.dirname(config_file), "zonefiles")
   analytics_key = None
   zonefile_storage_drivers = "disk,dht"
   zonefile_storage_drivers_write = "disk"
   profile_storage_drivers = "disk"
   profile_storage_drivers_write = "disk"
   data_storage_drivers = "disk"
   data_storage_drivers_write = "disk"
   redirect_data = False
   data_servers = None
   server_version = None
   atlas_enabled = True
   atlas_seed_peers = "node.blockstack.org:%s" % RPC_SERVER_PORT
   atlasdb_path = os.path.join( os.path.dirname(config_file), "atlas.db" )
   atlas_blacklist = ""
   atlas_hostname = socket.gethostname()

   if parser.has_section('blockstack'):

      if parser.has_option('blockstack', 'backup_frequency'):
         backup_frequency = int( parser.get('blockstack', 'backup_frequency'))

      if parser.has_option('blockstack', 'backup_max_age'):
         backup_max_age = int( parser.get('blockstack', 'backup_max_age') )

      if parser.has_option('blockstack', 'email'):
         contact_email = parser.get('blockstack', 'email')

      if parser.has_option('blockstack', 'rpc_port'):
         rpc_port = int(parser.get('blockstack', 'rpc_port'))

      if parser.has_option('blockstack', 'serve_zonefiles'):
          serve_zonefiles = parser.get('blockstack', 'serve_zonefiles')
          if serve_zonefiles.lower() in ['1', 'yes', 'true', 'on']:
              serve_zonefiles = True
          else:
              serve_zonefiles = False

      if parser.has_option('blockstack', 'serve_profiles'):
          serve_profiles = parser.get('blockstack', 'serve_profiles')
          if serve_profiles.lower() in ['1', 'yes', 'true', 'on']:
              serve_profiles = True
          else:
              serve_profiles = False

      if parser.has_option('blockstack', 'serve_data'):
          serve_data = parser.get('blockstack', 'serve_data')
          if serve_data.lower() in ['1', 'yes', 'true', 'on']:
              serve_data = True
          else:
              serve_data = False

      if parser.has_option("blockstack", "zonefile_storage_drivers"):
          zonefile_storage_drivers = parser.get("blockstack", "zonefile_storage_drivers")

      if parser.has_option("blockstack", "zonefile_storage_drivers_write"):
          zonefile_storage_drivers_write = parser.get("blockstack", "zonefile_storage_drivers_write")

      if parser.has_option("blockstack", "profile_storage_drivers"):
          profile_storage_drivers = parser.get("blockstack", "profile_storage_drivers")

      if parser.has_option("blockstack", "profile_storage_drivers_write"):
          profile_storage_drivers_write = parser.get("blockstack", "profile_storage_drivers_write")

      if parser.has_option("blockstack", "data_storage_drivers"):
          data_storage_drivers = parser.get("blockstack", "data_storage_drivers")

      if parser.has_option("blockstack", "data_storage_drivers_write"):
          data_storage_drivers_write = parser.get("blockstack", "data_storage_drivers_write")

      if parser.has_option("blockstack", "zonefiles"):
          zonefile_dir = parser.get("blockstack", "zonefiles")
    
      if parser.has_option('blockstack', 'redirect_data'):
          redirect_data = parser.get('blockstack', 'redirect_data')
          if redirect_data.lower() in ['1', 'yes', 'true', 'on']:
              redirect_data = True
          else:
              redirect_data = False

      if parser.has_option('blockstack', 'data_servers'):
          data_servers = parser.get('blockstack', 'data_servers')

          # must be a CSV of host:port
          hostports = filter( lambda x: len(x) > 0, data_servers.split(",") )
          for hp in hostports:
              host, port = url_to_host_port( hp )
              assert host is not None and port is not None


      if parser.has_option('blockstack', 'announcers'):
         # must be a CSV of blockchain IDs
         announcer_list_str = parser.get('blockstack', 'announcers')
         announcer_list = filter( lambda x: len(x) > 0, announcer_list_str.split(",") )

         import scripts

         # validate each one
         valid = True
         for bid in announcer_list:
             if not scripts.is_name_valid( bid ):
                 log.error("Invalid blockchain ID '%s'" % bid)
                 valid = False

         if valid:
             announcers = ",".join(announcer_list)

      if parser.has_option('blockstack', 'analytics_key'):
         analytics_key = parser.get('blockstack', 'analytics_key')

      if parser.has_option('blockstack', 'server_version'):
         server_version = parser.get('blockstack', 'server_version')

      if parser.has_option('blockstack', 'atlas'):
         atlas_enabled = parser.get('blockstack', 'atlas')
         if atlas_enabled.lower() in ['true', '1', 'enabled', 'on']:
            atlas_enabled = True
         else:
            atlas_enabled = False

      if parser.has_option('blockstack', 'atlas_seeds'):
         atlas_seed_peers = parser.get('blockstack', 'atlas_seeds')
         
         # must be a CSV of host:port
         hostports = filter( lambda x: len(x) > 0, atlas_seed_peers.split(",") )
         for hp in hostports:
             host, port = url_to_host_port( hp )
             assert host is not None and port is not None

      if parser.has_option('blockstack', 'atlasdb_path'):
         atlasdb_path = parser.get('blockstack', 'atlasdb_path')

      if parser.has_option('blockstack', 'atlas_blacklist'):
         atlas_blacklist = parser.get('blockstack', 'atlas_blacklist')

         # must be a CSV of host:port
         hostports = filter( lambda x: len(x) > 0, atlas_blacklist.split(",") )
         for hp in hostports:
             host, port = url_to_host_port( hp )
             assert host is not None and port is not None

      if parser.has_option('blockstack', 'atlas_hostname'):
         atlas_hostname = parser.get('blockstack', 'atlas_hostname')
        

   if os.path.exists( announce_path ):
       # load announcement list
       with open( announce_path, "r" ) as f:
           announce_text = f.readlines()

       all_announcements = [ a.strip() for a in announce_text ]
       unseen_announcements = []

       # find announcements we haven't seen yet
       for a in all_announcements:
           if a not in ANNOUNCEMENTS:
               unseen_announcements.append( a )

       announcements = ",".join( unseen_announcements )

   if zonefile_dir is not None and not os.path.exists( zonefile_dir ):
       try:
           os.makedirs( zonefile_dir, 0700 )
       except:
           pass

   blockstack_opts = {
       'rpc_port': rpc_port,
       'email': contact_email,
       'announcers': announcers,
       'announcements': announcements,
       'backup_frequency': backup_frequency,
       'backup_max_age': backup_max_age,
       'serve_zonefiles': serve_zonefiles,
       'zonefile_storage_drivers': zonefile_storage_drivers,
       "zonefile_storage_drivers_write": zonefile_storage_drivers_write,
       'serve_profiles': serve_profiles,
       'profile_storage_drivers': profile_storage_drivers,
       "profile_storage_drivers_write": profile_storage_drivers_write,
       'serve_data': serve_data,
       'data_storage_drivers': data_storage_drivers,
       "data_storage_drivers_write": data_storage_drivers_write,
       'redirect_data': redirect_data,
       'data_servers': data_servers,
       'analytics_key': analytics_key,
       'server_version': server_version,
       'atlas': atlas_enabled,
       'atlas_seeds': atlas_seed_peers,
       'atlasdb_path': atlasdb_path,
       'atlas_blacklist': atlas_blacklist,
       'atlas_hostname': atlas_hostname,
       'zonefiles': zonefile_dir,
   }

   # strip Nones
   for (k, v) in blockstack_opts.items():
      if v is None:
         del blockstack_opts[k]

   return blockstack_opts
Example #27
def default_blockstore_opts(config_file=None, testset=False):
    """
   Get our default blockstore opts from a config file
   or from sane defaults.
   """

    if config_file is None:
        config_file = virtualchain.get_config_filename()

    testset_path = get_testset_filename(virtualchain.get_working_dir())
    announce_path = get_announce_filename(virtualchain.get_working_dir())

    parser = SafeConfigParser()
    parser.read(config_file)

    blockstore_opts = {}
    tx_broadcaster = None
    utxo_provider = None
    testset_first_block = None
    max_subsidy = 0
    contact_email = None
    announcers = "judecn.id,muneeb.id,shea256.id"
    announcements = None

    if parser.has_section('blockstore'):

        if parser.has_option('blockstore', 'tx_broadcaster'):
            tx_broadcaster = parser.get('blockstore', 'tx_broadcaster')

        if parser.has_option('blockstore', 'utxo_provider'):
            utxo_provider = parser.get('blockstore', 'utxo_provider')

        if parser.has_option('blockstore', 'testset_first_block'):
            testset_first_block = int(
                parser.get('blockstore', 'testset_first_block'))

        if parser.has_option('blockstore', 'max_subsidy'):
            max_subsidy = int(parser.get('blockstore', 'max_subsidy'))

        if parser.has_option('blockstore', 'email'):
            contact_email = parser.get('blockstore', 'email')

        if parser.has_option('blockstore', 'announcers'):
            # must be a CSV of blockchain IDs
            announcer_list_str = parser.get('blockstore', 'announcers')
            announcer_list = announcer_list_str.split(",")

            import scripts

            # validate each one
            valid = True
            for bid in announcer_list:
                if not scripts.is_name_valid(bid):
                    log.error("Invalid blockchain ID '%s'" % bid)
                    valid = False

            if valid:
                announcers = ",".join(announcer_list)

    if os.path.exists(testset_path):
        # testset file flag set
        testset = True

    if os.path.exists(announce_path):
        # load announcement list
        with open(announce_path, "r") as f:
            announce_text = f.readlines()

        all_announcements = [a.strip() for a in announce_text]
        unseen_announcements = []

        # find announcements we haven't seen yet
        for a in all_announcements:
            if a not in ANNOUNCEMENTS:
                unseen_announcements.append(a)

        announcements = ",".join(unseen_announcements)

    blockstore_opts = {
        'tx_broadcaster': tx_broadcaster,
        'utxo_provider': utxo_provider,
        'testset': testset,
        'testset_first_block': testset_first_block,
        'max_subsidy': max_subsidy,
        'email': contact_email,
        'announcers': announcers,
        'announcements': announcements
    }

    # strip Nones
    for (k, v) in blockstore_opts.items():
        if v is None:
            del blockstore_opts[k]

    return blockstore_opts
Example #28
def run_scenario(scenario, config_file):
    """
    Run a test scenario:
    * set up the virtualchain to use our mock UTXO provider and mock bitcoin blockchain
    * seed it with the initial values in the wallet 
    * set the initial consensus hash 
    * run the scenario method
    * run the check method
    """

    mock_bitcoind_save_path = "/tmp/mock_bitcoind.dat"
    if os.path.exists(mock_bitcoind_save_path):
        try:
            os.unlink(mock_bitcoind_save_path)
        except:
            pass

    # use mock bitcoind
    worker_env = {
        # use mock_bitcoind to connect to bitcoind (but it has to import it in order to use it)
        "VIRTUALCHAIN_MOD_CONNECT_BLOCKCHAIN": mock_bitcoind.__file__,
        "MOCK_BITCOIND_SAVE_PATH": mock_bitcoind_save_path,
        "BLOCKSTORE_TEST": "1",
    }

    if os.environ.get("PYTHONPATH", None) is not None:
        worker_env["PYTHONPATH"] = os.environ["PYTHONPATH"]

    virtualchain.setup_virtualchain(
        blockstore_state_engine,
        bitcoind_connection_factory=mock_bitcoind.connect_mock_bitcoind,
        index_worker_env=worker_env,
    )

    # set up blockstore
    # NOTE: utxo_opts encodes the mock-bitcoind options
    blockstore_opts, bitcoin_opts, utxo_opts, dht_opts = blockstore.lib.configure(
        config_file=config_file, interactive=False
    )

    # override multiprocessing options to ensure single-process behavior
    utxo_opts["multiprocessing_num_procs"] = 1
    utxo_opts["multiprocessing_num_blocks"] = 10

    # pass along extra arguments
    utxo_opts["save_file"] = mock_bitcoind_save_path

    # save headers as well
    utxo_opts["spv_headers_path"] = mock_bitcoind_save_path + ".spvheaders"
    with open(utxo_opts["spv_headers_path"], "w") as f:
        # write out "initial" headers, up to the first block
        empty_header = ("00" * 81).decode("hex")
        for i in xrange(0, blockstore.FIRST_BLOCK_MAINNET):
            f.write(empty_header)

    blockstored.set_bitcoin_opts(bitcoin_opts)
    blockstored.set_utxo_opts(utxo_opts)

    db = blockstored.get_state_engine()
    bitcoind = mock_bitcoind.connect_mock_bitcoind(utxo_opts)
    sync_virtualchain_upcall = lambda: virtualchain.sync_virtualchain(utxo_opts, bitcoind.getblockcount(), db)
    mock_utxo = blockstore.lib.connect_utxo_provider(utxo_opts)
    working_dir = virtualchain.get_working_dir()

    # set up test environment
    testlib.set_utxo_opts(utxo_opts)
    testlib.set_utxo_client(mock_utxo)
    testlib.set_bitcoind(bitcoind)
    testlib.set_state_engine(db)

    test_env = {
        "sync_virtualchain_upcall": sync_virtualchain_upcall,
        "working_dir": working_dir,
        "bitcoind": bitcoind,
        "bitcoind_save_path": mock_bitcoind_save_path,
    }

    # sync initial utxos
    testlib.next_block(**test_env)

    try:
        os.unlink(mock_bitcoind_save_path)
    except:
        pass

    # load the scenario into the mock blockchain and mock utxo provider
    try:
        scenario.scenario(scenario.wallets, **test_env)

    except Exception, e:
        log.exception(e)
        traceback.print_exc()
        log.error("Failed to run scenario '%s'" % scenario.__name__)
        return False
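One detail worth flagging in the SPV setup above: a serialized Bitcoin block header is 80 bytes, so the 81-byte records presumably append one extra byte (a zero transaction-count varint, as SPV header files commonly do); the extra byte is an inference from the code, not something the snippet states. A sketch:

# 80-byte header + 1 assumed varint byte = 81 bytes per stored record.
HEADER_RECORD_LEN = 80 + 1
empty_header = ("00" * HEADER_RECORD_LEN).decode("hex")
assert len(empty_header) == HEADER_RECORD_LEN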