def blockstack_mutable_data_url_parse( url ):
    """
    Parse a blockstack:// URL for mutable data.
    Return (blockchain ID, data ID, data version, account ID, service ID) on success.
    * The version may be None if not given (in which case, the latest value is requested).
    * The data ID may be None, in which case, a listing of mutable data is requested.
    * The account ID and service ID will be None for mutable data in the profile,
      but will be defined for app-specific mutable data.

    Raise on bad data
    """

    url = str(url)
    mutable_url_data_regex = r"blockstack://(%s+)[/]+(%s+)(#[0-9]+)?" % (B40_CLASS, URLENCODED_CLASS)
    app_url_data_regex = r"blockstack://(%s+)\.(%s+)@(%s+)[/]+(%s+)(#[0-9]+)?" % (URLENCODED_CLASS, URLENCODED_CLASS, B40_CLASS, URLENCODED_CLASS)
    mutable_url_listing_regex = r"blockstack://(%s+)[/]+#mutable" % (B40_CLASS)

    blockchain_id = None
    data_id = None
    version = None

    # app-specific mutable data?
    m = re.match( app_url_data_regex, url )
    if m:
        account_id, service_id, blockchain_id, data_id, version = m.groups()
        if not is_name_valid( blockchain_id ):
            raise ValueError("Invalid blockchain ID '%s'" % blockchain_id)

        # version?
        if version is not None:
            version = version.strip("#")
            version = int(version)

        return urllib.unquote(blockchain_id), urllib.unquote(data_id), version, urllib.unquote(account_id), urllib.unquote(service_id)

    # profile mutable data?
    m = re.match( mutable_url_data_regex, url )
    if m:
        blockchain_id, data_id, version = m.groups()
        if not is_name_valid( blockchain_id ):
            raise ValueError("Invalid blockchain ID '%s'" % blockchain_id)

        # version?
        if version is not None:
            version = version.strip("#")
            version = int(version)

        return urllib.unquote(blockchain_id), urllib.unquote(data_id), version, None, None

    else:
        # maybe a listing?
        m = re.match( mutable_url_listing_regex, url )
        if not m:
            raise ValueError("Invalid URL: %s" % url)

        blockchain_id = m.groups()[0]
        return urllib.unquote(blockchain_id), None, None, None, None
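
# Illustrative parses for the 5-tuple variant above (assumes this module's
# B40_CLASS/URLENCODED_CLASS character classes and is_name_valid() are in
# scope; the names and URLs are made-up examples, not fixtures):
#
#   blockstack://judecn.id/profile.json#3
#       -> ('judecn.id', 'profile.json', 3, None, None)
#
#   blockstack://acct.twitter@judecn.id/statuses
#       -> ('judecn.id', 'statuses', None, 'acct', 'twitter')
#
#   blockstack://judecn.id/#mutable
#       -> ('judecn.id', None, None, None, None)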
def blockstack_mutable_data_url_parse(url):
    """
    Parse a blockstack:// URL for mutable data.
    Return (blockchain ID or datastore ID, data ID or path, data version, app domain) on success.
    * The data ID will be a path if a datastore ID and app domain are given; if
      the path ends in '/', then a directory is specifically requested.
    * The version may be None if not given (in which case, the latest value is
      requested).
    """

    url = str(url)
    mutable_url_data_regex = r'^blockstack://({}+)[/]+({}+)[/]*(#[0-9]+)?$'.format(
        B40_CLASS, URLENCODED_CLASS)
    datastore_url_data_regex = r'^blockstack://({}+)@({}+)[/]+({}+)[/]*(#[0-9]+)?$'.format(
        URLENCODED_CLASS, URLENCODED_CLASS, URLENCODED_PATH_CLASS)

    blockchain_id, data_id, version, app_domain = None, None, None, None
    is_dir = False

    # mutable?
    m = re.match(mutable_url_data_regex, url)
    if m:
        blockchain_id, data_id, version = m.groups()
        if not is_name_valid(blockchain_id):
            raise ValueError('Invalid blockchain ID "{}"'.format(blockchain_id))

        # version?
        if version is not None:
            version = version.strip('#/')
            version = int(version)

        return urllib.unquote(blockchain_id), data_id, version, None

    # datastore?
    m = re.match(datastore_url_data_regex, url)
    if m:
        datastore_id, app_domain, path, version = m.groups()
        if path.endswith('/'):
            is_dir = True

        # version?
        if version is not None:
            version = version.strip('#/')
            version = int(version)

        # unquote
        path = '/' + '/'.join(
            [urllib.unquote(p) for p in posixpath.normpath(path).split('/')])
        if is_dir:
            path += '/'

        return urllib.unquote(datastore_id), urllib.unquote(path), version, urllib.unquote(app_domain)

    return None, None, None, None
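
# Illustrative parses for the 4-tuple variant above (hypothetical names):
#
#   blockstack://judecn.id/profile.json#3
#       -> ('judecn.id', 'profile.json', 3, None)
#
#   blockstack://mydatastore@files.app.com/photos/cat.png#2
#       -> ('mydatastore', '/photos/cat.png', 2, 'files.app.com')
#
#   blockstack://mydatastore@files.app.com/photos/    (trailing '/': a directory)
#       -> ('mydatastore', '/photos/', None, 'files.app.com')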
def is_fq_data_id( fq_data_id ):
    """
    Is a data ID fully qualified?
    """
    if len(fq_data_id.split(":")) < 2:
        return False

    # name must be valid
    name = fq_data_id.split(":")[0]
    if not is_name_valid(name):
        return False

    return True
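
# A fully-qualified data ID is "<blockchain ID>:<data ID>" (example names):
#
#   is_fq_data_id("judecn.id:profile.json")   # -> True
#   is_fq_data_id("profile.json")             # -> False (no name prefix)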
def blockstack_immutable_data_url_parse(url):
    """
    Parse a blockstack:// URL for immutable data.
    Return (blockchain ID, data ID, data hash)
    * The hash may be None if not given, in which case, the hash should be
      looked up from the blockchain ID's profile.
    * The data ID may be None, in which case, the list of immutable data is
      requested.

    Raise on bad data
    """

    url = str(url)
    immutable_data_regex = r'^blockstack://({}+)\.({}+)\.({}+)[/]*([/]+#[a-fA-F0-9]+)?$'.format(
        URLENCODED_CLASS, B40_NO_PERIOD_CLASS, B40_NO_PERIOD_CLASS)
    immutable_listing_regex = r'^blockstack://({}+)[/]+#immutable$'.format(B40_CLASS)

    m = re.match(immutable_data_regex, url)
    if m:
        data_id, blockchain_name, namespace_id, data_hash = m.groups()
        blockchain_id = '{}.{}'.format(blockchain_name, namespace_id)

        if not is_name_valid(blockchain_id):
            log.debug('Invalid blockstack ID "{}"'.format(blockchain_id))
            raise ValueError('Invalid blockstack ID')

        if data_hash is not None:
            data_hash = data_hash.lower().strip('#/')
            if not is_valid_hash(data_hash):
                log.debug('Invalid data hash "{}"'.format(data_hash))
                raise ValueError('Invalid data hash')

        return urllib.unquote(blockchain_id), data_id, data_hash

    else:
        # maybe a listing?
        m = re.match(immutable_listing_regex, url)
        if not m:
            log.debug('Invalid immutable URL "{}"'.format(url))
            raise ValueError('Invalid immutable URL')

        blockchain_id = m.groups()[0]
        return urllib.unquote(blockchain_id), None, None

    return None, None, None
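
# Illustrative parses (URLs follow blockstack://<data_id>.<name>.<namespace>;
# the name and hash below are made up, with the hash elided for brevity):
#
#   blockstack://avatar.judecn.id/#0f1e2d...    (64 hex characters)
#       -> ('judecn.id', 'avatar', '0f1e2d...')
#
#   blockstack://judecn.id/#immutable
#       -> ('judecn.id', None, None)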
def blockstack_immutable_data_url_parse(url):
    """
    Parse a blockstack:// URL for immutable data.
    Return (blockchain ID, data ID, data hash)
    * The hash may be None if not given, in which case, the hash should be
      looked up from the blockchain ID's profile.
    * The data ID may be None, in which case, the list of immutable data is
      requested.

    Raise on bad data
    """

    url = str(url)
    immutable_data_regex = r"blockstack://(%s+)\.(%s+)\.(%s+)([/]+#[a-fA-F0-9]+)?" % (
        URLENCODED_CLASS, B40_NO_PERIOD_CLASS, B40_NO_PERIOD_CLASS)
    immutable_listing_regex = r"blockstack://(%s+)[/]+#immutable" % (B40_CLASS)

    m = re.match(immutable_data_regex, url)
    if m:
        data_id, blockchain_name, namespace_id, data_hash = m.groups()
        blockchain_id = "%s.%s" % (blockchain_name, namespace_id)

        if not is_name_valid(blockchain_id):
            log.debug("Invalid blockstack ID '%s'" % blockchain_id)
            raise ValueError("Invalid blockstack ID")

        if data_hash is not None:
            data_hash = data_hash.lower().strip("#/")
            if not is_valid_hash(data_hash):
                log.debug("Invalid data hash '%s'" % data_hash)
                raise ValueError("Invalid data hash")

        return urllib.unquote(blockchain_id), urllib.unquote(data_id), data_hash

    else:
        # maybe a listing?
        m = re.match(immutable_listing_regex, url)
        if not m:
            log.debug("Invalid URL '%s'" % url)
            raise ValueError("Invalid URL")

        blockchain_id = m.groups()[0]
        return urllib.unquote(blockchain_id), None, None
def delete_mutable_data(fq_data_id, privatekey, only_use=None):
    """
    Given the data ID and private key of a user,
    go and delete the associated mutable data.
    """

    global storage_handlers

    only_use = [] if only_use is None else only_use

    # sanity check: only support single-sig private keys
    if not keys.is_singlesig(privatekey):
        log.error("Only single-signature data private keys are supported")
        return False

    fq_data_id = str(fq_data_id)
    assert is_fq_data_id(fq_data_id) or is_name_valid(fq_data_id), \
        "Data ID must be fully qualified or must be a valid blockchain ID (got %s)" % fq_data_id

    sigb64 = sign_raw_data(fq_data_id, privatekey)

    # remove data
    for handler in storage_handlers:
        if not hasattr(handler, "delete_mutable_handler"):
            continue

        # honor the driver whitelist, if given
        if len(only_use) > 0 and handler.__name__ not in only_use:
            log.debug("Skip storage driver %s" % handler.__name__)
            continue

        try:
            handler.delete_mutable_handler(fq_data_id, sigb64)
        except Exception, e:
            log.exception(e)
            return False

    return True
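
# Usage sketch (illustrative; `privkey_hex` stands for a hypothetical
# single-sig private key, and 'disk' is used as an example driver name):
#
#   delete_mutable_data("judecn.id:statuses", privkey_hex)              # all drivers
#   delete_mutable_data("judecn.id:statuses", privkey_hex,
#                       only_use=["disk"])                              # only 'disk'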
def default_blockstack_opts( config_file=None, virtualchain_impl=None ):
    """
    Get our default blockstack opts from a config file
    or from sane defaults.
    """

    if config_file is None:
        config_file = virtualchain.get_config_filename()

    announce_path = get_announce_filename( virtualchain.get_working_dir(impl=virtualchain_impl) )

    parser = SafeConfigParser()
    parser.read( config_file )

    blockstack_opts = {}
    contact_email = None
    announcers = "judecn.id,muneeb.id,shea256.id"
    announcements = None
    backup_frequency = 144   # once a day; 10 minute block time
    backup_max_age = 1008    # one week
    rpc_port = RPC_SERVER_PORT
    serve_zonefiles = True
    serve_profiles = False
    serve_data = False
    zonefile_dir = os.path.join( os.path.dirname(config_file), "zonefiles")
    analytics_key = None
    zonefile_storage_drivers = "disk,dht"
    zonefile_storage_drivers_write = "disk"
    profile_storage_drivers = "disk"
    profile_storage_drivers_write = "disk"
    data_storage_drivers = "disk"
    data_storage_drivers_write = "disk"
    redirect_data = False
    data_servers = None
    server_version = None
    atlas_enabled = True
    atlas_seed_peers = "node.blockstack.org:%s" % RPC_SERVER_PORT
    atlasdb_path = os.path.join( os.path.dirname(config_file), "atlas.db" )
    atlas_blacklist = ""
    atlas_hostname = socket.gethostname()

    if parser.has_section('blockstack'):

        if parser.has_option('blockstack', 'backup_frequency'):
            backup_frequency = int( parser.get('blockstack', 'backup_frequency'))

        if parser.has_option('blockstack', 'backup_max_age'):
            backup_max_age = int( parser.get('blockstack', 'backup_max_age') )

        if parser.has_option('blockstack', 'email'):
            contact_email = parser.get('blockstack', 'email')

        if parser.has_option('blockstack', 'rpc_port'):
            rpc_port = int(parser.get('blockstack', 'rpc_port'))

        if parser.has_option('blockstack', 'serve_zonefiles'):
            serve_zonefiles = parser.get('blockstack', 'serve_zonefiles')
            if serve_zonefiles.lower() in ['1', 'yes', 'true', 'on']:
                serve_zonefiles = True
            else:
                serve_zonefiles = False

        if parser.has_option('blockstack', 'serve_profiles'):
            serve_profiles = parser.get('blockstack', 'serve_profiles')
            if serve_profiles.lower() in ['1', 'yes', 'true', 'on']:
                serve_profiles = True
            else:
                serve_profiles = False

        if parser.has_option('blockstack', 'serve_data'):
            serve_data = parser.get('blockstack', 'serve_data')
            if serve_data.lower() in ['1', 'yes', 'true', 'on']:
                serve_data = True
            else:
                serve_data = False

        if parser.has_option("blockstack", "zonefile_storage_drivers"):
            zonefile_storage_drivers = parser.get("blockstack", "zonefile_storage_drivers")

        if parser.has_option("blockstack", "zonefile_storage_drivers_write"):
            zonefile_storage_drivers_write = parser.get("blockstack", "zonefile_storage_drivers_write")

        if parser.has_option("blockstack", "profile_storage_drivers"):
            profile_storage_drivers = parser.get("blockstack", "profile_storage_drivers")

        if parser.has_option("blockstack", "profile_storage_drivers_write"):
            profile_storage_drivers_write = parser.get("blockstack", "profile_storage_drivers_write")

        if parser.has_option("blockstack", "data_storage_drivers"):
            data_storage_drivers = parser.get("blockstack", "data_storage_drivers")

        if parser.has_option("blockstack", "data_storage_drivers_write"):
            data_storage_drivers_write = parser.get("blockstack", "data_storage_drivers_write")

        if parser.has_option("blockstack", "zonefiles"):
            zonefile_dir = parser.get("blockstack", "zonefiles")

        if parser.has_option('blockstack', 'redirect_data'):
            redirect_data = parser.get('blockstack', 'redirect_data')
            if redirect_data.lower() in ['1', 'yes', 'true', 'on']:
                redirect_data = True
            else:
                redirect_data = False

        if parser.has_option('blockstack', 'data_servers'):
            data_servers = parser.get('blockstack', 'data_servers')

            # must be a CSV of host:port
            hostports = filter( lambda x: len(x) > 0, data_servers.split(",") )
            for hp in hostports:
                host, port = url_to_host_port( hp )
                assert host is not None and port is not None

        if parser.has_option('blockstack', 'announcers'):
            # must be a CSV of blockchain IDs
            announcer_list_str = parser.get('blockstack', 'announcers')
            announcer_list = filter( lambda x: len(x) > 0, announcer_list_str.split(",") )

            import scripts

            # validate each one
            valid = True
            for bid in announcer_list:
                if not scripts.is_name_valid( bid ):
                    log.error("Invalid blockchain ID '%s'" % bid)
                    valid = False

            if valid:
                announcers = ",".join(announcer_list)

        if parser.has_option('blockstack', 'analytics_key'):
            analytics_key = parser.get('blockstack', 'analytics_key')

        if parser.has_option('blockstack', 'server_version'):
            server_version = parser.get('blockstack', 'server_version')

        if parser.has_option('blockstack', 'atlas'):
            atlas_enabled = parser.get('blockstack', 'atlas')
            if atlas_enabled.lower() in ['true', '1', 'enabled', 'on']:
                atlas_enabled = True
            else:
                atlas_enabled = False

        if parser.has_option('blockstack', 'atlas_seeds'):
            atlas_seed_peers = parser.get('blockstack', 'atlas_seeds')

            # must be a CSV of host:port
            hostports = filter( lambda x: len(x) > 0, atlas_seed_peers.split(",") )
            for hp in hostports:
                host, port = url_to_host_port( hp )
                assert host is not None and port is not None

        if parser.has_option('blockstack', 'atlasdb_path'):
            atlasdb_path = parser.get('blockstack', 'atlasdb_path')

        if parser.has_option('blockstack', 'atlas_blacklist'):
            atlas_blacklist = parser.get('blockstack', 'atlas_blacklist')

            # must be a CSV of host:port
            hostports = filter( lambda x: len(x) > 0, atlas_blacklist.split(",") )
            for hp in hostports:
                host, port = url_to_host_port( hp )
                assert host is not None and port is not None

        if parser.has_option('blockstack', 'atlas_hostname'):
            atlas_hostname = parser.get('blockstack', 'atlas_hostname')

    if os.path.exists( announce_path ):
        # load announcement list
        with open( announce_path, "r" ) as f:
            announce_text = f.readlines()

        all_announcements = [ a.strip() for a in announce_text ]
        unseen_announcements = []

        # find announcements we haven't seen yet
        for a in all_announcements:
            if a not in ANNOUNCEMENTS:
                unseen_announcements.append( a )

        announcements = ",".join( unseen_announcements )

    if zonefile_dir is not None and not os.path.exists( zonefile_dir ):
        try:
            os.makedirs( zonefile_dir, 0700 )
        except:
            pass

    blockstack_opts = {
        'rpc_port': rpc_port,
        'email': contact_email,
        'announcers': announcers,
        'announcements': announcements,
        'backup_frequency': backup_frequency,
        'backup_max_age': backup_max_age,
        'serve_zonefiles': serve_zonefiles,
        'zonefile_storage_drivers': zonefile_storage_drivers,
        'zonefile_storage_drivers_write': zonefile_storage_drivers_write,
        'serve_profiles': serve_profiles,
        'profile_storage_drivers': profile_storage_drivers,
        'profile_storage_drivers_write': profile_storage_drivers_write,
        'serve_data': serve_data,
        'data_storage_drivers': data_storage_drivers,
        'data_storage_drivers_write': data_storage_drivers_write,
        'redirect_data': redirect_data,
        'data_servers': data_servers,
        'analytics_key': analytics_key,
        'server_version': server_version,
        'atlas': atlas_enabled,
        'atlas_seeds': atlas_seed_peers,
        'atlasdb_path': atlasdb_path,
        'atlas_blacklist': atlas_blacklist,
        'atlas_hostname': atlas_hostname,
        'zonefiles': zonefile_dir,
    }

    # strip Nones
    for (k, v) in blockstack_opts.items():
        if v is None:
            del blockstack_opts[k]

    return blockstack_opts
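
# Example [blockstack] section exercising the options read above. The option
# names come directly from the parser.has_option() calls; the values are
# illustrative placeholders, not recommendations:
#
#   [blockstack]
#   rpc_port = 6264
#   email = ops@example.com
#   backup_frequency = 144
#   backup_max_age = 1008
#   serve_zonefiles = true
#   serve_profiles = false
#   serve_data = false
#   zonefile_storage_drivers = disk,dht
#   zonefile_storage_drivers_write = disk
#   zonefiles = /var/blockstack/zonefiles
#   redirect_data = false
#   announcers = judecn.id,muneeb.id,shea256.id
#   atlas = on
#   atlas_seeds = node.blockstack.org:6264
#   atlasdb_path = /var/blockstack/atlas.db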
def put_mutable_data( fq_data_id, data_json, privatekey, required=None, use_only=None ):
    """
    Given the unserialized data, store it into our mutable data stores.
    Do so in a best-effort way.  This method fails only if all storage
    providers fail.

    @fq_data_id is the fully-qualified data id.  It must be prefixed with
    the username, to avoid collisions in shared mutable storage.

    Return True on success
    Return False on error
    """

    global storage_handlers

    required = [] if required is None else required
    use_only = [] if use_only is None else use_only

    # sanity check: only support single-sig private keys
    if not keys.is_singlesig(privatekey):
        log.error("Only single-signature data private keys are supported")
        return False

    fq_data_id = str(fq_data_id)
    assert is_fq_data_id( fq_data_id ) or is_name_valid(fq_data_id), \
        "Data ID must be fully qualified or must be a valid blockchain ID (got %s)" % fq_data_id
    assert privatekey is not None

    fqu = None
    if is_fq_data_id(fq_data_id):
        fqu = fq_data_id.split(":")[0]
    else:
        fqu = fq_data_id

    serialized_data = serialize_mutable_data( data_json, privatekey )
    successes = 0

    log.debug("put_mutable_data(%s), required=%s" % (fq_data_id, ",".join(required)))

    for handler in storage_handlers:
        if not hasattr( handler, "put_mutable_handler" ):
            if handler.__name__ in required:
                log.debug("Failed to replicate with required storage provider '%s'" % handler.__name__)
                return False
            else:
                continue

        if len(use_only) > 0 and handler.__name__ not in use_only:
            log.debug("Skipping storage driver '%s'" % handler.__name__)
            continue

        rc = False
        try:
            log.debug("Try '%s'" % handler.__name__)
            rc = handler.put_mutable_handler( fq_data_id, serialized_data, fqu=fqu )
        except Exception, e:
            log.exception( e )
            if handler.__name__ in required:
                log.debug("Failed to replicate with required storage provider '%s'" % handler.__name__)
                return False
            else:
                continue

        if not rc:
            if handler.__name__ in required:
                log.debug("Failed to replicate with required storage provider '%s'" % handler.__name__)
                return False
            else:
                log.debug("Failed to replicate with '%s'" % handler.__name__)
                continue
        else:
            successes += 1

    # best-effort: succeed if at least one driver stored the data
    return successes > 0
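
# Usage sketch (illustrative; `privkey_hex` is a hypothetical single-sig key
# and 'disk' an example driver name). Every driver named in `required` must
# succeed; a non-empty `use_only` whitelists which drivers are attempted:
#
#   put_mutable_data("judecn.id:statuses", {"status": "hello"}, privkey_hex,
#                    required=["disk"])
#   # -> True on success; False if a required driver (or every driver) fails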
def get_mutable_data( fq_data_id, data_pubkey, urls=None, data_address=None, owner_address=None, drivers=None, decode=True ):
    """
    Given a mutable data's zonefile, go fetch the data.

    Return a mutable data dict on success
    Return None on error
    """

    global storage_handlers

    fq_data_id = str(fq_data_id)
    assert is_fq_data_id( fq_data_id ) or is_name_valid( fq_data_id ), \
        "Need either a fully-qualified data ID or a blockchain ID: '%s'" % fq_data_id

    fqu = None
    if is_fq_data_id(fq_data_id):
        fqu = fq_data_id.split(":")[0]
    else:
        fqu = fq_data_id

    handlers_to_use = []
    if drivers is not None and len(drivers) > 0:
        # whitelist of drivers to try
        for d in drivers:
            for h in storage_handlers:
                if h.__name__ == d:
                    handlers_to_use.append(h)
    else:
        handlers_to_use = storage_handlers

    log.debug("get_mutable %s" % fq_data_id)
    for storage_handler in handlers_to_use:
        if not hasattr(storage_handler, "get_mutable_handler"):
            continue

        # which URLs to attempt?
        try_urls = []
        if urls is None:
            # make one on-the-fly
            if not hasattr(storage_handler, "make_mutable_url"):
                log.warning("Storage handler %s does not support `make_mutable_url`" % storage_handler.__name__)
                continue

            new_url = None
            try:
                new_url = storage_handler.make_mutable_url( fq_data_id )
            except Exception, e:
                log.exception(e)
                continue

            try_urls = [new_url]

        else:
            # find the set that this handler can manage
            for url in urls:
                if not hasattr(storage_handler, "handles_url"):
                    log.warning("Storage handler %s does not support `handles_url`" % storage_handler.__name__)
                    continue

                if storage_handler.handles_url( url ):
                    try_urls.append(url)

        for url in try_urls:
            data_json = None
            data = None

            log.debug("Try %s (%s)" % (storage_handler.__name__, url))
            try:
                data_json = storage_handler.get_mutable_handler( url, fqu=fqu )
            except UnhandledURLException, uue:
                # handler doesn't handle this URL
                log.debug("Storage handler %s does not handle URLs like %s" % (storage_handler.__name__, url))
                continue
            except Exception, e:
                log.exception( e )
                continue

            if data_json is None:
                continue

            # fetched a payload.  ASSUMED completion: verify and decode it via
            # parse_mutable_data() (signature assumed) before returning.
            data = data_json
            if decode:
                data = parse_mutable_data( data_json, data_pubkey, public_key_hash=data_address )
                if data is None:
                    log.error("Unparseable data from '%s'" % url)
                    continue

            return data

    log.debug("Failed to fetch mutable data '%s'" % fq_data_id)
    return None
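
# The routines above duck-type storage drivers: any object (normally a driver
# module) exposing some of make_mutable_url(), handles_url(),
# get_mutable_handler(), put_mutable_handler(), and delete_mutable_handler()
# can sit in storage_handlers.  The signatures below are taken from the call
# sites above; the driver itself is an illustrative in-memory toy, not a real
# blockstack driver:

class _MemDriverSketch(object):
    """Toy in-memory driver implementing the duck-typed handler interface."""

    _data = {}

    @staticmethod
    def make_mutable_url(fq_data_id):
        # derive a driver-specific URL for a fully-qualified data ID
        return "mem://%s" % fq_data_id

    @staticmethod
    def handles_url(url):
        # claim only our own URL scheme
        return url.startswith("mem://")

    @staticmethod
    def get_mutable_handler(url, fqu=None):
        # return the serialized payload previously stored at this URL
        return _MemDriverSketch._data.get(url)

    @staticmethod
    def put_mutable_handler(fq_data_id, serialized_data, fqu=None):
        # store the serialized payload; return True on success
        _MemDriverSketch._data[_MemDriverSketch.make_mutable_url(fq_data_id)] = serialized_data
        return True

    @staticmethod
    def delete_mutable_handler(fq_data_id, signature_b64):
        # delete the payload (a real driver would verify signature_b64 first)
        _MemDriverSketch._data.pop(_MemDriverSketch.make_mutable_url(fq_data_id), None)
        return True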
def default_blockstore_opts(config_file=None, testset=False):
    """
    Get our default blockstore opts from a config file
    or from sane defaults.
    """

    if config_file is None:
        config_file = virtualchain.get_config_filename()

    testset_path = get_testset_filename(virtualchain.get_working_dir())
    announce_path = get_announce_filename(virtualchain.get_working_dir())

    parser = SafeConfigParser()
    parser.read(config_file)

    blockstore_opts = {}
    tx_broadcaster = None
    utxo_provider = None
    testset_first_block = None
    max_subsidy = 0
    contact_email = None
    announcers = "judecn.id,muneeb.id,shea256.id"
    announcements = None

    if parser.has_section('blockstore'):

        if parser.has_option('blockstore', 'tx_broadcaster'):
            tx_broadcaster = parser.get('blockstore', 'tx_broadcaster')

        if parser.has_option('blockstore', 'utxo_provider'):
            utxo_provider = parser.get('blockstore', 'utxo_provider')

        if parser.has_option('blockstore', 'testset_first_block'):
            testset_first_block = int(parser.get('blockstore', 'testset_first_block'))

        if parser.has_option('blockstore', 'max_subsidy'):
            max_subsidy = int(parser.get('blockstore', 'max_subsidy'))

        if parser.has_option('blockstore', 'email'):
            contact_email = parser.get('blockstore', 'email')

        if parser.has_option('blockstore', 'announcers'):
            # must be a CSV of blockchain IDs
            announcer_list_str = parser.get('blockstore', 'announcers')
            announcer_list = announcer_list_str.split(",")

            import scripts

            # validate each one
            valid = True
            for bid in announcer_list:
                if not scripts.is_name_valid(bid):
                    log.error("Invalid blockchain ID '%s'" % bid)
                    valid = False

            if valid:
                announcers = ",".join(announcer_list)

    if os.path.exists(testset_path):
        # testset file flag set
        testset = True

    if os.path.exists(announce_path):
        # load announcement list
        with open(announce_path, "r") as f:
            announce_text = f.readlines()

        all_announcements = [a.strip() for a in announce_text]
        unseen_announcements = []

        # find announcements we haven't seen yet
        for a in all_announcements:
            if a not in ANNOUNCEMENTS:
                unseen_announcements.append(a)

        announcements = ",".join(unseen_announcements)

    blockstore_opts = {
        'tx_broadcaster': tx_broadcaster,
        'utxo_provider': utxo_provider,
        'testset': testset,
        'testset_first_block': testset_first_block,
        'max_subsidy': max_subsidy,
        'email': contact_email,
        'announcers': announcers,
        'announcements': announcements
    }

    # strip Nones
    for (k, v) in blockstore_opts.items():
        if v is None:
            del blockstore_opts[k]

    return blockstore_opts
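
# Example [blockstore] section exercising the options read above (option names
# come from the parser.has_option() calls; values are illustrative placeholders):
#
#   [blockstore]
#   tx_broadcaster = bitcoind
#   utxo_provider = blockcypher
#   testset_first_block = 373601
#   max_subsidy = 500000
#   email = ops@example.com
#   announcers = judecn.id,muneeb.id,shea256.id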
def default_blockstack_opts( config_file=None ):
    """
    Get our default blockstack opts from a config file
    or from sane defaults.
    """

    if config_file is None:
        config_file = virtualchain.get_config_filename()

    announce_path = get_announce_filename( virtualchain.get_working_dir() )

    parser = SafeConfigParser()
    parser.read( config_file )

    blockstack_opts = {}
    contact_email = None
    announcers = "judecn.id,muneeb.id,shea256.id"
    announcements = None
    backup_frequency = 1008   # once a week; 10 minute block time
    backup_max_age = 12096    # 12 weeks
    rpc_port = RPC_SERVER_PORT
    blockchain_proxy = False
    serve_zonefiles = True
    serve_profiles = False
    zonefile_dir = None
    analytics_key = None
    zonefile_storage_drivers = "disk"
    profile_storage_drivers = ""
    server_version = None

    if parser.has_section('blockstack'):

        if parser.has_option('blockstack', 'backup_frequency'):
            backup_frequency = int( parser.get('blockstack', 'backup_frequency'))

        if parser.has_option('blockstack', 'backup_max_age'):
            backup_max_age = int( parser.get('blockstack', 'backup_max_age') )

        if parser.has_option('blockstack', 'email'):
            contact_email = parser.get('blockstack', 'email')

        if parser.has_option('blockstack', 'rpc_port'):
            rpc_port = int(parser.get('blockstack', 'rpc_port'))

        if parser.has_option('blockstack', 'blockchain_proxy'):
            blockchain_proxy = parser.get('blockstack', 'blockchain_proxy')
            if blockchain_proxy.lower() in ['1', 'yes', 'true', 'on']:
                blockchain_proxy = True
            else:
                blockchain_proxy = False

        if parser.has_option('blockstack', 'serve_zonefiles'):
            serve_zonefiles = parser.get('blockstack', 'serve_zonefiles')
            if serve_zonefiles.lower() in ['1', 'yes', 'true', 'on']:
                serve_zonefiles = True
            else:
                serve_zonefiles = False

        if parser.has_option('blockstack', 'serve_profiles'):
            serve_profiles = parser.get('blockstack', 'serve_profiles')
            if serve_profiles.lower() in ['1', 'yes', 'true', 'on']:
                serve_profiles = True
            else:
                serve_profiles = False

        if parser.has_option("blockstack", "zonefile_storage_drivers"):
            zonefile_storage_drivers = parser.get("blockstack", "zonefile_storage_drivers")

        if parser.has_option("blockstack", "profile_storage_drivers"):
            profile_storage_drivers = parser.get("blockstack", "profile_storage_drivers")

        if parser.has_option("blockstack", "zonefiles"):
            zonefile_dir = parser.get("blockstack", "zonefiles")

        if parser.has_option('blockstack', 'announcers'):
            # must be a CSV of blockchain IDs
            announcer_list_str = parser.get('blockstack', 'announcers')
            announcer_list = announcer_list_str.split(",")

            import scripts

            # validate each one
            valid = True
            for bid in announcer_list:
                if not scripts.is_name_valid( bid ):
                    log.error("Invalid blockchain ID '%s'" % bid)
                    valid = False

            if valid:
                announcers = ",".join(announcer_list)

        if parser.has_option('blockstack', 'analytics_key'):
            analytics_key = parser.get('blockstack', 'analytics_key')

        if parser.has_option('blockstack', 'server_version'):
            server_version = parser.get('blockstack', 'server_version')

    if os.path.exists( announce_path ):
        # load announcement list
        with open( announce_path, "r" ) as f:
            announce_text = f.readlines()

        all_announcements = [ a.strip() for a in announce_text ]
        unseen_announcements = []

        # find announcements we haven't seen yet
        for a in all_announcements:
            if a not in ANNOUNCEMENTS:
                unseen_announcements.append( a )

        announcements = ",".join( unseen_announcements )

    if zonefile_dir is not None and not os.path.exists( zonefile_dir ):
        try:
            os.makedirs( zonefile_dir, 0700 )
        except:
            pass

    blockstack_opts = {
        'rpc_port': rpc_port,
        'email': contact_email,
        'announcers': announcers,
        'announcements': announcements,
        'backup_frequency': backup_frequency,
        'backup_max_age': backup_max_age,
        'blockchain_proxy': blockchain_proxy,
        'serve_zonefiles': serve_zonefiles,
        'serve_profiles': serve_profiles,
        'zonefile_storage_drivers': zonefile_storage_drivers,
        'profile_storage_drivers': profile_storage_drivers,
        'zonefiles': zonefile_dir,
        'analytics_key': analytics_key,
        'server_version': server_version
    }

    # strip Nones
    for (k, v) in blockstack_opts.items():
        if v is None:
            del blockstack_opts[k]

    return blockstack_opts
def default_blockstack_opts( config_file=None, testset=False ):
    """
    Get our default blockstack opts from a config file
    or from sane defaults.
    """

    if config_file is None:
        config_file = virtualchain.get_config_filename()

    testset_path = get_testset_filename( virtualchain.get_working_dir() )
    announce_path = get_announce_filename( virtualchain.get_working_dir() )

    parser = SafeConfigParser()
    parser.read( config_file )

    blockstack_opts = {}
    tx_broadcaster = None
    utxo_provider = None
    testset_first_block = None
    max_subsidy = 0
    contact_email = None
    announcers = "judecn.id,muneeb.id,shea256.id"
    announcements = None

    if parser.has_section('blockstack'):

        if parser.has_option('blockstack', 'tx_broadcaster'):
            tx_broadcaster = parser.get('blockstack', 'tx_broadcaster')

        if parser.has_option('blockstack', 'utxo_provider'):
            utxo_provider = parser.get('blockstack', 'utxo_provider')

        if parser.has_option('blockstack', 'testset_first_block'):
            testset_first_block = int( parser.get('blockstack', 'testset_first_block') )

        if parser.has_option('blockstack', 'max_subsidy'):
            max_subsidy = int( parser.get('blockstack', 'max_subsidy'))

        if parser.has_option('blockstack', 'email'):
            contact_email = parser.get('blockstack', 'email')

        if parser.has_option('blockstack', 'announcers'):
            # must be a CSV of blockchain IDs
            announcer_list_str = parser.get('blockstack', 'announcers')
            announcer_list = announcer_list_str.split(",")

            import scripts

            # validate each one
            valid = True
            for bid in announcer_list:
                if not scripts.is_name_valid( bid ):
                    log.error("Invalid blockchain ID '%s'" % bid)
                    valid = False

            if valid:
                announcers = ",".join(announcer_list)

    if os.path.exists( testset_path ):
        # testset file flag set
        testset = True

    if os.path.exists( announce_path ):
        # load announcement list
        with open( announce_path, "r" ) as f:
            announce_text = f.readlines()

        all_announcements = [ a.strip() for a in announce_text ]
        unseen_announcements = []

        # find announcements we haven't seen yet
        for a in all_announcements:
            if a not in ANNOUNCEMENTS:
                unseen_announcements.append( a )

        announcements = ",".join( unseen_announcements )

    blockstack_opts = {
        'tx_broadcaster': tx_broadcaster,
        'utxo_provider': utxo_provider,
        'testset': testset,
        'testset_first_block': testset_first_block,
        'max_subsidy': max_subsidy,
        'email': contact_email,
        'announcers': announcers,
        'announcements': announcements
    }

    # strip Nones
    for (k, v) in blockstack_opts.items():
        if v is None:
            del blockstack_opts[k]

    return blockstack_opts