def list_immutable_data_history( name, data_id, current_block=None, proxy=None ):
    """
    list_immutable_data_history

    List all prior hashes of an immutable datum, given its unchanging ID.
    If the zonefile at a particular update is missing, the string
    "missing zonefile" will be appended in its place.  If the zonefile did
    not define data_id at that time, the string "data not defined" will be
    placed in the hash's place.

    Returns the list of hashes.
    If there are multiple matches for the data ID in a zonefile, then return
    the list of hashes for that zonefile.
    """
    zonefile_history = list_zonefile_history( name, current_block=current_block, proxy=proxy )
    history = []

    for zonefile in zonefile_history:
        if 'error' in zonefile and len(zonefile.keys()) == 1:
            # unresolvable zonefile at this point in history
            history.append("missing zonefile")
        elif not user_db.is_user_zonefile(zonefile):
            # legacy profile, not a standard zonefile
            history.append("missing zonefile")
        else:
            found = user_db.get_immutable_data_hash( zonefile, data_id )
            if found is None:
                history.append("data not defined")
            else:
                # may be a single hash, or a list if the zonefile has duplicates
                history.append(found)

    return history
def store_name_zonefile(name, user_zonefile, txid, storage_drivers=None):
    """
    Store JSON user zonefile data to the immutable storage providers, synchronously.
    This is only necessary if we've added/changed/removed immutable data.

    Return (True, hash(user zonefile)) on success
    Return (False, None) on failure
    """
    if storage_drivers is None:
        storage_drivers = []

    # refuse legacy profiles; only standard user zonefiles can be serialized
    assert not blockstack_profiles.is_profile_in_legacy_format(user_zonefile), 'User zonefile is a legacy profile'
    assert user_db.is_user_zonefile(user_zonefile), 'Not a user zonefile (maybe a custom legacy profile?)'

    # serialize and send off
    zonefile_text = blockstack_zones.make_zone_file(user_zonefile, origin=name, ttl=USER_ZONEFILE_TTL)
    return store_name_zonefile_data(name, zonefile_text, txid, storage_drivers=storage_drivers)
def decode_name_zonefile(name, zonefile_txt, allow_legacy=False):
    """
    Decode a serialized zonefile into a JSON dict.
    If allow_legacy is True, then support legacy zone file formats (including Onename profiles)
    Otherwise, the data must actually be a Blockstack zone file.
    * If the zonefile does not have $ORIGIN, or if $ORIGIN does not match the name, then this fails.

    Return the zonefile as a dict on success.
    Return {'error': ...} if the text is a legacy format and allow_legacy is False.
    Return None on error
    """
    user_zonefile = None
    try:
        # by default, it's a zonefile-formatted text file
        user_zonefile_defaultdict = blockstack_zones.parse_zone_file(zonefile_txt)
        assert user_db.is_user_zonefile(user_zonefile_defaultdict), 'Not a user zonefile'

        # force dict (parse_zone_file returns a defaultdict)
        user_zonefile = dict(user_zonefile_defaultdict)

    except (IndexError, ValueError, blockstack_zones.InvalidLineException):
        if not allow_legacy:
            return {'error': 'Legacy zone file'}

        # might be legacy profile
        log.debug('WARN: failed to parse user zonefile; trying to import as legacy')
        try:
            user_zonefile = json.loads(zonefile_txt)
            if not isinstance(user_zonefile, dict):
                log.debug('Not a legacy user zonefile')
                return None

        except Exception as e:
            if BLOCKSTACK_DEBUG is not None:
                log.exception(e)

            log.error('Failed to parse non-standard zonefile')
            return None

    except Exception as e:
        # gate the traceback on debug mode, matching the inner handler above
        if BLOCKSTACK_DEBUG is not None:
            log.exception(e)

        log.error('Failed to parse zonefile')
        return None

    if user_zonefile is None:
        return None

    if not allow_legacy:
        # additional checks: a standard zone file must name its owner
        if '$origin' not in user_zonefile:
            log.debug("Zonefile has no $ORIGIN")
            return None

        if user_zonefile['$origin'] != name:
            log.debug("Name/zonefile mismatch: $ORIGIN = {}, name = {}".format(user_zonefile['$origin'], name))
            return None

    return user_zonefile
def decode_name_zonefile(name, zonefile_txt, allow_legacy=False):
    """
    Decode a serialized zonefile into a JSON dict.
    If allow_legacy is True, then support legacy zone file formats (including Onename profiles)
    Otherwise, the data must actually be a Blockstack zone file.
    * If the zonefile does not have $ORIGIN, or if $ORIGIN does not match the name, then this fails.

    Return the zonefile as a dict on success.
    Return {'error': ...} if the text is a legacy format and allow_legacy is False.
    Return None on error
    """
    user_zonefile = None
    try:
        # by default, it's a zonefile-formatted text file
        user_zonefile_defaultdict = blockstack_zones.parse_zone_file(zonefile_txt)
        assert user_db.is_user_zonefile(user_zonefile_defaultdict), 'Not a user zonefile'

        # force dict (parse_zone_file returns a defaultdict)
        user_zonefile = dict(user_zonefile_defaultdict)

    except (IndexError, ValueError, blockstack_zones.InvalidLineException):
        if not allow_legacy:
            return {'error': 'Legacy zone file'}

        # might be legacy profile
        log.debug('WARN: failed to parse user zonefile; trying to import as legacy')
        try:
            user_zonefile = json.loads(zonefile_txt)
            if not isinstance(user_zonefile, dict):
                log.debug('Not a legacy user zonefile')
                return None

        except Exception as e:
            if BLOCKSTACK_DEBUG:
                log.exception(e)

            log.error('Failed to parse non-standard zonefile')
            return None

    except Exception as e:
        if BLOCKSTACK_DEBUG:
            log.exception(e)

        log.error('Failed to parse zonefile')
        return None

    if user_zonefile is None:
        return None

    if not allow_legacy:
        # additional checks: a standard zone file must name its owner
        if '$origin' not in user_zonefile:
            log.debug("Zonefile has no $ORIGIN")
            return None

        if user_zonefile['$origin'] != name:
            log.debug("Name/zonefile mismatch: $ORIGIN = {}, name = {}".format(user_zonefile['$origin'], name))
            return None

    return user_zonefile
def get_immutable(name, data_hash, data_id=None, proxy=None):
    """
    get_immutable

    Fetch a piece of immutable data.  Use @data_hash to look it up
    in the user's zonefile, and then fetch and verify the data itself
    from the configured storage providers.

    Return {'data': the data, 'hash': hash} on success
    Return {'error': ...} on failure
    """
    proxy = get_default_proxy() if proxy is None else proxy

    user_zonefile = get_name_zonefile(name, proxy=proxy)
    if user_zonefile is None:
        return {'error': 'No user zonefile defined'}

    if 'error' in user_zonefile:
        return user_zonefile

    is_legacy = blockstack_profiles.is_profile_in_legacy_format( user_zonefile )
    if is_legacy or not user_db.is_user_zonefile( user_zonefile ):
        # zonefile is really a legacy profile
        return {'error': 'Profile is in a legacy format that does not support immutable data.'}

    if data_id is None:
        # caller gave only a hash; it must be present in the zonefile
        if not user_db.has_immutable_data( user_zonefile, data_hash ):
            return {'error': 'No such immutable datum'}
    else:
        # look up hash by name
        h = user_db.get_immutable_data_hash( user_zonefile, data_id )
        if h is None:
            return {'error': 'No such immutable datum'}

        if type(h) == list:
            # this tool doesn't allow this to happen (one ID matches one hash),
            # but that doesn't preclude the user from doing this with other tools.
            if data_hash is not None and data_hash not in h:
                return {'error': 'Data ID/hash mismatch'}
            else:
                return {'error': "Multiple matches for '%s': %s" % (data_id, ",".join(h))}

        if data_hash is None:
            data_hash = h
        elif h != data_hash:
            return {'error': 'Data ID/hash mismatch'}

    # fetch and verify from the storage providers
    data_url_hint = user_db.get_immutable_data_url( user_zonefile, data_hash )
    data = storage.get_immutable_data( data_hash, fqu=name, data_id=data_id, data_url=data_url_hint )
    if data is None:
        return {'error': 'No immutable data returned'}

    return {'data': data, 'hash': data_hash}
def delete_mutable(name, data_id, proxy=None, wallet_keys=None):
    """
    delete_mutable

    Remove a piece of mutable data from the user's profile.
    Delete it from the storage providers as well.

    Returns a dict with {'status': True} on success
    Returns a dict with {'error': ...} on failure
    """
    if proxy is None:
        proxy = get_default_proxy()

    fq_data_id = storage.make_fq_data_id( name, data_id )

    user_profile, user_zonefile = get_name_profile( name, proxy=proxy, wallet_keys=wallet_keys, include_name_record=True )
    if user_profile is None:
        return user_zonefile    # will be an error message

    name_record = user_zonefile['name_record']
    del user_zonefile['name_record']

    if blockstack_profiles.is_profile_in_legacy_format( user_zonefile ) or not user_db.is_user_zonefile( user_zonefile ):
        # zonefile is a legacy profile.  There is no mutable data to unlink.
        # (original message said "immutable"; this function handles mutable data)
        log.info("Profile is in legacy format.  No mutable data.")
        return {'status': True}

    # already deleted?
    if not user_db.has_mutable_data( user_profile, data_id ):
        return {'status': True}

    # unlink from the profile
    user_db.remove_mutable_data_zonefile( user_profile, data_id )

    # put new profile
    data_privkey = get_data_or_owner_privkey( user_zonefile, name_record['address'], wallet_keys=wallet_keys, config_path=proxy.conf['path'] )
    if 'error' in data_privkey:
        return {'error': data_privkey['error']}
    else:
        data_privkey = data_privkey['privatekey']
        assert data_privkey is not None

    rc = storage.put_mutable_data( name, user_profile, data_privkey )
    if not rc:
        return {'error': 'Failed to unlink mutable data from profile'}

    # remove the data itself from the storage providers
    rc = storage.delete_mutable_data( fq_data_id, data_privkey )
    if not rc:
        return {'error': 'Failed to delete mutable data from storage providers'}

    return {'status': True}
def store_name_zonefile(name, user_zonefile, txid, storage_drivers=None):
    """
    Store JSON user zonefile data to the immutable storage providers, synchronously.
    This is only necessary if we've added/changed/removed immutable data.

    Return (True, hash(user zonefile)) on success
    Return (False, None) on failure
    """
    drivers = storage_drivers if storage_drivers is not None else []

    # a legacy profile cannot be rendered as a standard zone file
    assert not blockstack_profiles.is_profile_in_legacy_format(user_zonefile), 'User zonefile is a legacy profile'
    assert user_db.is_user_zonefile(user_zonefile), 'Not a user zonefile (maybe a custom legacy profile?)'

    # serialize and send off
    serialized = blockstack_zones.make_zone_file(user_zonefile, origin=name, ttl=USER_ZONEFILE_TTL)
    return store_name_zonefile_data(name, serialized, txid, storage_drivers=drivers)
def list_mutable_data( name, proxy=None, wallet_keys=None ):
    """
    List the names and versions of all mutable data in a user's zonefile

    Returns {"data": [{"data_id": data ID, "version": version}]}
    """
    proxy = get_default_proxy() if proxy is None else proxy

    user_profile, user_zonefile = get_name_profile( name, proxy=proxy, wallet_keys=wallet_keys )
    if user_zonefile is None:
        # user_profile will contain an error message
        return user_profile

    is_legacy = blockstack_profiles.is_profile_in_legacy_format( user_zonefile )
    if is_legacy or not user_db.is_user_zonefile( user_zonefile ):
        # zonefile is really a legacy profile; it carries no mutable data records
        return {"data": []}

    listing = [
        {"data_id": data_id, "version": version}
        for (data_id, version) in user_db.list_mutable_data( user_profile )
    ]

    return {"data": listing}
def list_immutable_data( name, proxy=None ):
    """
    List the names and hashes of all immutable data in a user's zonefile.

    Returns {"data": [{"data_id": data_id, "hash": hash}]} on success
    """
    proxy = get_default_proxy() if proxy is None else proxy

    user_zonefile = get_name_zonefile(name, proxy=proxy)
    if user_zonefile is None:
        return {'error': 'No user zonefile defined'}

    if 'error' in user_zonefile:
        return user_zonefile

    is_legacy = blockstack_profiles.is_profile_in_legacy_format( user_zonefile )
    if is_legacy or not user_db.is_user_zonefile( user_zonefile ):
        # zonefile is really a legacy profile; it carries no immutable data records
        return {"data": []}

    listing = [
        {"data_id": data_id, "hash": data_hash}
        for (data_id, data_hash) in user_db.list_immutable_data( user_zonefile )
    ]

    return {"data": listing}
def delete_immutable(name, data_key, data_id=None, proxy=None, txid=None, wallet_keys=None):
    """
    delete_immutable

    Remove an immutable datum from a name's profile, given by @data_key.
    Either @data_key or @data_id must be given; if only @data_id is given,
    the hash is looked up in the zonefile.  Unless @txid is supplied, a name
    UPDATE transaction is broadcast to commit the new zonefile hash, and the
    new zonefile is replicated; finally the datum itself is deleted from the
    storage providers.

    Return a dict with {'status': True} on success (also includes
    'zonefile_hash' and 'transaction_hash')
    Return a dict with {'error': ...} on failure
    """
    from backend.nameops import do_update

    if proxy is None:
        proxy = get_default_proxy()

    user_zonefile = get_name_zonefile( name, proxy=proxy, include_name_record=True )
    if user_zonefile is None or 'error' in user_zonefile:
        if user_zonefile is None:
            return {'error': 'No user zonefile'}
        else:
            return user_zonefile

    name_record = user_zonefile['name_record']
    del user_zonefile['name_record']

    if blockstack_profiles.is_profile_in_legacy_format( user_zonefile ) or not user_db.is_user_zonefile( user_zonefile ):
        # zonefile is a legacy profile.  There is no immutable data
        log.info("Profile is in legacy format.  No immutable data.")
        return {'status': True}

    if data_key is None:
        if data_id is None:
            return {'error': 'No data hash or data ID given'}

        # look up the key (or list of keys) by data ID.
        # shouldn't be a list--this tool prevents that--but deal with it nevertheless
        data_key = user_db.get_immutable_data_hash( user_zonefile, data_id )
        if isinstance(data_key, list):
            return {'error': "Multiple hashes for '%s': %s" % (data_id, ",".join(data_key))}

        if data_key is None:
            return {'error': "No hash for '%s'" % data_id}

    # already deleted?
    if not user_db.has_immutable_data( user_zonefile, data_key ):
        return {'status': True}

    # remove from the zonefile
    user_db.remove_immutable_data_zonefile( user_zonefile, data_key )
    zonefile_hash = hash_zonefile( user_zonefile )

    if txid is None:
        # actually send the transaction
        _, payment_privkey = get_payment_keypair(wallet_keys=wallet_keys, config_path=proxy.conf['path'])
        _, owner_privkey = get_owner_keypair(wallet_keys=wallet_keys, config_path=proxy.conf['path'])

        utxo_client = get_utxo_provider_client( config_path=proxy.conf['path'] )
        broadcaster_client = get_tx_broadcaster( config_path=proxy.conf['path'] )

        update_result = do_update( name, zonefile_hash, owner_privkey, payment_privkey, utxo_client, broadcaster_client, config_path=proxy.conf['path'], proxy=proxy )
        if 'error' in update_result:
            # failed to remove from zonefile
            return update_result

        txid = update_result['transaction_hash']

    result = {
        'zonefile_hash': zonefile_hash,
        'transaction_hash': txid
    }

    # put new zonefile
    rc = store_name_zonefile( name, user_zonefile, txid )
    if not rc:
        result['error'] = 'Failed to put new zonefile'
        return result

    # delete immutable data from the storage providers
    data_privkey = get_data_or_owner_privkey( user_zonefile, name_record['address'], wallet_keys=wallet_keys, config_path=proxy.conf['path'] )
    if 'error' in data_privkey:
        return {'error': data_privkey['error']}
    else:
        data_privkey = data_privkey['privatekey']
        assert data_privkey is not None

    rc = storage.delete_immutable_data( data_key, txid, data_privkey )
    if not rc:
        result['error'] = 'Failed to delete immutable data'
        return result
    else:
        result['status'] = True
        return result
def get_mutable(name, data_id, proxy=None, ver_min=None, ver_max=None, ver_check=None, conf=None, wallet_keys=None):
    """
    get_mutable

    Fetch a piece of mutable data.  Use @data_id to look it up in the
    user's profile, and then fetch and verify the data itself from the
    configured storage providers.

    If @ver_min is given, ensure the data's version is greater or equal to it.
    If @ver_max is given, ensure the data's version is less than it.
    If @ver_check is given, it must be a callable that takes the name,
    data and version and returns True/False.  When given, it replaces the
    locally-stored expected-version check.

    Return {'data': the data, 'version': the version} on success
    Return {'error': ...} on error
    """
    if proxy is None:
        proxy = get_default_proxy()

    if conf is None:
        conf = proxy.conf

    fq_data_id = storage.make_fq_data_id( name, data_id )

    user_profile, user_zonefile = get_name_profile( name, proxy=proxy, wallet_keys=wallet_keys, include_name_record=True )
    if user_profile is None:
        return user_zonefile    # will be an error message

    # recover name record
    name_record = user_zonefile['name_record']
    del user_zonefile['name_record']

    if blockstack_profiles.is_profile_in_legacy_format( user_zonefile ) or not user_db.is_user_zonefile( user_zonefile ):
        # profile has not been converted to the new zonefile format yet.
        return {'error': 'Profile is in a legacy format that does not support mutable data.'}

    # get the mutable data zonefile
    if not user_db.has_mutable_data( user_profile, data_id ):
        return {'error': "No such mutable datum"}

    mutable_data_zonefile = user_db.get_mutable_data_zonefile( user_profile, data_id )
    assert mutable_data_zonefile is not None, "BUG: could not look up mutable datum '%s'.'%s'" % (name, data_id)

    # get user's data public key and owner address
    data_pubkey = user_db.user_zonefile_data_pubkey( user_zonefile )
    data_address = name_record['address']
    if data_pubkey is None:
        # no data key in the zonefile; signatures are checked against the owner address
        log.warn("Falling back to owner address for authentication")

    # get the mutable data itself
    urls = user_db.mutable_data_zonefile_urls( mutable_data_zonefile )
    mutable_data = storage.get_mutable_data(fq_data_id, data_pubkey, urls=urls, data_address=data_address )
    if mutable_data is None:
        return {'error': "Failed to look up mutable datum"}

    # NOTE(review): version is loaded under (name, data_id) but stored below
    # under fq_data_id -- looks inconsistent; confirm against
    # load_mutable_data_version / store_mutable_data_version
    expected_version = load_mutable_data_version( conf, name, data_id )
    if expected_version is None:
        expected_version = 0

    # check consistency against the version recorded in the profile
    version = user_db.mutable_data_version( user_profile, data_id )
    if ver_min is not None and ver_min > version:
        return {'error': 'Mutable data is stale'}

    if ver_max is not None and ver_max <= version:
        return {'error': 'Mutable data is in the future'}

    if ver_check is not None:
        rc = ver_check( name, mutable_data, version )
        if not rc:
            return {'error': 'Mutable data consistency check failed'}

    elif expected_version > version:
        return {'error': 'Mutable data is stale; a later version was previously fetched'}

    # remember this version for the next fetch
    rc = store_mutable_data_version( conf, fq_data_id, version )
    if not rc:
        return {'error': 'Failed to store consistency information'}

    return {'data': mutable_data, 'version': version}