def blockstack_get_zonefile(zonefile_hash):
    """
    Get a zonefile from the RPC endpoint.
    Return the parsed zonefile dict on success.
    Return None if the RPC call fails, the hash is unknown, or the
    returned data does not hash to zonefile_hash.

    MEANT FOR DIAGNOSTIC PURPOSES ONLY
    """
    test_proxy = make_proxy()
    blockstack_client.set_default_proxy(test_proxy)

    zonefile_result = test_proxy.get_zonefiles([zonefile_hash])
    if 'error' in zonefile_result:
        return None

    # membership test directly on the dict; no need to build .keys()
    if zonefile_hash not in zonefile_result['zonefiles']:
        return None

    zonefile = blockstack_zones.parse_zone_file(zonefile_result['zonefiles'][zonefile_hash])

    # verify the data actually hashes to the requested hash
    if zonefile_hash != blockstack_client.hash_zonefile(zonefile):
        return None

    # BUGFIX: return the zonefile we already parsed instead of
    # redundantly parsing the same text a second time
    return zonefile
def decode_name_zonefile(name, zonefile_txt, allow_legacy=False):
    """
    Decode a serialized zonefile into a JSON dict.
    If allow_legacy is True, then support legacy zone file formats
    (including Onename profiles).
    Otherwise, the data must actually be a Blockstack zone file:
    * if the zonefile does not have $ORIGIN, or if $ORIGIN does not
      match the name, then this fails.

    Return the zone file dict on success.
    Return {'error': ...} if the text is a legacy format and allow_legacy is False.
    Return None on error.
    """
    user_zonefile = None
    try:
        # by default, it's a zonefile-formatted text file
        user_zonefile_defaultdict = blockstack_zones.parse_zone_file(zonefile_txt)
        assert user_db.is_user_zonefile(user_zonefile_defaultdict), 'Not a user zonefile'

        # force dict
        user_zonefile = dict(user_zonefile_defaultdict)

    except (IndexError, ValueError, blockstack_zones.InvalidLineException):
        if not allow_legacy:
            return {'error': 'Legacy zone file'}

        # might be legacy profile
        log.debug('WARN: failed to parse user zonefile; trying to import as legacy')
        try:
            user_zonefile = json.loads(zonefile_txt)
            if not isinstance(user_zonefile, dict):
                log.debug('Not a legacy user zonefile')
                return None

        except Exception as e:
            if BLOCKSTACK_DEBUG is not None:
                log.exception(e)

            log.error('Failed to parse non-standard zonefile')
            return None

    except Exception as e:
        log.exception(e)
        log.error('Failed to parse zonefile')
        return None

    if user_zonefile is None:
        return None

    if not allow_legacy:
        # additional checks: $ORIGIN must be present and must match the name.
        # FIX: dict.has_key() is deprecated (removed in Python 3); use 'in'.
        if '$origin' not in user_zonefile:
            log.debug("Zonefile has no $ORIGIN")
            return None

        if user_zonefile['$origin'] != name:
            log.debug("Name/zonefile mismatch: $ORIGIN = {}, name = {}".format(user_zonefile['$origin'], name))
            return None

    return user_zonefile
def decode_name_zonefile(zonefile_txt):
    """
    Decode a serialized zonefile into a JSON dict.
    Falls back to treating the text as a legacy JSON profile if it does
    not parse as a zone file.
    Return the dict on success; return None on error.
    """
    user_zonefile = None
    try:
        # by default, it's a zonefile-formatted text file
        user_zonefile_defaultdict = blockstack_zones.parse_zone_file(zonefile_txt)
        assert user_db.is_user_zonefile(user_zonefile_defaultdict), "Not a user zonefile"

        # force dict
        user_zonefile = dict(user_zonefile_defaultdict)

    except (IndexError, ValueError, blockstack_zones.InvalidLineException):
        # might be legacy profile
        log.debug("WARN: failed to parse user zonefile; trying to import as legacy")
        try:
            user_zonefile = json.loads(zonefile_txt)
            # FIX: isinstance instead of type comparison
            if not isinstance(user_zonefile, dict):
                log.debug("Not a legacy user zonefile")
                return None

        except Exception as e:
            # FIX: 'except Exception, e' is Python-2-only syntax;
            # 'as e' works on Python 2.6+ and Python 3
            if os.environ.get("BLOCKSTACK_DEBUG", None) == "1":
                log.exception(e)

            log.error("Failed to parse non-standard zonefile")
            return None

    # BUGFIX: the original fell off the end and implicitly returned None
    # even when parsing succeeded
    return user_zonefile
def update_dns(self):
    """
    Parse each configured zone file and push its records (for the managed
    record types only) to the zone's DNS backend.

    Raises:
        NotImplementedError: if a zone names a backend not in BACKENDS.
    """
    from blockstack_zones import parse_zone_file

    r = self.local_renderer
    for zone_info in r.env.zones:
        # BUGFIX: the original reused the loop variable 'zone_data' for the
        # file text and then for the parsed dict, clobbering the zone config
        zone_file = zone_info['file']
        domain = zone_info['domain']
        backend = zone_info['backend']
        types = zone_info['types']
        if backend not in BACKENDS:
            raise NotImplementedError('Unsupported backend: %s' % backend)

        print('Processing zone file %s for domain %s.' % (zone_file, domain))

        # BUGFIX: close the zone file when done reading it
        with open(zone_file) as fh:
            zone_data = parse_zone_file(fh.read())

        if self.verbose:
            pprint(dict(zone_data), indent=4)

        #TODO:add differential update using get_last_zonefile()

        # Only update record types we're specifically in charge of managing.
        for record_type in types:
            record_type = record_type.lower()
            for record in zone_data.get(record_type):
                getattr(self, 'update_dns_%s' % backend)(domain=domain, record_type=record_type, record=record)
def get_cached_zonefile(zonefile_hash, zonefile_dir=None):
    """
    Get a cached zonefile from local disk.
    Verifies the cached bytes against zonefile_hash and evicts corrupt entries.
    Return the parsed zonefile dict on success; return None if not found
    or unparseable.
    """
    if zonefile_dir is None:
        zonefile_dir = get_zonefile_dir()

    zonefile_path = os.path.join(zonefile_dir, zonefile_hash)
    if not os.path.exists(zonefile_path):
        return None

    with open(zonefile_path, "r") as f:
        data = f.read()

    # sanity check: evict cache entries that no longer match their hash
    if not verify_zonefile(data, zonefile_hash):
        log.debug("Corrupt zonefile '%s'; uncaching" % zonefile_hash)
        remove_cached_zonefile(zonefile_hash, zonefile_dir=zonefile_dir)
        return None

    try:
        zonefile_dict = blockstack_zones.parse_zone_file(data)
        assert blockstack_client.is_user_zonefile(zonefile_dict), "Not a user zonefile: %s" % zonefile_hash
        return zonefile_dict
    except Exception as e:
        # FIX: 'except Exception, e' is Python-2-only syntax; also log the
        # exception itself so parse failures are debuggable
        log.exception(e)
        log.error("Failed to parse zonefile")
        return None
def test_zone_file_parsing_2(self):
    """Parse sample_2 and verify record class/TTL bookkeeping on A records."""
    parsed = parse_zone_file(zone_files["sample_2"])
    # print(json.dumps(parsed, indent=2))
    self.assertTrue(isinstance(parsed, dict))
    for section in ("a", "cname", "alias", "$ttl", "$origin"):
        self.assertTrue(section in parsed)

    a_records = dict((rec["name"], rec) for rec in parsed["a"])

    # Confirm that all records have class "IN"
    self.assertTrue(all(rec["class"] == "IN" for rec in a_records.values()))

    # TTL and no CLASS
    self.assertEqual(a_records["server1"].get("_missing_class"), True)
    # CLASS and no TTL
    self.assertEqual(a_records["server2"].get("_missing_class"), None)
    # TTL and no CLASS
    self.assertEqual(a_records["server3"].get("ttl"), 3600)
    self.assertEqual(a_records["server3"].get("_missing_class"), True)
    # TTL and CLASS
    self.assertEqual(a_records["dns1"].get("ttl"), 3600)
    self.assertEqual(a_records["dns1"].get("_missing_class"), None)
    # Reversed TTL and CLASS field order
    self.assertEqual(a_records["dns2"].get("ttl"), 3600)
    self.assertEqual(a_records["dns2"].get("_missing_class"), None)
def get_zonefile_from_storage(zonefile_hash, drivers=None):
    """
    Get a zonefile from our storage drivers.
    Return the zonefile dict on success.
    Raise on error (unknown hash, fetch failure, hash mismatch, parse failure).
    """
    if not is_current_zonefile_hash(zonefile_hash):
        raise Exception("Unknown zonefile hash")

    zonefile_txt = blockstack_client.storage.get_immutable_data(
        zonefile_hash,
        hash_func=blockstack_client.get_blockchain_compat_hash,
        deserialize=False,
        drivers=drivers)

    if zonefile_txt is None:
        raise Exception("Failed to get data")

    # verify the fetched bytes against the expected hash
    if blockstack_client.storage.get_zonefile_data_hash(zonefile_txt) != zonefile_hash:
        raise Exception("Corrupt zonefile: %s" % zonefile_hash)

    # parse
    try:
        user_zonefile = blockstack_zones.parse_zone_file(zonefile_txt)
        assert blockstack_client.is_user_zonefile(user_zonefile), "Not a user zonefile: %s" % zonefile_hash
    except (AssertionError, ValueError):
        # BUGFIX: 'except AssertionError, ValueError' is Python 2 syntax
        # that caught ONLY AssertionError and bound it to the name
        # 'ValueError'; a tuple catches both as intended
        raise Exception("Failed to load zonefile %s" % zonefile_hash)

    # BUGFIX: the original never returned the parsed zonefile, despite
    # documenting "Return the zonefile dict on success"
    return user_zonefile
def get_cached_zonefile( zonefile_hash, zonefile_dir=None ):
    """
    Get a cached zonefile from local disk.
    Verifies the cached bytes against zonefile_hash and evicts corrupt entries.
    Return the parsed zonefile dict on success; return None if not found
    or unparseable.
    """
    if zonefile_dir is None:
        zonefile_dir = get_zonefile_dir()

    zonefile_path = os.path.join( zonefile_dir, zonefile_hash )
    if not os.path.exists( zonefile_path ):
        return None

    with open(zonefile_path, "r") as f:
        data = f.read()

    # sanity check: evict cache entries that no longer match their hash
    if not verify_zonefile( data, zonefile_hash ):
        log.debug("Corrupt zonefile '%s'; uncaching" % zonefile_hash)
        remove_cached_zonefile( zonefile_hash, zonefile_dir=zonefile_dir )
        return None

    try:
        zonefile_dict = blockstack_zones.parse_zone_file( data )
        assert blockstack_client.is_user_zonefile( zonefile_dict ), "Not a user zonefile: %s" % zonefile_hash
        return zonefile_dict
    except Exception as e:
        # FIX: 'except Exception, e' is Python-2-only syntax; also log the
        # exception itself so parse failures are debuggable
        log.exception(e)
        log.error("Failed to parse zonefile")
        return None
def test_zone_file_parsing_txt(self):
    """Parse sample_txt_1 and verify TXT record names, TTLs and payloads."""
    parsed = parse_zone_file(zone_files["sample_txt_1"])
    self.assertTrue(isinstance(parsed, dict))
    for key in ("soa", "mx", "ns", "a", "cname", "$ttl", "$origin", "txt"):
        self.assertTrue(key in parsed)

    txt = parsed["txt"]
    self.assertEqual(txt[0]["name"], "single")
    self.assertEqual(txt[0]["txt"], "everything I do")
    self.assertEqual(txt[1]["name"], "singleTTL")
    self.assertEqual(txt[1]["ttl"], 100)
    self.assertEqual(txt[1]["txt"], "everything I do")
    self.assertEqual(txt[2]["name"], "multi")
    self.assertEqual(txt[2]["txt"], ["everything I do", "I do for you"])
    self.assertEqual(txt[3]["name"], "multiTTL")
    self.assertEqual(txt[3]["ttl"], 100)
    self.assertEqual(txt[3]["txt"], ["everything I do", "I do for you"])
    self.assertEqual(txt[4]["name"], "semiColonText")
    self.assertEqual(txt[4]["ttl"], 3600)
    self.assertEqual(
        txt[4]["txt"],
        "v=DMARC1; p=none; rua=mailto:[email protected]; ruf=mailto:[email protected]; fo=1"
    )
def test_zone_file_creation_txt(self): json_zone_file = zone_file_objects["sample_txt_1"] zone_file = make_zone_file(json_zone_file) print zone_file self.assertTrue(isinstance(zone_file, (unicode, str))) self.assertTrue("$ORIGIN" in zone_file) self.assertTrue("$TTL" in zone_file) self.assertTrue("@ IN SOA" in zone_file) zone_file = parse_zone_file(zone_file) self.assertTrue(isinstance(zone_file, dict)) self.assertTrue("soa" in zone_file) self.assertTrue("mx" in zone_file) self.assertTrue("ns" in zone_file) self.assertTrue("a" in zone_file) self.assertTrue("cname" in zone_file) self.assertTrue("$ttl" in zone_file) self.assertTrue("$origin" in zone_file) self.assertTrue("txt" in zone_file) self.assertEqual(zone_file["txt"][0]["name"], "single") self.assertEqual(zone_file["txt"][0]["txt"], "everything I do") self.assertEqual(zone_file["txt"][1]["name"], "singleTTL") self.assertEqual(zone_file["txt"][1]["ttl"], 100) self.assertEqual(zone_file["txt"][1]["txt"], "everything I do") self.assertEqual(zone_file["txt"][2]["name"], "multi") self.assertEqual(zone_file["txt"][2]["txt"], ["everything I do", "I do for you"]) self.assertEqual(zone_file["txt"][3]["name"], "multiTTL") self.assertEqual(zone_file["txt"][3]["ttl"], 100) self.assertEqual(zone_file["txt"][3]["txt"], ["everything I do", "I do for you"])
def decode_name_zonefile(name, zonefile_txt, allow_legacy=False):
    """
    Decode a serialized zonefile into a JSON dict.
    If allow_legacy is True, then support legacy zone file formats
    (including Onename profiles).
    Otherwise, the data must actually be a Blockstack zone file:
    * if the zonefile does not have $ORIGIN, or if $ORIGIN does not
      match the name, then this fails.

    Return the zone file dict on success.
    Return {'error': ...} if the text is a legacy format and allow_legacy is False.
    Return None on error.
    """
    user_zonefile = None
    try:
        # by default, it's a zonefile-formatted text file
        user_zonefile_defaultdict = blockstack_zones.parse_zone_file(zonefile_txt)
        assert user_db.is_user_zonefile(user_zonefile_defaultdict), 'Not a user zonefile'

        # force dict
        user_zonefile = dict(user_zonefile_defaultdict)

    except (IndexError, ValueError, blockstack_zones.InvalidLineException):
        if not allow_legacy:
            return {'error': 'Legacy zone file'}

        # might be legacy profile
        log.debug('WARN: failed to parse user zonefile; trying to import as legacy')
        try:
            user_zonefile = json.loads(zonefile_txt)
            if not isinstance(user_zonefile, dict):
                log.debug('Not a legacy user zonefile')
                return None

        except Exception as e:
            if BLOCKSTACK_DEBUG:
                log.exception(e)

            log.error('Failed to parse non-standard zonefile')
            return None

    except Exception as e:
        if BLOCKSTACK_DEBUG:
            log.exception(e)

        log.error('Failed to parse zonefile')
        return None

    if user_zonefile is None:
        return None

    if not allow_legacy:
        # additional checks: $ORIGIN must be present and must match the name.
        # FIX: dict.has_key() is deprecated (removed in Python 3); use 'in'.
        if '$origin' not in user_zonefile:
            log.debug("Zonefile has no $ORIGIN")
            return None

        if user_zonefile['$origin'] != name:
            log.debug("Name/zonefile mismatch: $ORIGIN = {}, name = {}".format(user_zonefile['$origin'], name))
            return None

    return user_zonefile
def test_zone_file_parsing_2(self):
    """Parse sample_2 and verify the expected top-level sections exist."""
    parsed = parse_zone_file(zone_files["sample_2"])
    #print json.dumps(parsed, indent=2)
    self.assertTrue(isinstance(parsed, dict))
    for key in ("a", "cname", "$ttl", "$origin"):
        self.assertTrue(key in parsed)
def test_zone_file_parsing_2(self):
    """Parse sample_2 and verify the expected sections, including alias."""
    parsed = parse_zone_file(zone_files["sample_2"])
    self.assertTrue(isinstance(parsed, dict))
    for key in ("a", "cname", "alias", "$ttl", "$origin"):
        self.assertTrue(key in parsed)
def test_zone_file_parsing_1(self):
    """Parse sample_1 and verify the expected top-level sections exist."""
    parsed = parse_zone_file(zone_files["sample_1"])
    print(json.dumps(parsed, indent=2))
    self.assertTrue(isinstance(parsed, dict))
    for key in ("a", "cname", "alias", "mx", "$ttl", "$origin"):
        self.assertTrue(key in parsed)
def get_zonefile(domain):
    """
    Fetch a domain's zone file over the REST API and parse it.
    Raises Exception on a non-200 response or an empty zonefile.
    """
    resp = rest_to_api("/v1/names/{}/zonefile".format(domain))
    if resp.status_code != 200:
        log.error("Error fetch zonefile for {} : {} {}".format(domain, resp.status_code, resp.text))
        raise Exception("Failed to fetch zonefile")

    zf_raw = resp.json()["zonefile"]
    if not zf_raw:
        raise Exception("No zonefile returned")

    return blockstack_zones.parse_zone_file(str(zf_raw))
def parse_uri_from_zone_file(zone_file):
    """
    Parse zone file text and return the 'target' URL of URI record index 2.
    Return None if the zone file has no such record.
    """
    zone_file = dict(parse_zone_file(zone_file))

    # BUGFIX: don't raise KeyError when the zone file has no URI records at all
    if "uri" not in zone_file:
        return None

    if isinstance(zone_file["uri"], list) and len(zone_file["uri"]) > 0:
        # BUGFIX: the 'len > 0' guard did not prevent an IndexError when
        # fewer than 3 URI records exist, since index 2 is read below.
        # NOTE(review): index 2 with a variable named 'first_uri_record'
        # looks suspicious -- confirm whether record 0 was intended.
        if len(zone_file["uri"]) > 2 and "target" in zone_file["uri"][2]:
            first_uri_record = zone_file["uri"][2]
            token_file_url = first_uri_record["target"]
            return token_file_url

    return None
def generate_zone_file(zone_name): """ Generate a zone file containing all the records for a zone. """ # Load the base records for the zone from a static zone file zone_base = build_zone_base_template(zone_name) records = parse_zone_file(zone_base) # Check if we are generating the zone that returns proxy A records proxy_domain = current_app.config["PROXY_ZONE"] if proxy_domain.endswith(zone_name): # Determine the subdomain where proxy A records will be listed proxy_subdomain = proxy_domain.split(zone_name)[0].strip(".") # Add all online entry proxies to round-robin on this subdomain proxy_records = select_proxy_a_records(proxy_subdomain) for record_type in ["a", "aaaa"]: records[record_type].extend(proxy_records[record_type]) # Add all subdomains and associated TXT records for this zone domains = Domain.query.filter_by(zone=zone_name, deleted=False).all() for domain in domains: # Create the CNAME or ALIAS record pointing to the proxy record = { 'name': domain.subdomain, 'ttl': current_app.config["A_RECORD_TTL"] } if current_app.config.get("USE_ALIAS_RECORDS"): record["host"] = current_app.config["PROXY_ZONE"] records["alias"].append(record) else: record["alias"] = current_app.config["PROXY_ZONE"] records["cname"].append(record) # Create the TXT record with the domain->onion address mapping records["txt"].append({ 'name': domain.txt_label, 'txt': "onion={}".format(domain.onion_address), 'ttl': current_app.config["TXT_RECORD_TTL"] }) # Fixes a bug in `zone_file` which places the SOA record inside a list records["soa"] = records["soa"].pop() # Bump the serial number in the SOA records["soa"]["serial"] = int(time.time()) return make_zone_file(records)
def get_cached_zonefile( zonefile_hash, zonefile_dir=None ):
    """
    Get a cached zonefile dict from local disk.
    Return the parsed zonefile dict on success; return None if not found
    or unparseable.
    """
    data = get_cached_zonefile_data( zonefile_hash, zonefile_dir=zonefile_dir )
    if data is None:
        return None

    try:
        zonefile_dict = blockstack_zones.parse_zone_file( data )
        assert blockstack_client.is_user_zonefile( zonefile_dict ), "Not a user zonefile: %s" % zonefile_hash
        return zonefile_dict
    except Exception as e:
        # FIX: 'except Exception, e' is Python-2-only syntax; also log the
        # exception itself so parse failures are debuggable
        log.exception(e)
        log.error("Failed to parse zonefile")
        return None
def get_token_file_url_from_zone_file(zone_file):
    """
    Extract the token file URL (the 'target' of the first URI record) from
    a zone file given either as a parsed dict or as serialized text.
    Return None if there is no suitable URI record.
    Raise ValueError if zone_file is neither a dict nor a string.
    """
    if isinstance(zone_file, (str, unicode)):
        zone_file = dict(parse_zone_file(zone_file))
    elif not isinstance(zone_file, dict):
        raise ValueError("Invalid zone file format")

    uri_records = zone_file.get("uri")
    if isinstance(uri_records, list) and len(uri_records) > 0 and "target" in uri_records[0]:
        return uri_records[0]["target"]

    return None
def parse_uri_from_zone_file(zone_file):
    """
    Parse zone file text and return the 'target' of the first URI record
    whose name is '_http._tcp'. Return None if no such record exists.
    """
    token_file_url = None
    parsed = dict(parse_zone_file(zone_file))
    uri_records = parsed["uri"]
    if isinstance(uri_records, list) and len(uri_records) > 0:
        # scan in order; stop at the first matching record
        for record in uri_records:
            if record.get('name') == '_http._tcp':
                token_file_url = record["target"]
                break
    return token_file_url
def _parse_zone_file(path):
    """
    Parse the zone file.

    Args:
        path (str): Path to the zone file

    Returns:
        str: Domain name
        dict[]: Domain records
    """
    log.info('Parsing %s...', path)

    with open(path, 'r') as f:
        zone_file = parse_zone_file(f.read())

    # '$origin' is removed from the dict; remaining keys are record types
    domain_name = zone_file.pop('$origin').strip('.')
    # IDIOM: sum over a generator expression -- no intermediate list needed
    record_count = sum(len(records) for records in zone_file.itervalues())
    log.info('Parsed %d records for %s.', record_count, domain_name)
    return domain_name, zone_file
def format_profile(profile, fqa, zone_file, address, public_key):
    """
    Process profile data and
    1) Insert verifications
    2) Check if profile data is valid JSON

    Returns a dict with keys: profile, zone_file, public_key,
    owner_address, verifications.
    """
    # if the zone file is a string, then parse it
    if isinstance(zone_file, (str, unicode)):
        try:
            zone_file = blockstack_zones.parse_zone_file(zone_file)
        except Exception:
            # deliberately best-effort: leave as text if unparseable.
            # FIX: narrowed from a bare 'except:', which would also swallow
            # KeyboardInterrupt/SystemExit
            pass

    data = {
        'profile': profile,
        'zone_file': zone_file,
        'public_key': public_key,
        'owner_address': address
    }

    if not fqa.endswith('.id'):
        data['verifications'] = ["No verifications for non-id namespaces."]
        return data

    profile_in_legacy_format = is_profile_in_legacy_format(profile)

    if not profile_in_legacy_format:
        data['verifications'] = fetch_proofs(data['profile'], fqa, address,
                                             profile_ver=3, zonefile=zone_file)
    else:
        # FIX: isinstance instead of type comparison
        if not isinstance(profile, dict):
            data['profile'] = json.loads(profile)
        data['verifications'] = fetch_proofs(data['profile'], fqa, address)

    return data
def load_name_zonefile(name, expected_zonefile_hash, storage_drivers=None):
    """
    Fetch and load a user zonefile from the storage implementation with the
    given hex string hash. The user zonefile hash should have been loaded
    from the blockchain, and thereby be the authentic hash.

    Return the user zonefile (as a dict) on success
    Return None on error
    """
    zonefile_txt = storage.get_immutable_data(expected_zonefile_hash,
                                              hash_func=storage.get_zonefile_data_hash,
                                              fqu=name, zonefile=True,
                                              deserialize=False,
                                              drivers=storage_drivers)
    if zonefile_txt is None:
        log.error("Failed to load user zonefile '%s'" % expected_zonefile_hash)
        return None

    user_zonefile = None
    try:
        # by default, it's a zonefile-formatted text file
        user_zonefile_defaultdict = blockstack_zones.parse_zone_file( zonefile_txt )
        assert user_db.is_user_zonefile( user_zonefile_defaultdict ), "Not a user zonefile"

        # force dict
        tmp = {}
        tmp.update(user_zonefile_defaultdict)
        user_zonefile = tmp

    except (IndexError, ValueError, blockstack_zones.InvalidLineException):
        # might be legacy profile
        log.debug("WARN: failed to parse user zonefile; trying to import as legacy")
        try:
            user_zonefile = json.loads(zonefile_txt)
            # FIX: isinstance instead of type comparison
            if not isinstance(user_zonefile, dict):
                log.debug("Not a legacy user zonefile")
                return None

        except Exception as e:
            # FIX: 'except Exception, e' is Python-2-only syntax
            log.exception(e)
            log.error("Failed to parse zonefile")
            return None

    # BUGFIX: the original fell off the end and implicitly returned None
    # even when parsing succeeded
    return user_zonefile
def test_zone_file_parsing_txt(self):
    """Parse sample_txt_1 and verify the four TXT records it contains."""
    parsed = parse_zone_file(zone_files["sample_txt_1"])
    self.assertTrue(isinstance(parsed, dict))
    for key in ("soa", "mx", "ns", "a", "cname", "$ttl", "$origin", "txt"):
        self.assertTrue(key in parsed)

    txt = parsed["txt"]
    self.assertEqual(txt[0]["name"], "single")
    self.assertEqual(txt[0]["txt"], "everything I do")
    self.assertEqual(txt[1]["name"], "singleTTL")
    self.assertEqual(txt[1]["ttl"], 100)
    self.assertEqual(txt[1]["txt"], "everything I do")
    self.assertEqual(txt[2]["name"], "multi")
    self.assertEqual(txt[2]["txt"], ["everything I do", "I do for you"])
    self.assertEqual(txt[3]["name"], "multiTTL")
    self.assertEqual(txt[3]["ttl"], 100)
    self.assertEqual(txt[3]["txt"], ["everything I do", "I do for you"])
def get_zonefile_from_storage( zonefile_hash, drivers=None ):
    """
    Get a zonefile from our storage drivers.
    Return the zonefile dict on success.
    Raise on error (unknown hash, fetch failure, hash mismatch, parse failure).
    """
    if not is_current_zonefile_hash( zonefile_hash ):
        raise Exception("Unknown zonefile hash")

    zonefile_txt = blockstack_client.storage.get_immutable_data(
        zonefile_hash,
        hash_func=blockstack_client.get_blockchain_compat_hash,
        deserialize=False,
        drivers=drivers )

    if zonefile_txt is None:
        raise Exception("Failed to get data")

    # verify the fetched bytes against the expected hash
    if blockstack_client.storage.get_zonefile_data_hash( zonefile_txt ) != zonefile_hash:
        raise Exception("Corrupt zonefile: %s" % zonefile_hash)

    # parse
    try:
        user_zonefile = blockstack_zones.parse_zone_file( zonefile_txt )
        assert blockstack_client.is_user_zonefile( user_zonefile ), "Not a user zonefile: %s" % zonefile_hash
    except (AssertionError, ValueError):
        # BUGFIX: 'except AssertionError, ValueError' is Python 2 syntax
        # that caught ONLY AssertionError and bound it to the name
        # 'ValueError'; a tuple catches both as intended
        raise Exception("Failed to load zonefile %s" % zonefile_hash)

    # BUGFIX: the original never returned the parsed zonefile, despite
    # documenting "Return the zonefile dict on success"
    return user_zonefile
def check( state_engine ):
    """
    Scenario check: verify that foo_0.test .. foo_9.test were registered to
    wallets[3] with a value hash set, and that each announced zonefile is
    cached on disk by the Atlas subsystem. Returns True on success, False
    (after printing a diagnostic) on the first failed check.
    """
    global synchronized, atlasdb_path, value_hashes, working_dir, atlas_dir

    if not synchronized:
        print "not synchronized"
        return False

    # not revealed, but ready
    ns = state_engine.get_namespace_reveal( "test" )
    if ns is not None:
        print "namespace not ready"
        return False

    ns = state_engine.get_namespace( "test" )
    if ns is None:
        print "no namespace"
        return False

    if ns['namespace_id'] != 'test':
        print "wrong namespace"
        return False

    for i in xrange(0, 10):
        name = 'foo_{}.test'.format(i)

        # not preordered (the preorder should have been consumed by the register)
        preorder = state_engine.get_name_preorder( name, virtualchain.make_payment_script(wallets[2].addr), wallets[3].addr )
        if preorder is not None:
            print "still have preorder"
            return False

        # registered
        name_rec = state_engine.get_name( name )
        if name_rec is None:
            print "name does not exist"
            return False

        # owned
        if name_rec['address'] != wallets[3].addr or name_rec['sender'] != virtualchain.make_payment_script(wallets[3].addr):
            print "name has wrong owner"
            return False

        # updated
        if name_rec['value_hash'] is None:
            print "wrong value hash: %s" % name_rec['value_hash']
            return False

    for i in xrange(0, len(value_hashes)):
        name = 'foo_{}.test'.format(i)
        value_hash = value_hashes[i]

        '''
        # atlas logic tried storage (either this node or the atlas peer)
        zfinfo = blockstack.atlasdb_get_zonefile( value_hash, path=atlasdb_path )
        if not zfinfo['tried_storage']:
            zfinfo2 = blockstack.atlasdb_get_zonefile( value_hash, path=os.path.join(atlas_dir, "localhost:17000/atlas.db") )
            if not zfinfo2['tried_storage']:
                print "didn't get zonefile from storage: test node: %s, atlas peer: %s" % (zfinfo, zfinfo2)
                return False
        '''

        '''
        # zonefile stored to disk?
        zfdata = blockstack_client.zonefile.load_name_zonefile(name, value_hash, storage_drivers=['disk'])
        if zfdata is None:
            print "failed to load zonefile %s from disk" % value_hash
            return False
        '''

        # zonefile cached?
        cached_zonefile_txt = blockstack.lib.storage.get_atlas_zonefile_data( value_hash, zonefile_dir )
        if cached_zonefile_txt is None:
            print "no cached zonefile %s in %s" % (value_hash, zonefile_dir)
            return False

        # parse as a sanity check (raises on malformed data)
        cached_zonefile = blockstack_zones.parse_zone_file(cached_zonefile_txt)

    return True
def put_data(data_id, data_txt, zonefile=False, fqu=None):
    """
    Put data or a zonefile to the server.

    If zonefile is True, data_txt must be a parseable zone file and is
    replicated via put_zonefiles. Otherwise, if data_id equals fqu, data_txt
    is treated as a profile: it is signed with the wallet's data (or owner)
    private key and replicated via put_profile. Any other combination fails.

    Returns True on success, False on failure (or raises under
    BLOCKSTACK_TEST when the data is neither profile nor zonefile).
    """
    import blockstack_client

    if os.environ.get("BLOCKSTACK_RPC_PID", None) == str(os.getpid()):
        # don't talk to ourselves
        log.debug("Do not put_data to ourselves")
        return False

    url = "http://%s:%s/RPC2" % (SERVER_NAME, SERVER_PORT)
    ses = xmlrpclib.ServerProxy(url, allow_none=True)

    if zonefile:
        # must be a zonefile; parse purely to validate (result is unused)
        try:
            zf = blockstack_zones.parse_zone_file(data_txt)
        except:
            log.error("Failed to parse zone file for %s" % data_id)
            return False

        log.debug("Replicate zonefile for %s" % data_id)
        res_json = ses.put_zonefiles([base64.b64encode(data_txt)])
        try:
            res = json.loads(res_json)
        except:
            log.error("Invalid non-JSON response")
            return False

        if 'error' in res:
            log.error("Failed to put %s: %s" % (data_id, data_txt))
            return False

        elif len(res['saved']) != 1 or res['saved'][0] != 1:
            # server reports per-zonefile save status in 'saved'
            log.error("Server %s:%s failed to save %s" % (SERVER_NAME, SERVER_PORT, data_id))
            return False

        else:
            return True

    elif data_id == fqu:
        log.debug("Replicate profile for %s" % data_id)

        # get current profile (best-effort; fall back to empty string)
        cur_profile_txt = get_data(data_id, zonefile=False)
        if cur_profile_txt is None:
            log.warning("Could not get profile for %s" % data_id)
            cur_profile_txt = ""

        # get the data private key (or owner private key if not given)
        wallet_info = blockstack_client.get_wallet()
        data_privkey = wallet_info.get('data_privkey', None)
        if data_privkey is None:
            data_privkey = wallet_info.get('owner_privkey', None)

        # sign this request: signature covers (hash of current profile + new profile text)
        cur_profile_hash = pybitcoin.hex_hash160(cur_profile_txt)
        sigb64 = blockstack_client.storage.sign_raw_data("%s%s" % (cur_profile_hash, data_txt), data_privkey)

        # include signature
        res = ses.put_profile(data_id, data_txt, cur_profile_hash, sigb64)
        if 'error' in res:
            log.error("Failed to put %s: %s" % (data_id, res))
            return False
        else:
            return True

    else:
        # neither profile nor zonefile
        if os.environ.get("BLOCKSTACK_TEST", None) is not None:
            # for testing: fail loudly
            raise Exception("Failed to replicate profile or zonefile")
        else:
            return False
def check(state_engine):
    """
    Scenario check: verify foo.test was registered to wallets[3], then fetch
    its zonefile (by name and by hash) and profile from the local blockstack
    server over XML-RPC and verify they agree and the profile parses with the
    zonefile's data public key. Returns False on any failed check.

    NOTE(review): no 'return True' is visible at the end of this function as
    seen here -- the success path appears to fall through; confirm against
    the original source.
    """
    global wallet_keys, datasets, zonefile_hash

    # not revealed, but ready
    ns = state_engine.get_namespace_reveal("test")
    if ns is not None:
        print "namespace not ready"
        return False

    ns = state_engine.get_namespace("test")
    if ns is None:
        print "no namespace"
        return False

    if ns['namespace_id'] != 'test':
        print "wrong namespace"
        return False

    # not preordered
    preorder = state_engine.get_name_preorder(
        "foo.test", pybitcoin.make_pay_to_address_script(wallets[2].addr),
        wallets[3].addr)
    if preorder is not None:
        print "still have preorder"
        return False

    # registered
    name_rec = state_engine.get_name("foo.test")
    if name_rec is None:
        print "name does not exist"
        return False

    # owned
    if name_rec['address'] != wallets[3].addr or name_rec[
            'sender'] != pybitcoin.make_pay_to_address_script(wallets[3].addr):
        print "name has wrong owner"
        return False

    srv = xmlrpclib.ServerProxy("http://localhost:%s" % blockstack.RPC_SERVER_PORT)

    # zonefile and profile replicated to blockstack server
    try:
        zonefile_by_name_str = srv.get_zonefiles_by_names(['foo.test'])
        zonefile_by_hash_str = srv.get_zonefiles([name_rec['value_hash']])

        zonefile_by_name = json.loads(zonefile_by_name_str)
        zonefile_by_hash = json.loads(zonefile_by_hash_str)

        assert 'error' not in zonefile_by_name, json.dumps(zonefile_by_name, indent=4, sort_keys=True)
        assert 'error' not in zonefile_by_hash, json.dumps(zonefile_by_hash, indent=4, sort_keys=True)

        zf1 = None
        zf2 = None

        # zonefiles come back base64-encoded
        try:
            zf1 = base64.b64decode(zonefile_by_name['zonefiles']['foo.test'])
        except:
            print zonefile_by_name
            raise

        try:
            zf2 = base64.b64decode(
                zonefile_by_hash['zonefiles'][name_rec['value_hash']])
        except:
            print zonefile_by_hash
            raise

        # by-name and by-hash lookups must agree
        assert zf1 == zf2

        zonefile = blockstack_zones.parse_zone_file(zf1)
        user_pubkey = blockstack_client.user.user_zonefile_data_pubkey(zonefile)
        assert user_pubkey is not None, "no zonefile public key"

        profile_resp_txt = srv.get_profile("foo.test")
        profile_resp = json.loads(profile_resp_txt)
        assert 'error' not in profile_resp, "error:\n%s" % json.dumps(
            profile_resp, indent=4, sort_keys=True)
        assert 'profile' in profile_resp, "missing profile:\n%s" % json.dumps(
            profile_resp, indent=4, sort_keys=True)

        # profile will be in 'raw' form; verify it against the zonefile's key
        raw_profile = profile_resp['profile']
        profile = blockstack_client.storage.parse_mutable_data(
            raw_profile, user_pubkey)

    except Exception, e:
        traceback.print_exc()
        print "Invalid profile"
        return False
def check(state_engine):
    """
    Scenario check: verify that foo_0.test .. foo_9.test were registered to
    wallets[3] with a value hash set, and that each announced zonefile is
    cached on disk by the Atlas subsystem. Returns True on success, False
    (after printing a diagnostic) on the first failed check.
    """
    global synchronized, atlasdb_path, value_hashes, working_dir, atlas_dir

    if not synchronized:
        print "not synchronized"
        return False

    # not revealed, but ready
    ns = state_engine.get_namespace_reveal("test")
    if ns is not None:
        print "namespace not ready"
        return False

    ns = state_engine.get_namespace("test")
    if ns is None:
        print "no namespace"
        return False

    if ns['namespace_id'] != 'test':
        print "wrong namespace"
        return False

    for i in xrange(0, 10):
        name = 'foo_{}.test'.format(i)

        # not preordered (the preorder should have been consumed by the register)
        preorder = state_engine.get_name_preorder(
            name, virtualchain.make_payment_script(wallets[2].addr),
            wallets[3].addr)
        if preorder is not None:
            print "still have preorder"
            return False

        # registered
        name_rec = state_engine.get_name(name)
        if name_rec is None:
            print "name does not exist"
            return False

        # owned
        if name_rec['address'] != wallets[3].addr or name_rec[
                'sender'] != virtualchain.make_payment_script(wallets[3].addr):
            print "name has wrong owner"
            return False

        # updated
        if name_rec['value_hash'] is None:
            print "wrong value hash: %s" % name_rec['value_hash']
            return False

    for i in xrange(0, len(value_hashes)):
        name = 'foo_{}.test'.format(i)
        value_hash = value_hashes[i]

        # zonefile cached on disk?
        cached_zonefile_txt = blockstack.lib.storage.get_atlas_zonefile_data(
            value_hash, zonefile_dir)
        if cached_zonefile_txt is None:
            print "no cached zonefile %s in %s" % (value_hash, zonefile_dir)
            return False

        # parse as a sanity check (raises on malformed data)
        cached_zonefile = blockstack_zones.parse_zone_file(cached_zonefile_txt)

    return True
def check_password_blockstack(self, user_id, password, localpart):
    """
    Authenticate a Matrix user against a Blockstack identity.

    The 'password' is a pipe-separated triple "txid|app|blockstack_id".
    Verifies that localpart corresponds to the Blockstack ID (directly, via
    the name's address, or via a per-app address), then checks that the app's
    storage answers a challenge fetched from the challenge service with the
    expected text at mxid.json. Registers the Matrix account on first login.

    NOTE: written in Twisted inlineCallbacks style -- results are produced
    via defer.returnValue, not 'return'.
    """
    id_address = localpart
    logger.info("checking blockstack:" + id_address + " " + password)

    # password carries: challenge txid | app origin | blockstack id
    pwd_parts = password.split("|")
    txid = pwd_parts[0]
    app = pwd_parts[1]
    blockstack_id = pwd_parts[2]

    # resolve the blockstack name to its zonefile + owner address
    r = requests.get(self.blockstack_node + '/v1/names/' + blockstack_id)
    if not r.status_code == requests.codes.ok:
        logger.info("invalid blockstack name")
        defer.returnValue(False)

    names_response = r.json()

    # the zonefile's first URI record points at the profile (token file)
    z = blockstack_zones.parse_zone_file(names_response["zonefile"])
    r = requests.get(z["uri"][0]["target"])
    if not r.status_code == requests.codes.ok:
        logger.info("invalid profile url")
        defer.returnValue(False)

    zone_file_response = r.json()
    claim = zone_file_response[0]["decodedToken"]["payload"]["claim"]

    # determine how localpart maps to this identity
    account_type = -1
    if localpart == blockstack_id:
        account_type = 0
    elif localpart == names_response["address"].lower():
        account_type = 1
    elif claim["apps"].get(app) and localpart == self.getUserAppAddress(
            claim["apps"][app]):
        account_type = 2

    if (account_type < 0):
        logger.info("localpart does not belong to user")
        defer.returnValue(False)

    # fetch the challenge text registered under the given txid
    challengeUrl = "http://auth.openintents.org/c/" + txid
    r = requests.get(challengeUrl)
    if not r.status_code == requests.codes.ok:
        logger.info("invalid txid")
        defer.returnValue(False)

    challenge_text = r.json()["challenge"]
    logger.info("Challenge for user %s: %s", user_id, challenge_text)

    # the user proves control by publishing the challenge at mxid.json
    # in their app storage
    responseUrl = claim["apps"][app] + "mxid.json"
    r = requests.get(responseUrl)
    if not r.status_code == requests.codes.ok:
        logger.info("invalid mxid.json url")
        defer.returnValue(False)

    mxid_response = r.text
    logger.info("Response for user %s: %s", user_id, mxid_response)

    if mxid_response == challenge_text:
        if (yield self.account_handler.check_user_exists(user_id)):
            logger.info("User %s exists, logging in", localpart)
            self.updateProfileFrom(claim, blockstack_id, localpart)
            defer.returnValue(True)
        else:
            # first login: create the Matrix account
            try:
                user_id, access_token = (yield self.account_handler.register(
                    localpart=localpart))
                logger.info("User %s created, logging in", localpart)
                self.updateProfileFrom(claim, blockstack_id, localpart)
                defer.returnValue(True)
            except Exception as err:
                logger.warning("User %s not created (%s)", localpart, err)
                defer.returnValue(False)
    else:
        logger.warning("Wrong password for user %s", localpart)
        defer.returnValue(False)
def lookup_index_manifest_url( blockchain_id, driver_name, index_stem, config_path ):
    """
    Given a blockchain ID, go and get the index manifest url.

    This is only applicable for certain drivers--i.e. the ones that
    need a name-to-URL index since the storage system generates URLs
    to data on-the-fly.  This includes Dropbox, Google Drive, Onedrive,
    etc.

    The storage index URL will be located as an 'account', where
    * 'service' will be set to the driver name
    * 'identifier' will be set to 'storage'
    * 'contentUrl' will be set to the index url

    :param blockchain_id: name whose zone file/profile to consult; if None,
                          fall back to the local driver settings
    :param driver_name:   storage driver whose index is wanted
    :param index_stem:    unused here (kept for interface compatibility)
    :param config_path:   path to client config

    Return the index manifest URL on success.
    Return None if there is no URL
    Raise on error

    TODO: this method needs to be rewritten to use the token file format,
    and to use the proper public key to verify it.
    """
    import blockstack_client
    import blockstack_client.proxy as proxy
    import blockstack_client.user
    import blockstack_client.storage
    import blockstack_client.schemas

    if blockchain_id is None:
        # try getting it directly (we should have it)
        return index_settings_get_index_manifest_url(driver_name, config_path)

    name_record = proxy.get_name_blockchain_record(blockchain_id)
    if 'error' in name_record:
        raise Exception("Failed to load name record for {}".format(blockchain_id))

    zonefile_txt = get_zonefile_from_atlas(blockchain_id, config_path, name_record=name_record)
    zonefile_pubkey = None

    try:
        zonefile = blockstack_zones.parse_zone_file(zonefile_txt)
        zonefile = dict(zonefile)
        zonefile_pubkey = blockstack_client.user.user_zonefile_data_pubkey(zonefile)
    except Exception as e:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt and discarded the underlying parse error.  Catch
        # Exception only, and log the cause before re-raising.
        if DEBUG:
            log.exception(e)
        raise Exception("Non-standard zonefile for {}".format(blockchain_id))

    # get the profile...
    # we're assuming here that some of the profile URLs are at least
    # HTTP-accessible (i.e. we can get them without having to go through
    # the indexing system)
    # TODO: let drivers report their 'safety'
    profile_txt = None
    urls = blockstack_client.user.user_zonefile_urls(zonefile)
    for url in urls:
        profile_txt = None
        try:
            profile_txt = get_chunk_via_http(url, blockchain_id=blockchain_id)
        except Exception as e:
            if DEBUG:
                log.exception(e)

            log.debug("Failed to load profile from {}".format(url))
            continue

        if profile_txt is None:
            log.debug("Failed to load profile from {}".format(url))
            continue

        profile = blockstack_client.storage.parse_mutable_data(
            profile_txt, zonefile_pubkey, public_key_hash=name_record['address'])

        if not profile:
            log.debug("Failed to load profile from {}".format(url))
            continue

        # TODO: load this from the tokens file
        # got profile!  the storage information will be listed as an account,
        # where the 'service' is the driver name and the 'identifier' is the
        # manifest url
        if 'account' not in profile:
            log.error("No 'account' key in profile for {}".format(blockchain_id))
            return None

        accounts = profile['account']
        if not isinstance(accounts, list):
            log.error("Invalid 'account' key in profile for {}".format(blockchain_id))
            return None

        for account in accounts:
            try:
                jsonschema.validate(account, blockstack_client.schemas.PROFILE_ACCOUNT_SCHEMA)
            except jsonschema.ValidationError:
                continue

            if account['service'] != driver_name:
                log.debug("Skipping account for '{}'".format(account['service']))
                continue

            if account['identifier'] != 'storage':
                log.debug("Skipping non-storage account for '{}'".format(account['service']))
                continue

            # IDIOM: `in` instead of the deprecated dict.has_key()
            if 'contentUrl' not in account:
                continue

            url = account['contentUrl']
            parsed_url = urlparse.urlparse(url)

            # must be valid http(s) URL, or a test:// URL
            if (not parsed_url.scheme or not parsed_url.netloc) and not url.startswith('test://'):
                log.warning("Skip invalid '{}' driver URL".format(driver_name))
                continue

            log.debug("Index manifest URL for {} is {}".format(blockchain_id, url))
            return url

    return None
def scenario(wallets, **kw):
    """
    Integration test scenario: register and then renew 'bar.test' through the
    REST API while injecting registrar faults, to verify that zone file
    replication still happens even when the blockchain confirms transactions
    faster than the registrar processes them.

    Steps (grounded in the calls below):
      1. Create the 'test' namespace and register 'foo.test'.
      2. Sign in to 'register.app' as foo.test to obtain an API session.
      3. Register 'bar.test' (empty zone file, no data key) via REST and walk
         it through preorder -> register with explicit block advancement.
      4. With the API stopped, skip reg/up zonefile replication via a fault
         injection env var, then clear it and confirm the zone file replicates.
      5. Renew 'bar.test' with a new zone file carrying the owner public key,
         repeat the fault-injection dance for renewal replication, and verify
         history, zone file contents, profile lookup, and the pushed-back
         expire block.

    Returns False (and/or sets the module-global `error`) on failure; falls
    through on success.
    """
    global wallet_keys, wallet_keys_2, error, index_file_data, resource_data

    wallet_keys = testlib.blockstack_client_initialize_wallet(
        "0123456789abcdef", wallets[5].privkey, wallets[3].privkey,
        wallets[3].privkey)
    test_proxy = testlib.TestAPIProxy()
    blockstack_client.set_default_proxy(test_proxy)

    # Bring up the 'test' namespace: preorder -> reveal -> ready.
    testlib.blockstack_namespace_preorder("test", wallets[1].addr,
                                          wallets[0].privkey)
    testlib.next_block(**kw)

    testlib.blockstack_namespace_reveal(
        "test", wallets[1].addr, 52595, 250, 4,
        [6, 5, 4, 3, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 10, 10,
        wallets[0].privkey)
    testlib.next_block(**kw)

    testlib.blockstack_namespace_ready("test", wallets[1].privkey)
    testlib.next_block(**kw)

    # Register foo.test (preorder -> register), owned by wallets[3].
    testlib.blockstack_name_preorder("foo.test", wallets[2].privkey,
                                     wallets[3].addr)
    testlib.next_block(**kw)

    testlib.blockstack_name_register("foo.test", wallets[2].privkey,
                                     wallets[3].addr)
    testlib.next_block(**kw)

    # migrate profiles, but no data key in the zone file
    res = testlib.migrate_profile("foo.test", zonefile_has_data_key=False,
                                  proxy=test_proxy, wallet_keys=wallet_keys)
    if 'error' in res:
        res['test'] = 'Failed to initialize foo.test profile'
        print json.dumps(res, indent=4, sort_keys=True)
        error = True
        return

    # tell serialization-checker that value_hash can be ignored here
    print "BLOCKSTACK_SERIALIZATION_CHECK_IGNORE value_hash"
    sys.stdout.flush()

    testlib.next_block(**kw)

    config_path = os.environ.get("BLOCKSTACK_CLIENT_CONFIG", None)

    # make a session
    datastore_pk = keylib.ECPrivateKey(wallets[-1].privkey).to_hex()
    res = testlib.blockstack_cli_app_signin(
        "foo.test", datastore_pk, 'register.app', [
            'names', 'register', 'prices', 'zonefiles', 'blockchain',
            'node_read', 'user_read'
        ])
    if 'error' in res:
        print json.dumps(res, indent=4, sort_keys=True)
        error = True
        return

    ses = res['token']

    # register the name bar.test.  autogenerate the rest
    old_user_zonefile = blockstack_client.zonefile.make_empty_zonefile(
        'bar.test', None)
    old_user_zonefile_txt = blockstack_zones.make_zone_file(old_user_zonefile)
    res = testlib.blockstack_REST_call('POST', '/v1/names', ses, data={
        'name': 'bar.test',
        'zonefile': old_user_zonefile_txt,
        'make_profile': True
    })
    if 'error' in res:
        res['test'] = 'Failed to register user'
        print json.dumps(res)
        error = True
        return False

    print res
    tx_hash = res['response']['transaction_hash']

    # wait for preorder to get confirmed...
    for i in xrange(0, 6):
        testlib.next_block(**kw)

    res = testlib.verify_in_queue(ses, 'bar.test', 'preorder', tx_hash)
    if not res:
        return False

    # wait for the preorder to get confirmed
    for i in xrange(0, 4):
        testlib.next_block(**kw)

    # wait for register to go through
    print 'Wait for register to be submitted'
    time.sleep(10)

    # wait for the register/update to get confirmed
    for i in xrange(0, 6):
        testlib.next_block(**kw)

    res = testlib.verify_in_queue(ses, 'bar.test', 'register', None)
    if not res:
        return False

    for i in xrange(0, 3):
        testlib.next_block(**kw)

    # should have nine confirmations now
    res = testlib.get_queue(ses, 'register')
    if 'error' in res:
        print res
        return False

    if len(res) != 1:
        print res
        return False

    reg = res[0]
    confs = blockstack_client.get_tx_confirmations(reg['tx_hash'])
    if confs != 9:
        print 'wrong number of confs for {} (expected 9): {}'.format(
            reg['tx_hash'], confs)
        return False

    # stop the API server
    testlib.stop_api()

    # advance blockchain
    testlib.next_block(**kw)
    testlib.next_block(**kw)

    confs = blockstack_client.get_tx_confirmations(reg['tx_hash'])
    if confs != 11:
        print 'wrong number of confs for {} (expected 11): {}'.format(
            reg['tx_hash'], confs)
        return False

    # make sure the registrar does not process reg/up zonefile replication
    # (i.e. we want to make sure that the zonefile gets processed even if the
    # blockchain goes too fast)
    os.environ[
        'BLOCKSTACK_TEST_REGISTRAR_FAULT_INJECTION_SKIP_REGUP_REPLICATION'] = '1'
    testlib.start_api("0123456789abcdef")

    print 'Wait to verify that we do not remove the zone file just because the tx is confirmed'
    time.sleep(10)

    # verify that this is still in the queue
    res = testlib.get_queue(ses, 'register')
    if 'error' in res:
        print res
        return False

    if len(res) != 1:
        print res
        return False

    # clear the fault
    print 'Clearing regup replication fault'
    testlib.blockstack_test_setenv(
        "BLOCKSTACK_TEST_REGISTRAR_FAULT_INJECTION_SKIP_REGUP_REPLICATION",
        "0")

    # wait for register to go through
    print 'Wait for zonefile to replicate'
    time.sleep(10)

    res = testlib.blockstack_REST_call("GET", "/v1/names/bar.test", ses)
    if 'error' in res or res['http_status'] != 200:
        res['test'] = 'Failed to get name bar.test'
        print json.dumps(res)
        return False

    old_expire_block = res['response']['expire_block']

    # get the zonefile
    res = testlib.blockstack_REST_call("GET", "/v1/names/bar.test/zonefile",
                                       ses)
    if 'error' in res or res['http_status'] != 200:
        res['test'] = 'Failed to get name zonefile'
        print json.dumps(res)
        return False

    # zonefile must not have a public key listed
    zonefile_txt = res['response']['zonefile']
    print zonefile_txt

    parsed_zonefile = blockstack_zones.parse_zone_file(zonefile_txt)
    if parsed_zonefile.has_key('txt'):
        print 'have txt records'
        print parsed_zonefile
        return False

    # renew it, but put the *current* owner key as the zonefile's *new*
    # public key
    new_user_zonefile = blockstack_client.zonefile.make_empty_zonefile(
        'bar.test', wallets[3].pubkey_hex)
    new_user_zonefile_txt = blockstack_zones.make_zone_file(new_user_zonefile)
    res = testlib.blockstack_REST_call("POST", "/v1/names", ses, data={
        'name': 'bar.test',
        'zonefile': new_user_zonefile_txt
    })
    if 'error' in res or res['http_status'] != 202:
        res['test'] = 'Failed to renew name'
        print json.dumps(res)
        return False

    # verify in renew queue
    for i in xrange(0, 6):
        testlib.next_block(**kw)

    res = testlib.verify_in_queue(ses, 'bar.test', 'renew', None)
    if not res:
        return False

    for i in xrange(0, 3):
        testlib.next_block(**kw)

    # should have nine confirmations now
    res = testlib.get_queue(ses, 'renew')
    if 'error' in res:
        print res
        return False

    if len(res) != 1:
        print res
        return False

    reg = res[0]
    confs = blockstack_client.get_tx_confirmations(reg['tx_hash'])
    if confs != 9:
        print 'wrong number of confs for {} (expected 9): {}'.format(
            reg['tx_hash'], confs)
        return False

    # stop the API server
    testlib.stop_api()

    # advance blockchain
    testlib.next_block(**kw)
    testlib.next_block(**kw)

    confs = blockstack_client.get_tx_confirmations(reg['tx_hash'])
    if confs != 11:
        print 'wrong number of confs for {} (expected 11): {}'.format(
            reg['tx_hash'], confs)
        return False

    # make the registrar skip the first few steps, so the only thing it does
    # is clear out confirmed updates
    # (i.e. we want to make sure that the renewal's zonefile gets processed
    # even if the blockchain goes too fast)
    os.environ[
        'BLOCKSTACK_TEST_REGISTRAR_FAULT_INJECTION_SKIP_RENEWAL_REPLICATION'] = '1'
    testlib.start_api("0123456789abcdef")

    # wait a while
    print 'Wait to verify that clearing out confirmed transactions does NOT remove zonefiles'
    time.sleep(10)

    # verify that this is still in the queue
    res = testlib.get_queue(ses, 'renew')
    if 'error' in res:
        print res
        return False

    if len(res) != 1:
        print res
        return False

    # clear the fault
    print 'Clearing renewal replication fault'
    testlib.blockstack_test_setenv(
        "BLOCKSTACK_TEST_REGISTRAR_FAULT_INJECTION_SKIP_RENEWAL_REPLICATION",
        "0")

    # now the renewal zonefile should replicate
    print 'Wait for renewal zonefile to replicate'
    time.sleep(10)

    # new expire block
    res = testlib.blockstack_REST_call("GET", "/v1/names/bar.test", ses)
    if 'error' in res or res['http_status'] != 200:
        res['test'] = 'Failed to get name bar.test'
        print json.dumps(res)
        return False

    new_expire_block = res['response']['expire_block']

    # do we have the history for the name?
    res = testlib.blockstack_REST_call("GET", "/v1/names/bar.test/history",
                                       ses)
    if 'error' in res or res['http_status'] != 200:
        res['test'] = "Failed to get name history for bar.test"
        print json.dumps(res)
        return False

    # valid history?  (preorder/register/renew => 3 entries)
    hist = res['response']
    if len(hist.keys()) != 3:
        res['test'] = 'Failed to get update history'
        res['history'] = hist
        print json.dumps(res, indent=4, sort_keys=True)
        return False

    # get the zonefile
    res = testlib.blockstack_REST_call("GET", "/v1/names/bar.test/zonefile",
                                       ses)
    if 'error' in res or res['http_status'] != 200:
        res['test'] = 'Failed to get name zonefile'
        print json.dumps(res)
        return False

    # zonefile must have old owner key
    zonefile_txt = res['response']['zonefile']
    parsed_zonefile = blockstack_zones.parse_zone_file(zonefile_txt)
    if not parsed_zonefile.has_key('txt'):
        print 'missing txt'
        print parsed_zonefile
        return False

    found = False
    for txtrec in parsed_zonefile['txt']:
        if txtrec['name'] == 'pubkey' and txtrec[
                'txt'] == 'pubkey:data:{}'.format(wallets[3].pubkey_hex):
            found = True

    if not found:
        print 'missing public key {}'.format(wallets[3].pubkey_hex)
        return False

    # profile lookup must work
    res = testlib.blockstack_REST_call("GET", "/v1/users/bar.test", ses)
    if 'error' in res or res['http_status'] != 200:
        res['text'] = 'failed to get profile for bar.test'
        print json.dumps(res)
        return False

    print ''
    print json.dumps(res['response'], indent=4, sort_keys=True)
    print ''

    # verify pushed back
    if old_expire_block + 10 > new_expire_block:
        # didn't go through
        print >> sys.stderr, "Renewal didn't work: %s --> %s" % (
            old_expire_block, new_expire_block)
        return False
def lookup_index_manifest_url(blockchain_id, driver_name, index_stem, config_path):
    """
    Given a blockchain ID, go and get the index manifest url.

    This is only applicable for certain drivers--i.e. the ones that
    need a name-to-URL index since the storage system generates URLs
    to data on-the-fly.  This includes Dropbox, Google Drive, Onedrive,
    etc.

    The storage index URL will be located as an 'account', where
    * 'service' will be set to the driver name
    * 'identifier' will be set to 'storage'
    * 'contentUrl' will be set to the index url

    :param blockchain_id: name whose zone file/profile to consult; if None,
                          fall back to the local driver settings
    :param driver_name:   storage driver whose index is wanted
    :param index_stem:    unused here (kept for interface compatibility)
    :param config_path:   path to client config

    Return the index manifest URL on success.
    Return None if there is no URL
    Raise on error

    TODO: this method needs to be rewritten to use the token file format,
    and to use the proper public key to verify it.
    """
    import blockstack_client
    import blockstack_client.proxy as proxy
    import blockstack_client.user
    import blockstack_client.storage
    import blockstack_client.schemas

    if blockchain_id is None:
        # try getting it directly (we should have it)
        return index_settings_get_index_manifest_url(driver_name, config_path)

    name_record = proxy.get_name_blockchain_record(blockchain_id)
    if 'error' in name_record:
        raise Exception(
            "Failed to load name record for {}".format(blockchain_id))

    zonefile_txt = get_zonefile_from_atlas(blockchain_id, config_path,
                                           name_record=name_record)
    zonefile_pubkey = None

    try:
        zonefile = blockstack_zones.parse_zone_file(zonefile_txt)
        zonefile = dict(zonefile)
        zonefile_pubkey = blockstack_client.user.user_zonefile_data_pubkey(
            zonefile)
    except Exception as e:
        # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt and discarded the underlying parse error.  Catch
        # Exception only, and log the cause before re-raising.
        if DEBUG:
            log.exception(e)
        raise Exception("Non-standard zonefile for {}".format(blockchain_id))

    # get the profile...
    # we're assuming here that some of the profile URLs are at least
    # HTTP-accessible (i.e. we can get them without having to go through
    # the indexing system)
    # TODO: let drivers report their 'safety'
    profile_txt = None
    urls = blockstack_client.user.user_zonefile_urls(zonefile)
    for url in urls:
        profile_txt = None
        try:
            profile_txt = get_chunk_via_http(url, blockchain_id=blockchain_id)
        except Exception as e:
            if DEBUG:
                log.exception(e)

            log.debug("Failed to load profile from {}".format(url))
            continue

        if profile_txt is None:
            log.debug("Failed to load profile from {}".format(url))
            continue

        profile = blockstack_client.storage.parse_mutable_data(
            profile_txt, zonefile_pubkey,
            public_key_hash=name_record['address'])

        if not profile:
            log.debug("Failed to load profile from {}".format(url))
            continue

        # TODO: load this from the tokens file
        # got profile!  the storage information will be listed as an account,
        # where the 'service' is the driver name and the 'identifier' is the
        # manifest url
        if 'account' not in profile:
            log.error(
                "No 'account' key in profile for {}".format(blockchain_id))
            return None

        accounts = profile['account']
        if not isinstance(accounts, list):
            log.error("Invalid 'account' key in profile for {}".format(
                blockchain_id))
            return None

        for account in accounts:
            try:
                jsonschema.validate(
                    account, blockstack_client.schemas.PROFILE_ACCOUNT_SCHEMA)
            except jsonschema.ValidationError:
                continue

            if account['service'] != driver_name:
                log.debug("Skipping account for '{}'".format(
                    account['service']))
                continue

            if account['identifier'] != 'storage':
                log.debug("Skipping non-storage account for '{}'".format(
                    account['service']))
                continue

            # IDIOM: `in` instead of the deprecated dict.has_key()
            if 'contentUrl' not in account:
                continue

            url = account['contentUrl']
            parsed_url = urlparse.urlparse(url)

            # must be valid http(s) URL, or a test:// URL
            if (not parsed_url.scheme or not parsed_url.netloc) and not url.startswith('test://'):
                log.warning("Skip invalid '{}' driver URL".format(driver_name))
                continue

            log.debug("Index manifest URL for {} is {}".format(
                blockchain_id, url))
            return url

    return None
def scenario( wallets, **kw ):
    """
    Integration test scenario: register 'bar.test' via the REST API and then
    renew it with a new zone file, verifying queue state, zone file contents,
    history length, profile lookup, and that the expire block is pushed back.

    Steps (grounded in the calls below):
      1. Create the 'test' namespace and register 'foo.test'.
      2. Sign in to 'register.app' as foo.test to obtain an API session.
      3. Register 'bar.test' with an empty zone file (no data key), advancing
         blocks through preorder -> register and waiting for zone file
         replication.
      4. Renew 'bar.test' with a zone file carrying the owner public key, wait
         for confirmation, and check the resulting name state.

    Returns False (and/or sets the module-global `error`) on failure; falls
    through on success.
    """
    global wallet_keys, wallet_keys_2, error, index_file_data, resource_data

    wallet_keys = testlib.blockstack_client_initialize_wallet( "0123456789abcdef", wallets[5].privkey, wallets[3].privkey, wallets[3].privkey )
    test_proxy = testlib.TestAPIProxy()
    blockstack_client.set_default_proxy( test_proxy )

    # Bring up the 'test' namespace: preorder -> reveal -> ready.
    testlib.blockstack_namespace_preorder( "test", wallets[1].addr, wallets[0].privkey )
    testlib.next_block( **kw )

    testlib.blockstack_namespace_reveal( "test", wallets[1].addr, 52595, 250, 4, [6,5,4,3,2,1,0,0,0,0,0,0,0,0,0,0], 10, 10, wallets[0].privkey )
    testlib.next_block( **kw )

    testlib.blockstack_namespace_ready( "test", wallets[1].privkey )
    testlib.next_block( **kw )

    # Register foo.test (preorder -> register), owned by wallets[3].
    testlib.blockstack_name_preorder( "foo.test", wallets[2].privkey, wallets[3].addr )
    testlib.next_block( **kw )

    testlib.blockstack_name_register( "foo.test", wallets[2].privkey, wallets[3].addr )
    testlib.next_block( **kw )

    # migrate profiles, but no data key in the zone file
    res = testlib.migrate_profile( "foo.test", zonefile_has_data_key=False, proxy=test_proxy, wallet_keys=wallet_keys )
    if 'error' in res:
        res['test'] = 'Failed to initialize foo.test profile'
        print json.dumps(res, indent=4, sort_keys=True)
        error = True
        return

    # tell serialization-checker that value_hash can be ignored here
    print "BLOCKSTACK_SERIALIZATION_CHECK_IGNORE value_hash"
    sys.stdout.flush()

    testlib.next_block( **kw )

    config_path = os.environ.get("BLOCKSTACK_CLIENT_CONFIG", None)

    # make a session
    datastore_pk = keylib.ECPrivateKey(wallets[-1].privkey).to_hex()
    res = testlib.blockstack_cli_app_signin("foo.test", datastore_pk, 'register.app', ['names', 'register', 'prices', 'zonefiles', 'blockchain', 'node_read', 'user_read'])
    if 'error' in res:
        print json.dumps(res, indent=4, sort_keys=True)
        error = True
        return

    ses = res['token']

    # register the name bar.test.  autogenerate the rest
    old_user_zonefile = blockstack_client.zonefile.make_empty_zonefile('bar.test', None)
    old_user_zonefile_txt = blockstack_zones.make_zone_file(old_user_zonefile)
    res = testlib.blockstack_REST_call('POST', '/v1/names', ses, data={'name': 'bar.test', 'zonefile': old_user_zonefile_txt, 'make_profile': True} )
    if 'error' in res:
        res['test'] = 'Failed to register user'
        print json.dumps(res)
        error = True
        return False

    print res
    tx_hash = res['response']['transaction_hash']

    # wait for preorder to get confirmed...
    for i in xrange(0, 6):
        testlib.next_block( **kw )

    res = testlib.verify_in_queue(ses, 'bar.test', 'preorder', tx_hash )
    if not res:
        return False

    # wait for the preorder to get confirmed
    for i in xrange(0, 4):
        testlib.next_block( **kw )

    # wait for register to go through
    print 'Wait for register to be submitted'
    time.sleep(10)

    # wait for the register/update to get confirmed
    for i in xrange(0, 6):
        testlib.next_block( **kw )

    res = testlib.verify_in_queue(ses, 'bar.test', 'register', None )
    if not res:
        return False

    for i in xrange(0, 4):
        testlib.next_block( **kw )

    # wait for register to go through
    print 'Wait for zonefile to replicate'
    time.sleep(10)

    res = testlib.blockstack_REST_call("GET", "/v1/names/bar.test", ses)
    if 'error' in res or res['http_status'] != 200:
        res['test'] = 'Failed to get name bar.test'
        print json.dumps(res)
        return False

    old_expire_block = res['response']['expire_block']

    # get the zonefile
    res = testlib.blockstack_REST_call("GET", "/v1/names/bar.test/zonefile", ses )
    if 'error' in res or res['http_status'] != 200:
        res['test'] = 'Failed to get name zonefile'
        print json.dumps(res)
        return False

    # zonefile must not have a public key listed
    zonefile_txt = res['response']['zonefile']
    print zonefile_txt

    parsed_zonefile = blockstack_zones.parse_zone_file(zonefile_txt)
    if parsed_zonefile.has_key('txt'):
        print 'have txt records'
        print parsed_zonefile
        return False

    # renew it, but put the *current* owner key as the zonefile's *new* public
    # key
    new_user_zonefile = blockstack_client.zonefile.make_empty_zonefile('bar.test', wallets[3].pubkey_hex )
    new_user_zonefile_txt = blockstack_zones.make_zone_file(new_user_zonefile)
    res = testlib.blockstack_REST_call("POST", "/v1/names", ses, data={'name': 'bar.test', 'zonefile': new_user_zonefile_txt} )
    if 'error' in res or res['http_status'] != 202:
        res['test'] = 'Failed to renew name'
        print json.dumps(res)
        return False

    # verify in renew queue
    for i in xrange(0, 6):
        testlib.next_block( **kw )

    res = testlib.verify_in_queue(ses, 'bar.test', 'renew', None )
    if not res:
        return False

    for i in xrange(0, 4):
        testlib.next_block( **kw )

    # new expire block
    res = testlib.blockstack_REST_call("GET", "/v1/names/bar.test", ses)
    if 'error' in res or res['http_status'] != 200:
        res['test'] = 'Failed to get name bar.test'
        print json.dumps(res)
        return False

    print res
    new_expire_block = res['response']['expire_block']

    # do we have the history for the name?
    res = testlib.blockstack_REST_call("GET", "/v1/names/bar.test/history", ses )
    if 'error' in res or res['http_status'] != 200:
        res['test'] = "Failed to get name history for bar.test"
        print json.dumps(res)
        return False

    # valid history?  (preorder/register/renew => 3 entries)
    hist = res['response']
    if len(hist.keys()) != 3:
        res['test'] = 'Failed to get update history'
        res['history'] = hist
        print json.dumps(res, indent=4, sort_keys=True)
        return False

    # get the zonefile
    res = testlib.blockstack_REST_call("GET", "/v1/names/bar.test/zonefile", ses )
    if 'error' in res or res['http_status'] != 200:
        res['test'] = 'Failed to get name zonefile'
        print json.dumps(res)
        return False

    # zonefile must have old owner key
    zonefile_txt = res['response']['zonefile']
    parsed_zonefile = blockstack_zones.parse_zone_file(zonefile_txt)
    if not parsed_zonefile.has_key('txt'):
        print 'missing txt'
        print parsed_zonefile
        return False

    found = False
    for txtrec in parsed_zonefile['txt']:
        if txtrec['name'] == 'pubkey' and txtrec['txt'] == 'pubkey:data:{}'.format(wallets[3].pubkey_hex):
            found = True

    if not found:
        print 'missing public key {}'.format(wallets[3].pubkey_hex)
        return False

    # profile lookup must work
    res = testlib.blockstack_REST_call("GET", "/v1/users/bar.test", ses)
    if 'error' in res or res['http_status'] != 200:
        res['text'] = 'failed to get profile for bar.test'
        print json.dumps(res)
        return False

    print ''
    print json.dumps(res['response'], indent=4, sort_keys=True)
    print ''

    # verify pushed back
    if old_expire_block + 10 > new_expire_block:
        # didn't go through
        print >> sys.stderr, "Renewal didn't work: %s --> %s" % (old_expire_block, new_expire_block)
        return False
def check( state_engine ):
    """
    Post-scenario verification: confirm namespace and name state in the
    state engine, then confirm via the node's XML-RPC interface that the
    zone file and profile for 'foo.test' were replicated correctly.

    Checks (grounded in the code below):
      * 'test' namespace is ready (no longer in the 'reveal' state)
      * 'foo.test' has no outstanding preorder and is registered
      * 'foo.test' is owned by wallets[3]
      * the same zone file is returned by name and by hash, it carries a
        data public key, and the stored profile parses and verifies
        against that key

    Returns False on any failure.
    NOTE(review): no success return is visible in this view -- presumably
    a trailing `return True` follows; confirm against the full file.
    """
    global wallet_keys, datasets, zonefile_hash

    # not revealed, but ready
    ns = state_engine.get_namespace_reveal( "test" )
    if ns is not None:
        print "namespace not ready"
        return False

    ns = state_engine.get_namespace( "test" )
    if ns is None:
        print "no namespace"
        return False

    if ns['namespace_id'] != 'test':
        print "wrong namespace"
        return False

    # not preordered
    preorder = state_engine.get_name_preorder( "foo.test", pybitcoin.make_pay_to_address_script(wallets[2].addr), wallets[3].addr )
    if preorder is not None:
        print "still have preorder"
        return False

    # registered
    name_rec = state_engine.get_name( "foo.test" )
    if name_rec is None:
        print "name does not exist"
        return False

    # owned
    if name_rec['address'] != wallets[3].addr or name_rec['sender'] != pybitcoin.make_pay_to_address_script(wallets[3].addr):
        print "name has wrong owner"
        return False

    srv = xmlrpclib.ServerProxy("http://localhost:%s" % blockstack.RPC_SERVER_PORT)

    # zonefile and profile replicated to blockstack server
    try:
        # zone files come back base64-encoded inside a JSON envelope
        zonefile_by_name_str = srv.get_zonefiles_by_names(['foo.test'])
        zonefile_by_hash_str = srv.get_zonefiles([name_rec['value_hash']])

        zonefile_by_name = json.loads(zonefile_by_name_str)
        zonefile_by_hash = json.loads(zonefile_by_hash_str)

        assert 'error' not in zonefile_by_name, json.dumps(zonefile_by_name, indent=4, sort_keys=True)
        assert 'error' not in zonefile_by_hash, json.dumps(zonefile_by_hash, indent=4, sort_keys=True)

        zf1 = None
        zf2 = None

        try:
            zf1 = base64.b64decode( zonefile_by_name['zonefiles']['foo.test'] )
        except:
            # dump the envelope before re-raising, for diagnosis
            print zonefile_by_name
            raise

        try:
            zf2 = base64.b64decode( zonefile_by_hash['zonefiles'][name_rec['value_hash']] )
        except:
            print zonefile_by_hash
            raise

        # both lookup paths must agree on the zone file bytes
        assert zf1 == zf2

        zonefile = blockstack_zones.parse_zone_file( zf1 )
        user_pubkey = blockstack_client.user.user_zonefile_data_pubkey( zonefile )
        assert user_pubkey is not None, "no zonefile public key"

        profile_resp_txt = srv.get_profile("foo.test")
        profile_resp = json.loads(profile_resp_txt)
        assert 'error' not in profile_resp, "error:\n%s" % json.dumps(profile_resp, indent=4, sort_keys=True)
        assert 'profile' in profile_resp, "missing profile:\n%s" % json.dumps(profile_resp, indent=4, sort_keys=True)

        # profile will be in 'raw' form
        raw_profile = profile_resp['profile']

        # verify the profile against the zone file's data public key
        profile = blockstack_client.storage.parse_mutable_data( raw_profile, user_pubkey )

    except Exception, e:
        traceback.print_exc()
        print "Invalid profile"
        return False
#!/bin/python ## The following uses blockstack_zones library import blockstack_zones import sys dat = None with open(sys.argv[1], "r") as f: dat = f.read() f.close() site = [] temp_site = "" dns = blockstack_zones.parse_zone_file(dat) for xx in xrange(len(dns['a'])): if (dns['a'][xx]['name'] != "@") and (dns['a'][xx]['name'] != "."): temp_site = "%s.%s" % (dns['a'][xx]['name'], sys.argv[1]) temp_site = temp_site.replace("#", "").replace("_", "") if temp_site not in site: print temp_site site.append(temp_site) for xx in xrange(len(dns['cname'])): if (dns['cname'][xx]['name'] != "@") and (dns['cname'][xx]['name'] != "."): temp_site = "%s.%s" % (dns['cname'][xx]['name'], sys.argv[1]) temp_site = temp_site.replace("#", "").replace("_", "") if temp_site not in site: print temp_site site.append(temp_site)