Code example #1
def index():
    current_dir = os.path.abspath(os.path.dirname(__file__))
    server_info = getinfo()

    return render_template('index.html',
                           server_info=server_info,
                           server_url=PUBLIC_NODE_URL)
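The handler above is shown without its surrounding module. A minimal, hypothetical sketch of the Flask scaffolding it assumes (the route decorator, plus getinfo() and PUBLIC_NODE_URL as module-level names; the values below are placeholders, not the real blockstack-core definitions):

# Hypothetical module context for the index() handler above; the real
# blockstack-core module defines getinfo() and PUBLIC_NODE_URL itself.
from flask import Flask, render_template

app = Flask(__name__)
PUBLIC_NODE_URL = 'https://core.example.org'  # assumed placeholder

def getinfo():
    # Assumed: wraps the node's getinfo RPC and returns the status dict
    # that index.html renders.
    return {'last_block_processed': 0}

@app.route('/')
def index():
    return render_template('index.html',
                           server_info=getinfo(),
                           server_url=PUBLIC_NODE_URL)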
Code example #2
def index():
    current_dir = os.path.abspath(os.path.dirname(__file__))
    api_calls = get_api_calls(current_dir + '/api_v1.md')
    server_info = getinfo()

    return render_template('index.html',
                           api_calls=api_calls,
                           server_info=server_info,
                           server_url=PUBLIC_NODE_URL)
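This variant also renders API documentation parsed from api_v1.md, but get_api_calls() itself is not shown. A minimal sketch of such a parser, assuming (hypothetically) that each API call sits under its own '### ' heading in that markdown file:

# Hypothetical stand-in for get_api_calls(); the real blockstack-core
# parser may expect a different structure in api_v1.md.
def get_api_calls(md_path):
    calls = []
    current = None
    with open(md_path, 'r') as fin:
        for line in fin:
            if line.startswith('### '):  # assumed: one heading per API call
                current = {'title': line[4:].strip(), 'description': ''}
                calls.append(current)
            elif current is not None:
                current['description'] += line
    return calls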
Code example #3
File: fetch_data.py Project: iefken/blockstack-core
def update_profiles():
    if not os.path.exists(SEARCH_LAST_INDEX_DATA_FILE):
        return {'error': 'No last index, you need to rebuild the whole index.'}
    with open(SEARCH_LAST_INDEX_DATA_FILE, 'r') as fin:
        search_indexer_info = json.load(fin)

    last_block_processed = search_indexer_info['last_block_height']
    last_full_index = search_indexer_info['last_full_index']
    last_subdomain_seq = search_indexer_info['last_subdomain_seq']

    info_resp = proxy.getinfo()
    try:
        new_block_height = info_resp['last_block_processed']
    except:
        print(info_resp)
        raise

    with open(SEARCH_BLOCKCHAIN_DATA_FILE, 'r') as fin:
        existing_names = set(json.load(fin))

    if last_block_processed - 1 > new_block_height:
        return {'status': True, 'message': 'No new blocks since last indexing'}

    subdomaindb = subdomains.SubdomainDB()
    subdomain_names = [
        name for name in subdomaindb.get_all_subdomains(
            above_seq=last_subdomain_seq) if name not in existing_names
    ]
    last_subdomain_seq = subdomaindb.get_last_index()

    # aaron: note, sometimes it may take a little while for
    #  new zonefiles to have propagated to the network, so
    #  we over-fetch a little bit
    zonefiles_resp = proxy.get_zonefiles_by_block(last_block_processed - 1,
                                                  new_block_height)
    zonefiles_updated = zonefiles_resp['zonefile_info']
    names_updated = [
        zf_info['name'] for zf_info in zonefiles_updated if 'name' in zf_info
    ]
    names_updated += subdomain_names
    names_to_insert = set(
        [name for name in names_updated if name not in existing_names])

    updated_profiles = {}
    actually_updated_names = set()
    print "Updating {} entries...".format(len(names_updated))
    for ix, name in enumerate(names_to_insert):
        print_status_bar(ix + 1, len(names_to_insert))
        profile_entry = {}
        profile_entry['fqu'] = name
        try:
            profile_resp = get_profile(name, use_legacy=True)
            profile_entry['profile'] = profile_resp['profile']
            updated_profiles[name] = profile_entry
            actually_updated_names.add(name)
        except KeyboardInterrupt as e:
            raise e
        except Exception:
            import traceback as tb
            tb.print_exc()

    names_updated = actually_updated_names

    if len(names_updated) == 0:
        return {'status': True, 'message': 'No new profiles'}

    with open(SEARCH_PROFILE_DATA_FILE, 'r') as fin:
        all_profiles = json.load(fin)
    existing_names = list(existing_names)

    for name_to_add in names_updated:
        all_profiles.append(updated_profiles[name_to_add])
        existing_names.append(name_to_add)

    if not obtain_lockfile():
        return {'error': 'Could not obtain lockfile, abandoning my update.'}
    with open(SEARCH_LAST_INDEX_DATA_FILE, 'r') as fin:
        search_indexer_info = json.load(fin)
    if search_indexer_info['last_full_index'] != last_full_index:
        return {'error': 'Full re-index written during our update. Abandoning'}

    with open(SEARCH_BLOCKCHAIN_DATA_FILE, 'w') as fout:
        json.dump(existing_names, fout)
    with open(SEARCH_PROFILE_DATA_FILE, 'w') as fout:
        json.dump(all_profiles, fout)
    with open(SEARCH_LAST_INDEX_DATA_FILE, 'w') as fout:
        search_indexer_info['last_block_height'] = new_block_height
        search_indexer_info['last_subdomain_seq'] = last_subdomain_seq
        json.dump(search_indexer_info, fout)

    return {
        'status': True,
        'message': 'Indexed {} profiles'.format(len(names_updated))
    }
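update_profiles() reports its outcome as a dict carrying either an 'error' key or a 'status'/'message' pair, so a periodic driver only has to inspect the result. A minimal sketch of such a driver (the 10-minute interval is an assumption, not part of fetch_data.py):

# Hypothetical driver that re-runs the incremental indexer; the
# interval is an assumed value.
import time

def run_indexer_forever(interval=600):
    while True:
        result = update_profiles()
        if 'error' in result:
            print("indexer error: {}".format(result['error']))
        else:
            print(result['message'])
        time.sleep(interval)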
Code example #4
File: fetch_data.py Project: iefken/blockstack-core
def fetch_profiles(max_to_fetch=None, just_test_set=False):
    """
        Fetch profile data using Blockstack Core and save the data.
        Data is saved in: data/profile_data.json
        Format of the data is <fqu, profile>
        * fqu: fully-qualified name
        * profile: json profile data
    """

    with open(SEARCH_BLOCKCHAIN_DATA_FILE, 'r') as fin:
        all_names = json.load(fin)

    info_resp = proxy.getinfo()
    last_block_processed = info_resp['last_block_processed']

    all_profiles = []

    if max_to_fetch is None:
        max_to_fetch = len(all_names)

    if just_test_set:
        from api.tests.search_tests import SEARCH_TEST_USERS
        all_names = ["{}.id".format(u) for u in SEARCH_TEST_USERS]

    for ix, fqu in enumerate(all_names):
        if ix % 100 == 0:
            print_status_bar(ix, max_to_fetch)
        if ix >= max_to_fetch:
            break

        resp = {}
        resp['fqu'] = fqu

        try:
            resp['profile'] = get_profile(fqu, use_legacy=True)['profile']
            all_profiles.append(resp)
        except KeyboardInterrupt as e:
            raise e
        except Exception:
            # Skip names whose profiles could not be fetched.
            pass

    attempts = 0
    while not obtain_lockfile():
        attempts += 1
        time.sleep(5)
        if attempts > 10:
            print "ERROR! Could not obtain lockfile"
            return

    subdomaindb = subdomains.SubdomainDB()
    last_subdomain_seq = subdomaindb.get_last_index()

    with open(SEARCH_PROFILE_DATA_FILE, 'w') as fout:
        json.dump(all_profiles, fout)
    with open(SEARCH_LAST_INDEX_DATA_FILE, 'w') as fout:
        search_index_data = {
            'last_block_height': last_block_processed,
            'last_full_index': datetime.now().isoformat(),
            'last_subdomain_seq': last_subdomain_seq
        }
        json.dump(search_index_data, fout)
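fetch_profiles() writes the full index that update_profiles() then extends incrementally, so a typical bootstrap is a one-time full crawl followed by periodic incremental passes. A sketch of that sequence (the ordering is implied by the shared data files, not spelled out in the source):

# Assumed bootstrap sequence: one full crawl, then an incremental pass.
fetch_profiles()            # writes profile_data.json and the last-index file
result = update_profiles()  # picks up anything newer than the full crawl
print(result.get('message') or result.get('error'))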
Code example #5
    def update(self, full_refresh=False):
        if not is_resolving_subdomains():
            log.warn(
                'Configured not to resolve subdomains, but tried to update subdomain cache anyways...'
            )
            return

        if full_refresh:
            self._drop_tables()
            self._create_tables()
            last_block = 0
            if not constants.BLOCKSTACK_TESTNET:
                last_block = SUBDOMAINS_FIRST_BLOCK
        else:
            last_block = self.last_seen()
            if not constants.BLOCKSTACK_TESTNET:
                last_block = max(last_block, SUBDOMAINS_FIRST_BLOCK)

        core_last_block = proxy.getinfo()['last_block_processed']
        log.debug("Fetching zonefiles in range ({}, {})".format(
            last_block + 1, core_last_block))
        if core_last_block < last_block + 1:
            return

        zonefiles_in_blocks = proxy.get_zonefiles_by_block(
            last_block + 1, core_last_block)
        if 'error' in zonefiles_in_blocks:
            log.error(
                "Error fetching zonefile info: {}".format(zonefiles_in_blocks))
            return
        core_last_block = min(zonefiles_in_blocks['last_block'],
                              core_last_block)
        zonefiles_info = zonefiles_in_blocks['zonefile_info']
        if len(zonefiles_info) == 0:
            return
        zonefiles_info.sort(key=lambda a: a['block_height'])
        domains, hashes, blockids, txids = map(
            list,
            zip(*[(x['name'], x['zonefile_hash'], x['block_height'], x['txid'])
                  for x in zonefiles_info]))
        zf_dict = {}
        zonefiles_to_fetch_per = 100
        for offset in range(0, len(hashes) // zonefiles_to_fetch_per + 1):
            lower = offset * zonefiles_to_fetch_per
            upper = min(lower + zonefiles_to_fetch_per, len(hashes))
            zf_resp = proxy.get_zonefiles(None,
                                          hashes[lower:upper],
                                          proxy=proxy.get_default_proxy())
            if 'zonefiles' not in zf_resp:
                log.error(
                    "Couldn't get zonefiles from proxy {}".format(zf_resp))
                return
            zf_dict.update(zf_resp['zonefiles'])
        if len(zf_dict) == 0:
            return
        could_not_find = []
        zonefiles = []
        for ix, zf_hash in enumerate(hashes):
            if zf_hash not in zf_dict:
                could_not_find.append(ix)
            else:
                zonefiles.append(zf_dict[zf_hash])
        could_not_find.sort(reverse=True)
        for ix in could_not_find:
            del domains[ix]
            del hashes[ix]
            del blockids[ix]
            del txids[ix]

        _build_subdomain_db(domains, zonefiles, self, txids)

        last_block = core_last_block

        self._set_last_seen(last_block)
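The could_not_find bookkeeping above depends on deleting indices in descending order, so earlier deletions never shift the positions still pending. The same technique in isolation:

# Deleting entries from parallel lists by index: iterating the indices
# in reverse keeps the remaining positions valid.
names = ['a', 'b', 'c', 'd']
blocks = [1, 2, 3, 4]
to_drop = [0, 2]
for ix in sorted(to_drop, reverse=True):
    del names[ix]
    del blocks[ix]
assert names == ['b', 'd'] and blocks == [2, 4]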
Code example #6
    def update(self, full_refresh=False):
        if full_refresh:
            self._drop_tables()
            self._create_tables()
            last_block = 0
            if not constants.BLOCKSTACK_TESTNET:
                last_block = SUBDOMAINS_FIRST_BLOCK
        else:
            last_block = self.last_seen()
            if not constants.BLOCKSTACK_TESTNET:
                last_block = max(last_block, SUBDOMAINS_FIRST_BLOCK)

        core_last_block = proxy.getinfo()['last_block_processed']
        log.debug("Fetching zonefiles in range ({}, {})".format(
            last_block + 1, core_last_block))
        if core_last_block < last_block + 1:
            return

        zonefiles_in_blocks = proxy.get_zonefiles_by_block(last_block + 1,
                                                           core_last_block)
        if 'error' in zonefiles_in_blocks:
            log.error("Error fetching zonefile info: {}".format(zonefiles_in_blocks))
            return
        core_last_block = min(zonefiles_in_blocks['last_block'],
                              core_last_block)
        zonefiles_info = zonefiles_in_blocks['zonefile_info']
        if len(zonefiles_info) == 0:
            return
        zonefiles_info.sort(key=lambda a: a['block_height'])
        domains, hashes, blockids, txids = map(
            list,
            zip(*[(x['name'], x['zonefile_hash'], x['block_height'], x['txid'])
                  for x in zonefiles_info]))
        zf_dict = {}
        zonefiles_to_fetch_per = 100
        for offset in range(0, len(hashes) // zonefiles_to_fetch_per + 1):
            lower = offset * zonefiles_to_fetch_per
            upper = min(lower + zonefiles_to_fetch_per, len(hashes))
            zf_resp = proxy.get_zonefiles(
                None, hashes[lower:upper], proxy=proxy.get_default_proxy())
            if 'zonefiles' not in zf_resp:
                log.error("Couldn't get zonefiles from proxy {}".format(zf_resp))
                return
            zf_dict.update(zf_resp['zonefiles'])
        if len(zf_dict) == 0:
            return
        could_not_find = []
        zonefiles = []
        for ix, zf_hash in enumerate(hashes):
            if zf_hash not in zf_dict:
                could_not_find.append(ix)
            else:
                zonefiles.append(zf_dict[zf_hash])
        could_not_find.sort(reverse=True)
        for ix in could_not_find:
            del domains[ix]
            del hashes[ix]
            del blockids[ix]
            del txids[ix]

        _build_subdomain_db(domains, zonefiles, self, txids)

        last_block = core_last_block

        self._set_last_seen(last_block)
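Both update() variants page their zonefile-hash lookups in batches of zonefiles_to_fetch_per. The same chunking, written with a stepped range, also avoids the empty trailing batch the '+ 1' form issues when the list length is an exact multiple of the batch size; a standalone sketch:

# Chunked iteration equivalent to the batching loops above; 100 matches
# the zonefiles_to_fetch_per value used in update().
def chunks(items, size=100):
    for lower in range(0, len(items), size):
        yield items[lower:lower + size]

# Usage against the proxy call above (sketch):
# for batch in chunks(hashes):
#     zf_resp = proxy.get_zonefiles(None, batch, proxy=proxy.get_default_proxy())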