Example #1
0
    def update(self, full_refresh=False):
        """Synchronize the local subdomain cache against the core node.

        Fetches the zonefiles announced between the last block we have
        processed and the core node's current block height, resolves their
        contents in batches, and hands them to ``_build_subdomain_db``.

        full_refresh: if True, drop and recreate the cache tables and
        re-index from scratch instead of resuming from ``self.last_seen()``.
        Returns None; persists progress via ``self._set_last_seen``.
        """
        if not is_resolving_subdomains():
            log.warn(
                'Configured not to resolve subdomains, but tried to update subdomain cache anyways...'
            )
            return

        if full_refresh:
            self._drop_tables()
            self._create_tables()
            last_block = 0
            if not constants.BLOCKSTACK_TESTNET:
                # mainnet: no subdomains exist before this block, skip ahead
                last_block = SUBDOMAINS_FIRST_BLOCK
        else:
            last_block = self.last_seen()
            if not constants.BLOCKSTACK_TESTNET:
                last_block = max(last_block, SUBDOMAINS_FIRST_BLOCK)

        core_last_block = proxy.getinfo()['last_block_processed']
        log.debug("Fetching zonefiles in range ({}, {})".format(
            last_block + 1, core_last_block))
        if core_last_block < last_block + 1:
            # nothing new to process
            return

        zonefiles_in_blocks = proxy.get_zonefiles_by_block(
            last_block + 1, core_last_block)
        if 'error' in zonefiles_in_blocks:
            log.error(
                "Error fetching zonefile info: {}".format(zonefiles_in_blocks))
            return
        # The node may not have indexed all the way to core_last_block yet;
        # only advance our checkpoint as far as it actually got.
        core_last_block = min(zonefiles_in_blocks['last_block'],
                              core_last_block)
        zonefiles_info = zonefiles_in_blocks['zonefile_info']
        if len(zonefiles_info) == 0:
            return
        zonefiles_info.sort(key=lambda a: a['block_height'])
        # Unzip into four parallel lists, ordered by block height.
        domains, hashes, blockids, txids = map(
            list,
            zip(*[(x['name'], x['zonefile_hash'], x['block_height'], x['txid'])
                  for x in zonefiles_info]))
        zf_dict = {}
        zonefiles_to_fetch_per = 100
        # BUG FIX: the original computed the batch count as
        # `len(hashes) / zonefiles_to_fetch_per + 1`, which is a float under
        # Python 3 (TypeError inside range()) and issued one spurious
        # empty-slice RPC whenever len(hashes) was an exact multiple of the
        # batch size.  Stepping over the list directly avoids both problems.
        for lower in range(0, len(hashes), zonefiles_to_fetch_per):
            upper = min(lower + zonefiles_to_fetch_per, len(hashes))
            zf_resp = proxy.get_zonefiles(None,
                                          hashes[lower:upper],
                                          proxy=proxy.get_default_proxy())
            if 'zonefiles' not in zf_resp:
                log.error(
                    "Couldn't get zonefiles from proxy {}".format(zf_resp))
                return
            zf_dict.update(zf_resp['zonefiles'])
        if len(zf_dict) == 0:
            return
        # Drop entries whose zonefile could not be fetched, deleting from
        # the highest index down so the parallel lists stay aligned.
        could_not_find = []
        zonefiles = []
        for ix, zf_hash in enumerate(hashes):
            if zf_hash not in zf_dict:
                could_not_find.append(ix)
            else:
                zonefiles.append(zf_dict[zf_hash])
        could_not_find.sort(reverse=True)
        for ix in could_not_find:
            del domains[ix]
            del hashes[ix]
            del blockids[ix]
            del txids[ix]

        _build_subdomain_db(domains, zonefiles, self, txids)

        self._set_last_seen(core_last_block)
    def update(self, full_refresh=False):
        """Synchronize the local subdomain cache against the core node.

        Fetches the zonefiles announced between the last block we have
        processed and the core node's current block height, resolves their
        contents in batches, and hands them to ``_build_subdomain_db``.

        full_refresh: if True, drop and recreate the cache tables and
        re-index from scratch instead of resuming from ``self.last_seen()``.
        Returns None; persists progress via ``self._set_last_seen``.
        """
        if full_refresh:
            self._drop_tables()
            self._create_tables()
            last_block = 0
            if not constants.BLOCKSTACK_TESTNET:
                # mainnet: no subdomains exist before this block, skip ahead
                last_block = SUBDOMAINS_FIRST_BLOCK
        else:
            last_block = self.last_seen()
            if not constants.BLOCKSTACK_TESTNET:
                last_block = max(last_block, SUBDOMAINS_FIRST_BLOCK)

        core_last_block = proxy.getinfo()['last_block_processed']
        log.debug("Fetching zonefiles in range ({}, {})".format(
            last_block + 1, core_last_block))
        if core_last_block < last_block + 1:
            # nothing new to process
            return

        zonefiles_in_blocks = proxy.get_zonefiles_by_block(last_block + 1,
                                                           core_last_block)
        if 'error' in zonefiles_in_blocks:
            log.error("Error fetching zonefile info: {}".format(zonefiles_in_blocks))
            return
        # The node may not have indexed all the way to core_last_block yet;
        # only advance our checkpoint as far as it actually got.
        core_last_block = min(zonefiles_in_blocks['last_block'],
                              core_last_block)
        zonefiles_info = zonefiles_in_blocks['zonefile_info']
        if len(zonefiles_info) == 0:
            return
        zonefiles_info.sort(key=lambda a: a['block_height'])
        # Unzip into four parallel lists, ordered by block height.
        domains, hashes, blockids, txids = map(
            list,
            zip(*[(x['name'], x['zonefile_hash'], x['block_height'], x['txid'])
                  for x in zonefiles_info]))
        zf_dict = {}
        zonefiles_to_fetch_per = 100
        # BUG FIX: the original computed the batch count as
        # `len(hashes)/zonefiles_to_fetch_per + 1`, which is a float under
        # Python 3 (TypeError inside range()) and issued one spurious
        # empty-slice RPC whenever len(hashes) was an exact multiple of the
        # batch size.  Stepping over the list directly avoids both problems.
        for lower in range(0, len(hashes), zonefiles_to_fetch_per):
            upper = min(lower + zonefiles_to_fetch_per, len(hashes))
            zf_resp = proxy.get_zonefiles(
                None, hashes[lower:upper], proxy=proxy.get_default_proxy())
            if 'zonefiles' not in zf_resp:
                log.error("Couldn't get zonefiles from proxy {}".format(zf_resp))
                return
            zf_dict.update(zf_resp['zonefiles'])
        if len(zf_dict) == 0:
            return
        # Drop entries whose zonefile could not be fetched, deleting from
        # the highest index down so the parallel lists stay aligned.
        could_not_find = []
        zonefiles = []
        for ix, zf_hash in enumerate(hashes):
            if zf_hash not in zf_dict:
                could_not_find.append(ix)
            else:
                zonefiles.append(zf_dict[zf_hash])
        could_not_find.sort(reverse=True)
        for ix in could_not_find:
            del domains[ix]
            del hashes[ix]
            del blockids[ix]
            del txids[ix]

        _build_subdomain_db(domains, zonefiles, self, txids)

        self._set_last_seen(core_last_block)