Example #1
def fetch_transactions(args: dict, web3: Web3):
    # Collect transaction hashes from the input CSV files; `smart_open`
    # is the file-opening helper used in these examples for plain or
    # gzipped files.
    tx_hashes = []
    for filename in args["files"]:
        with smart_open(filename) as fin:
            for tx in csv.DictReader(fin):
                tx_hashes.append(tx["hash"])

    tx_tracer = TransactionTracer(web3)
    done = set()
    with smart_open(args["output"], "w") as fout:
        for i, tx_hash in enumerate(tx_hashes):
            if i % 10 == 0:
                logger.info("progress: %s/%s", i, len(tx_hashes))
            # Skip hashes that were already fetched and written.
            if tx_hash in done:
                continue
            try:
                tx = dict(web3.eth.getTransaction(tx_hash))
                if args["include_receipt"]:
                    tx["receipt"] = web3.eth.getTransactionReceipt(tx_hash)
                if args["include_traces"]:
                    tx["traces"] = tx_tracer.trace_transaction(tx_hash)
                # One JSON object per line (JSONL output).
                json.dump(tx, fout, cls=EthJSONEncoder)
                print(file=fout)
                done.add(tx_hash)
            except Exception as ex:  # pylint: disable=broad-except
                logger.warning("failed to trace %s: %s", tx_hash, ex)
                continue
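
A minimal invocation sketch, assuming a local JSON-RPC endpoint and the argument keys the function reads ("files", "output", "include_receipt", "include_traces"); the endpoint URL and file names are illustrative:

from web3 import Web3

web3 = Web3(Web3.HTTPProvider("http://localhost:8545"))  # illustrative endpoint
fetch_transactions({
    "files": ["transactions.csv.gz"],  # CSV files with a "hash" column
    "output": "transactions.jsonl",    # one JSON object per line
    "include_receipt": True,
    "include_traces": False,
}, web3)
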
Example #2

    def collect_results(self,
                        func_name,
                        start_block,
                        end_block=None,
                        block_interval=DEFAULT_BLOCK_INTERVAL,
                        contract_args=None):
        # The calls are I/O-bound RPC requests, so oversubscribe threads
        # well beyond the CPU count.
        max_workers = multiprocessing.cpu_count() * 5
        if end_block is None:
            end_block = self.contract.web3.eth.blockNumber
        if start_block is None:
            start_block = end_block
        if contract_args is None:
            contract_args = []
        contract_args = [self.transform_arg(arg) for arg in contract_args]

        def run_task(block):
            try:
                return self.call_func(func_name, block, contract_args)
            except Exception as ex:  # pylint: disable=broad-except
                logger.error("failed to fetch block %s: %s", block, ex)
                return None  # failed blocks are filtered out below

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            blocks = range(start_block, end_block + 1, block_interval)
            total_count = len(blocks)
            # `executor.map` yields results in input order, so zipping with
            # `blocks` keeps each result paired with its block number.
            results = executor.map(run_task, blocks)
            for i, (block, result) in enumerate(zip(blocks, results)):
                if i % 10 == 0 and total_count > 10:
                    logger.info("progress: %s/%s (%.2f%%)", i, total_count,
                                i / total_count * 100)
                if result is not None:
                    yield (block, result)
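
A consumption sketch for the generator above, assuming `fetcher` is an instance of the surrounding class and wraps a contract with a zero-argument view function; the function name and block numbers are illustrative:

for block, value in fetcher.collect_results(
        "totalSupply", start_block=10_000_000, end_block=10_001_000,
        block_interval=100):
    print(block, value)
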
Example #3
 def __next__(self) -> Block:
     # Propagates StopIteration from the underlying block-number iterator.
     block_number = next(self._blocks_iter)
     self._processed_count += 1
     # `processed_count` is presumably a read-only property over
     # `_processed_count`; see the sketch after this example.
     if self.log_interval and self.processed_count % self.log_interval == 0:
         logger.info("%s/%s", self.processed_count, self.blocks_count)
     block = self.web3.eth.getBlock(block_number)
     return Block(block)
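
A minimal sketch of the class this method could belong to, assuming the attribute names used above (`_blocks_iter`, `blocks_count`, `log_interval`) and that `processed_count` is a property over `_processed_count`; the class shape is an assumption, not the project's actual code:

class BlockIterator:
    def __init__(self, web3, block_numbers, log_interval=100):
        self.web3 = web3
        self._blocks_iter = iter(block_numbers)  # assumed backing iterator
        self.blocks_count = len(block_numbers)
        self.log_interval = log_interval
        self._processed_count = 0

    @property
    def processed_count(self):
        return self._processed_count

    def __iter__(self):
        return self
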
Example #4
 def __next__(self) -> Block:
     if self.current_block > self.end_block:
         raise StopIteration
     # `processed_count` is presumably derived from `current_block`
     # (it is never incremented in this method).
     if self.log_interval and self.processed_count % self.log_interval == 0:
         logger.info("%s/%s", self.processed_count, self.blocks_count)
     block = self.web3.eth.getBlock(self.current_block)
     self.current_block += 1
     return Block(block)
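
Either variant is consumed like any iterator; a usage sketch with a hypothetical `BlockRangeIterator` built over an inclusive block range:

for block in BlockRangeIterator(web3, start_block=10_000_000,
                                end_block=10_000_100, log_interval=10):
    print(block.number)  # `Block` is assumed to expose the raw block fields
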
Example #5
 def log_balance(self, counter: int, f, block_number: int, balance: int):
     # Only record balances inside the configured [start, end] block window.
     if self.start is not None and self.start > block_number:
         return
     if self.end is not None and self.end < block_number:
         return
     logger.info("Blocks: %i", counter)
     # Append one JSON object per line (JSONL output).
     f.write(
         json.dumps({
             'blockNumber': block_number,
             'balance': balance
         }) + "\n")
Example #6

 def fetch_contract_transactions(self, address, internal=False):
     logger.debug("getting transactions (internal=%s) for %s", internal,
                  address)
     count = 0
     for page in range(1, MAX_TRANSACTIONS // TRANSACTIONS_PER_PAGE + 1):
         logger.debug("requesting page %d for %s", page, address)
         returned_results = self._make_transactions_request(
             address, page, internal=internal)
         if not returned_results:
             break
         yield from returned_results
         count += len(returned_results)
     else:
         # This `else` runs only when the loop finished without `break`,
         # i.e. every page was full and more transactions may remain.
         logger.warning(
             "more than %s transactions for %s, fetched first %s transactions",
             MAX_TRANSACTIONS, address, count)
     logger.info("fetched %s transactions (internal=%s) for %s", count,
                 internal, address)
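
A consumption sketch, assuming `fetcher` is an instance of the surrounding class (the constructor name is hypothetical) and an illustrative contract address:

fetcher = EtherscanFetcher()  # hypothetical constructor
dai = "0x6B175474E89094C44Da98b954EedeAC495271d0F"  # illustrative address
for transaction in fetcher.fetch_contract_transactions(dai, internal=False):
    print(transaction["hash"])
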
Example #7
    def fetch_events(self,
                     start_block: int,
                     end_block: Optional[int] = None) -> Iterator[LogReceipt]:
        if end_block is None:
            end_block = self.contract.web3.eth.blockNumber

        block_count = end_block - start_block + 1
        # Fetch in fixed-size batches: providers typically cap the block
        # range a single `eth_getLogs` request may span.
        granularity = self.BLOCK_GRANULARITIES[0]

        for i in range(math.ceil(block_count / granularity)):
            logger.info(
                "%s progress: %s/%s",
                self.contract.address,
                i * granularity,
                block_count,
            )
            batch_start_block = start_block + i * granularity
            batch_end_block = min(batch_start_block + granularity - 1,
                                  end_block)
            yield from self._fetch_batch(batch_start_block, batch_end_block)
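
As a worked example: with start_block=100, end_block=550 and a granularity of 200, block_count is 451 and the loop issues ceil(451 / 200) = 3 batches covering [100, 299], [300, 499], and [500, 550], the last batch clamped to end_block by the min().
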
Example #8
 def fetch_all_events(self, fetch_tasks: List[FetchTask],
                      output_directory: str):
     with ThreadPoolExecutor() as executor:
         filepaths = [
             path.join(output_directory, task.display_name) + ".jsonl.gz"
             for task in fetch_tasks
         ]
         # Map each future back to its task so outcomes can be attributed.
         futures = {
             executor.submit(self.fetch_and_persist_events, task, output):
             task
             for task, output in zip(fetch_tasks, filepaths)
         }
         for future in as_completed(futures):
             task = futures[future]
             ex = future.exception()
             if ex:
                 logger.error("failed to process %s (%s): %s", task.name,
                              task.address, ex)
             else:
                 logger.info("finished processing %s (%s)", task.name,
                             task.address)
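
An invocation sketch, assuming `FetchTask` exposes at least the `name`, `display_name`, and `address` fields read above; the constructor call and values are illustrative:

tasks = [
    FetchTask(name="Transfer", display_name="dai-transfer",
              address="0x6B175474E89094C44Da98b954EedeAC495271d0F"),
]
fetcher.fetch_all_events(tasks, output_directory="./events")
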
Example #9
 def __iter__(self):
     logger.info("processing %s blocks", self.blocks_count)
     return self