Example #1
def export_receipts_and_logs(
    batch_size,
    transaction_hashes,
    provider_uri,
    max_workers,
    receipts_output,
    logs_output,
):
    """Exports receipts and logs."""
    with smart_open(transaction_hashes, "r") as transaction_hashes_file:
        job = ExportReceiptsJob(
            transaction_hashes_iterable=(
                transaction_hash.strip()
                for transaction_hash in transaction_hashes_file),
            batch_size=batch_size,
            batch_web3_provider=ThreadLocalProxy(
                lambda: get_provider_from_uri(provider_uri, batch=True)),
            max_workers=max_workers,
            item_exporter=receipts_and_logs_item_exporter(
                receipts_output, logs_output),
            export_receipts=receipts_output is not None,
            export_logs=logs_output is not None,
        )

        job.run()
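For orientation, here is a hypothetical invocation of the function above; the file names and provider URI are assumptions, not taken from the source:

export_receipts_and_logs(
    batch_size=100,
    transaction_hashes="transaction_hashes.txt",  # newline-separated hashes, read via smart_open
    provider_uri="https://mainnet.infura.io",     # assumed JSON-RPC endpoint
    max_workers=5,
    receipts_output="receipts.csv",
    logs_output="logs.csv",
)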
Example #2
def test_export_blocks_job(tmpdir, start_block, end_block, batch_size,
                           resource_group, web3_provider_type):
    blocks_output_file = str(tmpdir.join("actual_blocks.csv"))
    transactions_output_file = str(tmpdir.join("actual_transactions.csv"))

    job = ExportBlocksJob(
        start_block=start_block,
        end_block=end_block,
        batch_size=batch_size,
        batch_web3_provider=ThreadLocalProxy(lambda: get_web3_provider(
            web3_provider_type,
            lambda file: read_resource(resource_group, file),
            batch=True,
        )),
        max_workers=5,
        item_exporter=blocks_and_transactions_item_exporter(
            blocks_output_file, transactions_output_file),
        export_blocks=blocks_output_file is not None,
        export_transactions=transactions_output_file is not None,
    )
    job.run()

    compare_lines_ignore_order(
        read_resource(resource_group, "expected_blocks.csv"),
        read_file(blocks_output_file),
    )

    compare_lines_ignore_order(
        read_resource(resource_group, "expected_transactions.csv"),
        read_file(transactions_output_file),
    )
Example #3
def export_blocks_and_transactions(
    start_block,
    end_block,
    batch_size,
    provider_uri,
    max_workers,
    blocks_output,
    transactions_output,
):
    """Exports blocks and transactions."""
    if blocks_output is None and transactions_output is None:
        raise ValueError(
            "Either --blocks-output or --transactions-output options must be provided"
        )

    job = ExportBlocksJob(
        start_block=start_block,
        end_block=end_block,
        batch_size=batch_size,
        batch_web3_provider=ThreadLocalProxy(
            lambda: get_provider_from_uri(provider_uri, batch=True)
        ),
        max_workers=max_workers,
        item_exporter=blocks_and_transactions_item_exporter(
            blocks_output, transactions_output
        ),
        export_blocks=blocks_output is not None,
        export_transactions=transactions_output is not None,
    )
    job.run()
Example #4
def export_transaction_logs(start_block, end_block, provider_uri, max_workers, output_dir, output_format):
    """Exports transaction logs."""

    job = ExportTransactionLogsJob(
        start_block=start_block,
        end_block=end_block,
        iotex_rpc=ThreadLocalProxy(lambda: IotexRpc(provider_uri)),
        max_workers=max_workers,
        item_exporter=IotexItemExporter(output_dir, output_format=output_format),
        batch_size=10
    )
    job.run()
Example #5
def export_ds_blocks(start_block, end_block, provider_uri, max_workers,
                     output_dir, output_format):
    """Exports ds blocks."""

    job = ExportDsBlocksJob(
        start_block=start_block,
        end_block=end_block,
        zilliqa_api=ThreadLocalProxy(lambda: ZilliqaAPI(provider_uri)),
        max_workers=max_workers,
        item_exporter=ZilliqaItemExporter(output_dir,
                                          output_format=output_format),
    )
    job.run()
Example #6
def export(start_block, end_block, provider_uri, max_workers, output_dir,
           output_format):
    """Exports blocks, balance updates, and operations."""

    job = ExportJob(
        start_block=start_block,
        end_block=end_block,
        tezos_rpc=ThreadLocalProxy(lambda: TezosRpc(provider_uri)),
        max_workers=max_workers,
        item_exporter=TezosItemExporter(output_dir,
                                        output_format=output_format),
    )
    job.run()
Example #7
def test_export_transaction_logs_job(tmpdir, start_block, end_block, resource_group, provider_type):
    job = ExportTransactionLogsJob(
        start_block=start_block,
        end_block=end_block,
        iotex_rpc=ThreadLocalProxy(
            lambda: get_iotex_rpc(
                provider_type,
                read_resource_lambda=lambda file: read_resource(resource_group, file))),
        max_workers=5,
        item_exporter=IotexItemExporter(str(tmpdir)),
    )
    job.run()

    compare_lines_ignore_order(
        read_resource(resource_group, 'expected_transaction_logs.json'), read_file(str(tmpdir.join('transaction_logs.json')))
    )
Example #8
def export_blocks(start_block, end_block, provider_uri, testnet, max_workers,
                  batch_size, output_dir, output_format):
    """Exports blocks, actions, receipts, and logs."""

    if testnet:
        set_iotex_utils_context(address_prefix='it')

    job = ExportBlocksJob(
        start_block=start_block,
        end_block=end_block,
        iotex_rpc=ThreadLocalProxy(lambda: IotexRpc(provider_uri)),
        max_workers=max_workers,
        item_exporter=IotexItemExporter(output_dir,
                                        output_format=output_format),
        batch_size=batch_size)
    job.run()
Example #9
def export_partitioned(partitions, output_dir, output_format, provider_uri,
                       max_workers, batch_size):
    for batch_start_block, batch_end_block, partition_dir, *args in partitions:
        # # # start # # #

        start_time = time()

        padded_batch_start_block = str(batch_start_block).zfill(8)
        padded_batch_end_block = str(batch_end_block).zfill(8)
        block_range = '{padded_batch_start_block}-{padded_batch_end_block}'.format(
            padded_batch_start_block=padded_batch_start_block,
            padded_batch_end_block=padded_batch_end_block,
        )

        partition_output_dir = '{output_dir}/blocks{partition_dir}'.format(
            output_dir=output_dir,
            partition_dir=partition_dir,
        )
        os.makedirs(os.path.dirname(partition_output_dir), exist_ok=True)

        logger.info(
            'Exporting blocks {block_range} to {partition_output_dir}'.format(
                block_range=block_range,
                partition_output_dir=partition_output_dir,
            ))

        job = ExportJob(
            start_block=batch_start_block,
            end_block=batch_end_block,
            batch_size=batch_size,
            tezos_rpc=ThreadLocalProxy(lambda: TezosRpc(provider_uri)),
            max_workers=max_workers,
            item_exporter=TezosItemExporter(partition_output_dir,
                                            output_format=output_format),
        )
        job.run()

        # # # finish # # #

        end_time = time()
        time_diff = round(end_time - start_time, 5)
        logger.info(
            'Exporting blocks {block_range} took {time_diff} seconds'.format(
                block_range=block_range,
                time_diff=time_diff,
            ))
Example #10
def test_stream(tmpdir, start_block, end_block, batch_size, resource_group,
                provider_type):
    try:
        os.remove('last_synced_block.txt')
    except OSError:
        pass

    blocks_output_file = str(tmpdir.join('actual_block.json'))
    transactions_output_file = str(tmpdir.join("actual_transactions.json"))
    actions_output_file = str(tmpdir.join("actual_actions.json"))

    streamer_adapter = EosStreamerAdapter(
        eos_rpc=ThreadLocalProxy(
            lambda: get_eos_rpc(provider_type,
                                read_resource_lambda=lambda file:
                                read_resource(resource_group, file))),
        batch_size=batch_size,
        item_exporter=blocks_item_exporter(blocks_output_file,
                                           transactions_output_file,
                                           actions_output_file),
    )
    streamer = Streamer(blockchain_streamer_adapter=streamer_adapter,
                        start_block=start_block,
                        end_block=end_block,
                        retry_errors=False)
    streamer.stream()

    print('=====================')
    print(read_file(blocks_output_file))
    compare_lines_ignore_order(
        read_resource(resource_group, 'expected_blocks.json'),
        read_file(blocks_output_file))

    print('=====================')
    print(read_file(transactions_output_file))
    compare_lines_ignore_order(
        read_resource(resource_group, 'expected_transactions.json'),
        read_file(transactions_output_file))

    print('=====================')
    print(read_file(actions_output_file))
    compare_lines_ignore_order(
        read_resource(resource_group, 'expected_actions.json'),
        read_file(actions_output_file))
Example #11
def export_blocks(start_block, end_block, provider_uri, max_workers,
                  blocks_output, transactions_output, actions_output):
    """Export blocks, transactions and actions."""

    if blocks_output is None and transactions_output is None and actions_output is None:
        raise ValueError(
            'Either --blocks-output or --transactions-output or --actions-output options must be provided'
        )

    job = ExportBlocksJob(
        start_block=start_block,
        end_block=end_block,
        eos_rpc=ThreadLocalProxy(lambda: EosRpc(provider_uri)),
        max_workers=max_workers,
        item_exporter=blocks_item_exporter(blocks_output, transactions_output,
                                           actions_output),
        export_blocks=blocks_output is not None,
        export_transactions=transactions_output is not None,
        export_actions=actions_output is not None)
    job.run()
Example #12
def test_export_blocks_job(tmpdir, start_block, end_block, resource_group,
                           provider_type):
    job = ExportBlocksJob(
        start_block=start_block,
        end_block=end_block,
        iotex_rpc=ThreadLocalProxy(
            lambda: get_iotex_rpc(provider_type,
                                  read_resource_lambda=lambda file:
                                  read_resource(resource_group, file))),
        max_workers=5,
        item_exporter=IotexItemExporter(str(tmpdir)),
    )
    job.run()

    all_files = ['blocks.json', 'actions.json', 'logs.json']

    for file in all_files:
        print(read_file(str(tmpdir.join(file))))
        compare_lines_ignore_order(
            read_resource(resource_group, f'expected_{file}'),
            read_file(str(tmpdir.join(file))))
Example #13
def test_export_tx_blocks_job(tmpdir, start_block, end_block, resource_group, provider_type):
    job = ExportTxBlocksJob(
        start_block=start_block,
        end_block=end_block,
        zilliqa_api=ThreadLocalProxy(
            lambda: get_zilliqa_api(
                provider_type,
                read_resource_lambda=lambda file: read_resource(resource_group, file))),
        max_workers=5,
        item_exporter=ZilliqaItemExporter(str(tmpdir)),
    )
    job.run()

    all_files = ['tx_blocks.json', 'ds_blocks.json', 'transactions.json', 'event_logs.json', 'transitions.json',
                 'exceptions.json']

    for file in all_files:
        print(read_file(str(tmpdir.join(file))))
        compare_lines_ignore_order(
            read_resource(resource_group, f'expected_{file}'), read_file(str(tmpdir.join(file)))
        )
Example #14
def export_tx_blocks(start_block,
                     end_block,
                     provider_uri,
                     max_workers,
                     output_dir,
                     output_format,
                     rate_limit=None):
    """Exports tx blocks."""

    zilliqa_api = ThreadLocalProxy(lambda: ZilliqaAPI(provider_uri))
    if rate_limit is not None and rate_limit > 0:
        zilliqa_api = RateLimitingProxy(zilliqa_api, max_per_second=rate_limit)
    job = ExportTxBlocksJob(
        start_block=start_block,
        end_block=end_block,
        zilliqa_api=zilliqa_api,
        max_workers=max_workers,
        item_exporter=ZilliqaItemExporter(output_dir,
                                          output_format=output_format),
    )
    job.run()
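A hypothetical call exercising the optional rate limiting above; the endpoint and values are assumptions:

export_tx_blocks(
    start_block=0,
    end_block=100,
    provider_uri="https://api.zilliqa.com",  # assumed endpoint
    max_workers=5,
    output_dir="output",
    output_format="json",
    rate_limit=5,  # wraps zilliqa_api in RateLimitingProxy at 5 requests per second
)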
Example #15
def test_export_blocks_job(tmpdir, start_block, end_block, batch_size,
                           resource_group, provider_type):
    blocks_output_file = str(tmpdir.join('actual_block.json'))
    transactions_output_file = str(tmpdir.join("actual_transactions.json"))
    actions_output_file = str(tmpdir.join("actual_actions.json"))

    job = ExportBlocksJob(
        start_block=start_block,
        end_block=end_block,
        batch_size=batch_size,
        eos_rpc=ThreadLocalProxy(
            lambda: get_eos_rpc(provider_type,
                                read_resource_lambda=lambda file:
                                read_resource(resource_group, file))),
        max_workers=5,
        item_exporter=blocks_item_exporter(blocks_output_file,
                                           transactions_output_file,
                                           actions_output_file),
        export_blocks=blocks_output_file is not None,
        export_transactions=transactions_output_file is not None,
        export_actions=actions_output_file is not None)
    job.run()

    print('=====================')
    print(read_file(blocks_output_file))
    compare_lines_ignore_order(
        read_resource(resource_group, 'expected_blocks.json'),
        read_file(blocks_output_file))

    print('=====================')
    print(read_file(transactions_output_file))
    compare_lines_ignore_order(
        read_resource(resource_group, 'expected_transactions.json'),
        read_file(transactions_output_file))

    print('=====================')
    print(read_file(actions_output_file))
    compare_lines_ignore_order(
        read_resource(resource_group, 'expected_actions.json'),
        read_file(actions_output_file))
Example #16
def test_export_job(tmpdir, start_block, end_block, resource_group,
                    provider_type):
    job = ExportJob(
        start_block=start_block,
        end_block=end_block,
        tezos_rpc=ThreadLocalProxy(
            lambda: get_tezos_rpc(provider_type,
                                  read_resource_lambda=lambda file:
                                  read_resource(resource_group, file))),
        max_workers=5,
        item_exporter=TezosItemExporter(str(tmpdir)),
    )
    job.run()

    all_files = ['blocks.json', 'balance_updates.json'] + \
                [f'{operation_kind}_operations.json' for operation_kind in OperationKind.ALL]

    for file in all_files:
        print('=====================')
        print(read_file(str(tmpdir.join(file))))
        compare_lines_ignore_order(
            read_resource(resource_group, f'expected_{file}'),
            read_file(str(tmpdir.join(file))))
Example #17
def stream(last_synced_block_file,
           lag,
           provider_uri,
           output,
           start_block,
           period_seconds=10,
           batch_size=1,
           block_batch_size=1,
           max_workers=5,
           log_file=None,
           pid_file=None):
    """Streams all data types to console or Google Pub/Sub."""
    configure_logging(log_file)
    configure_signals()

    from eosetl.streaming.eos_streamer_adapter import EosStreamerAdapter
    from blockchainetl_common.streaming.streamer import Streamer

    # TODO: Implement fallback mechanism for provider uris instead of picking randomly
    provider_uri = pick_random_provider_uri(provider_uri)
    logging.info('Using ' + provider_uri)

    streamer_adapter = EosStreamerAdapter(
        eos_rpc=ThreadLocalProxy(lambda: EosRpc(provider_uri)),
        item_exporter=get_item_exporter(output),
        batch_size=batch_size,
        max_workers=max_workers)
    streamer = Streamer(
        blockchain_streamer_adapter=streamer_adapter,
        last_synced_block_file=last_synced_block_file,
        lag=lag,
        start_block=start_block,
        period_seconds=period_seconds,
        block_batch_size=block_batch_size,
        pid_file=pid_file,
    )
    streamer.stream()
Example #18
def get_partitions(start, end, partition_batch_size, provider_uri):
    """Yield partitions based on input data type."""
    if is_date_range(start, end):
        start_date = datetime.strptime(start, '%Y-%m-%d').date()
        end_date = datetime.strptime(end, '%Y-%m-%d').date()

        day = timedelta(days=1)

        eos_service = EosBlockRangeService(
            eos_rpc=ThreadLocalProxy(lambda: EosRpc(provider_uri))
        )

        while start_date <= end_date:
            batch_start_block, batch_end_block = eos_service.get_block_range_for_date(start_date)
            partition_dir = '/date={start_date!s}/'.format(start_date=start_date)
            yield batch_start_block, batch_end_block, partition_dir, start_date
            start_date += day

    elif is_block_range(start, end):
        start_block = int(start)
        end_block = int(end)

        for batch_start_block in range(start_block, end_block + 1, partition_batch_size):
            batch_end_block = batch_start_block + partition_batch_size - 1
            if batch_end_block > end_block:
                batch_end_block = end_block

            padded_batch_start_block = str(batch_start_block).zfill(8)
            padded_batch_end_block = str(batch_end_block).zfill(8)
            partition_dir = '/start_block={padded_batch_start_block}/end_block={padded_batch_end_block}'.format(
                padded_batch_start_block=padded_batch_start_block,
                padded_batch_end_block=padded_batch_end_block,
            )
            yield batch_start_block, batch_end_block, partition_dir

    else:
        raise ValueError('start and end must be either block numbers or ISO dates')
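get_partitions yields (start_block, end_block, partition_dir[, date]) tuples that export functions such as export_all in Example #23 unpack; a hypothetical pairing, with the endpoint and batch sizes assumed:

provider_uri = "https://api.eosnewyork.io"  # assumed EOS RPC endpoint
partitions = get_partitions("2019-01-01", "2019-01-02",
                            partition_batch_size=10000, provider_uri=provider_uri)
export_all(partitions, output_dir="output", provider_uri=provider_uri,
           max_workers=5, batch_size=100)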
Example #19
def test_export_receipts_job(
    tmpdir,
    batch_size,
    transaction_hashes,
    output_format,
    resource_group,
    web3_provider_type,
):
    receipts_output_file = str(tmpdir.join("actual_receipts." + output_format))
    logs_output_file = str(tmpdir.join("actual_logs." + output_format))

    job = ExportReceiptsJob(
        transaction_hashes_iterable=transaction_hashes,
        batch_size=batch_size,
        batch_web3_provider=ThreadLocalProxy(lambda: get_web3_provider(
            web3_provider_type,
            lambda file: read_resource(resource_group, file),
            batch=True,
        )),
        max_workers=5,
        item_exporter=receipts_and_logs_item_exporter(receipts_output_file,
                                                      logs_output_file),
        export_receipts=receipts_output_file is not None,
        export_logs=logs_output_file is not None,
    )
    job.run()

    compare_lines_ignore_order(
        read_resource(resource_group, "expected_receipts." + output_format),
        read_file(receipts_output_file),
    )

    compare_lines_ignore_order(
        read_resource(resource_group, "expected_logs." + output_format),
        read_file(logs_output_file),
    )
Example #20
def mock_stream(
    input_directory,
    output,
    start_block,
    end_block,
    entity_types,
    kafka_blocks_topic,
    kafka_transactions_topic,
    kafka_logs_topic,
    kafka_compression_type,
    kafka_schema_registry_url,
    kafka_use_schema_registry,
    batch_size=2,
    max_workers=5,
    log_file=None,
):
    """Streams all data types to console or Google Pub/Sub."""
    # Clear any existing last_synced_block.txt left over from a previous run
    if os.path.exists("last_synced_block.txt"):
        os.remove("last_synced_block.txt")
    configure_logging(log_file)
    configure_signals()
    entity_types = parse_entity_types(entity_types)
    validate_entity_types(entity_types, output)

    from blockchainetl_common.streaming.streamer import Streamer

    from iconetl.streaming.icx_streamer_adapter import IcxStreamerAdapter
    from iconetl.streaming.item_exporter_creator import create_item_exporter
    from tests.iconetl.job.utils import get_web3_provider

    kafka_settings = {
        "topic_map": {
            "block": kafka_blocks_topic,
            "transaction": kafka_transactions_topic,
            "log": kafka_logs_topic,
        },
        "compression_type": kafka_compression_type,
        "enable_schema_registry": kafka_use_schema_registry,
        "schema_registry_url": kafka_schema_registry_url,
    }

    streamer_adapter = IcxStreamerAdapter(
        batch_web3_provider=ThreadLocalProxy(lambda: get_web3_provider(
            provider_type="mock",
            read_resource_lambda=lambda file: read_resource(
                input_directory, file),
            batch=True,
        )),
        item_exporter=create_item_exporter(output, kafka_settings),
        batch_size=batch_size,
        max_workers=max_workers,
        entity_types=entity_types,
    )
    streamer = Streamer(
        blockchain_streamer_adapter=streamer_adapter,
        start_block=start_block,
        end_block=end_block,
        retry_errors=False,
    )
    streamer.stream()
Example #21
def export_all_common(partitions, output_dir, provider_uri, max_workers,
                      batch_size):
    for batch_start_block, batch_end_block, partition_dir in partitions:

        start_time = time()

        padded_batch_start_block = str(batch_start_block).zfill(8)
        padded_batch_end_block = str(batch_end_block).zfill(8)
        block_range = "{padded_batch_start_block}-{padded_batch_end_block}".format(
            padded_batch_start_block=padded_batch_start_block,
            padded_batch_end_block=padded_batch_end_block,
        )
        file_name_suffix = "{padded_batch_start_block}_{padded_batch_end_block}".format(
            padded_batch_start_block=padded_batch_start_block,
            padded_batch_end_block=padded_batch_end_block,
        )

        blocks_output_dir = "{output_dir}/blocks{partition_dir}".format(
            output_dir=output_dir,
            partition_dir=partition_dir,
        )
        os.makedirs(os.path.dirname(blocks_output_dir), exist_ok=True)

        transactions_output_dir = "{output_dir}/transactions{partition_dir}".format(
            output_dir=output_dir,
            partition_dir=partition_dir,
        )
        os.makedirs(os.path.dirname(transactions_output_dir), exist_ok=True)

        blocks_file = "{blocks_output_dir}/blocks_{file_name_suffix}.csv".format(
            blocks_output_dir=blocks_output_dir,
            file_name_suffix=file_name_suffix,
        )
        transactions_file = "{transactions_output_dir}/transactions_{file_name_suffix}.csv".format(
            transactions_output_dir=transactions_output_dir,
            file_name_suffix=file_name_suffix,
        )
        logger.info("Exporting blocks {block_range} to {blocks_file}".format(
            block_range=block_range,
            blocks_file=blocks_file,
        ))
        logger.info(
            "Exporting transactions from blocks {block_range} to {transactions_file}"
            .format(
                block_range=block_range,
                transactions_file=transactions_file,
            ))

        job = ExportBlocksJob(
            start_block=batch_start_block,
            end_block=batch_end_block,
            batch_size=batch_size,
            batch_web3_provider=ThreadLocalProxy(
                lambda: get_provider_from_uri(provider_uri, batch=True)),
            max_workers=max_workers,
            item_exporter=blocks_and_transactions_item_exporter(
                blocks_file, transactions_file),
            export_blocks=blocks_file is not None,
            export_transactions=transactions_file is not None,
        )
        job.run()

        cache_output_dir = "{output_dir}/.tmp{partition_dir}".format(
            output_dir=output_dir,
            partition_dir=partition_dir,
        )
        os.makedirs(os.path.dirname(cache_output_dir), exist_ok=True)

        transaction_hashes_file = "{cache_output_dir}/transaction_hashes_{file_name_suffix}.csv".format(
            cache_output_dir=cache_output_dir,
            file_name_suffix=file_name_suffix,
        )
        logger.info(
            "Extracting hash column from transaction file {transactions_file}".
            format(transactions_file=transactions_file, ))
        extract_csv_column_unique(transactions_file, transaction_hashes_file,
                                  "hash")

        receipts_output_dir = "{output_dir}/receipts{partition_dir}".format(
            output_dir=output_dir,
            partition_dir=partition_dir,
        )
        os.makedirs(os.path.dirname(receipts_output_dir), exist_ok=True)

        logs_output_dir = "{output_dir}/logs{partition_dir}".format(
            output_dir=output_dir,
            partition_dir=partition_dir,
        )
        os.makedirs(os.path.dirname(logs_output_dir), exist_ok=True)

        receipts_file = "{receipts_output_dir}/receipts_{file_name_suffix}.csv".format(
            receipts_output_dir=receipts_output_dir,
            file_name_suffix=file_name_suffix,
        )
        logs_file = "{logs_output_dir}/logs_{file_name_suffix}.csv".format(
            logs_output_dir=logs_output_dir,
            file_name_suffix=file_name_suffix,
        )
        logger.info(
            "Exporting receipts and logs from blocks {block_range} to {receipts_file} and {logs_file}"
            .format(
                block_range=block_range,
                receipts_file=receipts_file,
                logs_file=logs_file,
            ))

        with smart_open(transaction_hashes_file, "r") as transaction_hashes:
            job = ExportReceiptsJob(
                transaction_hashes_iterable=(
                    transaction_hash.strip()
                    for transaction_hash in transaction_hashes),
                batch_size=batch_size,
                batch_web3_provider=ThreadLocalProxy(
                    lambda: get_provider_from_uri(provider_uri, batch=True)),
                max_workers=max_workers,
                item_exporter=receipts_and_logs_item_exporter(
                    receipts_file, logs_file),
                export_receipts=receipts_file is not None,
                export_logs=logs_file is not None,
            )
            job.run()

        shutil.rmtree(os.path.dirname(cache_output_dir))
        end_time = time()
        time_diff = round(end_time - start_time, 5)
        logger.info(
            "Exporting blocks {block_range} took {time_diff} seconds".format(
                block_range=block_range,
                time_diff=time_diff,
            ))
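In short, export_all_common runs three stages per partition: it exports blocks and transactions to CSV, extracts the unique transaction hashes into a temporary .tmp cache, then feeds those hashes to ExportReceiptsJob for receipts and logs before deleting the cache directory.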
Example #22
def test_stream(
    tmpdir,
    start_block,
    end_block,
    batch_size,
    resource_group,
    entity_types,
    provider_type,
):
    try:
        os.remove("last_synced_block.txt")
    except OSError:
        pass

    blocks_output_file = str(tmpdir.join("actual_blocks.json"))
    transactions_output_file = str(tmpdir.join("actual_transactions.json"))
    logs_output_file = str(tmpdir.join("actual_logs.json"))

    streamer_adapter = IcxStreamerAdapter(
        batch_web3_provider=ThreadLocalProxy(
            lambda: get_web3_provider(
                provider_type,
                read_resource_lambda=lambda file: read_resource(resource_group, file),
                batch=True,
            )
        ),
        batch_size=batch_size,
        item_exporter=CompositeItemExporter(
            filename_mapping={
                "block": blocks_output_file,
                "transaction": transactions_output_file,
                "log": logs_output_file,
            }
        ),
        entity_types=entity_types,
    )
    streamer = Streamer(
        blockchain_streamer_adapter=streamer_adapter,
        start_block=start_block,
        end_block=end_block,
        retry_errors=False,
    )
    streamer.stream()

    if "block" in entity_types:
        print("=====================")
        print(read_file(blocks_output_file))
        compare_lines_ignore_order(
            read_resource(resource_group, "expected_blocks.json"),
            read_file(blocks_output_file),
        )

    if "transaction" in entity_types:
        print("=====================")
        print(read_file(transactions_output_file))
        compare_lines_ignore_order(
            read_resource(resource_group, "expected_transactions.json"),
            read_file(transactions_output_file),
        )

    if "log" in entity_types:
        print("=====================")
        print(read_file(logs_output_file))
        compare_lines_ignore_order(
            read_resource(resource_group, "expected_logs.json"),
            read_file(logs_output_file),
        )
Example #23
def export_all(partitions, output_dir, provider_uri, max_workers, batch_size):
    for batch_start_block, batch_end_block, partition_dir, *args in partitions:
        # # # start # # #

        start_time = time()

        padded_batch_start_block = str(batch_start_block).zfill(8)
        padded_batch_end_block = str(batch_end_block).zfill(8)
        block_range = '{padded_batch_start_block}-{padded_batch_end_block}'.format(
            padded_batch_start_block=padded_batch_start_block,
            padded_batch_end_block=padded_batch_end_block,
        )
        file_name_suffix = '{padded_batch_start_block}_{padded_batch_end_block}'.format(
            padded_batch_start_block=padded_batch_start_block,
            padded_batch_end_block=padded_batch_end_block,
        )

        # # # blocks_and_transactions # # #

        blocks_output_dir = '{output_dir}/blocks{partition_dir}'.format(
            output_dir=output_dir,
            partition_dir=partition_dir,
        )
        os.makedirs(os.path.dirname(blocks_output_dir), exist_ok=True)

        transactions_output_dir = '{output_dir}/transactions{partition_dir}'.format(
            output_dir=output_dir,
            partition_dir=partition_dir,
        )
        os.makedirs(os.path.dirname(transactions_output_dir), exist_ok=True)

        actions_output_dir = '{output_dir}/actions{partition_dir}'.format(
            output_dir=output_dir,
            partition_dir=partition_dir,
        )
        os.makedirs(os.path.dirname(actions_output_dir), exist_ok=True)

        blocks_file = '{blocks_output_dir}/blocks_{file_name_suffix}.json'.format(
            blocks_output_dir=blocks_output_dir,
            file_name_suffix=file_name_suffix,
        )
        transactions_file = '{transactions_output_dir}/transactions_{file_name_suffix}.json'.format(
            transactions_output_dir=transactions_output_dir,
            file_name_suffix=file_name_suffix,
        )
        actions_file = '{actions_output_dir}/actions_{file_name_suffix}.json'.format(
            actions_output_dir=actions_output_dir,
            file_name_suffix=file_name_suffix,
        )
        logger.info('Exporting blocks {block_range} to {blocks_file}'.format(
            block_range=block_range,
            blocks_file=blocks_file,
        ))
        logger.info('Exporting transactions from blocks {block_range} to {transactions_file}'.format(
            block_range=block_range,
            transactions_file=transactions_file,
        ))
        logger.info('Exporting actions from blocks {block_range} to {actions_file}'.format(
            block_range=block_range,
            actions_file=actions_file,
        ))

        job = ExportBlocksJob(
            start_block=batch_start_block,
            end_block=batch_end_block,
            batch_size=batch_size,
            eos_rpc=ThreadLocalProxy(lambda: EosRpc(provider_uri)),
            max_workers=max_workers,
            item_exporter=blocks_item_exporter(blocks_file, transactions_file, actions_file),
            export_blocks=blocks_file is not None,
            export_transactions=transactions_file is not None)
        job.run()

        # # # finish # # #

        end_time = time()
        time_diff = round(end_time - start_time, 5)
        logger.info('Exporting blocks {block_range} took {time_diff} seconds'.format(
            block_range=block_range,
            time_diff=time_diff,
        ))
Example #24
def test_stream(tmpdir, start_block, end_block, batch_size, resource_group,
                entity_types, provider_type):
    try:
        os.remove('last_synced_block.txt')
    except OSError:
        pass

    blocks_output_file = str(tmpdir.join('actual_blocks.json'))
    actions_output_file = str(tmpdir.join('actual_actions.json'))
    logs_output_file = str(tmpdir.join('actual_logs.json'))
    transaction_logs_output_file = str(
        tmpdir.join('actual_transaction_logs.json'))

    streamer_adapter = IotexStreamerAdapter(
        iotex_rpc=ThreadLocalProxy(
            lambda: get_iotex_rpc(provider_type,
                                  read_resource_lambda=lambda file:
                                  read_resource(resource_group, file))),
        batch_size=batch_size,
        item_exporter=CompositeItemExporter(
            filename_mapping={
                'block': blocks_output_file,
                'action': actions_output_file,
                'log': logs_output_file,
                'transaction_log': transaction_logs_output_file,
            }),
        entity_types=entity_types,
    )
    streamer = Streamer(blockchain_streamer_adapter=streamer_adapter,
                        start_block=start_block,
                        end_block=end_block,
                        retry_errors=False)
    streamer.stream()

    if EntityType.BLOCK in entity_types:
        print('=====================')
        print(read_file(blocks_output_file))
        compare_lines_ignore_order(
            read_resource(resource_group, 'expected_blocks.json'),
            read_file(blocks_output_file))

    if EntityType.ACTION in entity_types:
        print('=====================')
        print(read_file(actions_output_file))
        compare_lines_ignore_order(
            read_resource(resource_group, 'expected_actions.json'),
            read_file(actions_output_file))

    if EntityType.LOG in entity_types:
        print('=====================')
        print(read_file(logs_output_file))
        compare_lines_ignore_order(
            read_resource(resource_group, 'expected_logs.json'),
            read_file(logs_output_file))

    if EntityType.TRANSACTION_LOG in entity_types:
        print('=====================')
        print(read_file(transaction_logs_output_file))
        compare_lines_ignore_order(
            read_resource(resource_group, 'expected_transaction_logs.json'),
            read_file(transaction_logs_output_file))