def generate_go_ethereum_fixture(destination_dir):
    with contextlib.ExitStack() as stack:
        datadir = stack.enter_context(common.tempdir())

        keystore_dir = os.path.join(datadir, 'keystore')
        common.ensure_path_exists(keystore_dir)
        keyfile_path = os.path.join(keystore_dir, common.KEYFILE_FILENAME)
        with open(keyfile_path, 'w') as keyfile:
            keyfile.write(common.KEYFILE_DATA)

        genesis_file_path = os.path.join(datadir, 'genesis.json')
        with open(genesis_file_path, 'w') as genesis_file:
            genesis_file.write(json.dumps(common.GENESIS_DATA))

        geth_ipc_path_dir = stack.enter_context(common.tempdir())
        geth_ipc_path = os.path.join(geth_ipc_path_dir, 'geth.ipc')

        geth_port = get_open_port()
        geth_binary = common.get_geth_binary()

        with get_geth_process(
                geth_binary=geth_binary,
                datadir=datadir,
                genesis_file_path=genesis_file_path,
                geth_ipc_path=geth_ipc_path,
                geth_port=geth_port):

            common.wait_for_socket(geth_ipc_path)
            w3 = Web3(Web3.IPCProvider(geth_ipc_path))
            chain_data = setup_chain_state(w3)
            # close geth by exiting context
            # must be closed before copying data dir
            verify_chain_state(w3, chain_data)

        # verify that chain state is still valid after closing
        # and re-opening geth
        with get_geth_process(
                geth_binary=geth_binary,
                datadir=datadir,
                genesis_file_path=genesis_file_path,
                geth_ipc_path=geth_ipc_path,
                geth_port=geth_port):

            common.wait_for_socket(geth_ipc_path)
            w3 = Web3(Web3.IPCProvider(geth_ipc_path))
            verify_chain_state(w3, chain_data)

        static_data = {
            'raw_txn_account': common.RAW_TXN_ACCOUNT,
            'keyfile_pw': common.KEYFILE_PW,
        }
        config = merge(chain_data, static_data)
        pprint.pprint(config)
        write_config_json(config, datadir)

        shutil.make_archive(destination_dir, 'zip', datadir)
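# generate_go_ethereum_fixture above calls a bare get_open_port() helper that
# is not shown in this section. A minimal sketch, assuming the usual
# "bind to port 0" trick (illustrative only, not necessarily the project's
# actual implementation):
import socket


def get_open_port():
    # Ask the OS for an ephemeral port, then release it for the caller to use.
    # There is an inherent race between close() and reuse, which is acceptable
    # for spinning up test processes.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('127.0.0.1', 0))
    port = sock.getsockname()[1]
    sock.close()
    return str(port)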
def test_happy_run_elastic_fault_tolerant(self):
    if skip_lightning_tests:
        self.skipTest(
            'Spark PyTorch Lightning tests conflict with Tensorflow 2.5.x: '
            'https://github.com/horovod/horovod/pull/3263')
    if not gloo_built():
        self.skipTest("Gloo is not available")

    with spark_session('test_happy_run_elastic_fault_tolerant', max_failures=3):
        with tempdir() as dir:
            # these files make training function fail in given rank, epoch and batch
            with open(os.path.sep.join([dir, 'rank_1_epoch_2_batch_4_fail']), 'w'), \
                    open(os.path.sep.join([dir, 'rank_0_epoch_3_batch_1_fail']), 'w'), \
                    open(os.path.sep.join([dir, 'rank_1_epoch_4_batch_2_fail']), 'w'):
                pass
            res = horovod.spark.run_elastic(fn, args=(2, 5, 5, dir),
                                            env={'HOROVOD_LOG_LEVEL': 'DEBUG'},
                                            num_proc=2, min_num_proc=2, max_num_proc=2,
                                            start_timeout=5, verbose=2)
            self.assertListEqual([([0, 4, 0, 4, 1, 4, 0, 4], 0),
                                  ([0, 4, 0, 4, 1, 4, 0, 4], 1)], res)
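# The elastic tests above drive failures through marker files named
# rank_<rank>_epoch_<epoch>_batch_<batch>_fail. The real training function fn
# is defined elsewhere in the suite; a plausible sketch of the marker check it
# would perform (the helper name maybe_fail is hypothetical):
def maybe_fail(dir, rank, epoch, batch):
    marker = os.path.join(dir, 'rank_{}_epoch_{}_batch_{}_fail'.format(rank, epoch, batch))
    if os.path.exists(marker):
        # remove the marker so the restarted worker does not fail again
        os.remove(marker)
        raise Exception('training failure (rank {}, epoch {}, batch {})'.format(rank, epoch, batch))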
def test_model_checkpoint_callback(self):
    from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint

    with spark_session('test_fit_model') as spark:
        df = create_noisy_xor_data(spark)
        model = create_xor_model()

        with tempdir() as dir:
            checkpoint_callback = ModelCheckpoint(dirpath=dir)
            callbacks = [checkpoint_callback]

            with local_store() as store:
                torch_estimator = hvd_spark.TorchEstimator(
                    num_proc=2,
                    store=store,
                    model=model,
                    input_shapes=[[-1, 2]],
                    feature_cols=['features'],
                    label_cols=['y'],
                    validation=0.2,
                    batch_size=4,
                    epochs=2,
                    verbose=2,
                    callbacks=callbacks)

                torch_model = torch_estimator.fit(df)

                # TODO: Find a way to pass log metrics from remote, and assert based on the logger.
                trained_model = torch_model.getModel()
                pred = trained_model(torch.ones([1, 2], dtype=torch.int32))
                assert len(pred) == 1
                assert pred.dtype == torch.float32
def test_model_override_trainer_args(self):
    if skip_lightning_tests:
        self.skipTest(
            'Spark PyTorch Lightning tests conflict with Tensorflow 2.5.x: '
            'https://github.com/horovod/horovod/pull/3263')

    from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint

    with spark_session('test_fit_model') as spark:
        df = create_noisy_xor_data(spark)
        model = create_xor_model()

        with tempdir() as dir:
            with local_store() as store:
                torch_estimator = hvd_spark.TorchEstimator(
                    num_proc=2,
                    store=store,
                    model=model,
                    input_shapes=[[-1, 2]],
                    feature_cols=['features'],
                    label_cols=['y'],
                    validation=0.2,
                    batch_size=4,
                    epochs=2,
                    verbose=2,
                    trainer_args={'stochastic_weight_avg': True})

                torch_model = torch_estimator.fit(df)

                # TODO: Find a way to pass log metrics from remote, and assert based on the logger.
                trained_model = torch_model.getModel()
                pred = trained_model(torch.ones([1, 2], dtype=torch.int32))
                assert len(pred) == 1
                assert pred.dtype == torch.float32
def test_spark_task_service_env(self):
    key = secret.make_secret_key()
    service_env = dict([(key, '{} value'.format(key))
                        for key in SparkTaskService.SERVICE_ENV_KEYS])
    service_env.update({"other": "value"})
    with os_environ(service_env):
        service = SparkTaskService(1, key, None)
        client = SparkTaskClient(1, service.addresses(), key, 3)

        with tempdir() as d:
            file = '{}/env'.format(d)
            command = "env | grep -v '^PWD=' > {}".format(file)
            command_env = {"test": "value"}

            try:
                client.run_command(command, command_env)
                client.wait_for_command_termination()
            finally:
                service.shutdown()

            with open(file) as f:
                env = sorted([line.strip() for line in f.readlines()])
                expected = [
                    'HADOOP_TOKEN_FILE_LOCATION=HADOOP_TOKEN_FILE_LOCATION value',
                    'test=value'
                ]
                self.assertEqual(env, expected)
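# test_spark_task_service_env relies on an os_environ() helper. A minimal
# sketch, assuming it temporarily swaps in the given mapping as os.environ
# for the duration of the block (illustrative; the project's real helper
# may differ):
import contextlib
import os


@contextlib.contextmanager
def os_environ(env):
    # Replace the process-visible environment mapping, restore it on exit.
    # Note this swaps the mapping object seen by in-process readers of
    # os.environ; it does not call putenv for already-spawned children.
    old = os.environ
    try:
        os.environ = env
        yield
    finally:
        os.environ = old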
def test_gen_pipeline_with_non_code_changes(self):
    with tempdir() as dir:
        tmp_gen_pipeline_sh = os.path.join(dir, 'gen-pipeline.sh')
        copy(GEN_PIPELINE_FNAME, tmp_gen_pipeline_sh)

        with open(os.path.join(dir, 'get_changed_code_files.py'), 'w') as py:
            py.write("pass")

        gen_pipeline_env = dict(BUILDKITE_PIPELINE_SLUG='SLUG', BUILDKITE_BRANCH='BRANCH')
        exit_code, actual_pipeline, gen_pipeline_log = self._run(tmp_gen_pipeline_sh, gen_pipeline_env)

        self.assertEqual(0, exit_code)
        self.assertEqual('', gen_pipeline_log)
        self.assertEqual("steps:\n"
                         "- label: ':book: Build Docs'\n"
                         "  command: 'cd /workdir/docs && pip install -r requirements.txt && make html'\n"
                         "  plugins:\n"
                         "  - docker#v3.1.0:\n"
                         "      image: 'python:3.7'\n"
                         "  timeout_in_minutes: 5\n"
                         "  retry:\n"
                         "    automatic: true\n"
                         "  agents:\n"
                         "    queue: cpu\n"
                         "- wait\n"
                         "- wait\n"
                         "- wait\n",
                         actual_pipeline)
def test_happy_run_elastic_fault_tolerant_fails(self):
    self.skipTest('elastic horovod does not support shutdown from the spark driver '
                  'while elastic driver is waiting for hosts to come up')

    if not gloo_built():
        self.skipTest("Gloo is not available")

    with spark_session('test_happy_run_elastic_fault_tolerant_fails', max_failures=2):
        with tempdir() as dir:
            # these files make training function fail in given rank, epoch and batch
            # we have as many failures as Spark has max_failures (per task / index)
            with open(os.path.sep.join([dir, 'rank_1_epoch_2_batch_4_fail']), 'w'), \
                    open(os.path.sep.join([dir, 'rank_1_epoch_3_batch_1_fail']), 'w'):
                pass
            res = horovod.spark.run_elastic(fn, args=(2, 5, 5, dir),
                                            env={'HOROVOD_LOG_LEVEL': 'DEBUG'},
                                            num_proc=2, min_num_proc=2, max_num_proc=2,
                                            start_timeout=5, verbose=2)
            self.assertListEqual([([0, 4, 0, 4, 1, 4, 0, 4], 0),
                                  ([0, 4, 0, 4, 1, 4, 0, 4], 1)], res)
def single_tiff_setup(n_images, fmt='foo-{:05}.tif'):
    with tempdir() as d:
        data = np.ones((512, 512), dtype=np.float32)

        for i in range(n_images):
            tifffile.imsave(d.path(fmt.format(i)), data)

        yield d
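# single_tiff_setup is a generator, so a caller has to drive it. One plausible
# way to consume it is to wrap it with contextlib.contextmanager (a sketch,
# not taken from the original suite; test_single_tiff_setup_example is
# hypothetical):
from contextlib import contextmanager

single_tiff_ctx = contextmanager(single_tiff_setup)


def test_single_tiff_setup_example():
    with single_tiff_ctx(3) as d:
        # three files foo-00000.tif .. foo-00002.tif now exist under d
        assert tifffile.imread(d.path('foo-00000.tif')).shape == (512, 512)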
def test_gen_pipeline_with_empty_changes(self):
    with tempdir() as dir:
        tmp_gen_pipeline_sh = os.path.join(dir, 'gen-pipeline.sh')
        copy('../.buildkite/gen-pipeline.sh', tmp_gen_pipeline_sh)

        with open(os.path.join(dir, 'get_commit_files.py'), 'w') as py:
            py.write("pass")

        self.do_test_gen_full_pipeline(tmp_gen_pipeline_sh)
def test_gen_pipeline_on_default_branch(self):
    with tempdir() as dir:
        tmp_gen_pipeline_sh = os.path.join(dir, 'gen-pipeline.sh')
        copy('../.buildkite/gen-pipeline.sh', tmp_gen_pipeline_sh)

        with open(os.path.join(dir, 'get_commit_files.py'), 'w') as py:
            py.write("print('.github/new_file')")

        env = dict(BUILDKITE_BRANCH='default', BUILDKITE_PIPELINE_DEFAULT_BRANCH='default')
        self.do_test_gen_full_pipeline(tmp_gen_pipeline_sh, env)
def test_get_spark_df_input_files(spark):
    with tempdir() as d:
        pq_dir = os.path.join(d, 'test_spark_df_output')
        with spark_session('test_get_spark_df_input_files') as spark:
            spark.range(100).repartition(4).write.parquet(pq_dir)

            pq_files = _get_spark_df_saved_file_list(pq_dir)
            pq_files = sorted(pq_files)
            assert len(pq_files) == 4

            for i in range(4):
                assert pq_files[i].startswith('part-0000' + str(i))
def test_gen_pipeline_with_failing_py(self):
    with tempdir() as dir:
        tmp_gen_pipeline_sh = os.path.join(dir, 'gen-pipeline.sh')
        copy(GEN_PIPELINE_FNAME, tmp_gen_pipeline_sh)

        with open(os.path.join(dir, 'get_changed_code_files.py'), 'w') as py:
            py.write('import sys\n')
            py.write('sys.exit(1)')

        self.do_test_gen_full_pipeline(tmp_gen_pipeline_sh)
def test_memory_in():
    with tempdir() as d:
        from ufo import MemoryIn, Write

        ref = random.astype(np.float32)
        read = MemoryIn(pointer=ref.__array_interface__['data'][0],
                        number=1, width=ref.shape[1], height=ref.shape[0])
        write = Write(filename=d.path('foo.tif'))
        write(read()).run().join()

        result = tifffile.imread(d.path('foo.tif'))
        assert np.all(ref == result)
def test_gen_pipeline_on_default_branch(self):
    with tempdir() as dir:
        tmp_gen_pipeline_sh = os.path.join(dir, 'gen-pipeline.sh')
        copy(GEN_PIPELINE_FNAME, tmp_gen_pipeline_sh)

        with open(os.path.join(dir, 'get_changed_code_files.py'), 'w') as py:
            py.write("pass")

        env = dict(BUILDKITE_BRANCH='default', BUILDKITE_PIPELINE_DEFAULT_BRANCH='default')
        self.do_test_gen_full_pipeline(tmp_gen_pipeline_sh, env)
def test_task_count():
    from ufo import DummyData, Write, Average

    with tempdir() as d:
        generate = DummyData(number=5, width=512, height=512)
        write = Write(filename=d.path('foo-%i.tif'))
        average = Average()
        write(average(generate())).run().join()

        assert generate.task.props.num_processed == 0
        assert average.task.props.num_processed == 5
        assert write.task.props.num_processed == 1
def test_memory_out():
    with tempdir() as d:
        from ufo import MemoryOut, Read

        ref = random.astype(np.float32)
        out = np.zeros_like(ref).astype(np.float32)
        tifffile.imsave(d.path('foo.tif'), ref)

        read = Read(path=d.path('foo.tif'))
        write = MemoryOut(pointer=out.__array_interface__['data'][0],
                          max_size=ref.nbytes)
        write(read()).run().join()

        assert np.all(out == ref)
def test_read_multi_tiffs():
    from ufo import Read, Null

    with tempdir() as d:
        n_images = 32
        data = np.ones((512, 512, n_images))
        tifffile.imsave(d.path('foo.tif'), data)

        read = Read(path=d.path('foo.tif'))
        null = Null()
        null(read()).run().join()

        assert null.task.props.num_processed == n_images
def generate_go_ethereum_fixture(destination_dir):
    with contextlib.ExitStack() as stack:
        datadir = stack.enter_context(common.tempdir())

        keystore_dir = os.path.join(datadir, 'keystore')
        common.ensure_path_exists(keystore_dir)
        keyfile_path = os.path.join(keystore_dir, common.KEYFILE_FILENAME)
        with open(keyfile_path, 'w') as keyfile:
            keyfile.write(common.KEYFILE_DATA)

        genesis_file_path = os.path.join(datadir, 'genesis.json')
        with open(genesis_file_path, 'w') as genesis_file:
            genesis_file.write(json.dumps(common.GENESIS_DATA))

        geth_ipc_path_dir = stack.enter_context(common.tempdir())
        geth_ipc_path = os.path.join(geth_ipc_path_dir, 'geth.ipc')

        geth_port = get_open_port()
        geth_binary = common.get_geth_binary()

        geth_proc = stack.enter_context(common.get_geth_process(  # noqa: F841
            geth_binary=geth_binary,
            datadir=datadir,
            genesis_file_path=genesis_file_path,
            ipc_path=geth_ipc_path,
            port=geth_port,
            networkid=str(common.GENESIS_DATA['config']['chainId'])
        ))

        common.wait_for_socket(geth_ipc_path)
        web3 = Web3(Web3.IPCProvider(geth_ipc_path))

        chain_data = setup_chain_state(web3)

        static_data = {
            'raw_txn_account': common.RAW_TXN_ACCOUNT,
            'keyfile_pw': common.KEYFILE_PW,
        }
        pprint.pprint(merge(chain_data, static_data))

        shutil.copytree(datadir, destination_dir)
def test_gen_pipeline_with_code_changes(self):
    with tempdir() as dir:
        tmp_gen_pipeline_sh = os.path.join(dir, 'gen-pipeline.sh')
        copy('../.buildkite/gen-pipeline.sh', tmp_gen_pipeline_sh)

        for filename in ['.buildkite/gen-pipeline.sh',
                         'cmake/file',
                         'examples/file',
                         'horovod/file',
                         'test/file',
                         'Dockerfile.cpu',
                         '']:
            with open(os.path.join(dir, 'get_commit_files.py'), 'w') as py:
                py.write("print('{}')".format(filename))

            self.do_test_gen_full_pipeline(tmp_gen_pipeline_sh)
def test_core_issue_61_16_bit_tiffs():
    from ufo import Read, Write

    orig = np.random.randint(0, 65535, (512, 512)).astype(np.uint16)

    with tempdir() as d:
        tifffile.imsave(d.path('temp.tif'), orig)

        read = Read(path=d.path('temp.tif'))
        write = Write(filename=d.path('temp-%i.tif'))
        write(read()).run().join()

        produced = tifffile.imread(d.path('temp-0.tif'))
        assert np.sum(orig - produced) == 0.0
def test_gen_pipeline_with_non_code_changes(self):
    with tempdir() as dir:
        tmp_gen_pipeline_sh = os.path.join(dir, 'gen-pipeline.sh')
        copy(GEN_PIPELINE_FNAME, tmp_gen_pipeline_sh)

        with open(os.path.join(dir, 'get_changed_code_files.py'), 'w') as py:
            py.write("pass")

        gen_pipeline_env = dict(BUILDKITE_PIPELINE_SLUG='SLUG', BUILDKITE_BRANCH='BRANCH')
        exit_code, actual_pipeline, gen_pipeline_log = self._run(tmp_gen_pipeline_sh, gen_pipeline_env)

        self.assertEqual(0, exit_code)
        self.assertEqual('', gen_pipeline_log)
        self.assertEqual("steps:\n"
                         "- wait\n"
                         "- wait\n"
                         "- wait\n",
                         actual_pipeline)
def test_broadcast():
    from ufo import Generate, Writer
    import glob

    with tempdir() as d:
        generate = Generate(number=5, width=512, height=512)
        write1 = Writer(filename=d.path('foo-%i.tif'))
        write2 = Writer(filename=d.path('bar-%i.tif'))

        g = Ufo.TaskGraph()
        g.connect_nodes(generate.task, write1.task)
        g.connect_nodes(generate.task, write2.task)

        sched = Ufo.Scheduler()
        sched.run(g)

        foos = glob.glob(d.path('foo-*'))
        bars = glob.glob(d.path('bar-*'))

        assert len(foos) == 5
        assert len(bars) == 5
def test_wait_file_available_on_dbfs():
    with tempdir() as d:
        pq_dir = os.path.join(d, 'test_ev')
        os.makedirs(pq_dir)
        file1_path = os.path.join(pq_dir, 'file1')
        file2_path = os.path.join(pq_dir, 'file2')
        url1 = 'file://' + file1_path.replace(os.sep, '/')
        url2 = 'file://' + file2_path.replace(os.sep, '/')
        url_list = [url1, url2]

        def create_file(p):
            with open(p, 'w'):
                pass

        # 1. test all files exist.
        create_file(file1_path)
        create_file(file2_path)
        _wait_file_available_on_dbfs(url_list)

        # 2. test one file does not exist. Raise error.
        os.remove(file2_path)
        with pytest.raises(
                RuntimeError,
                match='Timeout while waiting for all parquet-store files to appear'):
            _wait_file_available_on_dbfs(url_list)

        # 3. test one file becomes accessible after 1 second.
        def delay_create_file2():
            time.sleep(1)
            create_file(file2_path)

        # pass the function itself; calling it here (delay_create_file2())
        # would create the file synchronously before the wait even starts
        threading.Thread(target=delay_create_file2).start()

        _wait_file_available_on_dbfs(url_list)
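# The test above pins down _wait_file_available_on_dbfs only through its
# observable behavior: it returns once every URL's file exists and raises
# RuntimeError with the matched message on timeout. A minimal polling sketch
# with that contract (the timeout value, poll interval, and the naive
# file:// URL-to-path translation are assumptions):
import os
import time


def _wait_file_available_on_dbfs(url_list, timeout=10, poll_interval=0.5):
    paths = [url[len('file://'):] for url in url_list]
    deadline = time.time() + timeout
    while time.time() < deadline:
        if all(os.path.exists(p) for p in paths):
            return
        time.sleep(poll_interval)
    raise RuntimeError('Timeout while waiting for all parquet-store files to appear')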
def generate_parity_fixture(destination_dir):
    """
    The parity fixture generation strategy is to start a ghuc client with
    existing fixtures copied into a temp datadir.  Then a parity client
    is started and peered with the ghuc client.
    """
    with contextlib.ExitStack() as stack:
        ghuc_datadir = stack.enter_context(common.tempdir())

        ghuc_port = common.get_open_port()

        ghuc_ipc_path_dir = stack.enter_context(common.tempdir())
        ghuc_ipc_path = os.path.join(ghuc_ipc_path_dir, 'ghuc.ipc')

        ghuc_keystore_dir = os.path.join(ghuc_datadir, 'keystore')
        common.ensure_path_exists(ghuc_keystore_dir)
        ghuc_keyfile_path = os.path.join(ghuc_keystore_dir, common.KEYFILE_FILENAME)
        with open(ghuc_keyfile_path, 'w') as keyfile:
            keyfile.write(common.KEYFILE_DATA)

        genesis_file_path = os.path.join(ghuc_datadir, 'genesis.json')
        with open(genesis_file_path, 'w') as genesis_file:
            genesis_file.write(json.dumps(common.GENESIS_DATA))

        stack.enter_context(
            common.get_ghuc_process(
                common.get_ghuc_binary(),
                ghuc_datadir,
                genesis_file_path,
                ghuc_ipc_path,
                ghuc_port,
                str(CHAIN_CONFIG['params']['networkID'])))

        # set up fixtures
        common.wait_for_socket(ghuc_ipc_path)
        webu_ghuc = Webu(Webu.IPCProvider(ghuc_ipc_path))
        chain_data = go_happyuc.setup_chain_state(webu_ghuc)
        fixture_block_count = webu_ghuc.eth.blockNumber

        datadir = stack.enter_context(common.tempdir())

        keystore_dir = os.path.join(datadir, 'keys')
        os.makedirs(keystore_dir, exist_ok=True)
        parity_keyfile_path = os.path.join(keystore_dir, common.KEYFILE_FILENAME)
        with open(parity_keyfile_path, 'w') as keyfile:
            keyfile.write(common.KEYFILE_DATA)

        chain_config_file_path = os.path.join(datadir, 'chain_config.json')
        with open(chain_config_file_path, 'w') as chain_file:
            chain_file.write(json.dumps(CHAIN_CONFIG))

        parity_ipc_path_dir = stack.enter_context(common.tempdir())
        parity_ipc_path = os.path.join(parity_ipc_path_dir, 'jsonrpc.ipc')

        parity_port = common.get_open_port()
        parity_binary = get_parity_binary()

        parity_proc = stack.enter_context(get_parity_process(  # noqa: F841
            parity_binary=parity_binary,
            datadir=datadir,
            ipc_path=parity_ipc_path,
            keys_path=keystore_dir,
            chain_config_file_path=chain_config_file_path,
            parity_port=parity_port,
        ))

        common.wait_for_socket(parity_ipc_path)
        webu = Webu(Webu.IPCProvider(parity_ipc_path))

        time.sleep(10)
        connect_nodes(webu, webu_ghuc)
        wait_for_chain_sync(webu, fixture_block_count)

        static_data = {
            'raw_txn_account': common.RAW_TXN_ACCOUNT,
            'keyfile_pw': common.KEYFILE_PW,
        }
        pprint.pprint(merge(chain_data, static_data))

        shutil.copytree(datadir, destination_dir)

        parity_proc = stack.enter_context(parity_export_blocks_process(  # noqa: F841
            parity_binary=parity_binary,
            datadir=destination_dir,
            chain_config_file_path=os.path.join(destination_dir, 'chain_config.json'),
            parity_port=parity_port,
        ))
def local_store():
    with tempdir() as tmp:
        store = LocalStore(tmp)
        yield store
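# Tests earlier in this section use "with local_store() as store:", so
# local_store is presumably decorated with contextlib.contextmanager at its
# definition site. An equivalent self-contained form, under that assumption:
from contextlib import contextmanager


@contextmanager
def local_store():
    # tempdir() cleans up the backing directory when the block exits,
    # taking the store's files with it.
    with tempdir() as tmp:
        yield LocalStore(tmp)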
def generate_parity_fixture(destination_dir):
    """
    The parity fixture generation strategy is to start a geth client with
    existing fixtures copied into a temp datadir.  Then a parity client
    is started and peered with the geth client.
    """
    with contextlib.ExitStack() as stack:
        geth_datadir = stack.enter_context(common.tempdir())

        geth_port = get_open_port()

        geth_ipc_path_dir = stack.enter_context(common.tempdir())
        geth_ipc_path = os.path.join(geth_ipc_path_dir, 'geth.ipc')

        geth_keystore_dir = os.path.join(geth_datadir, 'keystore')
        common.ensure_path_exists(geth_keystore_dir)
        geth_keyfile_path = os.path.join(geth_keystore_dir, common.KEYFILE_FILENAME)
        with open(geth_keyfile_path, 'w') as keyfile:
            keyfile.write(common.KEYFILE_DATA)

        genesis_file_path = os.path.join(geth_datadir, 'genesis.json')
        with open(genesis_file_path, 'w') as genesis_file:
            genesis_file.write(json.dumps(common.GENESIS_DATA))

        stack.enter_context(
            common.get_geth_process(
                common.get_geth_binary(),
                geth_datadir,
                genesis_file_path,
                geth_ipc_path,
                geth_port,
                str(CHAIN_CONFIG['params']['networkID'])))

        # set up fixtures
        common.wait_for_socket(geth_ipc_path)
        web3_geth = Web3(Web3.IPCProvider(geth_ipc_path))
        chain_data = go_ethereum.setup_chain_state(web3_geth)
        fixture_block_count = web3_geth.eth.blockNumber

        datadir = stack.enter_context(common.tempdir())

        keystore_dir = os.path.join(datadir, 'keys')
        os.makedirs(keystore_dir, exist_ok=True)
        parity_keyfile_path = os.path.join(keystore_dir, common.KEYFILE_FILENAME)
        with open(parity_keyfile_path, 'w') as keyfile:
            keyfile.write(common.KEYFILE_DATA)

        chain_config_file_path = os.path.join(datadir, 'chain_config.json')
        with open(chain_config_file_path, 'w') as chain_file:
            chain_file.write(json.dumps(CHAIN_CONFIG))

        parity_ipc_path_dir = stack.enter_context(common.tempdir())
        parity_ipc_path = os.path.join(parity_ipc_path_dir, 'jsonrpc.ipc')

        parity_port = get_open_port()
        parity_binary = get_parity_binary()

        parity_proc = stack.enter_context(get_parity_process(  # noqa: F841
            parity_binary=parity_binary,
            datadir=datadir,
            ipc_path=parity_ipc_path,
            keys_path=keystore_dir,
            chain_config_file_path=chain_config_file_path,
            parity_port=parity_port,
        ))

        common.wait_for_socket(parity_ipc_path)
        web3 = Web3(Web3.IPCProvider(parity_ipc_path))

        time.sleep(10)
        connect_nodes(web3, web3_geth)
        wait_for_chain_sync(web3, fixture_block_count)

        static_data = {
            'raw_txn_account': common.RAW_TXN_ACCOUNT,
            'keyfile_pw': common.KEYFILE_PW,
        }
        pprint.pprint(merge(chain_data, static_data))

        shutil.copytree(datadir, destination_dir)

        parity_proc = stack.enter_context(parity_export_blocks_process(  # noqa: F841
            parity_binary=parity_binary,
            datadir=destination_dir,
            chain_config_file_path=os.path.join(destination_dir, 'chain_config.json'),
            parity_port=parity_port,
        ))
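# The fixture generators in this section all block on common.wait_for_socket
# before connecting an IPCProvider. A plausible minimal implementation that
# polls the IPC socket until it accepts connections (a sketch; the timeout
# and retry cadence are assumptions):
import socket
import time


def wait_for_socket(ipc_path, timeout=30):
    start = time.time()
    while time.time() < start + timeout:
        try:
            # the socket file may not exist yet, or the node may not be
            # accepting connections yet; retry until the deadline
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.connect(ipc_path)
            sock.settimeout(timeout)
        except (FileNotFoundError, socket.error):
            time.sleep(0.01)
        else:
            break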