def run_experiments(config):
    """Run every experiment described in *config* against the matching server.

    For each entry in ``config['experiments']`` this creates a project,
    uploads the CLK data, starts a run, waits for it to complete, scores the
    resulting mapping against ground truth, and finally deletes the created
    resources.  A failing experiment is recorded as an ERROR entry instead of
    aborting the whole batch.

    :param config: dict with at least ``server``, ``schema``, ``timeout`` and
        an ``experiments`` list; each experiment carries a ``threshold``.
    :return: dict of the form ``{'experiments': [result, ...]}`` where each
        result is either a composed score dict or an ERROR record.
    """
    server = config['server']
    # Connectivity check only — raises early if the server is unreachable.
    rest_client.server_get_status(server)
    results = {'experiments': []}
    for experiment in config['experiments']:
        try:
            threshold = experiment['threshold']
            logger.info(f'running experiment: {experiment}')
            size_a, size_b = get_exp_sizes(experiment)
            # create project
            credentials = rest_client.project_create(
                server, config['schema'], 'mapping',
                "benchy_{}".format(experiment))
            try:
                # upload clks
                upload_binary_clks(config, size_a, size_b, credentials)
                # create run
                run = rest_client.run_create(
                    server, credentials['project_id'],
                    credentials['result_token'], threshold,
                    "{}_{}".format(experiment, threshold))
                # wait for result
                run_id = run['run_id']
                logger.info(f'waiting for run {run_id} to finish')
                status = rest_client.wait_for_run(
                    server, credentials['project_id'], run['run_id'],
                    credentials['result_token'], timeout=config['timeout'])
                if status['state'] != 'completed':
                    raise RuntimeError(
                        'run did not finish!\n{}'.format(status))
                logger.info('experiment successful. Evaluating results now...')
                mapping = rest_client.run_get_result_text(
                    server, credentials['project_id'], run['run_id'],
                    credentials['result_token'])
                # The service returns JSON text with string keys/values;
                # convert to an int -> int mapping for scoring.
                mapping = json.loads(mapping)['mapping']
                mapping = {int(k): int(v) for k, v in mapping.items()}
                tt = score_mapping(mapping,
                                   *load_truth(config, size_a, size_b))
                result = compose_result(status, tt, experiment,
                                        (size_a, size_b), threshold)
                results['experiments'].append(result)
                logger.info('cleaning up...')
                delete_resources(config, credentials, run)
            except Exception:
                # Best-effort cleanup of the project/run we just created,
                # then let the outer handler record the failure.
                delete_resources(config, credentials, run)
                # Bare raise preserves the original traceback.
                raise
        except Exception:
            e_trace = format_exc()
            logger.warning("experiment '{}' failed: {}".format(
                experiment, e_trace))
            results['experiments'].append({
                'name': experiment,
                'status': 'ERROR',
                'description': e_trace
            })
    return results
def status(server, output, verbose):
    """Connect to an entity matching server and check the service status. Use "-" to output status to stdout. """
    # Chatty progress output only when requested.
    if verbose:
        log("Connecting to Entity Matching Server: {}".format(server))
    svc_status = server_get_status(server)
    if verbose:
        log("Status: {}".format(svc_status['status']))
    # Emit the full status document as JSON to the chosen output stream.
    print(json.dumps(svc_status), file=output)
def main():
    """Entry point: run the benchmark suite and persist the results.

    Reads the config, checks the server, downloads data and the linkage
    schema, runs all experiments, and writes the annotated results to
    ``config['results_path']``.  If ``run_experiments`` raises, an ERROR
    results document is still written (the ``finally`` block runs before the
    exception propagates).
    """
    config = read_config()
    server_status = rest_client.server_get_status(config['server'])
    version = requests.get(config['server'] + "/api/v1/version").json()
    logger.info(server_status)
    download_data(config)
    with open(config['schema_path'], 'rt') as f:
        schema = json.load(f)
    config['schema'] = schema
    try:
        results = run_experiments(config)
    except Exception:
        results = {'status': 'ERROR', 'description': format_exc()}
        # Bare raise keeps the original traceback; the finally block below
        # still records server/version and writes the ERROR document first.
        raise
    finally:
        results['server'] = config['server']
        results['version'] = version
        pprint(results)
        with open(config['results_path'], 'wt') as f:
            json.dump(results, f)
def test_status(self):
    """The live status endpoint reports both a status and a project count."""
    # One request per key, mirroring two independent checks.
    for expected_key in ('status', 'project_count'):
        assert expected_key in rest_client.server_get_status(self.url)
def test_status_calls_correct_url(requests_mock):
    """server_get_status must hit the /api/v1/status endpoint."""
    endpoint = 'http://testing-es-url/api/v1/status'
    requests_mock.get(endpoint, json={'status': 'ok'})
    rest_client.server_get_status('http://testing-es-url')
    # The mocked endpoint was actually requested.
    assert requests_mock.called
def test_status_invalid_json_raises_service_error(requests_mock):
    """A 200 response whose body is not JSON must raise ServiceError."""
    requests_mock.get(
        'http://testing-es-url/api/v1/status',
        status_code=200,
        text='NOT JSON',
    )
    with pytest.raises(ServiceError):
        rest_client.server_get_status('http://testing-es-url')
def test_status_500_raises_service_error(requests_mock):
    """A server-side 500 response must surface as a ServiceError."""
    requests_mock.get(
        'http://testing-es-url/api/v1/status',
        status_code=500,
    )
    with pytest.raises(ServiceError):
        rest_client.server_get_status('http://testing-es-url')