Example #1
def results(project, apikey, run, watch, server, output):
    """
    Check whether results are available for a particular mapping
    and, if so, download them.

    Authentication is carried out using the --apikey option, which
    must be provided. Depending on the server's operating mode this
    may return a mask, a linkage table, or a permutation. Consult
    the entity service documentation for details.
    """

    status = run_get_status(server, project, run, apikey)
    log(format_run_status(status))
    if watch:
        # Poll the server for status updates for up to 24 hours.
        for status in watch_run_status(server, project, run, apikey,
                                       24 * 60 * 60):
            log(format_run_status(status))

    if status['state'] == 'completed':
        log("Downloading result")
        response = run_get_result_text(server, project, run, apikey)
        log("Received result")
        print(response, file=output)
    elif status['state'] == 'error':
        log("There was an error")
        error_result = run_get_result_text(server, project, run, apikey)
        print(error_result, file=output)
    else:
        log("No result yet")
Example #2
    def test_project_run(self):
        p = self._create_project()

        p_id = p['project_id']
        upload_response_1 = rest_client.project_upload_clks(
            self.url, p_id, p['update_tokens'][0], self.clk_data_1)
        upload_response_2 = rest_client.project_upload_clks(
            self.url, p_id, p['update_tokens'][1], self.clk_data_2)

        run_response = rest_client.run_create(self.url,
                                              p_id,
                                              p['result_token'],
                                              0.999,
                                              name='clkhash rest client test')
        assert 'run_id' in run_response
        r_id = run_response['run_id']
        with pytest.raises(ServiceError):
            rest_client.run_get_status(self.url, p_id, 'invalid-run-id',
                                       p['result_token'])
        with pytest.raises(ServiceError):
            rest_client.run_get_status(self.url, p_id, r_id, 'invalid-token')

        status1 = rest_client.run_get_status(self.url, p_id, r_id,
                                             p['result_token'])
        assert 'state' in status1
        assert 'stages' in status1
        print(rest_client.format_run_status(status1))

        # Check that we can watch the run progress; this will raise if
        # the run is not completed within 10 seconds
        for status_update in rest_client.watch_run_status(
                self.url, p_id, r_id, p['result_token'], 10, 0.5):
            print(rest_client.format_run_status(status_update))

        # Check that we can still "wait" on a completed run and get a valid
        # status
        status2 = rest_client.wait_for_run(self.url, p_id, r_id,
                                           p['result_token'], 10)
        assert status2['state'] == 'completed'
        coord_result_raw = rest_client.run_get_result_text(
            self.url, p_id, r_id, p['result_token'])
        coord_result = json.loads(coord_result_raw)
        assert 'mask' in coord_result
        assert len(coord_result['mask']) == 1000
        assert coord_result['mask'].count(1) > 10

        result_a = json.loads(
            rest_client.run_get_result_text(
                self.url, p_id, r_id, upload_response_1['receipt_token']))
        result_b = json.loads(
            rest_client.run_get_result_text(
                self.url, p_id, r_id, upload_response_2['receipt_token']))
        assert 'permutation' in result_a
        assert 'rows' in result_a
        assert 1000 == result_b['rows'] == result_a['rows']

        rest_client.run_delete(self.url, p_id, r_id, p['result_token'])
        rest_client.project_delete(self.url, p_id, p['result_token'])
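
The helper _create_project is not shown above. A hypothetical sketch, using the four-argument project_create call that appears in Example #3, might look like the following; the schema attribute and the 'permutations' result type are assumptions, chosen because the test reads a mask with the result token and a permutation with each receipt token.

    def _create_project(self):
        # Hypothetical fixture. self.schema is assumed to be a valid
        # linkage schema loaded elsewhere in the test class.
        return rest_client.project_create(
            self.url, self.schema, 'permutations',
            'clkhash rest client test project')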
Example #3
def run_experiments(config):
    server = config['server']
    # Fail fast if the service is unreachable.
    rest_client.server_get_status(server)

    results = {'experiments': []}
    for experiment in config['experiments']:
        try:
            threshold = experiment['threshold']
            logger.info('running experiment: {}'.format(experiment))
            size_a, size_b = get_exp_sizes(experiment)
            # create project
            credentials = rest_client.project_create(
                server, config['schema'], 'mapping',
                "benchy_{}".format(experiment))
            run = None  # so cleanup below is safe if run_create fails
            try:
                # upload clks
                upload_binary_clks(config, size_a, size_b, credentials)
                # create run
                run = rest_client.run_create(
                    server, credentials['project_id'],
                    credentials['result_token'], threshold,
                    "{}_{}".format(experiment, threshold))
                # wait for result
                run_id = run['run_id']
                logger.info(f'waiting for run {run_id} to finish')
                status = rest_client.wait_for_run(server,
                                                  credentials['project_id'],
                                                  run['run_id'],
                                                  credentials['result_token'],
                                                  timeout=config['timeout'])
                if status['state'] != 'completed':
                    raise RuntimeError(
                        'run did not finish!\n{}'.format(status))
                logger.info('experiment successful. Evaluating results now...')
                mapping = rest_client.run_get_result_text(
                    server, credentials['project_id'], run['run_id'],
                    credentials['result_token'])
                mapping = json.loads(mapping)['mapping']
                mapping = {int(k): int(v) for k, v in mapping.items()}
                tt = score_mapping(mapping,
                                   *load_truth(config, size_a, size_b))
                result = compose_result(status, tt, experiment,
                                        (size_a, size_b), threshold)
                results['experiments'].append(result)
                logger.info('cleaning up...')
                delete_resources(config, credentials, run)
            except Exception:
                # Clean up before re-raising; a bare raise preserves
                # the original traceback.
                delete_resources(config, credentials, run)
                raise
        except Exception:
            e_trace = format_exc()
            logger.warning("experiment '{}' failed: {}".format(
                experiment, e_trace))
            results['experiments'].append({
                'name': experiment,
                'status': 'ERROR',
                'description': e_trace
            })
    return results
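
The delete_resources helper is not defined in this example. A hypothetical sketch, modelled on the explicit run_delete and project_delete cleanup calls in Example #2, could look like this; accepting run=None covers the path where run_create itself failed.

def delete_resources(config, credentials, run):
    # Remove the run (if one was created), then the project itself.
    server = config['server']
    if run is not None:
        rest_client.run_delete(server, credentials['project_id'],
                               run['run_id'], credentials['result_token'])
    rest_client.project_delete(server, credentials['project_id'],
                               credentials['result_token'])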
Example #4
    def test_project_run_before_data(self):
        p = self._create_project()

        p_id = p['project_id']
        upload_response_1 = rest_client.project_upload_clks(
            self.url, p_id, p['update_tokens'][0], self.clk_data_1)
        run_response = rest_client.run_create(
            self.url, p_id, p['result_token'], 0.999,
            name='clkhash rest client test')
        # Requesting a result before the second party has uploaded any
        # data must fail with a ServiceError.
        with pytest.raises(ServiceError):
            json.loads(rest_client.run_get_result_text(
                self.url, p_id, run_response['run_id'],
                upload_response_1['receipt_token']))
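
For contrast with the failure case, here is a hedged sketch of how the test could continue on the happy path: after the second party uploads its CLKs (clk_data_2 and the second update token are assumed to exist, as in Example #2), the same result request should succeed.

        # Hypothetical continuation of the test above.
        rest_client.project_upload_clks(
            self.url, p_id, p['update_tokens'][1], self.clk_data_2)
        rest_client.wait_for_run(self.url, p_id, run_response['run_id'],
                                 p['result_token'], 10)
        result = json.loads(rest_client.run_get_result_text(
            self.url, p_id, run_response['run_id'],
            upload_response_1['receipt_token']))
        # Expected to hold a permutation if the project was created with
        # the 'permutations' result type (an assumption; see Example #2).
        assert 'permutation' in result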