コード例 #1
0
def test_validation_job_id(dispatcher_live_fixture):
    """A request carrying a job_id recorded for another user must be rejected with 403."""
    server = dispatcher_live_fixture
    DispatcherJobState.remove_scratch_folders()

    logger.info("constructed server: %s", server)

    # valid token for the first user
    token_payload = {
        **default_token_payload,
    }
    first_user_token = jwt.encode(token_payload, secret_key, algorithm='HS256')

    # parameters defining the request content
    base_dict_param = dict(
        instrument="empty-async",
        product_type="dummy",
        query_type="real",
    )

    request_params = dict(query_status="new",
                          token=first_user_token,
                          **base_dict_param)

    # the initial submission is expected to land in "submitted" state (email sent)
    c = requests.get(server + "/run_analysis", request_params)

    print(json.dumps(c.json(), sort_keys=True, indent=4))

    assert c.status_code == 200
    dispatcher_job_state = DispatcherJobState.from_run_analysis_response(c)
    assert c.json()['exit_status']['job_status'] == 'submitted'

    # same payload, but issued for a different user
    token_payload['sub'] = "*****@*****.**"
    second_user_token = jwt.encode(token_payload, secret_key, algorithm='HS256')

    request_params['token'] = second_user_token
    # job id recorded for the *first* user's request
    request_params['job_id'] = dispatcher_job_state.job_id
    request_params['query_status'] = 'submitted'

    c = requests.get(server + "/run_analysis", request_params)

    wrong_job_id = make_hash({**base_dict_param, "sub": "*****@*****.**"})

    from cdci_data_analysis.flask_app.dispatcher_query import InstrumentQueryBackEnd
    assert InstrumentQueryBackEnd.restricted_par_dic(request_params) == base_dict_param

    assert c.status_code == 403, json.dumps(c.json(), indent=4, sort_keys=True)
    jdata = c.json()

    # the dispatcher must explain the job_id mismatch in the debug message
    assert jdata["exit_status"]["debug_message"] == (
        f'The provided job_id={dispatcher_job_state.job_id} does not match with the '
        f'job_id={wrong_job_id} derived from the request parameters for your user account email; parameters are derived from recorded job state'
    )
    assert jdata["exit_status"]["error_message"] == ""
    assert jdata["exit_status"]["message"] == "Request not authorized"
コード例 #2
0
def test_email_parameters_html_conflicting(dispatcher_long_living_fixture,
                                           dispatcher_local_mail_server):
    """Verify a parameter value containing HTML-like markup survives email rendering.

    The raw value must appear both in the email source and in the text
    extracted from the HTML body (i.e. it is escaped, not swallowed as a tag).
    """
    server = dispatcher_long_living_fixture

    DispatcherJobState.remove_scratch_folders()

    # let's generate a valid token with high threshold
    token_payload = {**default_token_payload, "tem": 0}
    encoded_token = jwt.encode(token_payload, secret_key, algorithm='HS256')
    # set the time the request was initiated
    time_request = time.time()

    # deliberately looks like (broken) HTML markup
    name_parameter_value = "< bla bla: this is not a tag > <"

    c = requests.get(server + "/run_analysis",
                     params=dict(query_status="new",
                                 query_type="Real",
                                 instrument="empty-async",
                                 product_type="numerical",
                                 string_like_name=name_parameter_value,
                                 token=encoded_token,
                                 time_request=time_request))

    logger.info("response from run_analysis: %s", json.dumps(c.json(),
                                                             indent=4))

    dispatcher_job_state = DispatcherJobState.from_run_analysis_response(c)

    jdata = c.json()
    assert jdata['exit_status']['email_status'] == 'email sent'

    dispatcher_job_state.assert_email("submitted")

    email_data = dispatcher_job_state.load_emails()[0]

    print(email_data)

    assert name_parameter_value in email_data

    from bs4 import BeautifulSoup
    # explicit parser: relying on bs4's auto-detection raises
    # GuessedAtParserWarning and may pick different parsers per environment
    assert name_parameter_value in BeautifulSoup(email_data, 'html.parser').get_text()
コード例 #3
0
def test_email_unnecessary_job_id(dispatcher_live_fixture,
                                  dispatcher_local_mail_server):
    """A brand-new request must not carry a job_id: the dispatcher returns 400."""
    # start from a clean set of scratch folders
    DispatcherJobState.remove_scratch_folders()

    server = dispatcher_live_fixture

    submitted_job_id = "something-else"
    request_params = {
        'query_status': "new",
        'query_type': "Real",
        'instrument': "empty-async",
        'product_type': "dummy",
        'job_id': submitted_job_id,
    }

    response = requests.get(server + "/run_analysis", request_params)

    assert response.status_code == 400

    # the error message mentions both the problem and the offending job_id
    error_text = response.json()['error']
    assert 'unnecessarily' in error_text
    assert submitted_job_id in error_text
コード例 #4
0
def test_email_very_long_unbreakable_string(length,
                                            dispatcher_long_living_fixture,
                                            dispatcher_local_mail_server):
    """Check email behaviour when a request has an extremely long parameter *name*.

    Parameter names below ~900 characters still allow the email to be sent;
    longer ones are expected to make email sending fail (SMTP line limits).
    `length` is the parametrized repetition count of the name fragment.
    """
    server = dispatcher_long_living_fixture

    DispatcherJobState.remove_scratch_folders()

    # let's generate a valid token with high threshold
    token_payload = {**default_token_payload, "tem": 0}
    encoded_token = jwt.encode(token_payload, secret_key, algorithm='HS256')
    # set the time the request was initiated
    time_request = time.time()

    params = dict(query_status="new",
                  query_type="Real",
                  instrument="empty-async",
                  product_type="numerical",
                  token=encoded_token,
                  time_request=time_request)

    # this kind of parameters never really happen, and we should be alerted
    # we might as well send something in email, like failed case. but better let's make us look immediately
    params['very_long_parameter_' * length] = "unset"

    c = requests.get(server + "/run_analysis", params=params)

    logger.info("response from run_analysis: %s", json.dumps(c.json(),
                                                             indent=4))

    dispatcher_job_state = DispatcherJobState.from_run_analysis_response(c)

    jdata = c.json()

    # generator expression (no throwaway list) and direct dict iteration
    if all(len(k) < 900 for k in params):
        assert jdata['exit_status']['email_status'] == 'email sent'
    else:
        assert jdata['exit_status']['email_status'] == 'sending email failed'
コード例 #5
0
def test_scws_list_file(dispatcher_live_fixture):
    """Upload a science-window list file via POST and verify job_id derivation.

    The uploaded p-value file is turned into `p_list`, `use_scws` is dropped
    from the recorded analysis parameters, and the returned job_id must equal
    the hash of the restricted parameter dictionary.
    """
    server = dispatcher_live_fixture
    logger.info("constructed server: %s", server)

    # let's generate a valid token
    token_payload = {
        **default_token_payload,
        "roles": "unige-hpc-full, general",
    }
    encoded_token = jwt.encode(token_payload, secret_key, algorithm='HS256')

    params = {
        **default_params, 'product_type': 'numerical',
        'query_type': "Dummy",
        'instrument': 'empty',
        'p': 5,
        'use_scws': 'user_file',
        'token': encoded_token
    }

    file_path = DispatcherJobState.create_p_value_file(p_value=5)

    # context manager guarantees the file is closed even if ask() raises
    with open(file_path) as list_file:
        scw_list_content = list_file.read()

    jdata = ask(server,
                params,
                expected_query_status=["done"],
                max_time_s=150,
                method='post',
                files={"user_scw_list_file": scw_list_content})

    assert 'p_list' in jdata['products']['analysis_parameters']
    assert 'use_scws' not in jdata['products']['analysis_parameters']
    assert jdata['products']['analysis_parameters']['p_list'] == ['5']
    # test job_id
    job_id = jdata['products']['job_id']
    params.pop('use_scws', None)
    # adapting some values to string
    for k, v in params.items():
        params[k] = str(v)

    restricted_par_dic = InstrumentQueryBackEnd.restricted_par_dic({
        **params, "p_list": ["5"],
        "sub":
        "*****@*****.**"
    })
    calculated_job_id = make_hash(restricted_par_dic)

    assert job_id == calculated_job_id
コード例 #6
0
def test_email_submitted_frontend_like_job_id(dispatcher_live_fixture,
                                              dispatcher_local_mail_server):
    """A frontend-style request with an *empty* job_id is accepted and emailed.

    Unlike a non-empty spurious job_id (rejected with 400), `job_id=""` is
    treated as absent: the job is submitted and exactly one email is sent.
    """
    DispatcherJobState.remove_scratch_folders()

    server = dispatcher_live_fixture
    logger.info("constructed server: %s", server)

    encoded_token = jwt.encode(default_token_payload,
                               secret_key,
                               algorithm='HS256')

    # note: empty job_id, as the frontend sends it
    dict_param = dict(query_status="new",
                      query_type="Real",
                      instrument="empty-async",
                      product_type="dummy",
                      token=encoded_token,
                      job_id="")

    # this should return status submitted, so email sent
    c = requests.get(server + "/run_analysis", dict_param)

    assert c.status_code == 200

    dispatcher_job_state = DispatcherJobState.from_run_analysis_response(c)

    jdata = c.json()
    assert jdata['exit_status']['job_status'] == 'submitted'
    assert jdata['exit_status']['email_status'] == 'email sent'

    # check the email in the email folders, and that the first one was produced

    dispatcher_job_state.assert_email(state="submitted")
    dispatcher_local_mail_server.assert_email_number(1)
コード例 #7
0
def test_email_failure_callback_after_run_analysis(dispatcher_live_fixture):
    """Failure callback after run_analysis: email sending fails, no history folder.

    This fixture has no local mail server, so both the initial submission and
    the 'failed' callback record 'sending email failed' and no email_history
    folder is created.
    """
    # TODO: for now, this is not very different from no-prior-run_analysis. This will improve

    server = dispatcher_live_fixture
    logger.info("constructed server: %s", server)

    # let's generate a valid token with high threshold
    token_payload = {**default_token_payload, "tem": 0}
    encoded_token = jwt.encode(token_payload, secret_key, algorithm='HS256')
    # set the time the request was initiated
    time_request = time.time()
    c = requests.get(server + "/run_analysis",
                     params=dict(query_status="new",
                                 query_type="Real",
                                 instrument="empty-async",
                                 product_type="dummy",
                                 token=encoded_token,
                                 time_request=time_request))

    logger.info("response from run_analysis: %s", json.dumps(c.json(),
                                                             indent=4))

    dispatcher_job_state = DispatcherJobState.from_run_analysis_response(c)

    jdata = c.json()
    assert jdata['exit_status']['email_status'] == 'sending email failed'

    # this triggers email
    c = requests.get(server + "/call_back",
                     params={
                         'job_id': dispatcher_job_state.job_id,
                         'session_id': dispatcher_job_state.session_id,
                         'instrument_name': "empty-async",
                         'action': 'failed',
                         'node_id': 'node_failed',
                         'message': 'failed',
                         'token': encoded_token,
                         'time_original_request': time_request
                     })
    assert c.status_code == 200

    job_monitor_call_back_failed_json_fn = f'{dispatcher_job_state.scratch_dir}/job_monitor_node_failed_failed_.json'

    # close the monitor file deterministically instead of leaking the handle
    with open(job_monitor_call_back_failed_json_fn) as job_monitor_file:
        jdata = json.load(job_monitor_file)

    assert jdata['email_status'] == 'sending email failed'
    assert not os.path.exists(dispatcher_job_state.email_history_folder)
コード例 #8
0
def test_email_done(dispatcher_live_fixture, dispatcher_local_mail_server):
    """A 'done' callback emails once; repeated callbacks are flagged, not re-sent."""
    DispatcherJobState.remove_scratch_folders()

    server = dispatcher_live_fixture
    logger.info("constructed server: %s", server)

    token_payload = {**default_token_payload, "tem": 0}

    encoded_token = jwt.encode(token_payload, secret_key, algorithm='HS256')

    # submitting a new request is expected to send the first email
    c = requests.get(server + "/run_analysis",
                     dict(query_status="new",
                          query_type="Real",
                          instrument="empty-async",
                          product_type="dummy",
                          token=encoded_token))

    logger.info("response from run_analysis: %s", json.dumps(c.json(),
                                                             indent=4))
    jdata = c.json()

    dispatcher_job_state = DispatcherJobState.from_run_analysis_response(c)

    time_request = jdata['time_request']

    # identical callback payload reused for every /call_back request below
    callback_params = dict(job_id=dispatcher_job_state.job_id,
                           session_id=dispatcher_job_state.session_id,
                           instrument_name="empty-async",
                           action='done',
                           node_id='node_final',
                           message='done',
                           token=encoded_token,
                           time_original_request=time_request)

    c = requests.get(server + "/call_back", params=callback_params)
    assert c.status_code == 200

    jdata = dispatcher_job_state.load_job_state_record('node_final', 'done')
    assert 'email_status' in jdata
    assert jdata['email_status'] == 'email sent'

    # a number of done call_backs, but none should trigger the email sending since this already happened
    for _ in range(3):
        c = requests.get(server + "/call_back", params=callback_params)
        assert c.status_code == 200

        jdata = dispatcher_job_state.load_job_state_record(
            'node_final', 'done')

        assert 'email_status' in jdata
        assert jdata['email_status'] == 'multiple completion email detected'

    # both the initial "submitted" and the single "done" email must exist
    dispatcher_job_state.assert_email("submitted")
    dispatcher_job_state.assert_email("done")
コード例 #9
0
def test_email_submitted_multiple_requests(dispatcher_live_fixture,
                                           dispatcher_local_mail_server):
    """Re-submitting the same job from new sessions must not flood 'submitted' emails.

    One email is sent for the first submission; identical follow-up requests
    within the token's `intsub` interval are silent, and a new email only goes
    out once the interval has elapsed.
    """
    # remove all the current scratch folders (shared helper, consistent with
    # the other tests in this module)
    DispatcherJobState.remove_scratch_folders()

    server = dispatcher_live_fixture
    logger.info("constructed server: %s", server)

    # let's generate a valid token with high threshold
    token_payload = {**default_token_payload, "intsub": 3}

    encoded_token = jwt.encode(token_payload, secret_key, algorithm='HS256')

    dict_param = dict(query_status="new",
                      query_type="Real",
                      instrument="empty-async",
                      product_type="dummy",
                      token=encoded_token)

    # this should return status submitted, so email sent
    c = requests.get(server + "/run_analysis", dict_param)
    assert c.status_code == 200

    logger.info("response from run_analysis: %s", json.dumps(c.json(),
                                                             indent=4))

    dispatcher_job_state = DispatcherJobState.from_run_analysis_response(c)

    jdata = c.json()
    assert jdata['exit_status']['job_status'] == 'submitted'
    assert jdata['exit_status']['email_status'] == 'email sent'

    # check the email in the email folders, and that the first one was produced
    dispatcher_job_state.assert_email('submitted')

    # re-submit the same request (so that the same job_id will be generated) but as a different session,
    # in order to produce a sequence of submitted status
    # and verify not a sequence of submitted-status emails are generated
    # a sequence of clicks of the link provided with the email is simulated
    dict_param = dict(query_status="new",
                      query_type="Real",
                      instrument="empty-async",
                      product_type="dummy",
                      token=encoded_token)

    for i in range(5):
        c = requests.get(server + "/run_analysis", dict_param)

        assert c.status_code == 200
        jdata = c.json()
        assert jdata['exit_status']['job_status'] == 'submitted'
        assert 'email_status' not in jdata['exit_status']

    # jobs will be aliased
    dispatcher_job_state.assert_email('submitted')

    # let the interval time pass, so that a new email is sent
    time.sleep(5)
    c = requests.get(server + "/run_analysis", dict_param)

    assert c.status_code == 200
    jdata = c.json()
    assert jdata['exit_status']['job_status'] == 'submitted'
    assert jdata['exit_status']['email_status'] == 'email sent'
    session_id = jdata['session_id']

    # check the email in the email folders, and that the first one was produced
    assert os.path.exists(
        f'scratch_sid_{session_id}_jid_{dispatcher_job_state.job_id}_aliased')
    list_email_files_last_request = glob.glob(
        f'scratch_sid_{session_id}_jid_{dispatcher_job_state.job_id}_aliased/email_history/email_submitted_*.email'
    )
    assert len(list_email_files_last_request) == 1
    list_overall_email_files = glob.glob(
        f'scratch_sid_*_jid_{dispatcher_job_state.job_id}*/email_history/email_submitted_*.email'
    )
    assert len(list_overall_email_files) == 2
コード例 #10
0
def test_email_submitted_same_job(dispatcher_live_fixture,
                                  dispatcher_local_mail_server):
    """Re-submitting the *same* session/job triggers emails only per `intsub` interval.

    With `intsub=3`, identical resubmissions inside the interval send no new
    email; after each sleep past the interval exactly one more email is sent.
    """
    # remove all the current scratch folders (shared helper instead of the
    # side-effect list comprehension the test originally used)
    DispatcherJobState.remove_scratch_folders()

    server = dispatcher_live_fixture
    logger.info("constructed server: %s", server)

    # let's generate a valid token with high threshold
    token_payload = {**default_token_payload, "tem": 0, "intsub": 3}

    encoded_token = jwt.encode(token_payload, secret_key, algorithm='HS256')

    dict_param = dict(query_status="new",
                      query_type="Real",
                      instrument="empty-async",
                      product_type="dummy",
                      token=encoded_token)

    # this should return status submitted, so email sent
    c = requests.get(server + "/run_analysis", dict_param)

    assert c.status_code == 200

    dispatcher_job_state = DispatcherJobState.from_run_analysis_response(c)

    jdata = c.json()
    assert jdata['exit_status']['job_status'] == 'submitted'
    assert jdata['exit_status']['email_status'] == 'email sent'

    # check the email in the email folders, and that the first one was produced

    dispatcher_job_state.assert_email(state="submitted")
    dispatcher_local_mail_server.assert_email_number(1)

    # re-submit the very same request, in order to produce a sequence of submitted status
    # and verify not a sequence of emails are generated
    dict_param = dict(query_status="new",
                      query_type="Real",
                      instrument="empty-async",
                      product_type="dummy",
                      session_id=dispatcher_job_state.session_id,
                      job_id=dispatcher_job_state.job_id,
                      token=encoded_token)

    for i in range(3):
        c = requests.get(server + "/run_analysis", dict_param)

        assert c.status_code == 200
        jdata = c.json()
        assert jdata['exit_status']['job_status'] == 'submitted'
        assert 'email_status' not in jdata['exit_status']

        # check the email in the email folders, and that the first one was produced
        dispatcher_job_state.assert_email(state="submitted", number=1)
        dispatcher_local_mail_server.assert_email_number(1)

    # let the interval time pass, so that a new email is sent
    time.sleep(5)
    c = requests.get(server + "/run_analysis", dict_param)

    assert c.status_code == 200
    jdata = c.json()

    try:
        assert jdata['exit_status']['job_status'] == 'submitted'
        assert jdata['exit_status']['email_status'] == 'email sent'
    except KeyError:
        # dump the full response before re-raising, to ease debugging
        logger.error(json.dumps(jdata, indent=4, sort_keys=True))
        raise

    # check the email in the email folders, and that the second one was produced

    dispatcher_job_state.assert_email(state="submitted", number=2)
    dispatcher_local_mail_server.assert_email_number(2)

    # let the interval time pass again, so that a new email is sent
    time.sleep(5)
    c = requests.get(server + "/run_analysis", dict_param)

    assert c.status_code == 200
    jdata = c.json()
    assert jdata['exit_status']['job_status'] == 'submitted'
    assert jdata['exit_status']['email_status'] == 'email sent'

    # check that a third email was produced
    dispatcher_local_mail_server.assert_email_number(3)
コード例 #11
0
def test_email_run_analysis_callback(dispatcher_long_living_fixture,
                                     dispatcher_local_mail_server,
                                     default_values, request_cred,
                                     time_original_request_none):
    """End-to-end check of the emails produced around the callback lifecycle.

    Parametrized (decorators outside this chunk — assumed from the branches
    below, TODO confirm):
    - request_cred: 'public' (no token), 'private', or 'private-no-email'
      (token claims mssub/msdone/msfail all False, so no emails expected)
    - default_values: drop the email-tuning claims (tem/mstout/mssub/intsub)
      from the token
    - time_original_request_none: omit time_original_request in the callbacks

    Flow: submit -> progress/ready callbacks (no email) -> 'done' callback
    (email, depending on params) -> 'failed' callback (email) -> re-request
    with query_status="ready".
    """
    from cdci_data_analysis.plugins.dummy_instrument.data_server_dispatcher import DataServerQuery
    # make the dummy backend report 'submitted' until we flip it below
    DataServerQuery.set_status('submitted')

    server = dispatcher_long_living_fixture

    DispatcherJobState.remove_scratch_folders()

    token_none = (request_cred == 'public')

    expect_email = True

    if token_none:
        encoded_token = None
    else:
        # let's generate a valid token with high threshold
        token_payload = {**default_token_payload, "tem": 0}

        if default_values:
            # rely on the dispatcher's built-in defaults for the email knobs
            token_payload.pop('tem')
            token_payload.pop('mstout')
            token_payload.pop('mssub')
            token_payload.pop('intsub')

        if request_cred == 'private-no-email':
            # user opted out of every email category
            token_payload['mssub'] = False
            token_payload['msdone'] = False
            token_payload['msfail'] = False
            expect_email = False

        encoded_token = jwt.encode(token_payload,
                                   secret_key,
                                   algorithm='HS256')

    dict_param = dict(query_status="new",
                      query_type="Real",
                      instrument="empty-async",
                      product_type="dummy",
                      token=encoded_token)

    # this should return status submitted, so email sent
    c = requests.get(server + "/run_analysis", dict_param)
    assert c.status_code == 200
    jdata = c.json()

    logger.info("response from run_analysis: %s", json.dumps(jdata, indent=4))
    dispatcher_job_state = DispatcherJobState.from_run_analysis_response(c)

    session_id = jdata['session_id']
    job_id = jdata['job_monitor']['job_id']

    # URL expected to appear in the notification emails
    products_url = get_expected_products_url(dict_param,
                                             token=encoded_token,
                                             session_id=session_id,
                                             job_id=job_id)
    assert jdata['exit_status']['job_status'] == 'submitted'
    # get the original time the request was made
    assert 'time_request' in jdata
    # set the time the request was initiated
    time_request = jdata['time_request']
    time_request_str = time.strftime('%Y-%m-%d %H:%M:%S',
                                     time.localtime(float(time_request)))

    if token_none or not expect_email:
        # email not supposed to be sent for public request
        assert 'email_status' not in jdata
    else:
        assert jdata['exit_status']['email_status'] == 'email sent'

        # verify the 'submitted' email content against the recorded state
        validate_email_content(
            dispatcher_local_mail_server.get_email_record(),
            'submitted',
            dispatcher_job_state,
            time_request_str=time_request_str,
            products_url=products_url,
            dispatcher_live_fixture=None,
        )

    # for the call_back(s) in case the time of the original request is not provided
    if time_original_request_none:
        time_request = None
        time_request_str = 'None'

    # progress callbacks: none of these should produce an email
    for i in range(5):
        # imitating what a backend would do
        c = requests.get(server + "/call_back",
                         params=dict(
                             job_id=dispatcher_job_state.job_id,
                             session_id=dispatcher_job_state.session_id,
                             instrument_name="empty-async",
                             action='progress',
                             node_id=f'node_{i}',
                             message='progressing',
                             token=encoded_token,
                             time_original_request=time_request))

    # this dones nothing special
    c = requests.get(server + "/call_back",
                     params=dict(job_id=dispatcher_job_state.job_id,
                                 session_id=dispatcher_job_state.session_id,
                                 instrument_name="empty-async",
                                 action='ready',
                                 node_id='node_ready',
                                 message='ready',
                                 token=encoded_token,
                                 time_original_request=time_request))

    DataServerQuery.set_status('done')

    # this triggers email
    c = requests.get(server + "/call_back",
                     params=dict(job_id=dispatcher_job_state.job_id,
                                 session_id=dispatcher_job_state.session_id,
                                 instrument_name="empty-async",
                                 action='done',
                                 node_id='node_final',
                                 message='done',
                                 token=encoded_token,
                                 time_original_request=time_request))
    assert c.status_code == 200

    # TODO build a test that effectively test both paths
    jdata = dispatcher_job_state.load_job_state_record('node_final', 'done')

    if token_none or not expect_email:
        # no token / opted out: 'done' produces no email either
        assert 'email_status' not in jdata

    elif time_original_request_none:
        # an email_status is recorded, but its exact value is not pinned here
        assert 'email_status' in jdata

    elif default_values:
        # default token values: no 'done' email is configured
        assert 'email_status' not in jdata

    else:
        assert jdata['email_status'] == 'email sent'

        # check the email in the email folders, and that the first one was produced
        dispatcher_job_state.assert_email(state="done")

        # check the email in the log files
        validate_email_content(
            dispatcher_local_mail_server.get_email_record(1),
            'done',
            dispatcher_job_state,
            time_request_str=time_request_str,
            dispatcher_live_fixture=server,
        )

    # this also triggers email (simulate a failed request)
    c = requests.get(server + "/call_back",
                     params={
                         'job_id': dispatcher_job_state.job_id,
                         'session_id': dispatcher_job_state.session_id,
                         'instrument_name': "empty-async",
                         'action': 'failed',
                         'node_id': 'node_failed',
                         'message': 'failed',
                         'token': encoded_token,
                         'time_original_request': time_request
                     })

    assert c.status_code == 200

    jdata = dispatcher_job_state.load_job_state_record('node_failed', 'failed')

    if token_none or not expect_email:
        # email not supposed to be sent for public request
        assert 'email_status' not in jdata
    else:
        assert jdata['email_status'] == 'email sent'

        # check the email in the email folders, and that the first one was produced
        if default_values or time_original_request_none:
            dispatcher_job_state.assert_email(
                'failed', comment="expected one email in total, failed")
            dispatcher_local_mail_server.assert_email_number(2)
        else:
            dispatcher_job_state.assert_email(
                'failed',
                comment="expected two emails in total, second failed")
            dispatcher_local_mail_server.assert_email_number(3)

        validate_email_content(
            dispatcher_local_mail_server.get_email_record(-1),
            'failed',
            dispatcher_job_state,
            time_request_str=time_request_str,
            dispatcher_live_fixture=server,
        )

    # TODO this will rewrite the value of the time_request in the query output, but it shouldn't be a problem?
    # This is not complete since DataServerQuery never returns done
    c = requests.get(
        server + "/run_analysis",
        params=dict(
            query_status=
            "ready",  # whether query is new or not, this should work
            query_type="Real",
            instrument="empty-async",
            product_type="dummy",
            async_dispatcher=False,
            session_id=dispatcher_job_state.session_id,
            job_id=dispatcher_job_state.job_id,
            token=encoded_token))

    logger.info("response from run_analysis: %s", json.dumps(c.json(),
                                                             indent=4))

    assert c.status_code == 200

    # TODO: test that this returns the result

    DataServerQuery.set_status(
        'submitted')  # sets the expected default for other tests
コード例 #12
0
def test_email_very_long_request_url(dispatcher_long_living_fixture,
                                     dispatcher_local_mail_server,
                                     request_length):
    """Check how emails handle (very) long product URLs.

    `request_length` (parametrized) scales the size of a string parameter.
    If a short products URL could be produced it must appear in the email and
    redirect (HTTP 302); otherwise the email must carry the apology text.
    """
    # emails generally can not contain lines longer than 999 characters.
    # different SMTP servers will deal with these differently:
    #  * some will respond with error,
    #  * some, apparently, automatically introduce new line
    #
    # The latter  may cause an issue if it is added in the middle of data,
    # e.g. in some random place in json
    # we need:
    #  * to detect this and be clear we can not send these long lines. they are not often usable as URLs anyway
    #  * compress long parameters, e.g. selected_catalog
    #  * request by shortcut (job_d): but it is clear that it is not generally possible to derive parameters from job_id
    #  * make this or some other kind of URL shortener

    server = dispatcher_long_living_fixture

    DispatcherJobState.remove_scratch_folders()

    # let's generate a valid token with high threshold
    token_payload = {**default_token_payload, "tem": 0}
    encoded_token = jwt.encode(token_payload, secret_key, algorithm='HS256')
    # set the time the request was initiated
    time_request = time.time()

    # parameter value whose size grows with the parametrized request_length
    name_parameter_value = "01" * request_length

    dict_param = dict(query_status="new",
                      query_type="Real",
                      instrument="empty-async",
                      product_type="numerical",
                      string_like_name=name_parameter_value,
                      token=encoded_token,
                      time_request=time_request)

    c = requests.get(server + "/run_analysis", params=dict_param)

    logger.info("response from run_analysis: %s", json.dumps(c.json(),
                                                             indent=4))

    dispatcher_job_state = DispatcherJobState.from_run_analysis_response(c)

    jdata = c.json()
    assert jdata['exit_status']['email_status'] == 'email sent'

    dispatcher_job_state.assert_email("submitted")

    email_data = dispatcher_job_state.load_emails()[0]

    print(email_data)

    session_id = jdata['session_id']
    job_id = jdata['job_monitor']['job_id']

    # empty string means no products URL could be generated for this request
    short_url = get_expected_products_url(dict_param,
                                          token=encoded_token,
                                          session_id=session_id,
                                          job_id=job_id)

    if short_url != "":
        assert short_url in email_data
        # rewrite the email's placeholder host to point at the test server
        url = short_url.replace('PRODUCTS_URL/dispatch-data', server)

        print("url", url)

        c = requests.get(url, allow_redirects=False)

        # the short URL must redirect (302) rather than resolve directly
        assert c.status_code == 302, json.dumps(c.json(),
                                                sort_keys=True,
                                                indent=4)

        redirect_url = parse.urlparse(c.headers['Location'])
        print(redirect_url)

        # TODO: complete this
        # compressed = "z%3A" + base64.b64encode(zlib.compress(json.dumps(name_parameter_value).encode())).decode()
        # assert compressed in email_data
    else:
        # fallback message shipped when the URL could not be generated
        assert """You can retrieve the results by repeating the request.
Unfortunately, due to a known issue with very large requests, a URL with the selected request parameters could not be generated.
This will be fixed in a future release.""" in email_data
コード例 #13
0
def test_email_oda_api(dispatcher_live_fixture, dispatcher_local_mail_server):
    """E-mail notifications for requests submitted through the oda_api client.

    Checks that:
      * repeating the very same request four times within one session still
        results in a "submitted" e-mail (deduplicated, not one per call);
      * a follow-up request in the same session with a changed parameter
        (p=4) triggers no additional e-mail (``number=0``);
      * a remotely failing request (p=-1, raising RemoteException on the
        client) triggers no additional e-mail either.
    """
    DispatcherJobState.remove_scratch_folders()

    # imported lazily: only this test depends on the oda_api client package
    import oda_api.api

    # let's generate a valid token
    token_payload = {**default_token_payload}
    encoded_token = jwt.encode(token_payload, secret_key, algorithm='HS256')

    # PyJWT < 2.0 returns bytes, >= 2.0 returns str: normalize to str
    if isinstance(encoded_token, bytes):
        encoded_token = encoded_token.decode()

    disp = oda_api.api.DispatcherAPI(url=dispatcher_live_fixture, wait=False)

    # the identical request, repeated within one session
    # (loop index unused, hence `_`)
    for _ in range(4):
        disp.get_product(product_type="Real",
                         instrument="empty-semi-async",
                         product="dummy",
                         osa_version="OSA10.2",
                         token=encoded_token,
                         p=0,
                         session_id=disp.session_id)

    dispatcher_job_state = DispatcherJobState(disp.session_id, disp.job_id)

    dispatcher_job_state.assert_email("submitted")

    # same session, different parameter value (p=4): presumably a new job_id,
    # and no e-mail is expected for it (number=0)
    disp = oda_api.api.DispatcherAPI(url=dispatcher_live_fixture,
                                     session_id=disp.session_id,
                                     wait=False)

    disp.get_product(product_type="Real",
                     instrument="empty-semi-async",
                     product="dummy",
                     osa_version="OSA10.2",
                     token=encoded_token,
                     p=4)

    dispatcher_job_state = DispatcherJobState(disp.session_id, disp.job_id)
    dispatcher_job_state.assert_email("*", number=0)

    # a failing request (p=-1) must raise on the client side and must not
    # produce any e-mail
    disp = oda_api.api.DispatcherAPI(url=dispatcher_live_fixture,
                                     session_id=disp.session_id,
                                     wait=False)

    with pytest.raises(oda_api.api.RemoteException):
        disp.get_product(product_type="Real",
                         instrument="empty-semi-async",
                         product="dummy",
                         osa_version="OSA10.2",
                         token=encoded_token,
                         p=-1)

    dispatcher_job_state = DispatcherJobState(disp.session_id, disp.job_id)
    dispatcher_job_state.assert_email("*", number=0)
# --- コード例 #14 (code example #14; scraper separator, score 0) ---
# converted to a comment so the module remains importable
def _load_analysis_parameters(session_id, job_id):
    """Read the analysis_parameters.json dump from a job's scratch folder.

    The dispatcher writes the file either in the plain scratch folder or in
    its "_aliased" variant; the plain one is preferred when both exist.
    """
    scratch_fn = f'scratch_sid_{session_id}_jid_{job_id}/analysis_parameters.json'
    scratch_fn_aliased = f'scratch_sid_{session_id}_jid_{job_id}_aliased/analysis_parameters.json'
    assert os.path.exists(scratch_fn) or os.path.exists(scratch_fn_aliased)
    selected_fn = scratch_fn if os.path.exists(scratch_fn) else scratch_fn_aliased
    # context manager: the previous json.load(open(...)) leaked the handle
    with open(selected_fn) as f:
        return json.load(f)


def test_consistency_parameters_json_dump_file(dispatcher_live_fixture,
                                               request_cred):
    """The analysis_parameters.json dump must stay unchanged when the same
    job (same job_id & session_id) is re-submitted with different extra
    parameters, as happens e.g. with the Fit button.

    Runs authenticated or public depending on the ``request_cred`` fixture
    parameter.
    """
    DispatcherJobState.remove_scratch_folders()
    server = dispatcher_live_fixture
    logger.info("constructed server: %s", server)

    if request_cred == 'public':
        encoded_token = None
    else:
        token_payload = {**default_token_payload, "sub": "*****@*****.**"}

        encoded_token = jwt.encode(token_payload,
                                   secret_key,
                                   algorithm='HS256')

    # first request: creates the job and its analysis_parameters.json dump
    params = {
        **default_params, 'query_status': "new",
        'product_type': 'dummy',
        'query_type': "Dummy",
        'instrument': 'empty',
        'token': encoded_token
    }

    jdata = ask(
        server,
        params,
        expected_query_status=["done"],
        max_time_s=50,
    )

    assert jdata["exit_status"]["debug_message"] == ""
    assert jdata["exit_status"]["error_message"] == ""
    assert jdata["exit_status"]["message"] == ""

    job_id = jdata['job_monitor']['job_id']
    session_id = jdata['session_id']

    # snapshot the dump produced by the first request
    analysis_parameters_json_content_original = _load_analysis_parameters(
        session_id, job_id)

    logger.info("starting query with the same session_id and job_id")

    # issue another call, different parameters but same job_id & session_id,
    # to simulate the Fit button
    params = {
        **default_params, 'xspec_model': 'powerlaw',
        'product_type': 'dummy',
        'query_type': "Dummy",
        'instrument': 'empty',
        'token': encoded_token,
        'session_id': session_id,
        'job_id': job_id,
        'query_status': "ready"
    }

    jdata = ask(
        server,
        params,
        expected_query_status=["done"],
        max_time_s=50,
    )

    # the dump must be identical to the one written by the first request
    assert _load_analysis_parameters(session_id, job_id) == \
        analysis_parameters_json_content_original