def test_click_list_with_search(browser, config_host):
    very_parametrized_script_page = VeryParametrizedScript(
        browser, config_host)
    very_parametrized_script_page.parameter_list_with_search.click()

    expect(
        is_displayed(
            very_parametrized_script_page.parameter_list_with_search_list),
        "List with search was not opened on click")
    expect(
        is_displayed(
            very_parametrized_script_page.search_field_in_list_with_search),
        "Search field in command based list was not opened on click")

    assert_expectations()
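Every snippet on this page follows the same soft-assertion flow: expect() records a failed check without aborting the test, and assert_expectations() raises once at the end with everything that was recorded. A minimal, self-contained sketch of that flow, assuming the delayed_assert package (the page objects, fixtures and is_displayed helper above are project-specific):

from delayed_assert import expect, assert_expectations


def test_soft_assertion_sketch():
    numbers = [1, 2, 3]

    # Each expect() only records its outcome; the test keeps running.
    expect(len(numbers) == 3, "list should contain three elements")
    expect(sum(numbers) == 6, "elements should sum to 6")

    # Raises a single AssertionError listing every failed expectation (none here).
    assert_expectations()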
def assert_statistic(checkers, expectations=None):
    expectations = expectations or {}
    for k in checkers.keys():
        # expect success unless an explicit FAIL expectation is set
        succ_rate = checkers[k].succ_rate()
        total = checkers[k].total()
        checker_result = k.check_result()

        if expectations.get(k, '') == constants.FAIL:
            log.info(f"Expect Fail: {str(k)} {checker_result}")
            expect(succ_rate < 0.49 or total < 2,
                   f"Expect Fail: {str(k)} {checker_result}")
        else:
            log.info(f"Expect Succ: {str(k)} {checker_result}")
            expect(succ_rate > 0.90 or total > 2,
                   f"Expect Succ: {str(k)} {checker_result}")
    def test_get_launch_info_wrong_launch_id(self, rp_service, monkeypatch):
        """Test get launch information for a non existed launch.

        :param rp_service:  Pytest fixture that represents ReportPortalService
                            object with mocked session.
        :param monkeypatch: Pytest fixture to safely set/delete an attribute
        """
        mock_get = mock.Mock()
        monkeypatch.setattr(rp_service.session, 'get', mock_get)
        monkeypatch.setattr(rp_service, 'launch_id', '1234')

        launch_info = rp_service.get_launch_info()
        expect(mock_get.call_count == 5)
        expect(launch_info == {})
        assert_expectations()
def test_pytest_sessionstart_with_launch_id(mocked_session):
    """Test session configuration if RP launch ID is set via command-line.

    :param mocked_session: pytest fixture
    """
    mocked_session.config.pluginmanager.hasplugin.return_value = True
    mocked_session.config._reporter_config = mock.Mock(
        spec=AgentConfig(mocked_session.config))
    mocked_session.config._reporter_config.rp_launch_attributes = []
    mocked_session.config._reporter_config.rp_launch_id = 1
    mocked_session.config.py_test_service = mock.Mock()
    pytest_sessionstart(mocked_session)
    expect(lambda: mocked_session.config.py_test_service.start_launch.
           assert_not_called())
    assert_expectations()
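The lambda above relies on expect() also accepting a callable: the callable is invoked and any exception it raises (for example from a failing mock assertion) is recorded as a failed expectation. A small sketch of that usage, assuming delayed_assert's callable support and the standard-library mock module:

from unittest import mock

from delayed_assert import expect, assert_expectations


def test_callable_expectation_sketch():
    service = mock.Mock()

    # Passes: nothing has called start_launch yet, so the wrapped mock
    # assertion raises nothing and no failure is recorded.
    expect(lambda: service.start_launch.assert_not_called())

    service.start_launch("demo")

    # Also passes: the call above matches the wrapped assertion.
    expect(lambda: service.start_launch.assert_called_once_with("demo"))

    assert_expectations()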
def test_item_description():
    mock_item = mock.Mock()
    mock_item.description = None
    expect(
        BehaveAgent._item_description(mock_item) is None,
        "Description is not None",
    )
    mock_item.description = ["a", "b"]
    expect(
        BehaveAgent._item_description(mock_item) == "Description:\na\nb",
        "Description is incorrect:\nActual: {}\nExpected: {}".format(
            BehaveAgent._item_description(mock_item), "Description:\na\nb"
        ),
    )
    assert_expectations()
def test_code_ref():
    mock_item = mock.Mock()
    mock_item.location = None
    expect(BehaveAgent._code_ref(mock_item) is None, "code_ref is not None")
    mock_location = mock.Mock()
    mock_location.filename = "filename"
    mock_location.line = 24
    mock_item.location = mock_location
    expect(
        BehaveAgent._code_ref(mock_item) == "filename:24",
        "code_ref is incorrect:\nActual: {}\nExpected: {}".format(
            BehaveAgent._code_ref(mock_item), "filename:24"
        ),
    )
    assert_expectations()
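Taken together, the expectations above pin down the behaviour of BehaveAgent._code_ref. A hypothetical reconstruction consistent with this test (not necessarily the real implementation):

def _code_ref(item):
    """Build a '<filename>:<line>' reference from the item's location, if any."""
    if item.location is None:
        return None
    return "{}:{}".format(item.location.filename, item.location.line)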
def test_pytest_runtest_protocol(mocked_item):
    """Test listener pytest_runtest_protocol hook.

    :param mocked_item: Pytest fixture
    """
    rp_service = mock.Mock()
    rp_service.is_item_update_supported = mock.Mock(return_value=False)
    rp_listener = RPReportListener(rp_service)
    rp_listener._add_issue_id_marks = mock.Mock()

    next(rp_listener.pytest_runtest_protocol(mocked_item))

    expect(rp_listener._add_issue_id_marks.call_count == 1,
           '_add_issue_id_marks was not called exactly once')
    assert_expectations()
    def test_as_list_of_sep(self):
        expect(as_list_of(str, ":")("2: a") == ["2", "a"])
        expect(as_list_of(int, ":")("3: 4") == [3, 4])
        expect(as_list_of(float, ":")("3.5: 6") == [3.5, 6.0])
        expect(
            as_list_of(Value, ":")("33: allo") == [Value("33"),
                                                   Value("allo")])
        assert_expectations()
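The expectations above fully describe what as_list_of(converter, sep) has to do: split on the separator, strip whitespace and run each piece through the converter. A hypothetical reconstruction consistent with this test:

def as_list_of(converter, sep=","):
    """Return a parser mapping 'a<sep> b<sep> c' to [converter('a'), ...]."""
    def parse(raw):
        return [converter(piece.strip()) for piece in raw.split(sep)]
    return parse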
def test_node_logs_for_RuntimeException(converged_network):
    for container in containers(converged_network):
        logging.info(
            f"Testing {container.name} node logs for \"java RuntimeException\"."
        )
        logs = container.logs().decode('utf-8')

        if "RuntimeException" in logs:
            for line in logs.splitlines():
                if "RuntimeException" in line:
                    logging.error(f"Error: {line}")
            expect(not "RuntimeException" in line,
                   f"Container {container.name} error in log line: {line}")

    assert_expectations()
def test_click_simple_list(browser, config_host):
    very_parametrized_script_page = VeryParametrizedScript(
        browser, config_host)
    very_parametrized_script_page.parameter_simple_list.click()

    expect(
        is_displayed(
            very_parametrized_script_page.parameter_simple_list_drop_down),
        "Drop down on list parameter click was not opened")
    expect(
        len(very_parametrized_script_page.
            parameter_simple_list_drop_down_elements) > 0,
        "Drop down list has no elements")

    assert_expectations()
def test_pytest_configure(mocked_config):
    """Test plugin successful configuration.

    :param mocked_config: Pytest fixture
    """
    mocked_config.option.rp_enabled = True
    mocked_config.option.rp_project = None
    pytest_configure(mocked_config)
    expect(mocked_config._rp_enabled is True)
    expect(
        lambda: isinstance(mocked_config.py_test_service, PyTestServiceClass))
    assert_expectations()
    mocked_config.getoption.assert_has_calls([
        mock.call('--collect-only', default=False),
        mock.call('--setup-plan', default=False)
    ])
def test_command_based_list(browser, config_host):
    very_parametrized_script_page = VeryParametrizedScript(
        browser, config_host)
    very_parametrized_script_page.parameter_command_based_list.click()

    expect(is_displayed(very_parametrized_script_page.command_based_list),
           "Command based List was not opened on click")
    expect(
        len(very_parametrized_script_page.command_based_list_elements) > 0,
        "Command based List has no elements")

    random_drop_down_element = random.choice(
        very_parametrized_script_page.command_based_list_elements)
    random_drop_down_element.click()

    assert_expectations()
    def test__addError(self):
        try:
            raise TestException('')
        except TestException:
            err = sys.exc_info()
        expected_test_err_value = err[1]
        expected_test_err_info = str(err[0].__name__) + ":\n" + "".join(
            traceback.format_tb(err[2]))

        self.plugin._addError(self.test_object, err)

        expect(lambda: self.assertEqual(expected_test_err_value,
                                        self.test_object.errors[0]))
        expect(lambda: self.assertEqual(expected_test_err_info,
                                        self.test_object.errors[1]))
        assert_expectations()
def test_edit_simple_text_to_hide_inc_params(browser, config_host):
    very_parametrized_script_page = VeryParametrizedScript(
        browser, config_host)

    very_parametrized_script_page.parameter_simple_text.send_keys("something")

    expect(
        not is_displayed(very_parametrized_script_page.parameter_inc_param1),
        "inc_param1 is displayed although no key text is present in the simple text field"
    )
    expect(
        not is_displayed(very_parametrized_script_page.parameter_inc_param2),
        "inc_param2 is displayed although no key text is present in the simple text field"
    )

    assert_expectations()
def test_read_config_file_path(mock_cp, cmd_args, path):
    mock_context = mock.Mock()
    mock_context._config.userdata = UserData.make(cmd_args)
    read_config(mock_context)
    expect(mock_cp().read.call_count == 1)
    expect(mock_cp().read.call_args[0][0] == path)
    expect(mock_cp().has_section.call_count == 1)
    expect(mock_cp().has_section.call_args[0][0] == RP_CFG_SECTION)
    assert_expectations()
def test_click_random_drop_down_element(browser, config_host):
    very_parametrized_script_page = VeryParametrizedScript(
        browser, config_host)
    random_drop_down_element = random.choice(
        very_parametrized_script_page.parameter_simple_list_drop_down_elements)
    random_drop_down_element.click()
    expect(
        str(
            very_parametrized_script_page.parameter_simple_list.get_attribute(
                'value')) == str(
                    random_drop_down_element.get_attribute('title')),
        "Field text is not equal to input")
    expect(
        random_drop_down_element.get_attribute("class").find("selected") > -1,
        "Selected element does not have the \"selected\" class")

    assert_expectations()
def assert_statistic(checkers, expectations=None):
    expectations = expectations or {}
    for k in checkers.keys():
        # expect success unless an explicit FAIL expectation is set
        succ_rate = checkers[k].succ_rate()
        total = checkers[k].total()
        if expectations.get(k, '') == constants.FAIL:
            log.debug(
                f"Expect Fail: {str(k)} succ rate {succ_rate}, total: {total}")
            expect(
                succ_rate < 0.49 or total < 2,
                f"Expect Fail: {str(k)} succ rate {succ_rate}, total: {total}")
        else:
            log.debug(
                f"Expect Succ: {str(k)} succ rate {succ_rate}, total: {total}")
            expect(
                succ_rate > 0.90 or total > 2,
                f"Expect Succ: {str(k)} succ rate {succ_rate}, total: {total}")
def curvaturefit(testset, iter=None):
    command = './venv/bin/python lcofocuscurvefit/curvefit.py --focuslist {} --fwhmlist {}'.format(
        testset['focuslist'], testset['fwhmlist'])
    if iter is not None:
        command = command + " --makepng --pngname focus_{}.png".format(iter)
    try:
        results = subp.check_output(command.split())
    except subp.CalledProcessError as error:
        # do not swallow the failure silently: `results` would be unbound below
        raise RuntimeError("curvefit.py failed: {}".format(error)) from error

    results = json.loads(results)
    expect(results['fitok'] == testset['expect_fitok'])

    if results['fitok']:
        expect(
            abs(results['fit_focus'] - testset['expect_bestfocus']) < 0.15,
            "example Nr {}".format(iter))
def test_stop_plugin_configuration_on_conn_error(mocked_get, mocked_config):
    """Test plugin configuration in case of HTTP error.

    The value of the _reportportal_configured attribute of the pytest Config
    object should be set to False, stopping plugin configuration, if an HTTP
    error occurs while getting a response from ReportPortal.
    :param mocked_get:    Instance of the MagicMock
    :param mocked_config: Pytest fixture
    """
    mock_response = mock.Mock()
    mock_response.raise_for_status.side_effect = RequestException()
    mocked_get.return_value = mock_response
    expect(pytest_configure(mocked_config) is None,
           'Received unexpected return value from pytest_configure.')
    expect(mocked_config._reportportal_configured is False,
           'The value of the _reportportal_configured is not False.')
    assert_expectations()
def test_code_ref_bypass(mocked_item_start, mocked_item, mocked_session,
                         rp_service):
    """ Test that a test code reference constructed and bypassed to a client.

    :param mocked_item_start: mocked start_test_item method reference
    :param mocked_item:       a mocked test item
    :param mocked_session:    a mocked test session
    :param rp_service:        an instance of
                              reportportal_client.service.ReportPortalService
    """
    ini = {
        'rp_hierarchy_parametrize': False,
        'rp_hierarchy_dirs': False,
        'rp_hierarchy_module': True,
        'rp_hierarchy_class': True,
        'rp_display_suite_test_file': True,
        'rp_hierarchy_dirs_level': 0,
        'rp_tests_attributes': [],
        'norecursedirs': ['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}',
                          '*.egg', 'venv']
    }

    def get_closest_marker(name):
        return {'test_marker': pytest.mark.test_marker}.get(name)

    class NodeKeywords(object):
        _keywords = ['pytestmark', 'ini_marker', 'test_marker']

        def __iter__(self):
            return iter(self._keywords)

    mocked_item.session.config.getini = lambda x: ini[x]
    mocked_item.keywords = NodeKeywords()
    mocked_item.get_closest_marker = get_closest_marker
    mocked_item.callspec.params = None

    mocked_session.items = [mocked_item]

    rp_service.collect_tests(mocked_session)
    rp_service.start_pytest_item(mocked_item)

    expect(mocked_item_start.call_count == 1, 'One HTTP POST sent')
    code_ref = mocked_item_start.call_args[1]['code_ref']
    expect(code_ref == '/path/to/test:test_item')
    assert_expectations()
def assert_provider_config(bundle: Bundle, statemap: dict):
    for pname, plv in statemap.items():
        actual_cnf = bundle.provider(name=pname).config()
        expected_cnf = plv['config']
        for k, v in expected_cnf.items():
            expect(
                v == actual_cnf[k],
                'Provider {} config "{}" is "{}" while expected "{}"'.format(
                    pname, k, str(actual_cnf[k]), str(v)))
        for hname, host_expected_cnf in plv['hosts'].items():
            host_actual_cnf = bundle.provider(name=pname).host(
                fqdn=hname).config()
            for k, v in host_expected_cnf.items():
                expect(
                    v == host_actual_cnf[k],
                    'Provider {} host {} config {} is {} while expected {}'.
                    format(pname, hname, k, str(host_actual_cnf[k]), str(v)))
    assert_expectations()
def test_sessionfinish_with_maxfail(shouldfail, outcome):
    """Test session_finish logic when the maxfail Pytest argument is in use.

    :param shouldfail: shouldfail attribute value for the Session object
    :param outcome:    nowait argument value passed to the terminate_service()
    """
    mocked_session = Mock()
    mocked_session.shouldfail = shouldfail
    mocked_session.config = Mock()
    mocked_session.config._reportportal_configured = True
    mocked_session.config.py_test_service.terminate_service = Mock()
    mocked_session.config.py_test_service.finish_launch = Mock()
    pytest_sessionfinish(mocked_session)
    expect(lambda: mocked_session.config.py_test_service.finish_launch.
           assert_called_with(force=outcome, status='RP_Launch'))
    expect(lambda: mocked_session.config.py_test_service.terminate_service.
           assert_called_with(nowait=outcome))
    assert_expectations()
def test_get_parameters():
    mock_item = mock.Mock()
    mock_item._row = None
    expect(
        BehaveAgent._get_parameters(mock_item) is None,
        "parameters is not None",
    )
    mock_row = mock.Mock()
    mock_row.headings = ["A", "B"]
    mock_row.cells = [1, 2]
    mock_item._row = mock_row
    expect(
        BehaveAgent._get_parameters(mock_item) == {"A": 1, "B": 2},
        "parameters are incorrect:\nActual: {}\nExpected: {}".format(
            BehaveAgent._get_parameters(mock_item), {"A": 1, "B": 2}
        ),
    )
    assert_expectations()
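As with the other static helpers, the expectations above fix the contract of BehaveAgent._get_parameters. A hypothetical version consistent with this test zips the example-table headings with their cell values:

def _get_parameters(item):
    """Map scenario-outline headings to cell values, or None without a row."""
    if item._row is None:
        return None
    return dict(zip(item._row.headings, item._row.cells))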
def assert_cluster_config(bundle: Bundle, statemap: dict):
    for cname, clv in statemap.items():
        actual_cnf = bundle.cluster(name=cname).config()
        expected_cnf = clv['config']
        for k, v in expected_cnf.items():
            expect(
                v == actual_cnf[k],
                'Cluster {} config "{}" is "{}" while expected "{}"'.format(
                    cname, k, str(actual_cnf[k]), str(v)))
        for sname, service_expected_cnf in clv['services'].items():
            service_actual_cnf = bundle.cluster(name=cname).service(
                name=sname).config()
            for k, v in service_expected_cnf.items():
                expect(
                    v == service_actual_cnf[k],
                    'Cluster {} service {} config {} is {} while expected {}'.
                    format(cname, sname, k, str(service_actual_cnf[k]),
                           str(v)))
    assert_expectations()
    def test_mapped_to(self):
        expect(
            mapped_to(int, float, str)("34, 34.5, jon") == [34, 34.5, "jon"])

        with pytest.raises(ValueError, match=".*mismatched lengths"):
            mapped_to(int, float)("34, 34, 34")

        class IntValue(Value):
            def __init__(self, value):
                super().__init__(int(value))

        class FloatValue(Value):
            def __init__(self, value):
                super().__init__(float(value))

        expect(
            mapped_to(IntValue, FloatValue, str)("34, 34.5, jon") ==
            [IntValue(34), FloatValue(34.5), "jon"])
        assert_expectations()
def test_attributes(config):
    mock_item = mock.Mock()
    mock_item.tags = None
    mock_rps = mock.create_autospec(ReportPortalService)
    ba = BehaveAgent(config, mock_rps)
    expect(ba._attributes(mock_item) == [], "Attributes is not empty")
    mock_item.tags = ["a", "b", "attribute(k1:v1,v2)"]
    exp = [
        {"value": "a"},
        {"value": "b"},
        {"key": "k1", "value": "v1"},
        {"value": "v2"},
    ]
    act = ba._attributes(mock_item)
    expect(
        act == exp,
        "Attributes are incorrect:\nActual: {}\nExpected: {}".format(act, exp),
    )
    assert_expectations()
def test_input_key_text_in_simple_text(browser, config_host):
    very_parametrized_script_page = VeryParametrizedScript(
        browser, config_host)

    very_parametrized_script_page.parameter_simple_text.clear()
    very_parametrized_script_page.parameter_simple_text.send_keys("included")

    expect(
        is_displayed(very_parametrized_script_page.parameter_inc_param1),
        "inc_param1 is not displayed. Simple text value is: " + str(
            very_parametrized_script_page.parameter_simple_text.get_attribute(
                'value')))
    expect(
        is_displayed(very_parametrized_script_page.parameter_inc_param2),
        "inc_param2 is not displayed. Simple text value is: " + str(
            very_parametrized_script_page.parameter_simple_text.get_attribute(
                'value')))

    assert_expectations()
    def test_get_launch_info_1st_failed(self, rp_service, monkeypatch):
        """Test get launch information with 1st attempt failed.

        :param rp_service:  Pytest fixture that represents ReportPortalService
                            object with mocked session.
        :param monkeypatch: Pytest fixture to safely set/delete an attribute
        """
        mock_resp1 = mock.Mock()
        mock_resp1.status_code = 404
        mock_resp2 = mock.Mock()
        mock_resp2.status_code = 200
        mock_resp2.json.return_value = {'id': 112}
        mock_get = mock.Mock()
        mock_get.side_effect = [mock_resp1, mock_resp2]
        monkeypatch.setattr(rp_service.session, 'get', mock_get)
        monkeypatch.setattr(rp_service, 'launch_id', '1234')

        launch_info = rp_service.get_launch_info()
        expect(mock_get.call_count == 2)
        expect(launch_info == {'id': 112})
        assert_expectations()
def test_user_input(browser, config_host):
    destroy_world_script_page = DestroyWorldScript(browser, config_host)

    destroy_world_script_page.users_input.send_keys("Y" + Keys.ENTER)
    time.sleep(3)

    expect(
        destroy_world_script_page.log.get_attribute(
            "innerHTML") == destroy_world_script_page.first_step_log_content +
        destroy_world_script_page.second_step_log_content)

    expect(is_displayed(destroy_world_script_page.button_execute),
           "Execute button not found")
    expect(is_disabled(destroy_world_script_page.button_execute),
           "Execute button not disabled")
    expect(is_displayed(destroy_world_script_page.button_stop),
           "Stop button not found")
    expect(is_enabled(destroy_world_script_page.button_stop),
           "Stop button not enabled")

    assert_expectations()
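is_displayed, is_enabled and is_disabled in these UI tests are project helpers, not part of the Selenium API. A plausible minimal version, assuming the page-object attributes are Selenium WebElement objects, wraps the element checks so they can be fed straight into expect():

from selenium.common.exceptions import WebDriverException


def is_displayed(element):
    """True if the element can be queried and is currently visible."""
    try:
        return element.is_displayed()
    except WebDriverException:
        return False


def is_enabled(element):
    """True if the element can be queried and is enabled."""
    try:
        return element.is_enabled()
    except WebDriverException:
        return False


def is_disabled(element):
    """True if the element is not enabled (or cannot be queried at all)."""
    return not is_enabled(element)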
def test_code_reference_template(mock_client_init, test, test_names,
                                 code_ref_suffixes):
    result = utils.run_robot_tests([test])
    assert result == 0  # the test successfully passed

    mock_client = mock_client_init.return_value
    calls = [
        call for call in mock_client.start_test_item.call_args_list
        if call[1]['item_type'] == 'STEP'
        and call[1].get('has_stats', True) is True
    ]
    assert len(calls) == len(test_names)

    for call, test_name, code_ref_suff in zip(calls, test_names,
                                              code_ref_suffixes):
        code_ref = call[1]['code_ref']
        test_case_id = call[1]['test_case_id']
        expect(test_case_id == test + ':' + test_name)
        expect(code_ref == test + ':' + code_ref_suff)

    assert_expectations()