Example #1
    def test_bad_length(self):
        for i in range(1, 10):
            validation_fn = x_hex_character_validation_gen(i)
            payload = choices(hex_list, k=i + 1)
            payload = "".join(payload)
            actual = validation_fn(payload)
            expect(actual is False)
        for i in range(1, 10):
            validation_fn = x_hex_character_validation_gen(i)
            payload = choices(hex_list, k=i - 1)
            payload = "".join(payload)
            actual = validation_fn(payload)
            expect(actual is False)
        assert_expectations()
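A minimal sketch of what x_hex_character_validation_gen might look like, assuming it only has to enforce "exactly N hexadecimal characters" as the expectations above require (the real generator is not shown in this example, so this is purely illustrative):

import re

def x_hex_character_validation_gen(length):
    # Hypothetical implementation: return a validator that accepts only
    # strings made of exactly `length` hexadecimal characters.
    pattern = re.compile(r"[0-9a-fA-F]{%d}" % length)
    return lambda payload: bool(pattern.fullmatch(payload))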
Example #2
def test_code_ref_bypass(mocked_item_start, mocked_item, mocked_session,
                         rp_service):
    """ Test that a test code reference constructed and bypassed to a client
    :param mocked_item_start: mocked start_test_item method reference
    :param mocked_item:       a mocked test item
    :param mocked_session:    a mocked test session
    :param rp_service:        an instance of
                              reportportal_client.service.ReportPortalService
    """
    ini = {
        'rp_hierarchy_parametrize': False,
        'rp_hierarchy_dirs': False,
        'rp_hierarchy_module': True,
        'rp_hierarchy_class': True,
        'rp_display_suite_test_file': True,
        'rp_hierarchy_dirs_level': 0,
        'rp_tests_attributes': [],
        'norecursedirs': ['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}',
                          '*.egg', 'venv']
    }

    def get_closest_marker(name):
        return {'test_marker': pytest.mark.test_marker}.get(name)

    class NodeKeywords(object):
        _keywords = ['pytestmark', 'ini_marker', 'test_marker']

        def __iter__(self):
            return iter(self._keywords)

    mocked_item.session.config.getini = lambda x: ini[x]
    mocked_item.keywords = NodeKeywords()
    mocked_item.get_closest_marker = get_closest_marker
    mocked_item.callspec.params = None

    mocked_session.items = [mocked_item]

    rp_service.collect_tests(mocked_session)
    rp_service.start_pytest_item(mocked_item)

    expect(mocked_item_start.call_count == 1, 'One HTTP POST sent')
    code_ref = mocked_item_start.call_args[1]['code_ref']
    expect(code_ref == '/path/to/test - test_item')
    assert_expectations()
Example #3
def test_rp_log_batch_payload_size(mock_client_init):
    log_size = 123456
    variables = {'rp_log_batch_payload_size': log_size}
    variables.update(utils.DEFAULT_VARIABLES.items())

    result = utils.run_pytest_tests(['examples/test_rp_logging.py'],
                                    variables=variables)
    assert int(result) == 0, 'Exit code should be 0 (no errors)'

    expect(mock_client_init.call_count == 1)

    constructor_args = mock_client_init.call_args_list[0][1]
    expect(constructor_args['log_batch_payload_size'] == log_size)
    assert_expectations()
Example #4
def test_load_seizure_dataset_cnn():
    example = io.StringIO(example_seizure_file)
    tensor_dataset = load_seizure_dataset(example, 'CNN')

    expect(
        type(tensor_dataset) == torch.utils.data.dataset.TensorDataset,
        "it should return TensorDataset")

    data_tensor, target_tensor = tensor_dataset.tensors

    expect(data_tensor.size() == (3, 1, 178),
           "data tensor does not have the expected shape")
    expect(target_tensor.size() == (3, ),
           "target tensor does not have the expected shape")
    assert_expectations()
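The loader itself is not part of this example. A rough sketch of a load_seizure_dataset that would satisfy these shape expectations, assuming each CSV row carries 178 EEG readings followed by a trailing label column (the column layout and label handling are guesses):

import numpy as np
import pandas as pd
import torch
from torch.utils.data import TensorDataset

def load_seizure_dataset(path_or_buffer, model_type):
    # Hypothetical sketch: 178 feature columns plus one trailing label column.
    df = pd.read_csv(path_or_buffer)
    data = df.iloc[:, :-1].values.astype(np.float32)
    target = df.iloc[:, -1].values.astype(np.int64) - 1  # assume labels 1..5
    if model_type == 'CNN':
        data = data[:, np.newaxis, :]  # add a channel axis -> (N, 1, 178)
    return TensorDataset(torch.from_numpy(data), torch.from_numpy(target))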
Example #5
def test_node_logs_for_RuntimeException(converged_complete_network):
    for node in converged_complete_network.nodes:
        logging.info(
            f"Testing {node.name} node logs for \"java RuntimeException\".")
        logs = node.logs()

        if "RuntimeException" in logs:
            for line in logs.splitlines():
                if "RuntimeException" in line:
                    logging.error(f"Error: {line}")
            expect(not "RuntimeException" in line,
                   f"Node {node.name} error in log line: {line}")

    assert_expectations()
Example #6
def test_item_description():
    mock_item = mock.Mock()
    mock_item.description = None
    expect(
        BehaveAgent._item_description(mock_item) is None,
        "Description is not None",
    )
    mock_item.description = ["a", "b"]
    expect(
        BehaveAgent._item_description(mock_item) == "Description:\na\nb",
        "Description is incorrect:\nActual: {}\nExpected: {}".format(
            BehaveAgent._item_description(mock_item), "Description:\na\nb"),
    )
    assert_expectations()
Example #7
def test_code_ref():
    mock_item = mock.Mock()
    mock_item.location = None
    expect(BehaveAgent._code_ref(mock_item) is None, "code_ref is not None")
    mock_location = mock.Mock()
    mock_location.filename = "filename"
    mock_location.line = 24
    mock_item.location = mock_location
    expect(
        BehaveAgent._code_ref(mock_item) == "filename:24",
        "code_ref is incorrect:\nActual: {}\nExpected: {}".format(
            BehaveAgent._code_ref(mock_item), "filename:24"),
    )
    assert_expectations()
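Examples #6 and #7 exercise two small static helpers. A stand-in consistent with both sets of expectations (a sketch only, not the real behave-reportportal code) could be:

class BehaveAgent:

    @staticmethod
    def _item_description(item):
        # Join the description lines under a "Description:" header,
        # or return None when the item carries no description.
        if item.description:
            return "Description:\n" + "\n".join(item.description)
        return None

    @staticmethod
    def _code_ref(item):
        # "<filename>:<line>" when the scenario has a location, else None.
        if item.location is None:
            return None
        return "{}:{}".format(item.location.filename, item.location.line)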
Example #8
def test_elements_in_app_section(browser, config_host):
    very_parametrized_script_page = VeryParametrizedScript(
        browser, config_host)
    very_parametrized_script_page.load()

    expect(is_displayed(very_parametrized_script_page.script_description),
           "Script description not found")
    expect(is_displayed(very_parametrized_script_page.script_parameters_panel),
           "Parameters panel not found")
    expect(is_displayed(very_parametrized_script_page.button_execute),
           "Execute button not found")
    expect(is_enabled(very_parametrized_script_page.button_execute),
           "Execute button not enabled")

    assert_expectations()
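Several of these page-object examples (this one and the later VeryParametrizedScript tests) rely on is_displayed / is_enabled helpers. A plausible sketch (an assumption, not code taken from the project) is a thin wrapper that reports a missing or stale element as not displayed instead of letting the lookup raise:

from selenium.common.exceptions import (NoSuchElementException,
                                        StaleElementReferenceException)

def is_displayed(element):
    # Swallow lookup failures so expect() always receives a boolean.
    try:
        return element.is_displayed()
    except (NoSuchElementException, StaleElementReferenceException):
        return False

def is_enabled(element):
    try:
        return element.is_enabled()
    except (NoSuchElementException, StaleElementReferenceException):
        return False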
Example #9
def test_pytest_sessionstart_with_launch_id(mocked_session):
    """Test session configuration if RP launch ID is set via command-line.

    :param mocked_session: pytest fixture
    """
    mocked_session.config.pluginmanager.hasplugin.return_value = True
    mocked_session.config._reporter_config = mock.Mock(
        spec=AgentConfig(mocked_session.config))
    mocked_session.config._reporter_config.rp_launch_attributes = []
    mocked_session.config._reporter_config.rp_launch_id = 1
    mocked_session.config.py_test_service = mock.Mock()
    pytest_sessionstart(mocked_session)
    expect(lambda: mocked_session.config.py_test_service.start_launch.
           assert_not_called())
    assert_expectations()
Example #10
def test_node_logs_for_RuntimeException(converged_network):
    for container in containers(converged_network):
        logging.info(
            f"Testing {container.name} node logs for \"java RuntimeException\"."
        )
        logs = container.logs().decode('utf-8')

        if "RuntimeException" in logs:
            for line in logs.splitlines():
                if "RuntimeException" in line:
                    logging.error(f"Error: {line}")
            expect(not "RuntimeException" in line,
                   f"Container {container.name} error in log line: {line}")

    assert_expectations()
Example #11
def test_click_simple_list(browser, config_host):
    very_parametrized_script_page = VeryParametrizedScript(
        browser, config_host)
    very_parametrized_script_page.parameter_simple_list.click()

    expect(
        is_displayed(
            very_parametrized_script_page.parameter_simple_list_drop_down),
        "Drop down was not opened on list parameter click")
    expect(
        len(very_parametrized_script_page.
            parameter_simple_list_drop_down_elements) > 0,
        "Drop down list has no elements")

    assert_expectations()
Example #12
def test_click_list_with_search(browser, config_host):
    very_parametrized_script_page = VeryParametrizedScript(
        browser, config_host)
    very_parametrized_script_page.parameter_list_with_search.click()

    expect(
        is_displayed(
            very_parametrized_script_page.parameter_list_with_search_list),
        "List with search was not opened on click")
    expect(
        is_displayed(
            very_parametrized_script_page.search_field_in_list_with_search),
        "Search field in list with search was not opened on click")

    assert_expectations()
Example #13
def test_pytest_runtest_protocol(mocked_item):
    """Test listener pytest_runtest_protocol hook.

    :param mocked_item: Pytest fixture
    """
    rp_service = mock.Mock()
    rp_service.is_item_update_supported = mock.Mock(return_value=False)
    rp_listener = RPReportListener(rp_service)
    rp_listener._add_issue_id_marks = mock.Mock()

    next(rp_listener.pytest_runtest_protocol(mocked_item))

    expect(rp_listener._add_issue_id_marks.call_count == 1,
           '_add_issue_id_marks was not called exactly once')
    assert_expectations()
Example #14
    def test_operations(self, collection_name):
        # start the monitor threads to check the milvus ops
        log.info("*********************Test Start**********************")
        log.info(connections.get_connection_addr('default'))
        c_name = collection_name
        self.init_health_checkers(collection_name=c_name)
        cc.start_monitor_threads(self.health_checkers)
        # wait 20s
        sleep(constants.WAIT_PER_OP * 2)
        # assert all expectations
        assert_statistic(self.health_checkers)
        assert_expectations()

        log.info(
            "*********************Chaos Test Completed**********************")
Example #15
    def test_as_int_range(self):
        expect(as_int_range("3-66") == range(3, 66))
        expect(as_int_range("0 - 0") == range(0))

        expect(as_int_range(":33:3") == range(0, 33, 3))
        expect(as_int_range(":33") == range(0, 33))
        expect(as_int_range(":33") == range(0, 33))
        expect(as_int_range("0:33") == range(0, 33))
        expect(as_int_range("-28:33") == range(-28, 33))

        expect(as_int_range("from 333 to -28") == range(333, -28, -1))
        expect(as_int_range("from -333 to -28") == range(-333, -28, 1))
        expect(as_int_range("from -333 to 28") == range(-333, 28, 1))
        expect(as_int_range("333 to 2833") == range(333, 2833, 1))
        assert_expectations()
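A hypothetical as_int_range parser reverse-engineered from the expectations above; the real implementation may well differ, but this sketch satisfies every case in the test:

import re

def as_int_range(text):
    # Hypothetical parser covering the "from X to Y" / "X to Y",
    # colon-separated and dash-separated formats seen in the test.
    text = text.strip().lower()
    match = re.match(r"^(?:from\s+)?(-?\d+)\s+to\s+(-?\d+)$", text)
    if match:
        start, stop = int(match.group(1)), int(match.group(2))
        return range(start, stop, 1 if start <= stop else -1)
    if ":" in text:
        parts = [part.strip() for part in text.split(":")]
        start = int(parts[0]) if parts[0] else 0
        stop = int(parts[1])
        step = int(parts[2]) if len(parts) > 2 and parts[2] else 1
        return range(start, stop, step)
    start, stop = (int(part) for part in text.split("-", 1))
    return range(start, stop)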
Example #16
    def test_get_launch_info_wrong_launch_id(self, rp_service, monkeypatch):
        """Test get launch information for a non existed launch.

        :param rp_service:  Pytest fixture that represents ReportPortalService
                            object with mocked session.
        :param monkeypatch: Pytest fixture to safely set/delete an attribute
        """
        mock_get = mock.Mock()
        monkeypatch.setattr(rp_service.session, 'get', mock_get)
        monkeypatch.setattr(rp_service, 'launch_id', '1234')

        launch_info = rp_service.get_launch_info()
        expect(mock_get.call_count == 5)
        expect(launch_info == {})
        assert_expectations()
Example #17
    def equal(self, e, *args):
        expect(
            e.value.error.title == self.title,
            'Expected title is "{}", actual is "{}"'.format(
                self.title, e.value.error.title))
        expect(
            e.value.error['code'] == self.code,
            'Expected error code is "{}", actual is "{}"'.format(
                self.code, e.value.error['code']))
        for i in args:
            expect(
                i in e.value.error['desc'],
                'Expected part of desc is "{}", actual desc is "{}"'.format(
                    i, e.value.error['desc']))
        assert_expectations()
Example #18
def test_stereo_doesnt_overflow():
    """Ensure loud stereo tracks do not overflow."""
    wave = Wave("tests/stereo in-phase.wav")

    samp = 100
    stride = 1
    data = wave.get_around(wave.nsamp // 2, samp, stride)
    expect(np.amax(data) > 0.99)
    expect(np.amin(data) < -0.99)

    # In the absence of overflow, sine waves have no large jumps.
    # In the presence of overflow, stereo sum will jump between INT_MAX and INT_MIN.
    # np.mean and rescaling converts to 0.499... and -0.5, which is nearly 1.
    expect(np.amax(np.abs(np.diff(data))) < 0.5)

    assert_expectations()
Example #19
def test_command_based_list(browser, config_host):
    very_parametrized_script_page = VeryParametrizedScript(
        browser, config_host)
    very_parametrized_script_page.parameter_command_based_list.click()

    expect(is_displayed(very_parametrized_script_page.command_based_list),
           "Command based List was not opened on click")
    expect(
        len(very_parametrized_script_page.command_based_list_elements) > 0,
        "Command based List has no elements")

    random_drop_down_element = random.choice(
        very_parametrized_script_page.command_based_list_elements)
    random_drop_down_element.click()

    assert_expectations()
Example #20
def test_pytest_configure(mocked_config):
    """Test plugin successful configuration.

    :param mocked_config: Pytest fixture
    """
    mocked_config.option.rp_enabled = True
    mocked_config.option.rp_project = None
    pytest_configure(mocked_config)
    expect(mocked_config._rp_enabled is True)
    expect(
        lambda: isinstance(mocked_config.py_test_service, PyTestServiceClass))
    assert_expectations()
    mocked_config.getoption.assert_has_calls([
        mock.call('--collect-only', default=False),
        mock.call('--setup-plan', default=False)
    ])
Example #21
    def test__addError(self):
        try:
            raise TestException('')
        except TestException:
            err = sys.exc_info()
        expected_test_err_value = err[1]
        expected_test_err_info = str(err[0].__name__) + ":\n" + "".join(
            traceback.format_tb(err[2]))

        self.plugin._addError(self.test_object, err)

        expect(lambda: self.assertEqual(expected_test_err_value,
                                        self.test_object.errors[0]))
        expect(lambda: self.assertEqual(expected_test_err_info,
                                        self.test_object.errors[1]))
        assert_expectations()
Example #22
def test_pytest_configure(mocked_config):
    """Test plugin successful configuration.

    :param mocked_config: Pytest fixture
    """
    mocked_config.getoption.side_effect = (False, False)
    mocked_config.option = mock.Mock()
    mocked_config.option.rp_enabled = True
    mocked_config.option.rp_project = None
    pytest_configure(mocked_config)
    expect(mocked_config._reportportal_configured is True)
    expect(
        lambda: isinstance(mocked_config.py_test_service, PyTestServiceClass))
    expect(lambda: isinstance(mocked_config._reporter, RPReportListener))
    assert_expectations()
Example #23
def test_edit_simple_text_to_hide_inc_params(browser, config_host):
    very_parametrized_script_page = VeryParametrizedScript(
        browser, config_host)

    very_parametrized_script_page.parameter_simple_text.send_keys("something")

    expect(
        not is_displayed(very_parametrized_script_page.parameter_inc_param1),
        "inc_param1 is displayed although the key text is not present in the "
        "simple text field")
    expect(
        not is_displayed(very_parametrized_script_page.parameter_inc_param2),
        "inc_param2 is displayed although the key text is not present in the "
        "simple text field")

    assert_expectations()
Example #24
    def _assert_movie_lists_match(self, movies, api_movies):
        assert len(api_movies) == len(movies)

        iter_api_movies = iter(api_movies)

        for movie in movies:
            api_movie = next(iter_api_movies)
            items = ('title', 'description', 'imdb')
            for item in items:
                expect(movie[item] == api_movie[item], msg=f"{item}: {movie[item]} != {api_movie[item]}")
            if api_movie['image'] == '':
                expect(movie['image'] == f'{self.url}static/img/no-poster.ccba1b0.png',
                       msg='expect this static image to be shown if no image was found')
            else:
                expect(movie['image'] == api_movie['image'], msg=f"image: {movie['image']} != {api_movie['image']}")

        assert_expectations()
Example #25
    def test_quality(self, get_city_from_postgres):

        page = QualityLoginPage()
        # Open the page
        page.openn()
        # Fill in the authorization form and click the 'Войти' (Login) button
        page.login(QualityLogin.login, QualityLogin.password)
        print(QualityLogin.login, QualityLogin.password)
        # Check that authorization succeeded (the NGINX page appeared)
        page.openn3()
        # Fill in the 'наименование' (name) field with the value 'мос',
        #     check that the drop-down list contains the suggestions
        #     (Москва, Мосальск, Московский, Мостовской, Мосты),
        #     click the 'Москва' suggestion
        #     and check that the selected value appears in the field
        page.search_organization(get_city_from_postgres[0],
                                 get_city_from_postgres[1])
        assert_expectations()
Example #26
def test_search_in_command_based_list(browser, config_host):
    very_parametrized_script_page = VeryParametrizedScript(
        browser, config_host)
    very_parametrized_script_page.search_field_in_command_based_list.send_keys(
        search_request)

    expect(is_displayed(very_parametrized_script_page.command_based_list),
           "Command based List is not displayed after search")
    for element in get_visible_values_of_list(
            very_parametrized_script_page.command_based_list):
        expect(is_displayed(element), "Visible list element is not displayed")
    for element in get_hidden_values_of_list(
            very_parametrized_script_page.command_based_list):
        expect(not is_displayed(element),
               "Hidden list element is displayed")

    assert_expectations()
Example #27
def test_click_random_drop_down_element(browser, config_host):
    very_parametrized_script_page = VeryParametrizedScript(
        browser, config_host)
    random_drop_down_element = random.choice(
        very_parametrized_script_page.parameter_simple_list_drop_down_elements)
    random_drop_down_element.click()
    expect(
        str(
            very_parametrized_script_page.parameter_simple_list.get_attribute(
                'value')) == str(
                    random_drop_down_element.get_attribute('title')),
        "Field text is not equal to input")
    expect(
        random_drop_down_element.get_attribute("class").find("selected") > -1,
        "Selected element does not have the \"selected\" class")

    assert_expectations()
Example #28
def test_stop_plugin_configuration_on_conn_error(mocked_get, mocked_config):
    """Test plugin configuration in case of HTTP error.

    The value of the _reportportal_configured attribute of the pytest Config
    object should be changed to False, stopping plugin configuration, if an
    HTTP error occurs while getting the HTTP response from ReportPortal.
    :param mocked_get:    Instance of the MagicMock
    :param mocked_config: Pytest fixture
    """
    mock_response = mock.Mock()
    mock_response.raise_for_status.side_effect = RequestException()
    mocked_get.return_value = mock_response
    expect(pytest_configure(mocked_config) is None,
           'Received unexpected return value from pytest_configure.')
    expect(mocked_config._reportportal_configured is False,
           'The value of the _reportportal_configured is not False.')
    assert_expectations()
Example #29
def validate_response(response, op, required_params=None):
    """
    Validate response of some API operation.

    :param dict response:  Response getting after sending request.
    :param str op:  Name of requested API function.
    :param list[str|tuple] required_params: List of required parametrs to be compared with.
        Example: ["param1", ("param2": int), ("param3": 12.3)]
        Provided checks:
            - "param1" is in response and value of "param1" has `str` type. Equivalent with ("param1", str) tuple
            - "param2" is in response and value of "param2" has `int` type
            - "param3" is in response, value of "param3" has `float` type and value == 12.3
    """

    assert "error" not in response, "%s operation failed: %s" % (
        op, response["error"])

    if not required_params:
        required_params = []

    for param in required_params:
        try:
            key, value = param
        except ValueError:
            key = param
            value = str  # default type for most fields
        val_type = type(value)
        if val_type == type:  # e.g. if was passed type, not value
            val_type = value
        expect(
            key in response, "Parameter '%s' is missing in '%s' response: %s" %
            (key, op, response))
        if key not in response:
            continue
        expect(
            isinstance(response[key], val_type),
            "Parameter '%s' of '%s' has invalid value type '%s', expected '%s': %s"
            % (key, op, type(response[key]), val_type, response))
        if val_type == value or not isinstance(response[key], val_type):
            # Only a type was given, or the type check already failed:
            # skip the value comparison.
            continue
        expect(
            response[key] == value,
            "Parameter '%s' of '%s' has invalid value '%s', expected '%s': %s"
            % (key, op, response[key], value, response))
    assert_expectations()
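For instance, with the parameter format described in the docstring, a call could look like this (the operation name and response payload are made up for illustration):

# Hypothetical response and call, purely illustrative:
response = {"txid": "ab12cd", "confirmations": 3, "fee": 12.3}
validate_response(
    response,
    "gettransaction",
    required_params=["txid", ("confirmations", int), ("fee", 12.3)],
)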
Example #30
def assert_provider_config(bundle: Bundle, statemap: dict):
    for pname, plv in statemap.items():
        actual_cnf = bundle.provider(name=pname).config()
        expected_cnf = plv['config']
        for k, v in expected_cnf.items():
            expect(
                v == actual_cnf[k],
                'Provider {} config "{}" is "{}" while expected "{}"'.format(
                    pname, k, str(actual_cnf[k]), str(v)))
        for hname, host_expected_cnf in plv['hosts'].items():
            host_actual_cnf = bundle.provider(name=pname).host(
                fqdn=hname).config()
            for k, v in host_expected_cnf.items():
                expect(
                    v == host_actual_cnf[k],
                    'Provider {} host {} config {} is {} while expected {}'
                    .format(pname, hname, k, str(host_actual_cnf[k]), str(v)))
    assert_expectations()
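The helper assumes a statemap shaped roughly like the snippet below (provider, host and config names here are placeholders):

# Hypothetical statemap matching the keys the helper iterates over:
statemap = {
    'provider_one': {
        'config': {'ssh_user': 'root'},
        'hosts': {
            'host-1.example.com': {'ansible_port': 22},
        },
    },
}
# assert_provider_config(bundle, statemap)  # `bundle` comes from the caller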