def test_drift_analysis(request, provider, instance, soft_assert):
    """ Tests drift analysis is correct

    Metadata:
        test_flag: vm_analysis
    """

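    # get original drift history count from the instance's details page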
    instance.load_details()
    drift_num_orig = 0
    drift_orig = InfoBlock("Relationships", "Drift History").text
    if drift_orig != 'None':
        drift_num_orig = int(drift_orig)
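
    # initiate 1st analysis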
    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15, timeout="15m", fail_func=lambda: toolbar.select('Reload'))
    instance.load_details()
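    # wait for drift history num+1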
    wait_for(
        lambda: int(InfoBlock("Relationships", "Drift History").text) == drift_num_orig + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )
    drift_new = int(InfoBlock("Relationships", "Drift History").text)

    # add a tag and a finalizer to remove it
    tag = ('Department', 'Accounting')
    instance.add_tag(tag, single_value=False)
    request.addfinalizer(lambda: instance.remove_tag(tag))

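    # initiate 2nd analysis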
    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15, timeout="15m", fail_func=lambda: toolbar.select('Reload'))
    instance.load_details()
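    # wait for drift history to increase again (drift_new + 1)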
    wait_for(
        lambda: int(InfoBlock("Relationships", "Drift History").text) == drift_new + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # check drift difference
    soft_assert(not instance.equal_drift_results('Department (1)', 'My Company Tags', 0, 1),
                "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Accounting tag should not be displayed, because it changed between the two analyses
    toolbar.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell('Accounting', 0)

    # Accounting tag should be displayed now
    toolbar.select("Attributes with different values")
    d_grid.get_cell('Accounting', 0)
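

# The VM examples above poll an `is_vm_analysis_finished` helper that is not shown in
# these snippets (imports are likewise omitted; all names come from the cfme UI test
# framework). A minimal sketch is given below, modeled on the `is_host_analysis_finished`
# helpers defined in the host drift tests further down; the tab label, navigation
# destination and task-name format are assumptions and may differ in the real framework.
def is_vm_analysis_finished(vm_name):
    """ Check if the SmartState scan task for the given VM has finished """
    if not sel.is_displayed(tasks.tasks_table) or not tabs.is_tab_selected('All VM Analysis Tasks'):
        sel.force_navigate('tasks_all_vm')  # assumed destination name
    vm_analysis_finished = tasks.tasks_table.find_row_by_cells({
        'task_name': "Scan from Vm '{}'".format(vm_name),  # assumed task-name format
        'state': 'Finished'
    })
    return vm_analysis_finished is not None
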
def test_host_drift_analysis(request, setup_provider, provider, host_type, host_name, soft_assert):
    """Tests host drift analysis

    Metadata:
        test_flag: host_drift_analysis
    """
    host_data = get_host_data_by_name(provider.key, host_name)
    test_host = host.Host(name=host_name)

    wait_for(lambda: test_host.exists, delay=10, num_sec=120, fail_func=sel.refresh,
             message="hosts_exists")

    # get drift history num
    drift_num_orig = int(test_host.get_detail('Relationships', 'Drift History'))

    # add credentials to host + finalizer to remove them
    if not test_host.has_valid_credentials:
        test_host.update(
            updates={'credentials': host.get_credentials_from_config(host_data['credentials'])}
        )
        wait_for(
            lambda: test_host.has_valid_credentials,
            delay=10,
            num_sec=120,
            fail_func=sel.refresh,
            message="has_valid_credentials"
        )

        def test_host_remove_creds():
            test_host.update(
                updates={
                    'credentials': host.Host.Credential(
                        principal="",
                        secret="",
                        verify_secret=""
                    )
                }
            )
        request.addfinalizer(test_host_remove_creds)

    # initiate 1st analysis
    test_host.run_smartstate_analysis()

    # wait for drift history num+1
    wait_for(
        lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # add a tag and a finalizer to remove it
    tag = ('Department', 'Accounting')
    test_host.tag(tag, single_value=False)
    request.addfinalizer(lambda: test_host.untag(tag))

    # initiate 2nd analysis
    test_host.run_smartstate_analysis()

    # wait for drift history num+2
    wait_for(
        lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 2,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # check drift difference
    soft_assert(not test_host.equal_drift_results('Department (1)', 'My Company Tags', 0, 1),
        "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Accounting tag should not be displayed, because it changed between the two analyses
    tb.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell('Accounting', 0)

    # Accounting tag should be displayed now
    tb.select("Attributes with different values")
    d_grid.get_cell('Accounting', 0)
def test_host_drift_analysis(request, setup_provider, provider, host, soft_assert):
    """Tests host drift analysis

    Metadata:
        test_flag: host_drift_analysis
    """
    test_host = host_obj.Host(name=host['name'], provider=provider)

    wait_for(lambda: test_host.exists, delay=10, num_sec=120, fail_func=sel.refresh,
             message="hosts_exists")

    # get drift history num
    drift_num_orig = int(test_host.get_detail('Relationships', 'Drift History'))

    # add credentials to host + finalizer to remove them
    if not test_host.has_valid_credentials:
        test_host.update(
            updates={'credentials': host_obj.get_credentials_from_config(host['credentials'])},
            validate_credentials=True
        )

        @request.addfinalizer
        def test_host_remove_creds():
            test_host.update(
                updates={
                    'credentials': host_obj.Host.Credential(
                        principal="",
                        secret="",
                        verify_secret=""
                    )
                }
            )
    # clear table
    view = navigate_to(Tasks, 'AllOtherTasks')
    view.delete.item_select('Delete All', handle_alert=True)

    # initiate 1st analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    def is_host_analysis_finished():
        """ Check if analysis is finished - if not, reload page
        """
        finished = False
        view = navigate_to(Tasks, 'AllOtherTasks')
        host_analysis_row = view.tabs.allothertasks.table.row(
            task_name="SmartState Analysis for '{}'".format(test_host.name))
        if host_analysis_row.state.text == 'Finished':
            finished = True
            # select the row and delete the task
            host_analysis_row[0].check()
            view.delete.item_select('Delete', handle_alert=True)
        else:
            view.reload.click()
        return finished

    wait_for(is_host_analysis_finished, delay=5, timeout="8m")

    # wait for drift history num+1
    wait_for(
        lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # add a tag and a finalizer to remove it
    tag = ('Department', 'Accounting')
    test_host.tag(tag, single_value=False)
    request.addfinalizer(lambda: test_host.untag(tag))

    # initiate 2nd analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    wait_for(is_host_analysis_finished, delay=5, timeout="8m")

    # wait for drift history num+2
    wait_for(
        lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 2,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # check drift difference
    soft_assert(not test_host.equal_drift_results('Department (1)', 'My Company Tags', 0, 1),
        "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Accounting tag should not be displayed, because it changed between the two analyses
    tb.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell('Accounting', 0)

    # Accounting tag should be displayed now
    tb.select("Attributes with different values")
    d_grid.get_cell('Accounting', 0)
def test_host_drift_analysis(request, setup_provider, provider, host_type, host_name, soft_assert):
    """Tests host drift analysis

    Metadata:
        test_flag: host_drift_analysis
    """
    host_data = get_host_data_by_name(provider.key, host_name)
    test_host = host.Host(name=host_name)

    wait_for(lambda: test_host.exists, delay=10, num_sec=120, fail_func=sel.refresh,
             message="hosts_exists")

    # get drift history num
    drift_num_orig = int(test_host.get_detail('Relationships', 'Drift History'))

    # add credentials to host + finalizer to remove them
    if not test_host.has_valid_credentials:
        test_host.update(
            updates={'credentials': host.get_credentials_from_config(host_data['credentials'])},
            validate_credentials=True
        )

        @request.addfinalizer
        def test_host_remove_creds():
            test_host.update(
                updates={
                    'credentials': host.Host.Credential(
                        principal="",
                        secret="",
                        verify_secret=""
                    )
                }
            )

    # initiate 1st analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    def is_host_analysis_finished():
        """ Check if analysis is finished - if not, reload page
        """
        if not sel.is_displayed(tasks.tasks_table) or not tabs.is_tab_selected('All Other Tasks'):
            sel.force_navigate('tasks_all_other')
        host_analysis_finished = tasks.tasks_table.find_row_by_cells({
            'task_name': "SmartState Analysis for '{}'".format(host_name),
            'state': 'Finished'
        })
        if host_analysis_finished:
            # Delete the task
            tasks.tasks_table.select_row_by_cells({
                'task_name': "SmartState Analysis for '{}'".format(host_name),
                'state': 'Finished'
            })
            tb.select('Delete Tasks', 'Delete', invokes_alert=True)
            sel.handle_alert()
        return host_analysis_finished is not None

    wait_for(is_host_analysis_finished,
             delay=15, timeout="8m", fail_func=lambda: tb.select('Reload'))

    # wait for drift history num+1
    wait_for(
        lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # add a tag and a finalizer to remove it
    tag = ('Department', 'Accounting')
    test_host.tag(tag, single_value=False)
    request.addfinalizer(lambda: test_host.untag(tag))

    # initiate 2nd analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    wait_for(is_host_analysis_finished,
             delay=15, timeout="8m", fail_func=lambda: tb.select('Reload'))

    # wait for drift history num+2
    wait_for(
        lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 2,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # check drift difference
    soft_assert(not test_host.equal_drift_results('Department (1)', 'My Company Tags', 0, 1),
        "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Accounting tag should not be displayed, because it changed between the two analyses
    tb.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell('Accounting', 0)

    # Accounting tag should be displayed now
    tb.select("Attributes with different values")
    d_grid.get_cell('Accounting', 0)
def test_host_drift_analysis(request, setup_provider, provider, host_type,
                             host_name, soft_assert):
    """Tests host drift analysis

    Metadata:
        test_flag: host_drift_analysis
    """
    host_data = get_host_data_by_name(provider.key, host_name)
    test_host = host.Host(name=host_name)

    wait_for(lambda: test_host.exists,
             delay=10,
             num_sec=120,
             fail_func=sel.refresh,
             message="hosts_exists")

    # get drift history num
    drift_num_orig = int(test_host.get_detail('Relationships',
                                              'Drift History'))

    # add credentials to host + finalizer to remove them
    if not test_host.has_valid_credentials:
        test_host.update(
            updates={'credentials': host.get_credentials_from_config(host_data['credentials'])},
            validate_credentials=True
        )

        @request.addfinalizer
        def test_host_remove_creds():
            test_host.update(
                updates={
                    'credentials': host.Host.Credential(
                        principal="",
                        secret="",
                        verify_secret=""
                    )
                }
            )

    # initiate 1st analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    def is_host_analysis_finished():
        """ Check if analysis is finished - if not, reload page
        """
        if not sel.is_displayed(tasks.tasks_table) or not tabs.is_tab_selected('All Other Tasks'):
            sel.force_navigate('tasks_all_other')
        host_analysis_finished = tasks.tasks_table.find_row_by_cells({
            'task_name': "SmartState Analysis for '{}'".format(host_name),
            'state': 'Finished'
        })
        if host_analysis_finished:
            # Delete the task
            tasks.tasks_table.select_row_by_cells({
                'task_name': "SmartState Analysis for '{}'".format(host_name),
                'state': 'Finished'
            })
            tb.select('Delete Tasks', 'Delete', invokes_alert=True)
            sel.handle_alert()
        return host_analysis_finished is not None

    wait_for(is_host_analysis_finished,
             delay=15,
             timeout="8m",
             fail_func=lambda: tb.select('Reload'))

    # wait for drift history num+1
    wait_for(
        lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # add a tag and a finalizer to remove it
    tag = ('Department', 'Accounting')
    test_host.tag(tag, single_value=False)
    request.addfinalizer(lambda: test_host.untag(tag))

    # initiate 2nd analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    wait_for(is_host_analysis_finished,
             delay=15,
             timeout="8m",
             fail_func=lambda: tb.select('Reload'))

    # wait for drift history num+2
    wait_for(
        lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 2,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # check drift difference
    soft_assert(
        not test_host.equal_drift_results('Department (1)', 'My Company Tags',
                                          0, 1),
        "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Accounting tag should not be displayed, because it changed between the two analyses
    tb.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell('Accounting', 0)

    # Accounting tag should be displayed now
    tb.select("Attributes with different values")
    d_grid.get_cell('Accounting', 0)
def test_drift_analysis(request, provider, instance, soft_assert):
    """ Tests drift analysis is correct

    Metadata:
        test_flag: vm_analysis
    """

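    # get original drift history count from the instance's details page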
    instance.load_details()
    drift_num_orig = 0
    drift_orig = InfoBlock("Relationships", "Drift History").text
    if drift_orig != 'None':
        drift_num_orig = int(drift_orig)
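
    # initiate 1st analysis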
    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15,
             timeout="35m",
             fail_func=lambda: toolbar.select('Reload'))
    instance.load_details()
    wait_for(
        lambda: int(InfoBlock("Relationships", "Drift History").text) == drift_num_orig + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )
    drift_new = int(InfoBlock("Relationships", "Drift History").text)

    # add a tag and a finalizer to remove it
    instance.add_tag('Department', 'Accounting')
    request.addfinalizer(
        lambda: instance.remove_tag('Department', 'Accounting'))

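    # initiate 2nd analysis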
    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15,
             timeout="35m",
             fail_func=lambda: toolbar.select('Reload'))
    instance.load_details()
    wait_for(
        lambda: int(InfoBlock("Relationships", "Drift History").text) == drift_new + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # check drift difference
    soft_assert(
        not instance.equal_drift_results('Department (1)', 'My Company Tags',
                                         0, 1),
        "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Accounting tag should not be displayed, because it changed between the two analyses
    toolbar.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell('Accounting', 0)

    # Accounting tag should be displayed now
    toolbar.select("Attributes with different values")
    d_grid.get_cell('Accounting', 0)
def test_host_drift_analysis(request, setup_provider, provider_key, host_type, host_name, soft_assert):
    """Tests host drift analysis

    Metadata:
        test_flag: host_drift_analysis
    """
    host_data = get_host_data_by_name(provider_key, host_name)
    test_host = host.Host(name=host_name)

    wait_for(lambda: test_host.exists, delay=10, num_sec=120, fail_func=sel.refresh, message="hosts_exists")

    # get drift history num
    drift_num_orig = int(test_host.get_detail("Relationships", "Drift History"))

    # add credentials to host + finalizer to remove them
    if not test_host.has_valid_credentials:
        test_host.update(updates={"credentials": host.get_credentials_from_config(host_data["credentials"])})
        wait_for(
            lambda: test_host.has_valid_credentials,
            delay=10,
            num_sec=120,
            fail_func=sel.refresh,
            message="has_valid_credentials",
        )

        def test_host_remove_creds():
            test_host.update(updates={"credentials": host.Host.Credential(principal="", secret="", verify_secret="")})

        request.addfinalizer(test_host_remove_creds)

    # initiate 1st analysis
    test_host.run_smartstate_analysis()

    # wait for drift history num+1
    wait_for(
        lambda: int(test_host.get_detail("Relationships", "Drift History")) == drift_num_orig + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh,
    )

    # change host name + finalizer to change it back
    orig_host_name = test_host.name
    with update(test_host):
        test_host.name = "{}_tmp_drift_rename".format(test_host.name)

    def host_reset_name():
        with update(test_host):
            test_host.name = orig_host_name

    request.addfinalizer(host_reset_name)

    # initiate 2nd analysis
    test_host.run_smartstate_analysis()

    # wait for drift history num+2
    wait_for(
        lambda: int(test_host.get_detail("Relationships", "Drift History")) == drift_num_orig + 2,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh,
    )

    # check drift difference
    soft_assert(
        not test_host.equal_drift_results("All Sections", 0, 1),
        "Drift analysis results are equal when they shouldn't be",
    )

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Name should not be displayed, because it was changed
    tb.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell("Name", 0)

    # Name should be displayed now
    tb.select("Attributes with different values")
    d_grid.get_cell("Name", 0)
def test_host_drift_analysis(appliance, request, setup_provider, provider,
                             host, soft_assert):
    """Tests host drift analysis

    Metadata:
        test_flag: host_drift_analysis
    """
    host_collection = appliance.collections.hosts
    test_host = host_collection.instantiate(name=host['name'],
                                            provider=provider)
    wait_for(lambda: test_host.exists,
             delay=20,
             num_sec=120,
             fail_func=sel.refresh,
             message="hosts_exists")

    # get drift history num
    drift_num_orig = int(test_host.get_detail('Relationships',
                                              'Drift History'))

    # add credentials to host + finalizer to remove them
    if not test_host.has_valid_credentials:
        test_host.update(
            updates={'credentials': host_obj.get_credentials_from_config(host['credentials'])},
            validate_credentials=True
        )

        @request.addfinalizer
        def test_host_remove_creds():
            test_host.update(
                updates={
                    'credentials': host_obj.Host.Credential(
                        principal="",
                        secret="",
                        verify_secret=""
                    )
                }
            )

    # clear table
    view = navigate_to(Tasks, 'AllOtherTasks')
    view.delete.item_select('Delete All', handle_alert=True)

    # initiate 1st analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    def is_host_analysis_finished():
        """ Check if analysis is finished - if not, reload page
        """
        finished = False
        view = navigate_to(Tasks, 'AllOtherTasks')
        host_analysis_row = view.tabs.allothertasks.table.row(
            task_name="SmartState Analysis for '{}'".format(test_host.name))
        if host_analysis_row.state.text == 'Finished':
            finished = True
            # select the row and delete the task
            host_analysis_row[0].check()
            view.delete.item_select('Delete', handle_alert=True)
        else:
            view.reload.click()
        return finished

    wait_for(is_host_analysis_finished, delay=5, timeout="8m")

    # wait for drift history num+1
    wait_for(
        lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # add a tag and a finalizer to remove it
    test_host.add_tag(category='Department', tag='Accounting')
    request.addfinalizer(
        lambda: test_host.remove_tag(category='Department', tag='Accounting'))

    # initiate 2nd analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    wait_for(is_host_analysis_finished, delay=5, timeout="8m")

    # wait for drift history num+2
    wait_for(
        lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 2,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # check drift difference
    soft_assert(
        not test_host.equal_drift_results('Department (1)', 'My Company Tags',
                                          0, 1),
        "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Accounting tag should not be displayed, because it changed between the two analyses
    tb.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell('Accounting', 0)

    # Accounting tag should be displayed now
    tb.select("Attributes with different values")
    d_grid.get_cell('Accounting', 0)