Example #1
    def equal_drift_results(self, row_text, *indexes):
        """ Compares drift analysis results of a row specified by it's title text

        Args:
            row_text: Title text of the row to compare
            indexes: Indexes of results to compare starting with 0 for first row (latest result).
                     Compares all available drifts, if left empty (default).

        Note:
            There have to be at least 2 drift results available for this to work.

        Returns:
            ``True`` if equal, ``False`` otherwise.
        """
        # mark by indexes or mark all
        sel.force_navigate('infrastructure_host', context={'host': self})
        list_acc.select('Relationships', 'Show host drift history')
        if indexes:
            drift_table.select_rows_by_indexes(*indexes)
        else:
            # We can't compare more than 10 drift results at once
            # so when selecting all, we have to limit it to the latest 10
            if len(list(drift_table.rows())) > 10:
                drift_table.select_rows_by_indexes(*range(0, 10))
            else:
                drift_table.select_all()
        tb.select("Select up to 10 timestamps for Drift Analysis")

        d_grid = DriftGrid()
        if not tb.is_active("All attributes"):
            tb.select("All attributes")
        if any(d_grid.cell_indicates_change(row_text, i) for i in range(0, len(indexes))):
            return False
        return True
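
For context, the return value above reduces to "no compared cell indicates a change". A minimal, runnable sketch of that comparison logic, with a hypothetical FakeDriftGrid standing in for the UI-backed DriftGrid:

class FakeDriftGrid:
    """Hypothetical stand-in for DriftGrid; records which
    (row_text, index) cells should report a change."""
    def __init__(self, changed_cells):
        self._changed = set(changed_cells)

    def cell_indicates_change(self, row_text, index):
        return (row_text, index) in self._changed


def equal_results(grid, row_text, indexes):
    # Mirrors the any() check above: equal unless some compared cell changed
    return not any(grid.cell_indicates_change(row_text, i)
                   for i in range(len(indexes)))


grid = FakeDriftGrid({('Department (1)', 1)})
assert equal_results(grid, 'Department (1)', [0]) is True      # cell 0 unchanged
assert equal_results(grid, 'Department (1)', [0, 1]) is False  # cell 1 changed
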
Example #2
    def equal_drift_results(self, row_text, section, *indexes):
        """ Compares drift analysis results of a row specified by it's title text

        Args:
            row_text: Title text of the row to compare
            section: Accordion section where the change happened; this section must be activated
            indexes: Indexes of results to compare starting with 0 for first row (latest result).
                     Compares all available drifts, if left empty (default).

        Note:
            There have to be at least 2 drift results available for this to work.

        Returns:
            ``True`` if equal, ``False`` otherwise.
        """
        # mark by indexes or mark all
        navigate_to(self, 'Details')
        list_acc.select(
            'Relationships',
            version.pick({
                version.LOWEST: 'Show host drift history',
                '5.4': 'Show Host drift history'
            }))
        if indexes:
            drift_table.select_rows_by_indexes(*indexes)
        else:
            # We can't compare more than 10 drift results at once
            # so when selecting all, we have to limit it to the latest 10
            if len(list(drift_table.rows())) > 10:
                drift_table.select_rows_by_indexes(*range(0, 10))
            else:
                drift_table.select_all()
        tb.select("Select up to 10 timestamps for Drift Analysis")

        # Make sure the section we need is active/open
        sec_loc_map = {
            'Properties': 'Properties',
            'Security': 'Security',
            'Configuration': 'Configuration',
            'My Company Tags': 'Categories'
        }
        active_sec_loc = "//div[@id='all_sections_treebox']//li[contains(@id, 'group_{}')]"\
            "/span[contains(@class, 'dynatree-selected')]".format(sec_loc_map[section])
        sec_checkbox_loc = "//div[@id='all_sections_treebox']//li[contains(@id, 'group_{}')]"\
            "//span[contains(@class, 'dynatree-checkbox')]".format(sec_loc_map[section])
        sec_apply_btn = "//div[@id='accordion']/a[contains(normalize-space(text()), 'Apply')]"

        # If the section is not active yet, activate it
        if not sel.is_displayed(active_sec_loc):
            sel.click(sec_checkbox_loc)
            sel.click(sec_apply_btn)

        if not tb.is_active("All attributes"):
            tb.select("All attributes")
        d_grid = DriftGrid()
        if any(
                d_grid.cell_indicates_change(row_text, i)
                for i in range(0, len(indexes))):
            return False
        return True
Example #3
def test_drift_analysis(request, provider, instance, soft_assert):
    """ Tests drift analysis is correct

    Metadata:
        test_flag: vm_analysis
    """

    instance.load_details()
    drift_num_orig = 0
    drift_orig = InfoBlock("Relationships", "Drift History").text
    if drift_orig != 'None':
        drift_num_orig = int(drift_orig)
    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15, timeout="15m", fail_func=lambda: toolbar.select('Reload'))
    instance.load_details()
    wait_for(
        lambda: int(InfoBlock("Relationships", "Drift History").text) == drift_num_orig + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )
    drift_new = int(InfoBlock("Relationships", "Drift History").text)

    # add a tag and a finalizer to remove it
    tag = ('Department', 'Accounting')
    instance.add_tag(tag, single_value=False)
    request.addfinalizer(lambda: instance.remove_tag(tag))

    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15, timeout="15m", fail_func=lambda: toolbar.select('Reload'))
    instance.load_details()
    wait_for(
        lambda: int(InfoBlock("Relationships", "Drift History").text) == drift_new + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # check drift difference
    soft_assert(not instance.equal_drift_results('Department (1)', 'My Company Tags', 0, 1),
                "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Accounting tag should not be displayed, because it was changed to True
    toolbar.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell('Accounting', 0)

    # Accounting tag should be displayed now
    toolbar.select("Attributes with different values")
    d_grid.get_cell('Accounting', 0)
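
The tests lean on wait_for from the framework's utils.wait module. A simplified stand-in (an assumption-laden sketch, not the real implementation, which also accepts string timeouts such as "15m") showing the delay/num_sec/fail_func contract used throughout these examples:

import time


def wait_for_simple(condition, delay=5, num_sec=60, fail_func=None, message=""):
    """Poll `condition` every `delay` seconds for up to `num_sec` seconds,
    calling `fail_func` after each failed attempt (e.g. a page refresh)."""
    deadline = time.time() + num_sec
    while time.time() < deadline:
        result = condition()
        if result:
            return result
        if fail_func is not None:
            fail_func()
        time.sleep(delay)
    raise TimeoutError("Timed out waiting for: {}".format(message))
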
Example #4
    def equal_drift_results(self, row_text, section, *indexes):
        """ Compares drift analysis results of a row specified by it's title text

        Args:
            row_text: Title text of the row to compare
            section: Accordion section where the change happened; this section must be activated
            indexes: Indexes of results to compare starting with 0 for first row (latest result).
                     Compares all available drifts, if left empty (default).

        Note:
            There have to be at least 2 drift results available for this to work.

        Returns:
            ``True`` if equal, ``False`` otherwise.
        """
        # mark by indexes or mark all
        navigate_to(self, 'Details')
        list_acc.select('Relationships',
            version.pick({
                version.LOWEST: 'Show host drift history',
                '5.4': 'Show Host drift history'}))
        if indexes:
            drift_table.select_rows_by_indexes(*indexes)
        else:
            # We can't compare more than 10 drift results at once
            # so when selecting all, we have to limit it to the latest 10
            if len(list(drift_table.rows())) > 10:
                drift_table.select_rows_by_indexes(*range(0, 10))
            else:
                drift_table.select_all()
        tb.select("Select up to 10 timestamps for Drift Analysis")

        # Make sure the section we need is active/open
        sec_loc_map = {
            'Properties': 'Properties',
            'Security': 'Security',
            'Configuration': 'Configuration',
            'My Company Tags': 'Categories'}
        active_sec_loc = "//div[@id='all_sections_treebox']//li[contains(@id, 'group_{}')]"\
            "/span[contains(@class, 'dynatree-selected')]".format(sec_loc_map[section])
        sec_checkbox_loc = "//div[@id='all_sections_treebox']//li[contains(@id, 'group_{}')]"\
            "//span[contains(@class, 'dynatree-checkbox')]".format(sec_loc_map[section])
        sec_apply_btn = "//div[@id='accordion']/a[contains(normalize-space(text()), 'Apply')]"

        # If the section is not active yet, activate it
        if not sel.is_displayed(active_sec_loc):
            sel.click(sec_checkbox_loc)
            sel.click(sec_apply_btn)

        if not tb.is_active("All attributes"):
            tb.select("All attributes")
        d_grid = DriftGrid()
        if any(d_grid.cell_indicates_change(row_text, i) for i in range(0, len(indexes))):
            return False
        return True
Example #5
    def equal_drift_results(self, row_text, section, *indexes):
        """ Compares drift analysis results of a row specified by it's title text

        Args:
            row_text: Title text of the row to compare
            section: Accordion section where the change happened; this section will be activated
            indexes: Indexes of results to compare starting with 0 for first row (latest result).
                     Compares all available drifts, if left empty (default).

        Note:
            There have to be at least 2 drift results available for this to work.

        Returns:
            ``True`` if equal, ``False`` otherwise.
        """
        # mark by indexes or mark all
        self.load_details(refresh=True)
        sel.click(InfoBlock("Properties", "Drift History"))
        if indexes:
            drift_table.select_rows_by_indexes(*indexes)
        else:
            # We can't compare more than 10 drift results at once
            # so when selecting all, we have to limit it to the latest 10
            if len(list(drift_table.rows())) > 10:
                drift_table.select_rows_by_indexes(*range(0, 10))
            else:
                drift_table.select_all()
        tb.select("Select up to 10 timestamps for Drift Analysis")

        # Make sure the section we need is active/open
        sec_apply_btn = "//div[@id='accordion']/a[contains(normalize-space(text()), 'Apply')]"

        # Deselect other sections
        for other_section in drift_section.child_items():
            drift_section.check_node(other_section.text)
            drift_section.uncheck_node(other_section.text)

        # Activate the required section
        drift_section.check_node(section)
        sel.click(sec_apply_btn)

        if not tb.is_active("All attributes"):
            tb.select("All attributes")
        drift_grid = DriftGrid()
        if any(
                drift_grid.cell_indicates_change(row_text, i)
                for i in range(0, len(indexes))):
            return False
        return True
Example #6
    def equal_drift_results(self, row_text, section, *indexes):
        """ Compares drift analysis results of a row specified by it's title text

        Args:
            row_text: Title text of the row to compare
            section: Accordion section where the change happened; this section will be activated
            indexes: Indexes of results to compare starting with 0 for first row (latest result).
                     Compares all available drifts, if left empty (default).

        Note:
            There have to be at least 2 drift results available for this to work.

        Returns:
            ``True`` if equal, ``False`` otherwise.
        """
        # mark by indexes or mark all
        self.load_details(refresh=True)
        sel.click(InfoBlock("Properties", "Drift History"))
        if indexes:
            drift_table.select_rows_by_indexes(*indexes)
        else:
            # We can't compare more than 10 drift results at once
            # so when selecting all, we have to limit it to the latest 10
            if len(list(drift_table.rows())) > 10:
                drift_table.select_rows_by_indexes(*range(0, 10))
            else:
                drift_table.select_all()
        tb.select("Select up to 10 timestamps for Drift Analysis")

        # Make sure the section we need is active/open
        sec_apply_btn = "//div[@id='accordion']/a[contains(normalize-space(text()), 'Apply')]"

        # Deselect other sections
        for other_section in drift_section.child_items():
            drift_section.check_node(other_section.text)
            drift_section.uncheck_node(other_section.text)

        # Activate the required section
        drift_section.check_node(section)
        sel.click(sec_apply_btn)

        if not tb.is_active("All attributes"):
            tb.select("All attributes")
        drift_grid = DriftGrid()
        if any(drift_grid.cell_indicates_change(row_text, i) for i in range(0, len(indexes))):
            return False
        return True
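
Examples #5 and #6 drive a checkbox tree (drift_section) so that exactly one accordion section stays selected; the check-then-uncheck pass forces each tri-state node into a known unchecked state first. A runnable sketch of that pattern against a hypothetical in-memory tree:

class FakeCheckboxTree:
    """Hypothetical stand-in for the drift_section tree widget."""
    def __init__(self, nodes):
        self.checked = {name: False for name in nodes}

    def child_items(self):
        return list(self.checked)

    def check_node(self, name):
        self.checked[name] = True

    def uncheck_node(self, name):
        self.checked[name] = False


def activate_only(tree, section):
    # Force every node into a known unchecked state, then select the one we need
    for node in tree.child_items():
        tree.check_node(node)
        tree.uncheck_node(node)
    tree.check_node(section)


tree = FakeCheckboxTree(['Properties', 'Security', 'Configuration', 'My Company Tags'])
activate_only(tree, 'My Company Tags')
assert [n for n, c in tree.checked.items() if c] == ['My Company Tags']
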
Example #7
def test_drift_analysis(request, provider, instance, soft_assert):
    """ Tests drift analysis is correct

    Metadata:
        test_flag: vm_analysis
    """

    instance.load_details()
    drift_num_orig = 0
    drift_orig = InfoBlock("Relationships", "Drift History").text
    if drift_orig != 'None':
        drift_num_orig = int(drift_orig)
    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15, timeout="35m", fail_func=lambda: toolbar.select('Reload'))
    instance.load_details()
    wait_for(
        lambda: int(InfoBlock("Relationships", "Drift History").text) == drift_num_orig + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )
    drift_new = int(InfoBlock("Relationships", "Drift History").text)

    # add a tag and a finalizer to remove it
    tag = ('Department', 'Accounting')
    instance.add_tag(tag, single_value=False)
    request.addfinalizer(lambda: instance.remove_tag(tag))

    instance.smartstate_scan()
    wait_for(lambda: is_vm_analysis_finished(instance.name),
             delay=15, timeout="35m", fail_func=lambda: toolbar.select('Reload'))
    instance.load_details()
    wait_for(
        lambda: int(InfoBlock("Relationships", "Drift History").text) == drift_new + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # check drift difference
    soft_assert(not instance.equal_drift_results('Department (1)', 'My Company Tags', 0, 1),
                "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Accounting tag should not be displayed, because it was changed to True
    toolbar.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell('Accounting', 0)

    # Accounting tag should be displayed now
    toolbar.select("Attributes with different values")
    d_grid.get_cell('Accounting', 0)
Example #8
def test_host_drift_analysis(request, setup_provider, provider, host,
                             soft_assert):
    """Tests host drift analysis

    Metadata:
        test_flag: host_drift_analysis
    """
    test_host = host_obj.Host(name=host['name'], provider=provider)

    wait_for(lambda: test_host.exists,
             delay=10,
             num_sec=120,
             fail_func=sel.refresh,
             message="hosts_exists")

    # get drift history num
    drift_num_orig = int(test_host.get_detail('Relationships',
                                              'Drift History'))

    # add credentials to host + finalizer to remove them
    if not test_host.has_valid_credentials:
        test_host.update(updates={
            'credentials':
            host_obj.get_credentials_from_config(host['credentials'])
        },
                         validate_credentials=True)

        @request.addfinalizer
        def test_host_remove_creds():
            test_host.update(
                updates={
                    'credentials':
                    host_obj.Host.Credential(
                        principal="", secret="", verify_secret="")
                })

    # clear table
    view = navigate_to(Tasks, 'AllOtherTasks')
    view.delete.item_select('Delete All', handle_alert=True)

    # initiate 1st analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    def is_host_analysis_finished():
        """ Check if analysis is finished - if not, reload page
        """
        finished = False
        view = navigate_to(Tasks, 'AllOtherTasks')
        host_analysis_row = view.tabs.allothertasks.table.row(
            task_name="SmartState Analysis for '{}'".format(test_host.name))
        if host_analysis_row.state.text == 'Finished':
            finished = True
            # select the row and delete the task
            host_analysis_row[0].check()
            view.delete.item_select('Delete', handle_alert=True)
        else:
            view.reload.click()
        return finished

    wait_for(is_host_analysis_finished, delay=5, timeout="8m")

    # wait for drift history num+1
    wait_for(lambda: int(test_host.get_detail('Relationships', 'Drift History')
                         ) == drift_num_orig + 1,
             delay=20,
             num_sec=120,
             message="Waiting for Drift History count to increase",
             fail_func=sel.refresh)

    # add a tag and a finalizer to remove it
    tag = ('Department', 'Accounting')
    test_host.tag(tag, single_value=False)
    request.addfinalizer(lambda: test_host.untag(tag))

    # initiate 2nd analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    wait_for(is_host_analysis_finished, delay=5, timeout="8m")

    # wait for drift history num+2
    wait_for(lambda: int(test_host.get_detail('Relationships', 'Drift History')
                         ) == drift_num_orig + 2,
             delay=20,
             num_sec=120,
             message="Waiting for Drift History count to increase",
             fail_func=sel.refresh)

    # check drift difference
    soft_assert(
        not test_host.equal_drift_results('Department (1)', 'My Company Tags',
                                          0, 1),
        "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Accounting tag should not be displayed, because it was changed to True
    tb.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell('Accounting', 0)

    # Accounting tag should be displayed now
    tb.select("Attributes with different values")
    d_grid.get_cell('Accounting', 0)
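
Several examples register cleanup through request.addfinalizer so tags and credentials are removed even when the test body fails. A self-contained pytest sketch of that teardown pattern (FakeHost is a hypothetical stand-in; run with pytest):

class FakeHost:
    """Hypothetical stand-in for the cfme Host object."""
    def __init__(self):
        self.tags = set()

    def tag(self, tag, single_value=False):
        self.tags.add(tag)

    def untag(self, tag):
        self.tags.discard(tag)


def test_tag_teardown(request):
    host = FakeHost()
    tag = ('Department', 'Accounting')
    host.tag(tag, single_value=False)
    # Runs at teardown even if the assertion below fails
    request.addfinalizer(lambda: host.untag(tag))
    assert tag in host.tags
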
Example #9
    def equal_drift_results(self, row_text, section, *indexes):
        """ Compares drift analysis results of a row specified by its title text

        Args:
            row_text: Title text of the row to compare
            section: Accordion section where the change happened; this section must be activated
            indexes: Indexes of results to compare starting with 0 for first row (latest result).
                     Compares all available drifts, if left empty (default).

        Note:
            There have to be at least 2 drift results available for this to work.

        Returns:
            ``True`` if equal, ``False`` otherwise.
        """
        # mark by indexes or mark all
        self.load_details(refresh=True)
        sel.click(InfoBlock("Properties", "Drift History"))
        if indexes:
            drift_table.select_rows_by_indexes(*indexes)
        else:
            # We can't compare more than 10 drift results at once
            # so when selecting all, we have to limit it to the latest 10
            if len(list(drift_table.rows())) > 10:
                drift_table.select_rows_by_indexes(*range(0, 10))
            else:
                drift_table.select_all()
        tb.select("Select up to 10 timestamps for Drift Analysis")

        # Make sure the section we need is active/open
        sec_loc_map = {
            'Properties': 'Properties',
            'Security': 'Security',
            'Configuration': 'Configuration',
            'My Company Tags': 'Categories'}
        sec_loc_template = "//div[@id='all_sections_treebox']//li[contains(@id, 'group_{}')]"\
            "//span[contains(@class, 'dynatree-checkbox')]"
        sec_checkbox_loc = sec_loc_template.format(sec_loc_map[section])
        sec_apply_btn = "//div[@id='accordion']/a[contains(normalize-space(text()), 'Apply')]"

        # Deselect other sections
        for other_section in sec_loc_map.keys():
            other_section_loc = sec_loc_template.format(sec_loc_map[other_section])
            other_section_classes = sel.get_attribute(other_section_loc + '/..', "class")
            if other_section != section and 'dynatree-partsel' in other_section_classes:
                # A partially selected node is clicked to full selection first,
                # so that the final click fully deselects it
                if 'dynatree-selected' not in other_section_classes:
                    sel.click(other_section_loc)
                sel.click(other_section_loc)

        # Activate the required section
        sel.click(sec_checkbox_loc)
        sel.click(sec_apply_btn)

        if not tb.is_active("All attributes"):
            tb.select("All attributes")
        d_grid = DriftGrid()
        if any(d_grid.cell_indicates_change(row_text, i) for i in range(0, len(indexes))):
            return False
        return True
Example #10
def test_host_drift_analysis(request, setup_provider, provider, host, soft_assert):
    """Tests host drift analysis

    Metadata:
        test_flag: host_drift_analysis
    """
    test_host = host_obj.Host(name=host['name'], provider=provider)

    wait_for(lambda: test_host.exists, delay=10, num_sec=120, fail_func=sel.refresh,
             message="hosts_exists")

    # get drift history num
    drift_num_orig = int(test_host.get_detail('Relationships', 'Drift History'))

    # add credentials to host + finalizer to remove them
    if not test_host.has_valid_credentials:
        test_host.update(
            updates={'credentials': host_obj.get_credentials_from_config(host['credentials'])},
            validate_credentials=True
        )

        @request.addfinalizer
        def test_host_remove_creds():
            test_host.update(
                updates={
                    'credentials': host_obj.Host.Credential(
                        principal="",
                        secret="",
                        verify_secret=""
                    )
                }
            )
    # clear table
    view = navigate_to(Tasks, 'AllOtherTasks')
    view.delete.item_select('Delete All', handle_alert=True)

    # initiate 1st analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    def is_host_analysis_finished():
        """ Check if analysis is finished - if not, reload page
        """
        finished = False
        view = navigate_to(Tasks, 'AllOtherTasks')
        host_analysis_row = view.tabs.allothertasks.table.row(
            task_name="SmartState Analysis for '{}'".format(test_host.name))
        if host_analysis_row.state.text == 'Finished':
            finished = True
            # select the row and delete the task
            host_analysis_row[0].check()
            view.delete.item_select('Delete', handle_alert=True)
        else:
            view.reload.click()
        return finished

    wait_for(is_host_analysis_finished, delay=5, timeout="8m")

    # wait for drift history num+1
    wait_for(
        lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # add a tag and a finalizer to remove it
    tag = ('Department', 'Accounting')
    test_host.tag(tag, single_value=False)
    request.addfinalizer(lambda: test_host.untag(tag))

    # initiate 2nd analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    wait_for(is_host_analysis_finished, delay=5, timeout="8m")

    # wait for drift history num+2
    wait_for(
        lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 2,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # check drift difference
    soft_assert(not test_host.equal_drift_results('Department (1)', 'My Company Tags', 0, 1),
        "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Accounting tag should not be displayed, because it was changed to True
    tb.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell('Accounting', 0)

    # Accounting tag should be displayed now
    tb.select("Attributes with different values")
    d_grid.get_cell('Accounting', 0)
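
The is_host_analysis_finished helpers poll the Tasks table and delete the finished row so the next scan's task can be matched unambiguously. A sketch of that find-then-delete pattern against a hypothetical in-memory table:

class FakeTaskTable:
    """Hypothetical stand-in for the appliance Tasks table."""
    def __init__(self, rows):
        self.rows = rows  # list of {'task_name': ..., 'state': ...} dicts

    def find_row(self, **cells):
        for row in self.rows:
            if all(row.get(key) == value for key, value in cells.items()):
                return row
        return None

    def delete_row(self, row):
        self.rows.remove(row)


def is_analysis_finished(table, host_name):
    row = table.find_row(task_name="SmartState Analysis for '{}'".format(host_name),
                         state='Finished')
    if row is not None:
        # Delete the finished task so the next scan's row is unambiguous
        table.delete_row(row)
        return True
    return False


table = FakeTaskTable([{'task_name': "SmartState Analysis for 'esx-01'",
                        'state': 'Finished'}])
assert is_analysis_finished(table, 'esx-01') is True
assert is_analysis_finished(table, 'esx-01') is False  # row already deleted
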
Example #11
    def equal_drift_results(self, row_text, section, *indexes):
        """ Compares drift analysis results of a row specified by it's title text

        Args:
            row_text: Title text of the row to compare
            section: Accordion section where the change happened; this section will be activated
            indexes: Indexes of results to compare starting with 0 for first row (latest result).
                     Compares all available drifts, if left empty (default).

        Note:
            There have to be at least 2 drift results available for this to work.

        Returns:
            ``True`` if equal, ``False`` otherwise.
        """
        # mark by indexes or mark all
        self.load_details(refresh=True)
        sel.click(InfoBlock("Properties", "Drift History"))
        if indexes:
            drift_table.select_rows_by_indexes(*indexes)
        else:
            # We can't compare more than 10 drift results at once
            # so when selecting all, we have to limit it to the latest 10
            if len(list(drift_table.rows())) > 10:
                drift_table.select_rows_by_indexes(*range(0, 10))
            else:
                drift_table.select_all()
        tb.select("Select up to 10 timestamps for Drift Analysis")

        # Make sure the section we need is active/open
        sec_loc_map = {
            'Properties': 'Properties',
            'Security': 'Security',
            'Configuration': 'Configuration',
            'My Company Tags': 'Categories'}
        sec_loc_template = "//div[@id='all_sections_treebox']//li[contains(@id, 'group_{}')]" \
                           "//span[contains(@class, 'dynatree-checkbox')]"
        sec_checkbox_loc = sec_loc_template.format(sec_loc_map[section])
        sec_apply_btn = "//div[@id='accordion']/a[contains(normalize-space(text()), 'Apply')]"

        # Deselect other sections
        for other_section in sec_loc_map.keys():
            other_section_loc = sec_loc_template.format(sec_loc_map[other_section])
            other_section_classes = sel.get_attribute(other_section_loc + '/..', "class")
            if other_section != section and 'dynatree-partsel' in other_section_classes:
                # A partially selected node is clicked to full selection first,
                # so that the final click fully deselects it
                if 'dynatree-selected' not in other_section_classes:
                    sel.click(other_section_loc)
                sel.click(other_section_loc)

        # Activate the required section
        sel.click(sec_checkbox_loc)
        sel.click(sec_apply_btn)

        if not tb.is_active("All attributes"):
            tb.select("All attributes")
        drift_grid = DriftGrid()
        if any(drift_grid.cell_indicates_change(row_text, i) for i in range(0, len(indexes))):
            return False
        return True
Example #12
def test_host_drift_analysis(request, setup_provider, provider, host_type, host_name, soft_assert):
    """Tests host drift analysis

    Metadata:
        test_flag: host_drift_analysis
    """
    host_data = get_host_data_by_name(provider.key, host_name)
    test_host = host.Host(name=host_name)

    wait_for(lambda: test_host.exists, delay=10, num_sec=120, fail_func=sel.refresh,
             message="hosts_exists")

    # get drift history num
    drift_num_orig = int(test_host.get_detail('Relationships', 'Drift History'))

    # add credentials to host + finalizer to remove them
    if not test_host.has_valid_credentials:
        test_host.update(
            updates={'credentials': host.get_credentials_from_config(host_data['credentials'])}
        )
        wait_for(
            lambda: test_host.has_valid_credentials,
            delay=10,
            num_sec=120,
            fail_func=sel.refresh,
            message="has_valid_credentials"
        )

        def test_host_remove_creds():
            test_host.update(
                updates={
                    'credentials': host.Host.Credential(
                        principal="",
                        secret="",
                        verify_secret=""
                    )
                }
            )
        request.addfinalizer(test_host_remove_creds)

    # initiate 1st analysis
    test_host.run_smartstate_analysis()

    # wait for drift history num+1
    wait_for(
        lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # add a tag and a finalizer to remove it
    tag = ('Department', 'Accounting')
    test_host.tag(tag, single_value=False)
    request.addfinalizer(lambda: test_host.untag(tag))

    # initiate 2nd analysis
    test_host.run_smartstate_analysis()

    # wait for drift history num+2
    wait_for(
        lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 2,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # check drift difference
    soft_assert(not test_host.equal_drift_results('Department (1)', 'My Company Tags', 0, 1),
        "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Accounting tag should not be displayed, because it was changed to True
    tb.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell('Accounting', 0)

    # Accounting tag should be displayed now
    tb.select("Attributes with different values")
    d_grid.get_cell('Accounting', 0)
Example #13
def test_host_drift_analysis(request, setup_provider, provider, host_type, host_name, soft_assert):
    """Tests host drift analysis

    Metadata:
        test_flag: host_drift_analysis
    """
    host_data = get_host_data_by_name(provider.key, host_name)
    test_host = host.Host(name=host_name)

    wait_for(lambda: test_host.exists, delay=10, num_sec=120, fail_func=sel.refresh,
             message="hosts_exists")

    # get drift history num
    drift_num_orig = int(test_host.get_detail('Relationships', 'Drift History'))

    # add credentials to host + finalizer to remove them
    if not test_host.has_valid_credentials:
        test_host.update(
            updates={'credentials': host.get_credentials_from_config(host_data['credentials'])},
            validate_credentials=True
        )

        @request.addfinalizer
        def test_host_remove_creds():
            test_host.update(
                updates={
                    'credentials': host.Host.Credential(
                        principal="",
                        secret="",
                        verify_secret=""
                    )
                }
            )

    # initiate 1st analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    def is_host_analysis_finished():
        """ Check if analysis is finished - if not, reload page
        """
        if not sel.is_displayed(tasks.tasks_table) or not tabs.is_tab_selected('All Other Tasks'):
            sel.force_navigate('tasks_all_other')
        host_analysis_finished = tasks.tasks_table.find_row_by_cells({
            'task_name': "SmartState Analysis for '{}'".format(host_name),
            'state': 'Finished'
        })
        if host_analysis_finished:
            # Delete the task
            tasks.tasks_table.select_row_by_cells({
                'task_name': "SmartState Analysis for '{}'".format(host_name),
                'state': 'Finished'
            })
            tb.select('Delete Tasks', 'Delete', invokes_alert=True)
            sel.handle_alert()
        return host_analysis_finished is not None

    wait_for(is_host_analysis_finished,
             delay=15, timeout="8m", fail_func=lambda: tb.select('Reload'))

    # wait for drift history num+1
    wait_for(
        lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # add a tag and a finalizer to remove it
    tag = ('Department', 'Accounting')
    test_host.tag(tag, single_value=False)
    request.addfinalizer(lambda: test_host.untag(tag))

    # initiate 2nd analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    wait_for(is_host_analysis_finished,
             delay=15, timeout="8m", fail_func=lambda: tb.select('Reload'))

    # wait for drift history num+2
    wait_for(
        lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 2,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh
    )

    # check drift difference
    soft_assert(not test_host.equal_drift_results('Department (1)', 'My Company Tags', 0, 1),
        "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Accounting tag should not be displayed, because it was changed to True
    tb.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell('Accounting', 0)

    # Accounting tag should be displayed now
    tb.select("Attributes with different values")
    d_grid.get_cell('Accounting', 0)
Example #14
def test_host_drift_analysis(request, setup_provider, provider, host_type,
                             host_name, soft_assert):
    """Tests host drift analysis

    Metadata:
        test_flag: host_drift_analysis
    """
    host_data = get_host_data_by_name(provider.key, host_name)
    test_host = host.Host(name=host_name)

    wait_for(lambda: test_host.exists,
             delay=10,
             num_sec=120,
             fail_func=sel.refresh,
             message="hosts_exists")

    # get drift history num
    drift_num_orig = int(test_host.get_detail('Relationships',
                                              'Drift History'))

    # add credentials to host + finalizer to remove them
    if not test_host.has_valid_credentials:
        test_host.update(updates={
            'credentials':
            host.get_credentials_from_config(host_data['credentials'])
        },
                         validate_credentials=True)

        @request.addfinalizer
        def test_host_remove_creds():
            test_host.update(
                updates={
                    'credentials':
                    host.Host.Credential(
                        principal="", secret="", verify_secret="")
                })

    # initiate 1st analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    def is_host_analysis_finished():
        """ Check if analysis is finished - if not, reload page
        """
        if not sel.is_displayed(tasks.tasks_table) or not tabs.is_tab_selected(
                'All Other Tasks'):
            sel.force_navigate('tasks_all_other')
        host_analysis_finished = tasks.tasks_table.find_row_by_cells({
            'task_name':
            "SmartState Analysis for '{}'".format(host_name),
            'state':
            'Finished'
        })
        if host_analysis_finished:
            # Delete the task
            tasks.tasks_table.select_row_by_cells({
                'task_name':
                "SmartState Analysis for '{}'".format(host_name),
                'state':
                'Finished'
            })
            tb.select('Delete Tasks', 'Delete', invokes_alert=True)
            sel.handle_alert()
        return host_analysis_finished is not None

    wait_for(is_host_analysis_finished,
             delay=15,
             timeout="8m",
             fail_func=lambda: tb.select('Reload'))

    # wait for drift history num+1
    wait_for(lambda: int(test_host.get_detail('Relationships', 'Drift History')
                         ) == drift_num_orig + 1,
             delay=20,
             num_sec=120,
             message="Waiting for Drift History count to increase",
             fail_func=sel.refresh)

    # add a tag and a finalizer to remove it
    tag = ('Department', 'Accounting')
    test_host.tag(tag, single_value=False)
    request.addfinalizer(lambda: test_host.untag(tag))

    # initiate 2nd analysis
    test_host.run_smartstate_analysis()

    # Wait for the task to finish
    wait_for(is_host_analysis_finished,
             delay=15,
             timeout="8m",
             fail_func=lambda: tb.select('Reload'))

    # wait for drift history num+2
    wait_for(lambda: int(test_host.get_detail('Relationships', 'Drift History')
                         ) == drift_num_orig + 2,
             delay=20,
             num_sec=120,
             message="Waiting for Drift History count to increase",
             fail_func=sel.refresh)

    # check drift difference
    soft_assert(
        not test_host.equal_drift_results('Department (1)', 'My Company Tags',
                                          0, 1),
        "Drift analysis results are equal when they shouldn't be")

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Accounting tag should not be displayed, because it was changed to True
    tb.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell('Accounting', 0)

    # Accounting tag should be displayed now
    tb.select("Attributes with different values")
    d_grid.get_cell('Accounting', 0)
Example #15
def test_host_drift_analysis(request, setup_provider, provider_key, host_type, host_name, soft_assert):
    """Tests host drift analysis

    Metadata:
        test_flag: host_drift_analysis
    """
    host_data = get_host_data_by_name(provider_key, host_name)
    test_host = host.Host(name=host_name)

    wait_for(lambda: test_host.exists, delay=10, num_sec=120, fail_func=sel.refresh, message="hosts_exists")

    # get drift history num
    drift_num_orig = int(test_host.get_detail("Relationships", "Drift History"))

    # add credentials to host + finalizer to remove them
    if not test_host.has_valid_credentials:
        test_host.update(updates={"credentials": host.get_credentials_from_config(host_data["credentials"])})
        wait_for(
            lambda: test_host.has_valid_credentials,
            delay=10,
            num_sec=120,
            fail_func=sel.refresh,
            message="has_valid_credentials",
        )

        def test_host_remove_creds():
            test_host.update(updates={"credentials": host.Host.Credential(principal="", secret="", verify_secret="")})

        request.addfinalizer(test_host_remove_creds)

    # initiate 1st analysis
    test_host.run_smartstate_analysis()

    # wait for drift history num+1
    wait_for(
        lambda: int(test_host.get_detail("Relationships", "Drift History")) == drift_num_orig + 1,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh,
    )

    # change host name + finalizer to change it back
    orig_host_name = test_host.name
    with update(test_host):
        test_host.name = "{}_tmp_drift_rename".format(test_host.name)

    def host_reset_name():
        with update(test_host):
            test_host.name = orig_host_name

    request.addfinalizer(host_reset_name)

    # initiate 2nd analysis
    test_host.run_smartstate_analysis()

    # wait for drift history num+2
    wait_for(
        lambda: int(test_host.get_detail("Relationships", "Drift History")) == drift_num_orig + 2,
        delay=20,
        num_sec=120,
        message="Waiting for Drift History count to increase",
        fail_func=sel.refresh,
    )

    # check drift difference
    soft_assert(
        not test_host.equal_drift_results("All Sections", 0, 1),
        "Drift analysis results are equal when they shouldn't be",
    )

    # Test UI features that modify the drift grid
    d_grid = DriftGrid()

    # Name should not be displayed, because it was changed
    tb.select("Attributes with same values")
    with error.expected(sel.NoSuchElementException):
        d_grid.get_cell("Name", 0)

    # Name should be displayed now
    tb.select("Attributes with different values")
    d_grid.get_cell("Name", 0)