示例#1
0
    def test_invalid_handler_for_is_open_raises_error(self):
        """Verify that is_open rejects issue ids with unknown handlers.

        Every malformed identifier below must raise AttributeError.
        """
        invalid_issues = ("BZ123456", "XX:123456", "KK:89456", "123456", 999999)
        for bad_issue in invalid_issues:
            with self.subTest(issue=bad_issue):
                self.assertRaises(AttributeError, is_open, bad_issue)
示例#2
0
def pytest_collection_modifyitems(items, config):
    """Called after collection has been performed, may filter or re-order
    the items in-place.

    1. Builds ``pytest.issue_data`` from the collected items (pre-processed
       ``is_open`` status per issue; also writes ``bz_cache.json``).
    2. Deselects tests carrying the ``deselect`` marker (e.g. WONTFIX BZs).
    3. Adds a ``skipif`` marker to tests carrying ``skip_if_open`` whose
       referenced issue is still open.
    """

    log("Collected %s test cases" % len(items))

    # First collect all issues in use and build an issue collection
    # This collection includes pre-processed `is_open` status for each issue
    # generate_issue_collection will save a file `bz_cache.json` on each run.
    pytest.issue_data = generate_issue_collection(items, config)

    # Modify items based on collected issue_data
    deselected_items = []

    for item in items:
        # 1. Deselect tests marked with @pytest.mark.deselect
        # WONTFIX BZs makes test to be dynamically marked as deselect.
        deselect = item.get_closest_marker('deselect')
        if deselect:
            deselected_items.append(item)
            reason = deselect.kwargs.get('reason', deselect.args)
            log(f"Deselected test '{item.name}' reason: {reason}")
            # Do nothing more with deselected tests
            continue

        # 2. Skip items based on skip_if_open marker
        skip_if_open = item.get_closest_marker('skip_if_open')
        if skip_if_open:
            # marker must have `BZ:123456` as argument.
            issue = skip_if_open.kwargs.get('reason') or skip_if_open.args[0]
            item.add_marker(pytest.mark.skipif(is_open(issue), reason=issue))

    config.hook.pytest_deselected(items=deselected_items)
    # Membership test against a set is O(1); scanning the deselected list per
    # item was quadratic on large test collections.
    deselected = set(deselected_items)
    items[:] = [item for item in items if item not in deselected]
示例#3
0
 def test_bz_is_open_using_dupe_of_dupe_data_higher_version(self):
     """Assert that if BZ has a dupe, the dupe data is considered.
     The dupe is CLOSED/ERRATA but on a future version, no clones for
     backport the solution."""
     # Build the dupe chain bottom-up for readability.
     innermost_dupe = {
         "id": 888888,
         "status": "CLOSED",
         "resolution": "ERRATA",
         "target_milestone": "Unspecified",
         "flags": [{"status": "+", "name": "sat-6.7.z"}],
     }
     middle_dupe = {
         "id": 999999,
         "status": "CLOSED",
         "resolution": "DUPLICATE",
         "target_milestone": "Unspecified",
         "flags": [{"status": "+", "name": "sat-6.7.z"}],
         "dupe_data": innermost_dupe,
     }
     bug_data = {
         "id": 123456,
         "status": "CLOSED",
         "resolution": "DUPLICATE",
         "target_milestone": "Unspecified",
         "flags": [],
         "dupe_data": middle_dupe,
     }
     assert is_open("BZ:123456", bug_data)
示例#4
0
    def test_post_crud_virt_who_configuration(self):
        """Virt-who config is intact post upgrade and verify the config can be updated and deleted.

        :id: postupgrade-d7ae7b2b-3291-48c8-b412-cb54e444c7a4

        :steps:
            1. Post upgrade, Verify virt-who exists and has same status.
            2. Verify the connection of the guest on Content host.
            3. Verify the virt-who config-file exists.
            4. Update virt-who config with new name.
            5. Delete virt-who config.

        :expectedresults:
            1. virt-who config is intact post upgrade.
            2. the config and guest connection have the same status.
            3. virt-who config should update and delete successfully.
        """
        org = entities.Organization().search(query={'search': 'name={0}'.format(self.org_name)})[0]

        # Post upgrade, Verify virt-who exists and has same status.
        vhd = entities.VirtWhoConfig(organization_id=org.id).search(
            query={'search': 'name={}'.format(self.name)}
        )[0]
        if not is_open('BZ:1802395'):
            self.assertEqual(vhd.status, 'ok')
        # Verify virt-who status via CLI as we cannot check it via API now
        vhd_cli = VirtWhoConfig.exists(search=('name', self.name))
        self.assertEqual(
            VirtWhoConfig.info({'id': vhd_cli['id']})['general-information']['status'], 'OK'
        )

        # Verify the connection of the guest on Content host
        entity_data = get_entity_data(self.__class__.__name__)
        hypervisor_name = entity_data.get('hypervisor_name')
        guest_name = entity_data.get('guest_name')
        hosts = [hypervisor_name, guest_name]
        for hostname in hosts:
            result = (
                entities.Host(organization=org.id)
                .search(query={'search': hostname})[0]
                .read_json()
            )
            self.assertEqual(result['subscription_status_label'], 'Fully entitled')

        # Verify the virt-who config-file exists.
        config_file = get_configure_file(vhd.id)
        # Fixed: a stray trailing comma here used to wrap the discarded call
        # result in a throwaway 1-tuple. The call still only checks that the
        # option can be read; its value is not asserted.
        get_configure_option('hypervisor_id', config_file)

        # Update virt-who config
        modify_name = gen_string('alpha')
        vhd.name = modify_name
        vhd.update(['name'])

        # Delete virt-who config
        vhd.delete()
        self.assertFalse(
            entities.VirtWhoConfig(organization_id=org.id).search(
                query={'search': 'name={}'.format(modify_name)}
            )
        )
示例#5
0
 def test_bz_is_not_open_using_dupe_of_dupe_data_lower_version(self):
     """Assert that if BZ has a dupe, the dupe data is considered.
     The dupe is CLOSED/ERRATA in a previous version."""
     # Build the dupe chain bottom-up for readability.
     innermost_dupe = {
         "id": 888888,
         "status": "CLOSED",
         "resolution": "ERRATA",
         "target_milestone": "Unspecified",
         "flags": [{"status": "+", "name": "sat-6.3.z"}],
     }
     middle_dupe = {
         "id": 999999,
         "status": "CLOSED",
         "resolution": "DUPLICATE",
         "target_milestone": "Unspecified",
         "flags": [{"status": "+", "name": "sat-6.3.z"}],
         "dupe_data": innermost_dupe,
     }
     bug_data = {
         "id": 123456,
         "status": "CLOSED",
         "resolution": "DUPLICATE",
         "target_milestone": "Unspecified",
         "flags": [],
         "dupe_data": middle_dupe,
     }
     assert not is_open("BZ:123456", bug_data)
示例#6
0
def ui_entities(module_org, module_loc):
    """Collects the list of all applicable UI entities for testing and does all
    required preconditions.

    :param module_org: organization passed to entity setup where required.
    :param module_loc: location passed to entity setup where required.
    :return: list of testable entity dicts from ``BOOKMARK_ENTITIES``.
    """
    ui_entities = []
    for entity in BOOKMARK_ENTITIES:
        # Skip the entities, which can't be tested ATM (not implemented in
        # airgun or have open BZs)
        skip = entity.get('skip_for_ui')
        if isinstance(skip, (tuple, list)):
            # any() with a generator expression — no throwaway list needed
            skip = any(is_open(issue) for issue in skip)
        if skip is True:
            continue
        ui_entities.append(entity)
        # Some pages require at least 1 existing entity for search bar to
        # appear. Creating 1 entity for such pages
        entity_name, entity_setup = entity['name'], entity.get('setup')
        if entity_setup:
            # entities with 1 organization and location
            if entity_name in ('Host',):
                entity_setup(organization=module_org, location=module_loc).create()
            # entities with no organizations and locations
            elif entity_name in (
                'ComputeProfile',
                'ConfigGroup',
                'GlobalParameter',
                'HardwareModel',
                'PuppetClass',
                'UserGroup',
            ):
                entity_setup().create()
            # entities with multiple organizations and locations
            else:
                entity_setup(organization=[module_org], location=[module_loc]).create()
    return ui_entities
示例#7
0
    def test_positive_post_status_code(self):
        """Issue a POST request and check the returned status code.

        :id: 40247cdd-ad72-4b7b-97c6-583addb1b25a

        :expectedresults: HTTP 201 is returned with an ``application/json``
            content-type

        :CaseImportance: Critical

        :BZ: 1118015
        """
        exclude_list = (
            entities.TemplateKind,  # see comments in class definition
        )
        for entity_cls in set(valid_entities()) - set(exclude_list):
            with self.subTest(entity_cls):
                self.logger.info('test_post_status_code arg: %s', entity_cls)

                # Libvirt compute resources suffer from BZ 1118015. However,
                # partials cannot be compared for class identity and the class
                # hierarchy needs fixing (SatelliteQE/nailgun#42), so we just
                # comment it out above.
                if entity_cls in BZ_1118015_ENTITIES and is_open('BZ:1118015'):
                    # NOTE(review): module-level `logger` here vs `self.logger`
                    # above — presumably both exist; confirm the inconsistency
                    # is intentional.
                    logger.info(
                        'Pytest can skip inside a subTest, continuing the loop'
                        ' due to BZ:1118015')
                    continue

                # The API contract: 201 Created with a JSON payload.
                response = entity_cls().create_raw()
                self.assertEqual(http_client.CREATED, response.status_code)
                self.assertIn('application/json',
                              response.headers['content-type'])
示例#8
0
    def _traverse_command_tree(self):
        """Walk through the hammer commands tree and assert that the expected
        options are present.

        Populates ``self.differences`` with a per-command dict describing
        added/removed options and subcommands relative to the expected data
        returned by ``_fetch_command_info``.
        """
        raw_output = ssh.command(
            'hammer full-help', output_format='plain').stdout
        commands = re.split('.*\n(?=hammer.*\n^[-]+)', raw_output, flags=re.M)
        commands.pop(0)  # remove "Hammer CLI help" line
        for raw_command in commands:
            raw_command = raw_command.splitlines()
            command = raw_command.pop(0).replace(' >', '')
            output = hammer.parse_help(raw_command)
            # Set comprehensions instead of set([...]) — skips building a
            # throwaway list (flake8-comprehensions C403).
            command_options = {option['name'] for option in output['options']}
            command_subcommands = {
                subcommand['name'] for subcommand in output['subcommands']
            }
            expected = _fetch_command_info(command)
            expected_options = set()
            expected_subcommands = set()

            if expected is not None:
                expected_options = {
                    option['name'] for option in expected['options']
                }
                expected_subcommands = {
                    subcommand['name']
                    for subcommand in expected['subcommands']
                }
            if is_open('BZ:1666687'):
                # Options known to be missing from full-help while the BZ is
                # open; inject them so the diff below stays clean.
                cmds = ['hammer report-template create', 'hammer report-template update']
                if command in cmds:
                    command_options.add('interactive')
                if 'hammer virt-who-config fetch' in command:
                    command_options.add('output')
            added_options = tuple(command_options - expected_options)
            removed_options = tuple(expected_options - command_options)
            added_subcommands = tuple(
                command_subcommands - expected_subcommands)
            removed_subcommands = tuple(
                expected_subcommands - command_subcommands)

            if (added_options or added_subcommands or removed_options or
                    removed_subcommands):
                diff = {
                    'added_command': expected is None,
                }
                if added_options:
                    diff['added_options'] = added_options
                if removed_options:
                    diff['removed_options'] = removed_options
                if added_subcommands:
                    diff['added_subcommands'] = added_subcommands
                if removed_subcommands:
                    diff['removed_subcommands'] = removed_subcommands
                self.differences[command] = diff
示例#9
0
    def test_positive_synchronize_custom_products_future_sync_date(self):
        """Create a sync plan with sync date in a future and sync multiple
        custom products with multiple repos automatically.

        :id: e646196e-3951-4297-8c3c-1494d9895347

        :expectedresults: Products are synchronized successfully.

        :CaseLevel: System

        :BZ: 1695733
        """
        delay = 2 * 60  # delay for sync date in seconds
        # 3 products, each with 2 repositories (6 repos total)
        products = [
            entities.Product(organization=self.org).create() for _ in range(3)
        ]
        repos = [
            entities.Repository(product=product).create()
            for product in products for _ in range(2)
        ]
        # Verify products have not been synced yet
        # NOTE(review): this first check omits max_tries while the later one
        # passes max_tries=1 — confirm the longer retry here is intentional.
        for repo in repos:
            with self.assertRaises(AssertionError):
                self.validate_task_status(repo.id)
        # Create and Associate sync plan with products
        if is_open('BZ:1695733'):
            self.logger.info('Need to set seconds to zero because BZ#1695733')
            sync_date = datetime.utcnow().replace(second=0) + timedelta(
                seconds=delay)
        else:
            sync_date = datetime.utcnow() + timedelta(seconds=delay)
        sync_plan = entities.SyncPlan(organization=self.org,
                                      enabled=True,
                                      sync_date=sync_date).create()
        sync_plan.add_products(
            data={'product_ids': [product.id for product in products]})
        # Wait quarter of expected time
        self.logger.info(
            'Waiting {0} seconds to check products were not synced'.format(
                delay / 4))
        sleep(delay / 4)
        # Verify products has not been synced yet
        for repo in repos:
            with self.assertRaises(AssertionError):
                self.validate_task_status(repo.id, max_tries=1)
        # Wait the rest of expected time
        self.logger.info(
            'Waiting {0} seconds to check products were synced'.format(delay *
                                                                       3 / 4))
        sleep(delay * 3 / 4)
        # Verify product was synced successfully
        for repo in repos:
            self.validate_task_status(repo.id,
                                      repo_backend_id=repo.backend_identifier)
            self.validate_repo_content(repo,
                                       ['erratum', 'package', 'package_group'])
示例#10
0
    def test_positive_validate_capsule_certificate(self):
        """Check that Capsules cert handles additional proxy names.

        :id: 8b53fc3d-704f-44f4-899e-74654529bfcf

        :steps:

            1. Generate a Capsule certificate
            2. Confirm proxy server's FQDN for DNS is present
            3. Confirm that format of alternative names does not include []

        :expectedresults: Capsule certs has valid DNS values

        :BZ: 1747581

        :CaseAutomation: automated
        """
        DNS_Check = False
        with get_connection(timeout=200) as connection:
            # extract the cert from the tar file
            result = connection.run(
                'tar -xf {0}/capsule_certs.tar --directory {0}/ '.format(
                    self.tmp_dir))
            assert result.return_code == 0, 'Extraction to working directory failed.'
            # Extract raw data from RPM to a file
            # NOTE(review): return_code of this command is not asserted —
            # presumably best-effort; confirm whether a check is wanted.
            result = connection.run(
                'rpm2cpio {0}/ssl-build/capsule.example.com/'
                'capsule.example.com-qpid-router-server*.rpm'
                '>> {0}/ssl-build/capsule.example.com/cert-raw-data'.format(
                    self.tmp_dir))
            # Extract the cert data from file cert-raw-data and write to cert-data
            # NOTE(review): return_code of this command is not asserted either.
            result = connection.run(
                'openssl x509 -noout -text -in {0}/ssl-build/capsule.example.com/cert-raw-data'
                '>> {0}/ssl-build/capsule.example.com/cert-data'.format(
                    self.tmp_dir))
            # use same location on remote and local for cert_file
            download_file(self.caps_cert_file)
            # search the file for the line with DNS
            with open(self.caps_cert_file, "r") as file:
                for line in file:
                    if re.search(r'\bDNS:', line):
                        self.logger.info(
                            'Found the line with alternative names for DNS')
                        match = re.search(r'capsule.example.com', line)
                        assert match, "No proxy name found."
                        if is_open('BZ:1747581'):
                            # While the BZ is open, skip the [] format check
                            DNS_Check = True
                        else:
                            match = re.search(r'\[]', line)
                            assert not match, "Incorrect parsing of alternative proxy name."
                            DNS_Check = True
                        break
                    # if no match for "DNS:" found, then raise error.
            assert DNS_Check, "Cannot find Subject Alternative Name"
示例#11
0
 def test_bz_is_open_by_status(self):
     """Assert status in NEW, ASSIGNED, POST, MODIFIED is open"""
     for status in OPEN_STATUSES:
         with self.subTest(status=status):
             bug = dict(
                 id=123456,
                 status=status,
                 resolution="",
                 target_milestone="Unspecified",
                 flags=[],
             )
             self.assertTrue(is_open("BZ:123456", bug))
示例#12
0
    def test_positive_synchronize_custom_product_future_sync_date(self):
        """Create a sync plan with sync date in a future and sync one custom
        product with it automatically.

        :id: b70a0c50-7335-4285-b24c-edfc1187f034

        :expectedresults: Product is synchronized successfully.

        :CaseLevel: System

        :BZ: 1655595, 1695733
        """
        delay = 2 * 60  # delay for sync date in seconds
        product = entities.Product(organization=self.org).create()
        repo = entities.Repository(product=product).create()
        # Verify product is not synced and doesn't have any content
        with self.assertRaises(AssertionError):
            self.validate_task_status(repo.id, max_tries=1)
        self.validate_repo_content(repo,
                                   ['erratum', 'package', 'package_group'],
                                   after_sync=False)
        # Create and Associate sync plan with product
        if is_open('BZ:1695733'):
            self.logger.info('Need to set seconds to zero because BZ#1695733')
            sync_date = datetime.utcnow().replace(second=0) + timedelta(
                seconds=delay)
        else:
            sync_date = datetime.utcnow() + timedelta(seconds=delay)
        sync_plan = entities.SyncPlan(
            organization=self.org,
            enabled=True,
            sync_date=sync_date,
        ).create()
        sync_plan.add_products(data={'product_ids': [product.id]})
        # Wait quarter of expected time
        self.logger.info('Waiting {0} seconds to check product {1}'
                         ' was not synced'.format(delay / 4, product.name))
        sleep(delay / 4)
        # Verify product has not been synced yet
        with self.assertRaises(AssertionError):
            self.validate_task_status(repo.id, max_tries=1)
        self.validate_repo_content(repo,
                                   ['erratum', 'package', 'package_group'],
                                   after_sync=False)
        # Wait the rest of expected time
        self.logger.info('Waiting {0} seconds to check product {1}'
                         ' was synced'.format((delay * 3 / 4), product.name))
        sleep(delay * 3 / 4)
        # Verify product was synced successfully
        self.validate_task_status(repo.id,
                                  repo_backend_id=repo.backend_identifier)
        self.validate_repo_content(repo,
                                   ['erratum', 'package', 'package_group'])
示例#13
0
 def test_bz_is_open_by_resolution(self, resolution):
     """Assert a closed BZ in WONTFIX resolution is considered open"""
     bug_data = dict(
         id=123456,
         status="CLOSED",
         resolution=resolution,
         target_milestone="Unspecified",
         flags=[],
     )
     assert is_open("BZ:123456", bug_data)
示例#14
0
 def test_bz_is_open_by_resolution(self):
     """Assert a closed BZ in WONTFIX resolution is considered open"""
     for resolution in WONTFIX_RESOLUTIONS:
         with self.subTest(resolution=resolution):
             bug = dict(
                 id=123456,
                 status="CLOSED",
                 resolution=resolution,
                 target_milestone="Unspecified",
                 flags=[],
             )
             self.assertTrue(is_open("BZ:123456", bug))
示例#15
0
 def test_bz_is_open_by_status(self, status):
     """Assert status in NEW, ASSIGNED, POST, MODIFIED is open"""
     bug = dict(
         id=123456,
         status=status,
         resolution="",
         target_milestone="Unspecified",
         flags=[],
     )
     assert is_open("BZ:123456", bug)
示例#16
0
 def test_bz_is_open_if_server_version_is_lower(self):
     """Assert bug is considered open if TM is set for a future version
     and there are no clones backporting the solution to server version.
     """
     bug = dict(
         id=123456,
         status="CLOSED",
         resolution="ERRATA",
         target_milestone="7.0.1",
         flags=[],
         clones=[],
     )
     self.assertTrue(is_open("BZ:123456", bug))
示例#17
0
 def test_bz_is_not_open_if_server_version_is_higher_or_equal_tm(self):
     """Assert bug is considered not open if closed status and
     TM is higher or matches the running server version.
     """
     for status in CLOSED_STATUSES:
         with self.subTest(status=status):
             bug = dict(
                 id=123456,
                 status=status,
                 resolution="",
                 target_milestone="6.6.1",
                 flags=[],
                 clones=[],
             )
             self.assertFalse(is_open("BZ:123456", bug))
示例#18
0
 def test_bz_is_not_open_if_server_version_is_higher_or_equal_tm(
         self, status):
     """Assert bug is considered not open if closed status and
     TM is higher or matches the running server version.
     """
     bug = dict(
         id=123456,
         status=status,
         resolution="",
         target_milestone="6.6.1",
         flags=[],
         clones=[],
     )
     assert not is_open("BZ:123456", bug)
示例#19
0
 def test_bz_is_open_if_server_version_is_lower_using_flags(self):
     """Assert bug is considered open if flag version is set for a future
     version and there are no clones backporting the solution.
     """
     bug = dict(
         id=123456,
         status="CLOSED",
         resolution="ERRATA",
         target_milestone="Unspecified",
         flags=[{"status": "+", "name": "sat-7.0.1"}],
         clones=[],
     )
     self.assertTrue(is_open("BZ:123456", bug))
示例#20
0
 def test_bz_is_not_open_if_server_version_is_higher_or_equal_flags(
         self, status):
     """Assert bug is considered not open if closed status and
     min(flags) version is higher or matches the running server version.
     """
     bug = dict(
         id=123456,
         status=status,
         resolution="",
         target_milestone="Unspecified",
         flags=[{"status": "+", "name": "sat-6.6.0"}],
         clones=[],
     )
     assert not is_open("BZ:123456", bug)
示例#21
0
    def _capsule_cleanup(self):
        """Make the necessary cleanup in case of a crash.

        Best-effort teardown: unregister the host (if subscribed), then
        delete the host and capsule records for the static hostname so it
        can be reused by other tests/organizations.
        """
        if self._subscribed:
            # use try except to unregister the host, in case of host not
            # reachable (or any other failure), the capsule is not deleted and
            # this failure will hide any prior failure.
            try:
                self.unregister()
            except Exception as exp:
                logger.error('Failed to unregister the host: {0}\n{1}'.format(
                    self.hostname, exp))

        if self._capsule_hostname:
            # do cleanup as using a static hostname that can be reused by
            # other tests and organizations
            try:
                # try to delete the hostname first
                Host.delete({'name': self._capsule_hostname})
                # try delete the capsule
            except Exception as exp:
                # log the exception
                # as maybe that the host was not registered or setup does not
                # reach that stage
                # or maybe that the capsule was not registered or setup does
                # not reach that stage
                # Destroys the Capsule VM on the provisioning server if
                # exception has 'return_code=70(Error: host not found)'
                # getattr: not every exception type carries `return_code`;
                # a bare attribute access would raise AttributeError inside
                # this handler and mask the original failure.
                if getattr(exp, 'return_code', None) == 70:
                    super(CapsuleVirtualMachine, self).destroy()
                if is_open('BZ:1622064'):
                    # `Logger.warn` is a deprecated alias of `warning`
                    logger.warning(
                        'Failed to cleanup the host: {0}\n{1}'.format(
                            self.hostname, exp))
                else:
                    logger.error('Failed to cleanup the host: {0}\n{1}'.format(
                        self.hostname, exp))
                    raise
            try:
                # try to delete the capsule if it was added already
                Capsule.delete({'name': self._capsule_hostname})
            except Exception as exp:
                logger.error('Failed to cleanup the capsule: {0}\n{1}'.format(
                    self.hostname, exp))
                raise
示例#22
0
    def test_positive_create_with_os_family(self):
        """Create operating system with every OS family possible

        :id: 6ad32d22-53cc-4bab-ac10-f466f75d7cc6

        :expectedresults: Operating system entity is created and has proper OS
            family assigned

        :CaseAutomation: Automated

        :CaseImportance: Critical
        """
        for os_family in OPERATING_SYSTEMS:
            with self.subTest(os_family):
                # Debian is skipped while BZ:1709683 remains open
                if is_open('BZ:1709683') and os_family == 'Debian':
                    continue
                os = entities.OperatingSystem(family=os_family).create()
                self.assertEqual(os.family, os_family)
示例#23
0
 def test_bz_is_not_open_using_dupe_of_dupe_data_by_status(self):
     """Assert that if BZ has a dupe, the dupe data is considered.
     The dupe is CLOSED/ERRATA in a previous version."""
     # Build the dupe chain bottom-up for readability.
     innermost_dupe = {
         "id": 999999,
         "status": "CLOSED",
         "resolution": "ERRATA",
         "target_milestone": "Unspecified",
         "flags": [],
     }
     middle_dupe = {
         "id": 999999,
         "status": "CLOSED",
         "resolution": "DUPLICATE",
         "target_milestone": "Unspecified",
         "dupe_data": innermost_dupe,
     }
     bug_data = {
         "id": 123456,
         "status": "CLOSED",
         "resolution": "DUPLICATE",
         "target_milestone": "Unspecified",
         "dupe_data": middle_dupe,
     }
     self.assertFalse(is_open("BZ:123456", bug_data))
示例#24
0
 def test_bz_is_open_using_dupe_of_dupe_data_by_status(self):
     """Assert that if BZ has a dupe, the dupe data is considered.
     The dupe is CLOSED/ERRATA but on a future version, no clones for
     backport the solution."""
     # Build the dupe chain bottom-up for readability.
     innermost_dupe = {
         "id": 888888,
         "status": "NEW",
         "resolution": "",
         "target_milestone": "Unspecified",
     }
     middle_dupe = {
         "id": 999999,
         "status": "CLOSED",
         "resolution": "DUPLICATE",
         "target_milestone": "Unspecified",
         "dupe_data": innermost_dupe,
     }
     bug_data = {
         "id": 123456,
         "status": "CLOSED",
         "resolution": "DUPLICATE",
         "target_milestone": "Unspecified",
         "flags": [],
         "dupe_data": middle_dupe,
     }
     self.assertTrue(is_open("BZ:123456", bug_data))
示例#25
0
    def test_positive_capsule_sync(self):
        """Create repository, add it to lifecycle environment, assign lifecycle
        environment with a capsule, sync repository, sync it once again, update
        repository (add 1 new package), sync repository once again.

        :id: 35513099-c918-4a8e-90d0-fd4c87ad2f82

        :customerscenario: true

        :BZ: 1394354, 1439691

        :expectedresults:

            1. Repository sync triggers capsule sync
            2. After syncing capsule contains same repo content as satellite
            3. Syncing repository which has no changes for a second time does
               not trigger any new publish task
            4. Repository revision on capsule remains exactly the same after
               second repo sync with no changes
            5. Syncing repository which was updated will update the content on
               capsule

        :CaseLevel: System
        """
        repo_name = gen_string('alphanumeric')
        # Create and publish custom repository with 2 packages in it
        repo_url = create_repo(
            repo_name,
            FAKE_1_YUM_REPO,
            FAKE_1_YUM_REPO_RPMS[0:2],
        )
        # Create organization, product, repository in satellite, and lifecycle
        # environment
        org = entities.Organization(smart_proxy=[self.capsule_id]).create()
        product = entities.Product(organization=org).create()
        repo = entities.Repository(
            product=product,
            url=repo_url,
        ).create()
        lce = entities.LifecycleEnvironment(organization=org).create()
        # Associate the lifecycle environment with the capsule
        capsule = entities.Capsule(id=self.capsule_id).read()
        capsule.content_add_lifecycle_environment(data={
            'environment_id': lce.id,
        })
        result = capsule.content_lifecycle_environments()
        self.assertGreaterEqual(len(result['results']), 1)
        self.assertIn(lce.id,
                      [capsule_lce['id'] for capsule_lce in result['results']])
        # Create a content view with the repository
        cv = entities.ContentView(
            organization=org,
            repository=[repo],
        ).create()
        # Sync repository
        repo.sync()
        repo = repo.read()
        # Publish new version of the content view
        cv.publish()
        cv = cv.read()
        self.assertEqual(len(cv.version), 1)
        cvv = cv.version[-1].read()
        # Promote content view to lifecycle environment
        promote(cvv, lce.id)
        cvv = cvv.read()
        # 2 environments: Library + the promoted lce
        self.assertEqual(len(cvv.environment), 2)
        # Assert that a task to sync lifecycle environment to the capsule
        # is started (or finished already)
        sync_status = capsule.content_get_sync()
        self.assertTrue(
            len(sync_status['active_sync_tasks']) >= 1
            or sync_status['last_sync_time'])
        # Assert that the content of the published content view in
        # lifecycle environment is exactly the same as content of
        # repository
        lce_repo_path = form_repo_path(
            org=org.label,
            lce=lce.label,
            cv=cv.label,
            prod=product.label,
            repo=repo.label,
        )
        cvv_repo_path = form_repo_path(
            org=org.label,
            cv=cv.label,
            cvv=cvv.version,
            prod=product.label,
            repo=repo.label,
        )
        # Wait till capsule sync finishes
        for task in sync_status['active_sync_tasks']:
            entities.ForemanTask(id=task['id']).poll()
        sync_status = capsule.content_get_sync()
        last_sync_time = sync_status['last_sync_time']

        # If BZ1439691 is open, need to sync repo once more, as repodata
        # will change on second attempt even with no changes in repo
        if is_open('BZ:1439691'):
            repo.sync()
            repo = repo.read()
            cv.publish()
            cv = cv.read()
            self.assertEqual(len(cv.version), 2)
            cv.version.sort(key=lambda version: version.id)
            cvv = cv.version[-1].read()
            promote(cvv, lce.id)
            cvv = cvv.read()
            self.assertEqual(len(cvv.environment), 2)
            sync_status = capsule.content_get_sync()
            self.assertTrue(
                len(sync_status['active_sync_tasks']) >= 1
                or sync_status['last_sync_time'] != last_sync_time)
            for task in sync_status['active_sync_tasks']:
                entities.ForemanTask(id=task['id']).poll()
            sync_status = capsule.content_get_sync()
            last_sync_time = sync_status['last_sync_time']

        # Assert that the content published on the capsule is exactly the
        # same as in repository on satellite
        # NOTE(review): lce_revision_capsule is assigned but never used in
        # this chunk — presumably a later revision comparison was removed or
        # lives outside this view; confirm.
        lce_revision_capsule = get_repomd_revision(lce_repo_path,
                                                   hostname=self.capsule_ip)
        self.assertEqual(
            get_repo_files(lce_repo_path, hostname=self.capsule_ip),
            get_repo_files(cvv_repo_path))
        # Sync repository for a second time
        result = repo.sync()
        # Assert that the task summary contains a message that says the
        # publish was skipped because content had not changed
        self.assertEqual(result['result'], 'success')
        self.assertTrue(result['output']['post_sync_skipped'])
        self.assertEqual(result['humanized']['output'], 'No new packages.')
        # Publish a new version of content view
        cv.publish()
        cv = cv.read()
        cv.version.sort(key=lambda version: version.id)
        cvv = cv.version[-1].read()
        # Promote new content view version to lifecycle environment
        promote(cvv, lce.id)
        cvv = cvv.read()
        self.assertEqual(len(cvv.environment), 2)
        # Wait till capsule sync finishes
        sync_status = capsule.content_get_sync()
        tasks = []
        if not sync_status['active_sync_tasks']:
            self.assertNotEqual(sync_status['last_sync_time'], last_sync_time)
        else:
            for task in sync_status['active_sync_tasks']:
                tasks.append(entities.ForemanTask(id=task['id']))
                tasks[-1].poll()
        # Assert that the value of repomd revision of repository in
        # lifecycle environment on the capsule has not changed
        new_lce_revision_capsule = get_repomd_revision(
            lce_repo_path, hostname=self.capsule_ip)
        self.assertEqual(lce_revision_capsule, new_lce_revision_capsule)
        # Update a repository with 1 new rpm
        create_repo(
            repo_name,
            FAKE_1_YUM_REPO,
            FAKE_1_YUM_REPO_RPMS[-1:],
        )
        # Sync, publish and promote the repository
        repo.sync()
        repo = repo.read()
        cv.publish()
        cv = cv.read()
        cv.version.sort(key=lambda version: version.id)
        cvv = cv.version[-1].read()
        promote(cvv, lce.id)
        cvv = cvv.read()
        self.assertEqual(len(cvv.environment), 2)
        # Assert that a task to sync lifecycle environment to the capsule
        # is started (or finished already)
        sync_status = capsule.content_get_sync()
        self.assertTrue(
            len(sync_status['active_sync_tasks']) >= 1
            or sync_status['last_sync_time'] != last_sync_time)
        # Assert that packages count in the repository is updated
        self.assertEqual(repo.content_counts['package'], 3)
        # Assert that the content of the published content view in
        # lifecycle environment is exactly the same as content of the
        # repository
        cvv_repo_path = form_repo_path(
            org=org.label,
            cv=cv.label,
            cvv=cvv.version,
            prod=product.label,
            repo=repo.label,
        )
        self.assertEqual(
            repo.content_counts['package'],
            cvv.package_count,
        )
        self.assertEqual(get_repo_files(lce_repo_path),
                         get_repo_files(cvv_repo_path))
        # Wait till capsule sync finishes
        for task in sync_status['active_sync_tasks']:
            entities.ForemanTask(id=task['id']).poll()
        # Assert that the content published on the capsule is exactly the
        # same as in the repository
        self.assertEqual(
            get_repo_files(lce_repo_path, hostname=self.capsule_ip),
            get_repo_files(cvv_repo_path))
示例#26
0
    def _setup_capsule(self):
        """Prepare the virtual machine to host a capsule node.

        Performs, in order:
            1. Name resolution setup between the satellite and this VM.
            2. Enabling of the yum repositories required for installation
               (capsule, RHSCL, ansible, maintenance, plus the RHEL repo
               matching ``self.distro``).
            3. Installation of the ``satellite-capsule`` package.
            4. Certificate generation on the satellite and transfer of the
               certs tarball to this VM (via a local temporary file).
            5. Execution of the capsule installer command extracted from the
               ``capsule-certs-generate`` output.
            6. Manual start of ``pulp_celerybeat`` as a workaround while
               BZ1446930 is open.

        :raises CapsuleVirtualMachineError: if package installation,
            certificate generation, the installer run, or the
            ``pulp_celerybeat`` startup fails.
        """
        # setup the name resolution
        self._capsule_setup_name_resolution()
        logger.info('adding repofiles required for capsule installation')
        self.create_custom_repos(capsule=settings.capsule_repo,
                                 rhscl=settings.rhscl_repo,
                                 ansible=settings.ansible_repo,
                                 maint=settings.satmaintenance_repo)
        # e.g. distro 'rhel7' -> settings attribute 'rhel_repo'
        # (strips the trailing version digit before appending '_repo')
        self.configure_rhel_repo(settings.__dict__[self.distro[:-1] + '_repo'])
        self.run('yum repolist')
        # capsule installation pulls many packages; allow up to 20 minutes
        self.run('yum -y install satellite-capsule', timeout=1200)
        result = self.run('rpm -q satellite-capsule')
        if result.return_code != 0:
            raise CapsuleVirtualMachineError(
                u'Failed to install satellite-capsule package\n{}'.format(
                    result.stderr))
        # generate the certs tarball on the satellite host
        cert_file_path = '/root/{0}-certs.tar'.format(self.hostname)
        certs_gen = ssh.command('capsule-certs-generate '
                                '--foreman-proxy-fqdn {0} '
                                '--certs-tar {1}'.format(
                                    self.hostname, cert_file_path))
        if certs_gen.return_code != 0:
            raise CapsuleVirtualMachineError(
                u'Unable to generate certificate\n{}'.format(certs_gen.stderr))
        # copy the certificate to capsule vm
        # (relayed through a local temp file: satellite -> local -> capsule)
        _, temporary_local_cert_file_path = mkstemp(suffix='-certs.tar')
        logger.info('downloading the certs file: {0}'.format(cert_file_path))
        download_file(remote_file=cert_file_path,
                      local_file=temporary_local_cert_file_path,
                      hostname=settings.server.hostname)
        logger.info('uploading the certs file: {0}'.format(cert_file_path))
        upload_file(key_filename=settings.server.ssh_key,
                    local_file=temporary_local_cert_file_path,
                    remote_file=cert_file_path,
                    hostname=self.ip_addr)
        # delete the temporary file
        os.remove(temporary_local_cert_file_path)

        # the satellite-installer invocation is embedded in the
        # capsule-certs-generate output; parse it out and run it
        installer_cmd = extract_capsule_satellite_installer_command(
            certs_gen.stdout)
        result = self.run(installer_cmd, timeout=1800)
        if result.return_code != 0:
            # before exit download the capsule log file
            _, log_path = mkstemp(prefix='capsule_external-', suffix='.log')
            download_file('/var/log/foreman-installer/capsule.log', log_path,
                          self.ip_addr)
            raise CapsuleVirtualMachineError(
                result.return_code, result.stderr,
                u'foreman installer failed at capsule host')

        # manually start pulp_celerybeat service if BZ1446930 is open
        result = self.run('systemctl status pulp_celerybeat.service')
        if 'inactive (dead)' in '\n'.join(result.stdout):
            if is_open('BZ:1446930'):
                result = self.run('systemctl start pulp_celerybeat.service')
                if result.return_code != 0:
                    raise CapsuleVirtualMachineError(
                        'Failed to start pulp_celerybeat service\n{}'.format(
                            result.stderr))
            else:
                # BZ is closed, so a dead service is a genuine failure
                raise CapsuleVirtualMachineError(
                    'pulp_celerybeat service not running')
示例#27
0
def test_positive_create_matcher_prioritize_and_delete(session, puppet_class,
                                                       module_host, domain):
    """Merge the values of all the associated matchers, remove duplicates.
    Delete smart variable.

    :id: 75fc514f-70dd-4cc1-8069-221e9edda89a

    :BZ: 1734022, 1745938

    :steps:

        1.  Create variable with type array and value.
        2.  Create first matcher for attribute fqdn with some value.
        3.  Create second matcher for other attribute with same value as
            fqdn matcher.
            Note - The fqdn/host should have this attribute.
        4.  Select 'Merge overrides' checkbox.
        5.  Select 'Merge default' checkbox.
        6.  Select 'Avoid Duplicates' checkbox.
        7.  Submit the change.
        8.  Go to YAML output of associated host.
        9.  Then delete the smart variable

    :expectedresults:

        1.  The YAML output has the values merged from all the associated
            matchers.
        2.  The YAML output has the default value of variable.
        3.  Duplicate values in YAML output are removed / not displayed.
        4.  In Host-> variables tab, the smart variable should be displayed
            with its respective puppet class.
        5. The smart Variable is deleted successfully.
        6. In YAML output of associated Host, the variable should be
           removed.
        7. In Host-> variables tab, the smart variable should be removed.

    :CaseLevel: Integration
    """
    name = gen_string('alpha')
    override_value = '[80, 90]'
    override_value2 = '[90, 100]'
    with session:
        session.smartvariable.create({
            'variable.key': name,
            'variable.puppet_class': puppet_class.name,
            'variable.default_value': '[20]',
            'variable.parameter_type': 'array',
            'variable.prioritize_attribute_order.merge_overrides': True,
            'variable.prioritize_attribute_order.merge_default': True,
            'variable.prioritize_attribute_order.avoid_duplicates': True,
            'variable.matchers': [
                {
                    'Attribute type': {
                        'matcher_attribute_type': 'fqdn',
                        'matcher_attribute_value': module_host.name,
                    },
                    'Value': override_value,
                },
                {
                    'Attribute type': {
                        'matcher_attribute_type': 'domain',
                        'matcher_attribute_value': domain.name,
                    },
                    'Value': override_value2,
                },
            ],
        })
        assert session.smartvariable.search(name)[0]['Variable'] == name
        # Merged matchers + default, duplicates removed:
        # [20] + [80, 90] + [90, 100] -> [20, 80, 90, 100]
        # NOTE: yaml.safe_load replaces the deprecated yaml.load without an
        # explicit Loader (a warning in PyYAML >= 5.1, TypeError in >= 6.0);
        # plain host YAML output needs no custom tags, so safe_load suffices.
        output = yaml.safe_load(session.host.read_yaml_output(module_host.name))
        assert output['parameters'][name] == [20, 80, 90, 100]
        host_values = session.host.read(module_host.name,
                                        widget_names='parameters')
        smart_variable = next(
            (item
             for item in host_values['parameters']['puppet_class_parameters']
             if item['Name'] == name))
        if not is_open('BZ:1745938'):
            assert smart_variable['Puppet Class'] == puppet_class.name
            assert smart_variable['Value']['value'] == [20, 80, 90, 100]
        # Delete smart variable
        session.smartvariable.delete(name)
        assert not session.smartvariable.search(name)
        # Verify that that smart variable is not present in YAML output
        output = yaml.safe_load(session.host.read_yaml_output(module_host.name))
        assert name not in output['parameters']
        # Verify that smart variable is not present on Host page
        host_values = session.host.read(module_host.name,
                                        'parameters.puppet_class_parameters')
        smart_variables = [
            item
            for item in host_values['parameters']['puppet_class_parameters']
            if item['Name'] == name
        ]
        assert not smart_variables
示例#28
0
    elif settings.virtwho.hypervisor_type == 'kubevirt':
        del form['hypervisor_server']
        form['kubeconfig'] = settings.virtwho.hypervisor_config_file
    else:
        form['hypervisor_username'] = settings.virtwho.hypervisor_username
        form['hypervisor_password'] = settings.virtwho.hypervisor_password
    return form


@fixture()
def virtwho_config(form_data):
    """Yield a freshly created VirtWhoConfig entity built from ``form_data``."""
    config = entities.VirtWhoConfig(**form_data)
    return config.create()


@skipif(
    condition=(settings.virtwho.hypervisor_type == 'kubevirt' and is_open('BZ:1735540')),
    reason='We have not supported kubevirt hypervisor yet',
)
class TestVirtWhoConfig:
    def _try_to_get_guest_bonus(self, hypervisor_name, sku):
        """Search subscriptions for ``sku`` and return the id of the first
        one whose hypervisor name contains ``hypervisor_name``
        (case-insensitive on the needle), or ``None`` if no match is found.
        """
        needle = hypervisor_name.lower()
        for subscription in entities.Subscription().search(
                query={'search': sku}):
            data = subscription.read_json()
            if needle in data['hypervisor']['name']:
                return data['id']

    def _get_guest_bonus(self, hypervisor_name, sku):
        vdc_id, time = wait_for(
            self._try_to_get_guest_bonus,
            func_args=(hypervisor_name, sku),
            fail_condition=None,
示例#29
0
    def test_positive_synchronize_rh_product_future_sync_date(self):
        """Create a sync plan with sync date in a future and sync one RH
        product with it automatically.

        :id: 6697a00f-2181-4c2b-88eb-2333268d780b

        :expectedresults: Product is synchronized successfully.

        :CaseLevel: System
        """
        delay = 2 * 60  # delay for sync date in seconds
        org = entities.Organization().create()
        # Upload a manifest so the RH repository can be enabled
        with manifests.clone() as manifest:
            entities.Subscription().upload(data={'organization_id': org.id},
                                           files={'content': manifest.content})
        repo_id = enable_rhrepo_and_fetchid(
            basearch='x86_64',
            org_id=org.id,
            product=PRDS['rhel'],
            repo=REPOS['rhst7']['name'],
            reposet=REPOSET['rhst7'],
            releasever=None,
        )
        product = entities.Product(name=PRDS['rhel'],
                                   organization=org).search()[0]
        repo = entities.Repository(id=repo_id).read()
        if is_open('BZ:1695733'):
            self.logger.info('Need to set seconds to zero because BZ:1695733')
            sync_date = datetime.utcnow().replace(second=0) + timedelta(
                seconds=delay)
        else:
            # Fixed: a stray trailing comma previously wrapped this value in
            # a one-element tuple, making the type inconsistent with the
            # BZ:1695733 branch above (datetime vs tuple).
            sync_date = datetime.utcnow() + timedelta(seconds=delay)
        sync_plan = entities.SyncPlan(organization=org,
                                      enabled=True,
                                      interval='hourly',
                                      sync_date=sync_date).create()
        # Create and Associate sync plan with product
        sync_plan.add_products(data={'product_ids': [product.id]})
        # Verify product is not synced and doesn't have any content
        with self.assertRaises(AssertionError):
            self.validate_task_status(repo.id, max_tries=1)
        self.validate_repo_content(repo,
                                   ['erratum', 'package', 'package_group'],
                                   after_sync=False)
        # Wait quarter of expected time
        self.logger.info('Waiting {0} seconds to check product {1}'
                         ' was not synced'.format(delay / 4, product.name))
        sleep(delay / 4)
        # Verify product has not been synced yet
        with self.assertRaises(AssertionError):
            self.validate_task_status(repo.id, max_tries=1)
        self.validate_repo_content(repo,
                                   ['erratum', 'package', 'package_group'],
                                   after_sync=False)
        # Wait the rest of expected time
        self.logger.info('Waiting {0} seconds to check product {1}'
                         ' was synced'.format((delay * 3 / 4), product.name))
        sleep(delay * 3 / 4)
        # Verify product was synced successfully
        self.validate_task_status(repo.id,
                                  repo_backend_id=repo.backend_identifier)
        self.validate_repo_content(repo,
                                   ['erratum', 'package', 'package_group'])
示例#30
0
def test_positive_host_configuration_status(session):
    """Check if the Host Configuration Status Widget links are working

    :id: ffb0a6a1-2b65-4578-83c7-61492122d865

    :Steps:

        1. Navigate to Monitor -> Dashboard
        2. Review the Host Configuration Status
        3. Navigate to each of the links which has search string associated
           with it.

    :expectedresults: Each link shows the right info

    :BZ: 1631219

    :CaseLevel: Integration
    """
    org = entities.Organization().create()
    loc = entities.Location().create()
    # A freshly created host has no reports, so only the 'Hosts with no
    # reports' criterion is expected to match it (count == 1 below).
    host = entities.Host(organization=org, location=loc).create()
    # Widget row labels, kept index-aligned with search_strings_list:
    # zip() below pairs each criterion with its expected search query.
    criteria_list = [
        'Hosts that had performed modifications without error',
        'Hosts in error state',
        'Good host reports in the last 30 minutes',
        'Hosts that had pending changes',
        'Out of sync hosts',
        'Hosts with alerts disabled',
        'Hosts with no reports',
    ]
    search_strings_list = [
        'last_report > \"30 minutes ago\" and (status.applied > 0 or'
        ' status.restarted > 0) and (status.failed = 0)',
        'last_report > \"30 minutes ago\" and (status.failed > 0 or'
        ' status.failed_restarts > 0) and status.enabled = true',
        'last_report > \"30 minutes ago\" and status.enabled = true and'
        ' status.applied = 0 and status.failed = 0 and status.pending = 0',
        'last_report > \"30 minutes ago\" and status.pending > 0 and status.enabled = true',
        'last_report < \"30 minutes ago\" and status.enabled = true',
        'status.enabled = false',
        'not has last_report and status.enabled = true',
    ]
    # While BZ:1631219 is open, the last row ('Hosts with no reports') is
    # skipped from both lists; pop() removes the final element of each.
    if is_open('BZ:1631219'):
        criteria_list.pop()
        search_strings_list.pop()

    with session:
        session.organization.select(org_name=org.name)
        session.location.select(loc_name=loc.name)
        # First pass: verify the widget's displayed counts
        dashboard_values = session.dashboard.read('HostConfigurationStatus')
        for criteria in criteria_list:
            if criteria == 'Hosts with no reports':
                assert dashboard_values['status_list'][criteria] == 1
            else:
                assert dashboard_values['status_list'][criteria] == 0

        # Second pass: click each widget row and verify the host search page
        # is populated with the expected search string and result set
        for criteria, search in zip(criteria_list, search_strings_list):
            if criteria == 'Hosts with no reports':
                session.dashboard.action(
                    {'HostConfigurationStatus': {
                        'status_list': criteria
                    }})
                values = session.host.read_all()
                assert values['searchbox'] == search
                # only the freshly created, report-less host should match
                assert len(values['table']) == 1
                assert values['table'][0]['Name'] == host.name
            else:
                session.dashboard.action(
                    {'HostConfigurationStatus': {
                        'status_list': criteria
                    }})
                values = session.host.read_all()
                assert values['searchbox'] == search
                assert len(values['table']) == 0