def test_positive_incremental_update_puppet(self):
    """Incrementally update a CVV with a puppet module.

    :id: 19b2fe3b-6c91-4713-9910-17517fba661f

    :expectedresults: The incremental update succeeds with no errors, and
        the content view is given an additional version.

    :CaseLevel: Integration
    """
    # Create a content view and add a yum repository to it. Publish the CV.
    product = entities.Product().create()
    yum_repo = entities.Repository(
        content_type='yum',
        product=product,
    ).create()
    content_view = entities.ContentView(
        organization=product.organization,
        repository=[yum_repo],
    ).create()
    content_view.publish()
    content_view = content_view.read()

    # Create a puppet repository and upload a puppet module into it.
    puppet_repo = entities.Repository(
        content_type='puppet',
        product=product,
    ).create()
    with open(get_data_file(PUPPET_MODULE_NTP_PUPPETLABS), 'rb') as handle:
        puppet_repo.upload_content(files={'content': handle})

    # Extract all the available puppet modules.
    puppet_modules = content_view.available_puppet_modules()['results']
    # Make sure that we have results. Uploading content does not seem to
    # create a task so we cannot poll it for status. We should then check
    # that we have some results back before proceeding.
    self.assertGreater(len(puppet_modules), 0)
    puppet_module = entities.PuppetModule(id=puppet_modules[0]['id'])

    # Incrementally update the CVV with the puppet module.
    payload = {
        'content_view_version_environments': [{
            'content_view_version_id': content_view.version[0].id,
            'environment_ids': [
                environment.id
                for environment
                in content_view.version[0].read().environment
            ],
        }],
        'add_content': {'puppet_module_ids': [puppet_module.id]},
    }
    content_view.version[0].incremental_update(data=payload)
    content_view = content_view.read()

    # The CV now has two versions. The first version has no puppet
    # modules, and the second version has one puppet module. Let's verify
    # this. NOTE: The `read_json` line should be refactored after the
    # 'minor' attribute is added to the ContentViewVersion entity class.
    self.assertEqual(len(content_view.version), 2)
    # Read every version in one pass instead of rebinding list slots by
    # index (avoids the `for i in range(len(...))` anti-pattern).
    content_view.version = [
        version.read() for version in content_view.version
    ]
    content_view.version.sort(key=lambda cvv: cvv.read_json()['minor'])
    self.assertEqual(len(content_view.version[0].puppet_module), 0)
    self.assertEqual(len(content_view.version[1].puppet_module), 1)
    self.assertEqual(
        content_view.version[1].puppet_module[0].id,
        puppet_module.id,
    )
def test_positive_iso_library_sync(self):
    """Ensure RH repo with ISOs after publishing to Library is
    synchronized to capsule automatically

    :id: 221a2d41-0fef-46dd-a804-fdedd7187163

    :customerscenario: true

    :BZ: 1303102, 1480358, 1303103, 1734312

    :expectedresults: ISOs are present on external capsule

    :CaseLevel: System
    """
    # Create organization, product, enable & sync RH repository with ISOs
    org = entities.Organization(smart_proxy=[self.capsule_id]).create()
    with manifests.clone() as manifest:
        upload_manifest(org.id, manifest.content)
    rh_repo_id = enable_rhrepo_and_fetchid(
        basearch='x86_64',
        org_id=org.id,
        product=PRDS['rhsc'],
        repo=REPOS['rhsc7_iso']['name'],
        reposet=REPOSET['rhsc7_iso'],
        releasever=None,
    )
    rh_repo = entities.Repository(id=rh_repo_id).read()
    # ISO repos are large; allow an extended timeout for the sync.
    call_entity_method_with_timeout(rh_repo.sync, timeout=2500)
    capsule = entities.Capsule(id=self.capsule_id).read()
    # Find "Library" lifecycle env for specific organization
    lce = entities.LifecycleEnvironment(organization=org).search(
        query={'search': 'name={}'.format(ENVIRONMENT)})[0]
    # Associate the lifecycle environment with the capsule
    capsule.content_add_lifecycle_environment(data={
        'environment_id': lce.id,
    })
    result = capsule.content_lifecycle_environments()
    self.assertGreaterEqual(len(result['results']), 1)
    self.assertIn(
        lce.id,
        [capsule_lce['id'] for capsule_lce in result['results']],
    )
    # Create a content view with the repository
    cv = entities.ContentView(
        organization=org,
        repository=[rh_repo],
    ).create()
    # Publish new version of the content view
    cv.publish()
    cv = cv.read()
    self.assertEqual(len(cv.version), 1)
    # Verify ISOs are present on satellite
    repo_path = os.path.join(
        PULP_PUBLISHED_ISO_REPOS_PATH, rh_repo.backend_identifier)
    sat_isos = get_repo_files(repo_path, extension='iso')
    # BUGFIX: assert on the ISO listing itself; the previous code
    # re-checked the stale lifecycle-environment `result` dict, which
    # never validated that any ISOs were found.
    self.assertGreater(len(sat_isos), 0)
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule.content_get_sync()
    self.assertTrue(
        len(sync_status['active_sync_tasks']) >= 1
        or sync_status['last_sync_time'])
    # Wait till capsule sync finishes
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll(timeout=600)
    # Verify all the ISOs are present on capsule
    capsule_isos = get_repo_files(
        repo_path, extension='iso', hostname=self.capsule_ip)
    # BUGFIX: same as above — check the capsule's ISO listing, not
    # `result`.
    self.assertGreater(len(capsule_isos), 0)
    self.assertEqual(set(sat_isos), set(capsule_isos))
def test_positive_mirror_on_sync(self):
    """Create 2 repositories with 'on_demand' download policy and mirror
    on sync option, associate them with capsule, sync first repo, move
    package from first repo to second one, sync it, attempt to install
    package on some host.

    :id: 39149642-1e7e-4ef8-8762-bec295913014

    :BZ: 1426408

    :expectedresults: host, subscribed to second repo only, can
        successfully install package

    :CaseLevel: System
    """
    repo1_name = gen_string('alphanumeric')
    repo2_name = gen_string('alphanumeric')
    # Create and publish first custom repository with 2 packages in it
    repo1_url = create_repo(
        repo1_name,
        FAKE_1_YUM_REPO,
        FAKE_1_YUM_REPO_RPMS[1:3],
    )
    # Create and publish second repo with no packages in it
    repo2_url = create_repo(repo2_name)
    # Create organization, product, repository in satellite, and lifecycle
    # environment
    org = entities.Organization().create()
    prod1 = entities.Product(organization=org).create()
    repo1 = entities.Repository(
        download_policy='on_demand',
        mirror_on_sync=True,
        product=prod1,
        url=repo1_url,
    ).create()
    prod2 = entities.Product(organization=org).create()
    repo2 = entities.Repository(
        download_policy='on_demand',
        mirror_on_sync=True,
        product=prod2,
        url=repo2_url,
    ).create()
    lce1 = entities.LifecycleEnvironment(organization=org).create()
    lce2 = entities.LifecycleEnvironment(organization=org).create()
    # Associate the lifecycle environments with the capsule
    capsule = entities.Capsule(id=self.capsule_id).read()
    for lce_id in (lce1.id, lce2.id):
        capsule.content_add_lifecycle_environment(data={
            'environment_id': lce_id,
        })
    result = capsule.content_lifecycle_environments()
    self.assertGreaterEqual(len(result['results']), 2)
    self.assertTrue({lce1.id, lce2.id}.issubset(
        [capsule_lce['id'] for capsule_lce in result['results']]),
    )
    # Create content views with the repositories
    cv1 = entities.ContentView(
        organization=org,
        repository=[repo1],
    ).create()
    cv2 = entities.ContentView(
        organization=org,
        repository=[repo2],
    ).create()
    # Sync first repository
    repo1.sync()
    repo1 = repo1.read()
    # Publish new version of the content view
    cv1.publish()
    cv1 = cv1.read()
    self.assertEqual(len(cv1.version), 1)
    cvv1 = cv1.version[-1].read()
    # Promote content view to lifecycle environment
    promote(cvv1, lce1.id)
    cvv1 = cvv1.read()
    # Version now exists in Library plus lce1.
    self.assertEqual(len(cvv1.environment), 2)
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule.content_get_sync()
    self.assertTrue(
        len(sync_status['active_sync_tasks']) >= 1
        or sync_status['last_sync_time'])
    # Wait till capsule sync finishes
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    # Move one package from the first repo to second one
    ssh.command('mv {} {}'.format(
        os.path.join(
            PULP_PUBLISHED_YUM_REPOS_PATH,
            repo1_name,
            FAKE_1_YUM_REPO_RPMS[2],
        ),
        os.path.join(
            PULP_PUBLISHED_YUM_REPOS_PATH,
            repo2_name,
            FAKE_1_YUM_REPO_RPMS[2],
        ),
    ))
    # Update repositories (re-trigger 'createrepo' command)
    create_repo(repo1_name)
    create_repo(repo2_name)
    # Synchronize first repository
    repo1.sync()
    cv1.publish()
    cv1 = cv1.read()
    self.assertEqual(len(cv1.version), 2)
    cv1.version.sort(key=lambda version: version.id)
    cvv1 = cv1.version[-1].read()
    # Promote content view to lifecycle environment
    promote(cvv1, lce1.id)
    cvv1 = cvv1.read()
    self.assertEqual(len(cvv1.environment), 2)
    # Synchronize second repository
    repo2.sync()
    repo2 = repo2.read()
    # Only the moved package should appear in the second repository.
    self.assertEqual(repo2.content_counts['package'], 1)
    cv2.publish()
    cv2 = cv2.read()
    self.assertEqual(len(cv2.version), 1)
    cvv2 = cv2.version[-1].read()
    # Promote content view to lifecycle environment
    promote(cvv2, lce2.id)
    cvv2 = cvv2.read()
    self.assertEqual(len(cvv2.environment), 2)
    # Create activation key, add subscription to second repo only
    activation_key = entities.ActivationKey(
        content_view=cv2,
        environment=lce2,
        organization=org,
    ).create()
    subscription = entities.Subscription(organization=org).search(
        query={'search': 'name={}'.format(prod2.name)})[0]
    activation_key.add_subscriptions(
        data={'subscription_id': subscription.id})
    # Subscribe a host with activation key
    with VirtualMachine(distro=DISTRO_RHEL7) as client:
        client.install_katello_ca()
        client.register_contenthost(
            org.label,
            activation_key.name,
        )
        # Install the package.
        # BUGFIX: `str.rstrip('.rpm')` strips any run of trailing '.',
        # 'r', 'p' or 'm' characters (a character set, not a suffix) and
        # can silently mangle package names; slice off the literal
        # '.rpm' suffix instead.
        package_name = FAKE_1_YUM_REPO_RPMS[2][:-len('.rpm')]
        result = client.run('yum install -y {}'.format(package_name))
        self.assertEqual(result.return_code, 0)
        # Ensure package installed
        result = client.run('rpm -qa | grep {}'.format(package_name))
        self.assertEqual(result.return_code, 0)
        self.assertIn(package_name, result.stdout[0])
def test_positive_sync_puppet_module_with_versions(self):
    """Ensure it's possible to sync multiple versions of the same puppet
    module to the capsule

    :id: 83a0ddd6-8a6a-43a0-b169-094a2556dd28

    :customerscenario: true

    :BZ: 1365952, 1655243

    :Steps:

        1. Register a capsule
        2. Associate LCE with the capsule
        3. Sync a puppet module with multiple versions
        4. Publish a CV with one version of puppet module and
           promote it to capsule's LCE
        5. Wait for capsule synchronization to finish
        6. Publish another CV with different version of puppet module
           and promote it to capsule's LCE
        7. Wait for capsule synchronization to finish once more

    :expectedresults: Capsule was successfully synchronized, new version
        of puppet module is present on capsule

    :CaseLevel: System

    :CaseImportance: Medium
    """
    # The upstream repo ships the 'versioned' module in (at least) these
    # two versions; the test swaps one for the other.
    module_name = 'versioned'
    module_versions = ['2.2.2', '3.3.3']
    org = entities.Organization().create()
    lce = entities.LifecycleEnvironment(organization=org).create()
    content_view = entities.ContentView(organization=org).create()
    prod = entities.Product(organization=org).create()
    puppet_repository = entities.Repository(
        content_type=REPO_TYPE['puppet'],
        product=prod,
        url=CUSTOM_PUPPET_REPO,
    ).create()
    # Attach the lifecycle environment to the capsule so promotions to
    # it trigger a capsule sync.
    capsule = entities.Capsule(id=self.capsule_id).read()
    capsule.content_add_lifecycle_environment(data={
        'environment_id': lce.id,
    })
    result = capsule.content_lifecycle_environments()
    self.assertGreaterEqual(len(result['results']), 1)
    self.assertIn(
        lce.id,
        [capsule_lce['id'] for capsule_lce in result['results']])
    puppet_repository.sync()
    # Locate the older of the two module versions.
    puppet_module_old = entities.PuppetModule().search(query={
        'search': 'name={} and version={}'.format(
            module_name, module_versions[0])
    })[0]
    # Add puppet module to the CV
    entities.ContentViewPuppetModule(
        content_view=content_view,
        id=puppet_module_old.id,
    ).create()
    content_view = content_view.read()
    self.assertGreater(len(content_view.puppet_module), 0)
    # Publish and promote CVV
    content_view.publish()
    content_view = content_view.read()
    self.assertEqual(len(content_view.version), 1)
    cvv = content_view.version[-1].read()
    promote(cvv, lce.id)
    cvv = cvv.read()
    # Version now lives in Library plus the promoted environment.
    self.assertEqual(len(cvv.environment), 2)
    # Wait till capsule sync finishes
    sync_status = capsule.content_get_sync()
    self.assertTrue(
        len(sync_status['active_sync_tasks']) >= 1
        or sync_status['last_sync_time'])
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    sync_status = capsule.content_get_sync()
    # Remember when the first sync completed so a second sync can be
    # detected later even if its tasks have already finished.
    last_sync_time = sync_status['last_sync_time']
    # Unassign old puppet module version from CV
    entities.ContentViewPuppetModule(
        content_view=content_view,
        id=content_view.puppet_module[0].id,
    ).delete()
    # Assign new puppet module version
    puppet_module_new = entities.PuppetModule().search(query={
        'search': 'name={} and version={}'.format(
            module_name, module_versions[1])
    })[0]
    entities.ContentViewPuppetModule(
        content_view=content_view,
        id=puppet_module_new.id,
    ).create()
    # NOTE(review): `content_view` has not been re-read since the module
    # swap, so this checks the previously-read module list — confirm
    # whether a fresh `.read()` is intended here.
    self.assertGreater(len(content_view.puppet_module), 0)
    # Publish and promote CVV
    content_view.publish()
    content_view = content_view.read()
    self.assertEqual(len(content_view.version), 2)
    cvv = content_view.version[-1].read()
    promote(cvv, lce.id)
    cvv = cvv.read()
    self.assertEqual(len(cvv.environment), 2)
    # Wait till capsule sync finishes; if no tasks are active the sync
    # must already have happened (timestamp changed).
    sync_status = capsule.content_get_sync()
    if sync_status['active_sync_tasks']:
        for task in sync_status['active_sync_tasks']:
            entities.ForemanTask(id=task['id']).poll()
    else:
        self.assertNotEqual(
            sync_status['last_sync_time'], last_sync_time)
    # The new module version's archive must be published on the capsule.
    stored_modules = get_repo_files(
        PULP_PUBLISHED_PUPPET_REPOS_PATH, 'gz', self.capsule_ip)
    with self.assertNotRaises(StopIteration):
        next(
            filename for filename in stored_modules
            if '{}-{}'.format(module_name, module_versions[1]) in filename
        )
def test_positive_checksum_sync(self):
    """Synchronize repository to capsule, update repository's checksum
    type, trigger capsule sync and make sure checksum type was updated on
    capsule

    :id: eb07bdf3-6cd8-4a2f-919b-8dfc84e16115

    :customerscenario: true

    :BZ: 1288656, 1664288, 1732066

    :expectedresults: checksum type is updated in repodata of
        corresponding repository on capsule

    :CaseLevel: System

    :CaseImportance: Critical
    """
    # Relative path of the repodata index inside a published repo.
    repomd_path = 'repodata/repomd.xml'
    # Create organization, product, lce and repository with sha256
    # checksum type
    org = entities.Organization(smart_proxy=[self.capsule_id]).create()
    product = entities.Product(organization=org).create()
    repo = entities.Repository(
        product=product,
        checksum_type='sha256',
        download_policy='immediate',
    ).create()
    lce = entities.LifecycleEnvironment(organization=org).create()
    # Associate the lifecycle environment with the capsule
    capsule = entities.Capsule(id=self.capsule_id).read()
    capsule.content_add_lifecycle_environment(data={
        'environment_id': lce.id,
    })
    result = capsule.content_lifecycle_environments()
    self.assertGreaterEqual(len(result['results']), 1)
    self.assertIn(
        lce.id,
        [capsule_lce['id'] for capsule_lce in result['results']])
    # Sync, publish and promote a repo
    cv = entities.ContentView(
        organization=org,
        repository=[repo],
    ).create()
    repo.sync()
    repo = repo.read()
    cv.publish()
    cv = cv.read()
    self.assertEqual(len(cv.version), 1)
    cvv = cv.version[-1].read()
    promote(cvv, lce.id)
    cvv = cvv.read()
    # Version now exists in Library plus the promoted environment.
    self.assertEqual(len(cvv.environment), 2)
    # Wait till capsule sync finishes
    sync_status = capsule.content_get_sync()
    self.assertTrue(
        len(sync_status['active_sync_tasks']) >= 1
        or sync_status['last_sync_time'])
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    sync_status = capsule.content_get_sync()
    # Record when the first sync finished so the second sync can be
    # detected below.
    last_sync_time = sync_status['last_sync_time']
    # Verify repodata's checksum type is sha256, not sha1 on capsule
    lce_repo_path = form_repo_path(
        org=org.label,
        lce=lce.label,
        cv=cv.label,
        prod=product.label,
        repo=repo.label,
    )
    # grep exits non-zero when nothing matches, so a failing grep here
    # proves sha1 is absent.
    result = ssh.command(
        'grep -o \'checksum type="sha1"\' {}/{}'.format(
            lce_repo_path, repomd_path),
        hostname=self.capsule_ip)
    self.assertNotEqual(result.return_code, 0)
    self.assertEqual(len(result.stdout), 0)
    result = ssh.command(
        'grep -o \'checksum type="sha256"\' {}/{}'.format(
            lce_repo_path, repomd_path),
        hostname=self.capsule_ip)
    self.assertEqual(result.return_code, 0)
    self.assertGreater(len(result.stdout), 0)
    # Update repo's checksum type to sha1
    repo.checksum_type = 'sha1'
    repo = repo.update(['checksum_type'])
    # Sync, publish and promote repo
    repo.sync()
    cv.publish()
    cv = cv.read()
    self.assertEqual(len(cv.version), 2)
    # Versions may come back unordered; pick the newest by id.
    cv.version.sort(key=lambda version: version.id)
    cvv = cv.version[-1].read()
    promote(cvv, lce.id)
    cvv = cvv.read()
    self.assertEqual(len(cvv.environment), 2)
    # Wait till capsule sync finishes (either tasks are active or the
    # sync timestamp has moved past the one recorded earlier).
    sync_status = capsule.content_get_sync()
    self.assertTrue(
        len(sync_status['active_sync_tasks']) >= 1
        or sync_status['last_sync_time'] != last_sync_time)
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    # Verify repodata's checksum type has updated to sha1 on capsule
    result = ssh.command(
        'grep -o \'checksum type="sha256"\' {}/{}'.format(
            lce_repo_path, repomd_path),
        hostname=self.capsule_ip)
    self.assertNotEqual(result.return_code, 0)
    self.assertEqual(len(result.stdout), 0)
    result = ssh.command(
        'grep -o \'checksum type="sha1"\' {}/{}'.format(
            lce_repo_path, repomd_path),
        hostname=self.capsule_ip)
    self.assertEqual(result.return_code, 0)
    self.assertGreater(len(result.stdout), 0)
def test_positive_end_to_end(self, fake_manifest_is_set, default_sat, rhel7_contenthost):
    """Perform end to end smoke tests using RH and custom repos.

    1. Create a new user with admin permissions
    2. Using the new user from above
        1. Create a new organization
        2. Clone and upload manifest
        3. Create a new lifecycle environment
        4. Create a custom product
        5. Create a custom YUM repository
        6. Enable a Red Hat repository
        7. Synchronize these two repositories
        8. Create a new content view
        9. Associate the YUM and Red Hat repositories to new content view
        10. Publish content view
        11. Promote content view to the lifecycle environment
        12. Create a new activation key
        13. Add the products to the activation key
        14. Create a new libvirt compute resource
        15. Create a new subnet
        16. Create a new domain
        17. Create a new hostgroup and associate previous entities to it
        18. Provision a client  ** NOT CURRENTLY PROVISIONING

    :id: b2f73740-d3ce-4e6e-abc7-b23e5562bac1

    :expectedresults: All tests should succeed and Content should be
        successfully fetched by client.

    :parametrized: yes
    """
    # step 1: Create a new user with admin permissions
    login = gen_string('alphanumeric')
    password = gen_string('alphanumeric')
    entities.User(admin=True, login=login, password=password).create()

    # step 2.1: Create a new organization
    # Every entity below is created through a server config bound to the
    # new user's credentials, exercising the API as that user.
    server_config = get_nailgun_config()
    server_config.auth = (login, password)
    org = entities.Organization(server_config).create()

    # step 2.2: Clone and upload manifest
    if fake_manifest_is_set:
        with manifests.clone() as manifest:
            upload_manifest(org.id, manifest.content)

    # step 2.3: Create a new lifecycle environment
    le1 = entities.LifecycleEnvironment(server_config, organization=org).create()

    # step 2.4: Create a custom product
    prod = entities.Product(server_config, organization=org).create()
    repositories = []

    # step 2.5: Create custom YUM repository
    custom_repo = entities.Repository(
        server_config, product=prod, content_type='yum', url=CUSTOM_RPM_REPO
    ).create()
    repositories.append(custom_repo)

    # step 2.6: Enable a Red Hat repository
    if fake_manifest_is_set:
        rhel_repo = entities.Repository(
            id=enable_rhrepo_and_fetchid(
                basearch='x86_64',
                org_id=org.id,
                product=constants.PRDS['rhel'],
                repo=constants.REPOS['rhst7']['name'],
                reposet=constants.REPOSET['rhst7'],
            )
        )
        repositories.append(rhel_repo)

    # step 2.7: Synchronize these two repositories
    for repo in repositories:
        repo.sync()

    # step 2.8: Create content view
    content_view = entities.ContentView(server_config, organization=org).create()

    # step 2.9: Associate the YUM and Red Hat repositories to new content view
    content_view.repository = repositories
    content_view = content_view.update(['repository'])

    # step 2.10: Publish content view
    content_view.publish()

    # step 2.11: Promote content view to the lifecycle environment
    content_view = content_view.read()
    assert len(content_view.version) == 1
    cv_version = content_view.version[0].read()
    # Before promotion the version exists only in Library.
    assert len(cv_version.environment) == 1
    promote(cv_version, le1.id)
    # check that content view exists in lifecycle
    content_view = content_view.read()
    assert len(content_view.version) == 1
    cv_version = cv_version.read()

    # step 2.12: Create a new activation key
    activation_key_name = gen_string('alpha')
    activation_key = entities.ActivationKey(
        name=activation_key_name,
        environment=le1,
        organization=org,
        content_view=content_view,
    ).create()

    # step 2.13: Add the products to the activation key
    for sub in entities.Subscription(organization=org).search():
        if sub.name == constants.DEFAULT_SUBSCRIPTION_NAME:
            activation_key.add_subscriptions(data={
                'quantity': 1,
                'subscription_id': sub.id,
            })
            break
    # step 2.13.1: Enable product content
    if fake_manifest_is_set:
        activation_key.content_override(
            data={
                'content_overrides': [{
                    'content_label': constants.REPOS['rhst7']['id'],
                    'value': '1',
                }]
            }
        )

    # BONUS: Create a content host and associate it with promoted
    # content view and last lifecycle where it exists
    content_host = entities.Host(
        content_facet_attributes={
            'content_view_id': content_view.id,
            'lifecycle_environment_id': le1.id,
        },
        organization=org,
    ).create()
    # check that content view matches what we passed
    assert content_host.content_facet_attributes['content_view_id'] == content_view.id
    # check that lifecycle environment matches
    assert content_host.content_facet_attributes['lifecycle_environment_id'] == le1.id

    # step 2.14: Create a new libvirt compute resource
    entities.LibvirtComputeResource(
        server_config,
        url=f'qemu+ssh://root@{settings.libvirt.libvirt_hostname}/system',
    ).create()

    # step 2.15: Create a new subnet
    subnet = entities.Subnet(server_config).create()

    # step 2.16: Create a new domain
    domain = entities.Domain(server_config).create()

    # step 2.17: Create a new hostgroup and associate previous entities to it
    entities.HostGroup(server_config, domain=domain, subnet=subnet).create()

    # step 2.18: Provision a client
    # TODO this isn't provisioning through satellite as intended
    # Note it wasn't well before the change that added this todo
    rhel7_contenthost.install_katello_ca(default_sat)
    # Register client with foreman server using act keys
    rhel7_contenthost.register_contenthost(org.label, activation_key_name)
    assert rhel7_contenthost.subscribed
    # Install rpm on client
    package_name = 'katello-agent'
    result = rhel7_contenthost.execute(f'yum install -y {package_name}')
    assert result.status == 0
    # Verify that the package is installed by querying it
    result = rhel7_contenthost.run(f'rpm -q {package_name}')
    assert result.status == 0
def content_view_module_stream(module_org, sync_repo_module_stream):
    """Content view wrapping the synced module-stream repository."""
    cv = entities.ContentView(
        organization=module_org,
        repository=[sync_repo_module_stream],
    )
    return cv.create()
def test_positive_delete_cv_promoted_to_multi_env(self):
    """Delete published content view with version promoted to multiple
    environments

    :id: c164bd97-e710-4a5a-9c9f-657e6bed804b

    :Steps:

        1. Create a content view
        2. Add a yum repo and a puppet module to the content view
        3. Publish the content view
        4. Promote the content view to multiple environment
           Library -> DEV -> QE -> STAGE -> PROD
        5. Delete the content view, this should delete the content with
           all it's published/promoted versions from all environments

    :expectedresults: The content view doesn't exists

    :CaseLevel: Integration

    :CaseImportance: Critical
    """
    org = entities.Organization().create()
    # Build the DEV -> QE -> STAGE -> PROD promotion path.
    lce_dev = entities.LifecycleEnvironment(organization=org).create()
    lce_qe = entities.LifecycleEnvironment(
        organization=org, prior=lce_dev).create()
    lce_stage = entities.LifecycleEnvironment(
        organization=org, prior=lce_qe).create()
    lce_prod = entities.LifecycleEnvironment(
        organization=org, prior=lce_stage).create()
    product = entities.Product(organization=org).create()
    # One synced yum repository and one synced puppet repository.
    yum_repo = entities.Repository(
        url=FAKE_1_YUM_REPO, product=product).create()
    yum_repo.sync()
    puppet_repo = entities.Repository(
        url=FAKE_0_PUPPET_REPO,
        content_type='puppet',
        product=product,
    ).create()
    puppet_repo.sync()
    # Attach the yum repository to a brand-new content view.
    cv = entities.ContentView(organization=org).create()
    cv.repository = [yum_repo]
    cv = cv.update(['repository'])
    # Attach an arbitrary puppet module as well.
    module = random.choice(cv.available_puppet_modules()['results'])
    entities.ContentViewPuppetModule(
        author=module['author'],
        name=module['name'],
        content_view=cv,
    ).create()
    # Publish: the single resulting version lives in Library only.
    cv.publish()
    cv = cv.read()
    self.assertEqual(len(cv.version), 1)
    cvv = cv.version[0].read()
    self.assertEqual(len(cvv.environment), 1)
    lce_library = entities.LifecycleEnvironment(
        id=cvv.environment[0].id).read()
    self.assertEqual(lce_library.name, ENVIRONMENT)
    # Walk the version through every environment on the path.
    for lce in (lce_dev, lce_qe, lce_stage, lce_prod):
        promote(cvv, lce.id)
    cvv = cvv.read()
    self.assertEqual(
        {lce_library.id, lce_dev.id, lce_qe.id, lce_stage.id, lce_prod.id},
        {lce.id for lce in cvv.environment},
    )
    # Pull the version out of every environment, then delete the CV.
    for lce in cvv.environment:
        cv.delete_from_environment(lce.id)
    cv.delete()
    # Reading the deleted content view must fail.
    with self.assertRaises(HTTPError):
        cv.read()
def test_positive_view_VDC_subscription_products(self):
    """Ensure that Virtual Datacenters subscription provided products
    is not empty and that a consumed product exist in content products.

    :id: cc4593f0-66ab-4bf6-87d1-d4bd9c89eba5

    :customerscenario: true

    :steps:

        1. Upload a manifest with Virtual Datacenters subscription
        2. Enable a products provided by Virtual Datacenters subscription,
           and synchronize the auto created repository
        3. Create content view with the product repository, and publish it
        4. Create a lifecycle environment and promote the content view to
           it.
        5. Create an activation key with the content view and lifecycle
           environment
        6. Subscribe a host to the activation key
        7. Goto Hosts -> Content hosts and select the created content host
        8. Attach VDC subscription to content host
        9. Goto Content -> Red Hat Subscription
        10. Select Virtual Datacenters subscription

    :expectedresults:

        1. assert that the provided products is not empty
        2. assert that the enabled product is in subscription Product
           Content

    :BZ: 1366327

    :CaseLevel: System
    """
    org = entities.Organization().create()
    # Unsaved Subscription entity used purely as a search handle below.
    subscription = entities.Subscription(organization=org)
    self.upload_manifest(org.id, manifests.clone())
    vds_product_name = PRDS['rhdt']
    vdc_repo_id = enable_rhrepo_and_fetchid(
        basearch='x86_64',
        org_id=org.id,
        product=vds_product_name,
        repo=REPOS['rhdt7']['name'],
        reposet=REPOSET['rhdt7'],
        releasever=None,
    )
    # NOTE(review): the repository entity is never `.read()` here — only
    # its id is needed to trigger `sync()` and to reference it in the CV.
    vdc_repo = entities.Repository(id=vdc_repo_id)
    vdc_repo.sync()
    content_view = entities.ContentView(
        organization=org, repository=[vdc_repo]).create()
    content_view.publish()
    content_view = content_view.read()
    lce = entities.LifecycleEnvironment(organization=org).create()
    promote(content_view.version[0], lce.id)
    activation_key = entities.ActivationKey(
        organization=org,
        environment=lce,
        content_view=content_view,
    ).create()
    # add the default RH subscription
    for sub in subscription.search():
        if sub.read_json()['product_name'] == DEFAULT_SUBSCRIPTION_NAME:
            activation_key.add_subscriptions(data={
                'quantity': 1,
                'subscription_id': sub.id,
            })
            break
    with VirtualMachine() as vm:
        vm.install_katello_ca()
        vm.register_contenthost(
            org.label, activation_key=activation_key.name)
        self.assertTrue(vm.subscribed)
        # Drive the UI to attach the VDC subscription and inspect the
        # subscription's product lists.
        with Session(self) as session:
            set_context(session, org=org.name)
            self.contenthost.update(
                vm.hostname,
                add_subscriptions=[VDC_SUBSCRIPTION_NAME],
            )
            self.assertIsNotNone(
                self.contenthost.wait_until_element(
                    common_locators['alert.success_sub_form']))
            # ensure that subscription provided products list is not empty
            provided_products = self.subscriptions.get_provided_products(
                VDC_SUBSCRIPTION_NAME)
            self.assertGreater(len(provided_products), 0)
            # ensure that the product is in provided products
            self.assertIn(vds_product_name, provided_products)
            # ensure that product is in content products
            content_products = self.subscriptions.get_content_products(
                VDC_SUBSCRIPTION_NAME)
            self.assertEqual(len(content_products), 1)
            self.assertIn(vds_product_name, content_products)
def test_positive_remove_prod_promoted_cv_version_from_default_env(self):
    """Remove PROD promoted content view version from Library environment

    :id: 24911876-7c2a-4a12-a3aa-98051dfda29d

    :Steps:

        1. Create a content view
        2. Add yum repositories, puppet modules, docker repositories to CV
        3. Publish content view
        4. Promote the content view version to multiple environments
           Library -> DEV -> QE -> PROD
        5. remove the content view version from Library environment

    :expectedresults: Content view version exist only in DEV, QE, PROD and
        not in Library

    :CaseLevel: Integration
    """
    org = entities.Organization().create()
    # Promotion path: DEV -> QE -> PROD.
    lce_dev = entities.LifecycleEnvironment(organization=org).create()
    lce_qe = entities.LifecycleEnvironment(
        organization=org, prior=lce_dev).create()
    lce_prod = entities.LifecycleEnvironment(
        organization=org, prior=lce_qe).create()
    product = entities.Product(organization=org).create()
    # Synced yum, docker and puppet repositories under the product.
    yum_repo = entities.Repository(
        url=FAKE_1_YUM_REPO, product=product).create()
    yum_repo.sync()
    docker_repo = entities.Repository(
        content_type=u'docker',
        docker_upstream_name=u'busybox',
        product=product,
        url=DOCKER_REGISTRY_HUB,
    ).create()
    docker_repo.sync()
    puppet_repo = entities.Repository(
        url=FAKE_0_PUPPET_REPO,
        content_type='puppet',
        product=product,
    ).create()
    puppet_repo.sync()
    # New content view holding the yum and docker repositories.
    cv = entities.ContentView(organization=org).create()
    cv.repository = [yum_repo, docker_repo]
    cv = cv.update(['repository'])
    # Attach an arbitrary puppet module from the puppet repository.
    module = random.choice(cv.available_puppet_modules()['results'])
    entities.ContentViewPuppetModule(
        author=module['author'],
        name=module['name'],
        content_view=cv,
    ).create()
    # Publish: exactly one version, living in Library only.
    cv.publish()
    cv = cv.read()
    self.assertEqual(len(cv.version), 1)
    cvv = cv.version[0].read()
    self.assertEqual(len(cvv.environment), 1)
    lce_library = entities.LifecycleEnvironment(
        id=cvv.environment[0].id).read()
    self.assertEqual(lce_library.name, ENVIRONMENT)
    # Promote the version along DEV -> QE -> PROD.
    for lce in (lce_dev, lce_qe, lce_prod):
        promote(cvv, lce.id)
    self.assertEqual(
        {lce_library.id, lce_dev.id, lce_qe.id, lce_prod.id},
        {lce.id for lce in cvv.read().environment},
    )
    # Drop the version from Library...
    cv.delete_from_environment(lce_library.id)
    # ...and verify it remains only in DEV, QE and PROD.
    self.assertEqual(
        {lce_dev.id, lce_qe.id, lce_prod.id},
        {lce.id for lce in cvv.read().environment},
    )
def test_positive_remove_cv_version_from_multi_env(self):
    """Remove promoted content view version from multiple environment

    :id: 18b86a68-8e6a-43ea-b95e-188fba125a26

    :Steps:

        1. Create a content view
        2. Add a yum repo and a puppet module to the content view
        3. Publish the content view
        4. Promote the content view version to multiple environments
           Library -> DEV -> QE -> STAGE -> PROD
        5. Remove content view version from QE, STAGE and PROD

    :expectedresults: Content view version exists only in Library, DEV

    :CaseLevel: Integration

    :CaseImportance: Low
    """
    # Build a 4-step lifecycle path: Library -> DEV -> QE -> STAGE -> PROD
    org = entities.Organization().create()
    lce_dev = entities.LifecycleEnvironment(organization=org).create()
    lce_qe = entities.LifecycleEnvironment(organization=org, prior=lce_dev).create()
    lce_stage = entities.LifecycleEnvironment(organization=org, prior=lce_qe).create()
    lce_prod = entities.LifecycleEnvironment(organization=org, prior=lce_stage).create()
    product = entities.Product(organization=org).create()
    yum_repo = entities.Repository(
        url=FAKE_1_YUM_REPO,
        product=product,
    ).create()
    yum_repo.sync()
    puppet_repo = entities.Repository(
        url=FAKE_0_PUPPET_REPO,
        content_type='puppet',
        product=product,
    ).create()
    puppet_repo.sync()
    # create a content view and add to it the yum repo
    content_view = entities.ContentView(organization=org).create()
    content_view.repository = [yum_repo]
    content_view = content_view.update(['repository'])
    # get a random puppet module and add it to content view
    puppet_module = random.choice(
        content_view.available_puppet_modules()['results'])
    entities.ContentViewPuppetModule(
        author=puppet_module['author'],
        name=puppet_module['name'],
        content_view=content_view,
    ).create()
    # publish the content view
    content_view.publish()
    content_view = content_view.read()
    self.assertEqual(len(content_view.version), 1)
    content_view_version = content_view.version[0].read()
    # A freshly published version lives only in the built-in Library env
    self.assertEqual(len(content_view_version.environment), 1)
    lce_library = entities.LifecycleEnvironment(
        id=content_view_version.environment[0].id).read()
    self.assertEqual(lce_library.name, ENVIRONMENT)
    # promote content view version to DEV QE STAGE PROD lifecycle
    # environments
    for lce in [lce_dev, lce_qe, lce_stage, lce_prod]:
        promote(content_view_version, lce.id)
    self.assertEqual(
        {lce_library.id, lce_dev.id, lce_qe.id, lce_stage.id, lce_prod.id},
        {lce.id for lce in content_view_version.read().environment})
    # remove the content view version from QE STAGE and PROD environments
    for lce in [lce_qe, lce_stage, lce_prod]:
        content_view.delete_from_environment(lce.id)
    # assert that the content view version exists only in Library and DEV
    # environments
    self.assertEqual(
        {lce_library.id, lce_dev.id},
        {lce.id for lce in content_view_version.read().environment})
def setUp(self): """Init content view with repo per each test""" super(ContentViewVersionCreateTestCase, self).setUp() self.content_view = entities.ContentView( organization=self.org, ).create()
def test_positive_delete_with_puppet_content(self):
    """Delete content view version with puppet module content

    :id: cae1164c-6608-4e19-923c-936e75ed807b

    :steps:

        1. Create a lifecycle environment
        2. Create a content view
        3. Add a puppet module to content view
        4. Publish the content view
        5. Promote the content view to lifecycle environment
        6. Remove the content view versions from all lifecycle
           environments
        7. Delete the content view version

    :expectedresults: Content view version deleted successfully

    :CaseLevel: Integration
    """
    org = entities.Organization().create()
    # Look up the org's built-in Library environment (it always exists)
    lce_library = entities.LifecycleEnvironment(
        organization=org, name=ENVIRONMENT).search()[0].read()
    lce = entities.LifecycleEnvironment(organization=org, prior=lce_library).create()
    product = entities.Product(organization=org).create()
    puppet_repo = entities.Repository(
        url=FAKE_0_PUPPET_REPO,
        content_type=REPO_TYPE['puppet'],
        product=product,
    ).create()
    puppet_repo.sync()
    # create a content view (puppet-only: no yum repo is attached)
    content_view = entities.ContentView(organization=org).create()
    # get a random puppet module
    puppet_module = random.choice(
        content_view.available_puppet_modules()['results'])
    # add the puppet module to content view
    entities.ContentViewPuppetModule(
        author=puppet_module['author'],
        name=puppet_module['name'],
        content_view=content_view,
    ).create()
    # publish the content view
    content_view.publish()
    content_view = content_view.read()
    self.assertEqual(len(content_view.version), 1)
    content_view_version = content_view.version[0].read()
    self.assertEqual(len(content_view_version.environment), 1)
    lce_library = entities.LifecycleEnvironment(
        id=content_view_version.environment[0].id).read()
    self.assertEqual(lce_library.name, ENVIRONMENT)
    # promote content view version to the created lifecycle environment
    promote(content_view_version, lce.id)
    content_view_version = content_view_version.read()
    self.assertEqual({lce_library.id, lce.id},
                     {lce.id for lce in content_view_version.environment})
    # remove the content view versions from all lifecycle environments
    # (a version cannot be deleted while it is still in any environment)
    for env in (lce_library, lce):
        content_view.delete_from_environment(env.id)
    content_view_version = content_view_version.read()
    self.assertEqual(len(content_view_version.environment), 0)
    # delete the content view version
    content_view_version.delete()
    self.assertEqual(len(content_view.read().version), 0)
def test_positive_incremental_update_propagate_composite(self):
    """Incrementally update a CVV in composite CV with
    `propagate_all_composites` flag set

    :BZ: 1288148

    :id: 1ddcb2ef-3819-442e-b070-cf44aba58dcd

    :customerscenario: true

    :Steps:

        1. Create and publish CV with some content
        2. Create composite CV, add previously created CV inside it
        3. Publish composite CV
        4. Create a puppet repository and upload a puppet module into it
        5. Incrementally update the CVV with the puppet module with
           `propagate_all_composites` flag set to `True`

    :expectedresults:

        1. The incremental update succeeds with no errors
        2. New incremental CVV contains new puppet module
        3. New incremental composite CVV contains new puppet module

    :CaseLevel: Integration

    :CaseImportance: Medium
    """
    product = entities.Product().create()
    yum_repo = entities.Repository(
        content_type='yum',
        product=product,
    ).create()
    yum_repo.sync()
    content_view = entities.ContentView(
        organization=product.organization,
        repository=[yum_repo],
    ).create()
    content_view.publish()
    content_view = content_view.read()
    self.assertEqual(len(content_view.version), 1)
    self.assertEqual(len(content_view.version[0].read().puppet_module), 0)
    # Composite CV wrapping the first published version of the plain CV
    comp_content_view = entities.ContentView(
        component=[content_view.version[0].id],
        composite=True,
        organization=product.organization,
    ).create()
    comp_content_view.publish()
    comp_content_view = comp_content_view.read()
    self.assertEqual(len(comp_content_view.version), 1)
    self.assertEqual(
        len(comp_content_view.version[0].read().puppet_module), 0)
    puppet_repo = entities.Repository(
        content_type='puppet',
        product=product,
    ).create()
    with open(get_data_file(PUPPET_MODULE_NTP_PUPPETLABS), 'rb') as handle:
        puppet_repo.upload_content(files={'content': handle})
    # Uploading content creates no pollable task, so just check that the
    # module list is non-empty before proceeding
    puppet_modules = content_view.available_puppet_modules()['results']
    self.assertGreater(len(puppet_modules), 0)
    puppet_module = entities.PuppetModule(id=puppet_modules[0]['id'])
    # propagate_all_composites=True must roll the incremental version
    # into every composite CV that includes this version (BZ 1288148)
    content_view.version[0].incremental_update(
        data={
            'content_view_version_environments': [{
                'content_view_version_id': content_view.version[0].id,
                'environment_ids': [
                    environment.id
                    for environment
                    in content_view.version[0].read().environment
                ],
            }],
            'add_content': {
                'puppet_module_ids': [puppet_module.id]
            },
            'propagate_all_composites': True,
        })
    content_view = content_view.read()
    self.assertEqual(len(content_view.version), 2)
    cvv = content_view.version[-1].read()
    self.assertEqual(len(cvv.puppet_module), 1)
    self.assertEqual(cvv.puppet_module[0].id, puppet_module.id)
    comp_content_view = comp_content_view.read()
    self.assertEqual(len(comp_content_view.version), 2)
    comp_cvv = comp_content_view.version[-1].read()
    self.assertEqual(len(comp_cvv.puppet_module), 1)
    self.assertEqual(comp_cvv.puppet_module[0].id, puppet_module.id)
def setUpClass(cls):
    """Setup must ensure there is an Org with Golden Ticket enabled.

    Option 1) SQL::

        UPDATE
         cp_owner
        SET
         content_access_mode = 'org_environment',
         content_access_mode_list='entitlement,org_environment'
        WHERE account='{org.label}';

    Option 2) manifest::

        Change manifest file as it looks like:

            Consumer:
                Name: ExampleCorp
                UUID: c319a1d8-4b30-44cd-b2cf-2ccba4b9a8db
                Content Access Mode: org_environment
                Type: satellite

    :steps:

        1. Create a new organization.
        2. Use either option 1 or option 2 (described above) to activate
           the Golden Ticket.
        3. Create a Product and CV for org.
        4. Add a repository pointing to a real repo which requires a
           RedHat subscription to access.
        5. Create Content Host and assign that gated repos to it.
        6. Create Host with no attached subscriptions.
        7. Sync the gated repository.
    """
    super(ContentAccessTestCase, cls).setUpClass()
    # Create Organization
    cls.org = entities.Organization().create()
    # upload organization manifest with org environment access enabled
    # (this is what activates the Golden Ticket / simple content access)
    manifests.upload_manifest_locked(
        cls.org.id,
        manifests.clone(org_environment_access=True))
    # Create repositories
    cls.repos = [
        # Red Hat Enterprise Linux 7
        {
            'product': PRDS['rhel'],
            'repository-set': REPOSET['rhel7'],
            'repository': REPOS['rhel7']['name'],
            'repository-id': REPOS['rhel7']['id'],
            'releasever': REPOS['rhel7']['releasever'],
            'arch': REPOS['rhel7']['arch'],
            'cdn': True,
        },
        # Red Hat Satellite Tools
        {
            'product': PRDS['rhel'],
            'repository-set': REPOSET['rhst7'],
            'repository': REPOS['rhst7']['name'],
            'repository-id': REPOS['rhst7']['id'],
            'url': settings.sattools_repo['rhel7'],
            # fall back to CDN when no custom sattools repo is configured
            'cdn': bool(settings.cdn or not settings.sattools_repo['rhel7']),
        },
    ]
    cls.custom_product, cls.repos_info = setup_cdn_and_custom_repositories(
        cls.org.id, cls.repos)
    # Create a content view holding every repo set up above
    content_view = entities.ContentView(
        organization=cls.org,
        repository=[
            entities.Repository(id=repo_info['id'])
            for repo_info in cls.repos_info
        ],
    ).create()
    # Publish the content view
    # (large RH repos: allow up to 25 minutes for publish to finish)
    call_entity_method_with_timeout(content_view.publish, timeout=1500)
    cls.content_view = content_view.read()
def test_positive_filter_by_environment(session, module_org, module_repos_col): """Filter Content hosts by environment :id: 578c3a92-c4d8-4933-b122-7ff511c276ec :customerscenario: true :BZ: 1383729 :Setup: Errata synced on satellite server. :Steps: Go to Content -> Errata. Select an Errata -> Content Hosts tab -> Filter content hosts by Environment. :expectedresults: Content hosts can be filtered by Environment. :CaseLevel: System """ with VMBroker(nick=module_repos_col.distro, host_classes={'host': ContentHost}, _count=2) as clients: for client in clients: module_repos_col.setup_virtual_machine(client) assert _install_client_package(client, FAKE_1_CUSTOM_PACKAGE, errata_applicability=True) # Promote the latest content view version to a new lifecycle environment content_view = entities.ContentView( id=module_repos_col.setup_content_data['content_view'] ['id']).read() content_view_version = content_view.version[-1].read() lce = content_view_version.environment[-1].read() new_lce = entities.LifecycleEnvironment(organization=module_org, prior=lce).create() promote(content_view_version, new_lce.id) host = entities.Host().search( query={'search': f'name={clients[0].hostname}'})[0].read() host.content_facet_attributes = { 'content_view_id': content_view.id, 'lifecycle_environment_id': new_lce.id, } host.update(['content_facet_attributes']) with session: # search in new_lce values = session.errata.search_content_hosts( CUSTOM_REPO_ERRATA_ID, clients[0].hostname, environment=new_lce.name) assert values[0]['Name'] == clients[0].hostname assert not session.errata.search_content_hosts( CUSTOM_REPO_ERRATA_ID, clients[1].hostname, environment=new_lce.name) # search in lce values = session.errata.search_content_hosts(CUSTOM_REPO_ERRATA_ID, clients[1].hostname, environment=lce.name) assert values[0]['Name'] == clients[1].hostname assert not session.errata.search_content_hosts( CUSTOM_REPO_ERRATA_ID, clients[0].hostname, environment=lce.name)
def test_positive_create_as_non_admin_user_with_cv_published(
        module_org, test_name):
    """Create a repository as a non admin user in a product that already
    contain a repository that is used in a published content view.

    :id: 407864eb-50b8-4bc8-bbc7-0e6f8136d89f

    :expectedresults: New repository successfully created by non admin user

    :BZ: 1447829

    :CaseLevel: Integration
    """
    user_login = gen_string('alpha')
    user_password = gen_string('alphanumeric')
    repo_name = gen_string('alpha')
    # Minimal permission set: dashboard access plus full product rights,
    # nothing else — so the negative navigation checks below must fail.
    user_permissions = {
        None: ['access_dashboard'],
        'Katello::Product': [
            'view_products',
            'create_products',
            'edit_products',
            'destroy_products',
            'sync_products',
            'export_products',
        ],
    }
    role = entities.Role().create()
    create_role_permissions(role, user_permissions)
    entities.User(
        login=user_login,
        password=user_password,
        role=[role],
        admin=False,
        default_organization=module_org,
        organization=[module_org],
    ).create()
    # As admin: product with one synced repo already used in a published CV
    prod = entities.Product(organization=module_org).create()
    repo = entities.Repository(product=prod, url=FAKE_2_YUM_REPO).create()
    repo.sync()
    content_view = entities.ContentView(organization=module_org).create()
    content_view.repository = [repo]
    content_view = content_view.update(['repository'])
    content_view.publish()
    with Session(test_name, user_login, user_password) as session:
        # ensure that the created user is not a global admin user
        # check administer->users page
        with raises(NavigationTriesExceeded):
            pswd = gen_string('alphanumeric')
            session.user.create({
                'user.login': gen_string('alphanumeric'),
                'user.auth': 'INTERNAL',
                'user.password': pswd,
                'user.confirm': pswd,
            })
        # ensure that the created user has only the assigned permissions
        # check that host collections menu tab does not exist
        with raises(NavigationTriesExceeded):
            session.hostcollection.create({'name': gen_string('alphanumeric')})
        # the actual positive check: repo creation succeeds for this user
        session.repository.create(
            prod.name,
            {
                'name': repo_name,
                'repo_type': REPO_TYPE['yum'],
                'repo_content.upstream_url': FAKE_1_YUM_REPO,
            })
        assert session.repository.search(
            prod.name, repo.name)[0]['Name'] == repo.name
def test_positive_filtered_errata_status_installable_param(
        session, errata_status_installable):
    """Filter errata for specific content view and verify that host that
    was registered using that content view has different states in
    correspondence to filtered errata and `errata status installable`
    settings flag value

    :id: ed94cf34-b8b9-4411-8edc-5e210ea6af4f

    :Steps:

        1. Prepare setup: Create Lifecycle Environment, Content View,
           Activation Key and all necessary repos
        2. Register Content Host using created data
        3. Create necessary Content View Filter and Rule for repository
           errata
        4. Publish and Promote Content View to a new version.
        5. Go to created Host page and check its properties
        6. Change 'errata status installable' flag in the settings and
           check host properties once more

    :expectedresults: Check that 'errata status installable' flag works as
        intended

    :BZ: 1368254

    :CaseLevel: System
    """
    org = entities.Organization().create()
    lce = entities.LifecycleEnvironment(organization=org).create()
    repos_collection = RepositoryCollection(
        distro=DISTRO_RHEL7,
        repositories=[
            SatelliteToolsRepository(),
            # As Satellite Tools may be added as custom repo and to have a
            # "Fully entitled" host, force the host to consume an RH
            # product with adding a cdn repo.
            RHELAnsibleEngineRepository(cdn=True),
            YumRepository(url=CUSTOM_REPO_URL),
        ],
    )
    repos_collection.setup_content(org.id, lce.id, upload_manifest=True)
    with VMBroker(nick=repos_collection.distro,
                  host_classes={'host': ContentHost}) as client:
        repos_collection.setup_virtual_machine(client)
        assert _install_client_package(
            client, FAKE_1_CUSTOM_PACKAGE, errata_applicability=True)
        # Adding content view filter and content view filter rule to
        # exclude errata for the installed package.
        content_view = entities.ContentView(
            id=repos_collection.setup_content_data['content_view']['id']).read()
        cv_filter = entities.ErratumContentViewFilter(
            content_view=content_view, inclusion=False).create()
        errata = entities.Errata(
            content_view_version=content_view.version[-1]).search(
                query=dict(search=f'errata_id="{CUSTOM_REPO_ERRATA_ID}"'))[0]
        entities.ContentViewFilterRule(
            content_view_filter=cv_filter, errata=errata).create()
        content_view.publish()
        content_view = content_view.read()
        content_view_version = content_view.version[-1]
        promote(content_view_version, lce.id)
        with session:
            session.organization.select(org_name=org.name)
            # Flag ON: filtered-out errata is not "installable", so the
            # host should report a clean status
            _set_setting_value(errata_status_installable, True)
            expected_values = {
                'Status': 'OK',
                'Errata': 'All errata applied',
                'Subscription': 'Fully entitled',
            }
            host_details_values = session.host.get_details(client.hostname)
            actual_values = {
                key: value
                for key, value
                in host_details_values['properties']['properties_table'].items()
                if key in expected_values
            }
            for key in actual_values:
                assert expected_values[key] in actual_values[key], (
                    'Expected text not found')
            # Flag OFF: applicable (even if filtered) errata counts, so
            # the host should now show an error state
            _set_setting_value(errata_status_installable, False)
            expected_values = {
                'Status': 'Error',
                'Errata': 'Security errata applicable',
                'Subscription': 'Fully entitled',
            }
            # navigate to host main page by making a search, to refresh
            # the host details page
            session.host.search(client.hostname)
            host_details_values = session.host.get_details(client.hostname)
            actual_values = {
                key: value
                for key, value
                in host_details_values['properties']['properties_table'].items()
                if key in expected_values
            }
            for key in actual_values:
                assert expected_values[key] in actual_values[key], (
                    'Expected text not found')
def test_post_scenario_errata_count_installtion(self):
    """Post-upgrade scenario that installs the package on pre-upgrade
    client remotely and then verifies if the package installed.

    NOTE(review): the method name contains a typo ("installtion"); it is
    left unchanged here because renaming could break upgrade-scenario
    test selection — confirm before fixing.

    :id: 88fd28e6-b4df-46c0-91d6-784859fd1c21

    :steps:

        1. Recovered pre_upgrade data for post_upgrade verification
        2. Verifying errata count has not changed on satellite
        3. Update Katello-agent and Restart goferd
        4. Verifying the errata_ids
        5. Verifying installation errata passes successfully
        6. Verifying that package installation passed successfully by
           remote docker exec

    :expectedresults:

        1. errata count, erratum list should same after satellite upgrade
        2. Installation of errata should be pass successfully
    """
    # Data persisted by the matching pre-upgrade scenario, keyed by the
    # class name — the keys below must match what that scenario stored.
    entity_data = get_entity_data(self.__class__.__name__)
    client = entity_data.get('rhel_client')
    client_container_id = list(client.values())[0]
    custom_repo_id = entity_data.get('custom_repo_id')
    product_id = entity_data.get('product_id')
    # NOTE(review): 'conten_view_id' (sic) mirrors the key written by the
    # pre-upgrade scenario; do not correct the spelling on this side only.
    conten_view_id = entity_data.get('conten_view_id')
    product = entities.Product(id=product_id).read()
    content_view = entities.ContentView(id=conten_view_id).read()
    custom_yum_repo = entities.Repository(id=custom_repo_id).read()
    activation_key = entity_data.get('activation_key')
    host = entities.Host().search(
        query={'search': 'activation_key={0}'.format(activation_key)})[0]
    installable_errata_count = host.content_facet_attributes[
        'errata_counts']['total']
    tools_repo, rhel_repo = self._create_custom_rhel_tools_repos(product)
    product.sync()
    for repo in (tools_repo, rhel_repo):
        content_view.repository.append(repo)
    content_view = content_view.update(['repository'])
    content_view.publish()
    self._install_or_update_package(
        client_container_id, "katello-agent", update=True)
    self._run_goferd(client_container_id)
    self.assertGreater(installable_errata_count, 1)
    erratum_list = entities.Errata(repository=custom_yum_repo).search(query={
        'order': 'updated ASC',
        'per_page': 1000,
    })
    errata_ids = [errata.errata_id for errata in erratum_list]
    self.assertEqual(sorted(errata_ids), sorted(FAKE_9_YUM_ERRATUM))
    for errata in FAKE_9_YUM_ERRATUM:
        host.errata_apply(data={'errata_ids': [errata]})
        installable_errata_count -= 1
    # waiting for errata count to become 0, as profile uploading take
    # some amount of time
    wait_for(
        lambda: self._errata_count(ak=activation_key) == 0,
        timeout=200,
        delay=10,
        logger=self.logger)
    self.assertEqual(
        host.content_facet_attributes['errata_counts']['total'], 0)
    for package in FAKE_9_YUM_UPDATED_PACKAGES:
        self._check_package_installed(client_container_id, package)
def test_positive_get_count_for_host(module_org, rhel6_contenthost):
    """Available errata count when retrieving Host

    :id: 2f35933f-8026-414e-8f75-7f4ec048faae

    :Setup:

        1. Errata synced on satellite server.
        2. Some Content hosts present.

    :Steps: GET /api/v2/hosts

    :expectedresults: The available errata count is retrieved.

    :CaseLevel: System
    """
    org = entities.Organization().create()
    env = entities.LifecycleEnvironment(organization=org).create()
    content_view = entities.ContentView(organization=org).create()
    activation_key = entities.ActivationKey(
        environment=env, organization=org).create()
    setup_org_for_a_rh_repo(
        {
            'product': constants.PRDS['rhel'],
            'repository-set': constants.REPOSET['rhst6'],
            'repository': constants.REPOS['rhst6']['name'],
            'organization-id': org.id,
            'content-view-id': content_view.id,
            'lifecycle-environment-id': env.id,
            'activationkey-id': activation_key.id,
        },
        force_manifest_upload=True,
    )
    setup_org_for_a_custom_repo(
        {
            'url': CUSTOM_REPO_URL,
            'organization-id': org.id,
            'content-view-id': content_view.id,
            'lifecycle-environment-id': env.id,
            'activationkey-id': activation_key.id,
        }
    )
    # RHVA repo supplies the RH-published errata checked at the end
    repo_id = enable_rhrepo_and_fetchid(
        basearch=constants.DEFAULT_ARCHITECTURE,
        org_id=org.id,
        product=constants.PRDS['rhel'],
        repo=constants.REPOS['rhva6']['name'],
        reposet=constants.REPOSET['rhva6'],
        releasever=constants.DEFAULT_RELEASE_VERSION,
    )
    repo = entities.Repository(id=repo_id)
    assert repo.sync()['result'] == 'success'
    content_view = content_view.read()
    content_view.repository.append(repo)
    content_view = content_view.update(['repository'])
    content_view.publish()
    # promote the newest version (highest id) to the lifecycle env
    versions = sorted(content_view.read().version, key=lambda ver: ver.id)
    cvv = versions[-1].read()
    promote(cvv, env.id)
    rhel6_contenthost.install_katello_ca()
    rhel6_contenthost.register_contenthost(org.label, activation_key.name)
    assert rhel6_contenthost.subscribed
    rhel6_contenthost.enable_repo(constants.REPOS['rhst6']['id'])
    rhel6_contenthost.enable_repo(constants.REPOS['rhva6']['id'])
    rhel6_contenthost.install_katello_agent()
    host = rhel6_contenthost.nailgun_host
    # before any outdated package is installed, all counters must be zero
    for errata in ('security', 'bugfix', 'enhancement'):
        _validate_errata_counts(
            module_org, host, errata_type=errata, expected_value=0)
    rhel6_contenthost.run(f'yum install -y {constants.FAKE_1_CUSTOM_PACKAGE}')
    _validate_errata_counts(
        module_org, host, errata_type='security', expected_value=1)
    rhel6_contenthost.run(f'yum install -y {constants.REAL_0_RH_PACKAGE}')
    for errata in ('bugfix', 'enhancement'):
        _validate_errata_counts(
            module_org, host, errata_type=errata, expected_value=1)
def test_positive_show_count_on_chost_details_page(self):
    """Errata count on Content host Details page

    @id: 388229da-2b0b-41aa-a457-9b5ecbf3df4b

    @Setup:

        1. Errata synced on satellite server.
        2. Some content hosts are present.

    @Steps:

        1. Go to Hosts -> Content Hosts -> Select Content Host ->
           Details page.

    @Assert:

        1. The errata section should be displayed with Security, Bugfix,
           Enhancement types.
        2. The number should link to the errata details page, filtered
           by type.

    @CaseLevel: System
    """
    org = entities.Organization().create()
    env = entities.LifecycleEnvironment(organization=org).create()
    content_view = entities.ContentView(organization=org).create()
    activation_key = entities.ActivationKey(
        environment=env,
        organization=org,
    ).create()
    setup_org_for_a_rh_repo({
        'product': PRDS['rhel'],
        'repository-set': REPOSET['rhst6'],
        'repository': REPOS['rhst6']['name'],
        'organization-id': org.id,
        'content-view-id': content_view.id,
        'lifecycle-environment-id': env.id,
        'activationkey-id': activation_key.id,
    })
    setup_org_for_a_custom_repo({
        'url': CUSTOM_REPO_URL,
        'organization-id': org.id,
        'content-view-id': content_view.id,
        'lifecycle-environment-id': env.id,
        'activationkey-id': activation_key.id,
    })
    # RHVA repo added via hammer CLI helpers (RepositorySet/Repository)
    RepositorySet.enable({
        'basearch': DEFAULT_ARCHITECTURE,
        'name': REPOSET['rhva6'],
        'organization-id': org.id,
        'product': PRDS['rhel'],
        'releasever': DEFAULT_RELEASE_VERSION,
    })
    rhel_repo = Repository.info({
        'name': REPOS['rhva6']['name'],
        'organization-id': org.id,
        'product': PRDS['rhel'],
    })
    Repository.synchronize({
        'name': REPOS['rhva6']['name'],
        'organization-id': org.id,
        'product': PRDS['rhel'],
    })
    ContentView.add_repository({
        'id': content_view.id,
        'organization-id': org.id,
        'repository-id': rhel_repo['id'],
    })
    ContentView.publish({'id': content_view.id})
    cvv = ContentView.info({'id': content_view.id})['versions'][-1]
    ContentView.version_promote({
        'id': cvv['id'],
        'organization-id': org.id,
        'to-lifecycle-environment-id': env.id,
    })
    with VirtualMachine(distro='rhel67') as client:
        client.install_katello_ca()
        result = client.register_contenthost(
            org.label,
            activation_key.name,
        )
        self.assertEqual(result.return_code, 0)
        client.enable_repo(REPOS['rhst6']['id'])
        client.enable_repo(REPOS['rhva6']['id'])
        client.install_katello_agent()
        with Session(self.browser) as session:
            session.nav.go_to_select_org(org.name)
            # fresh client: every counter starts at 0 (neutral color)
            result = self.contenthost.fetch_errata_counts(
                client.hostname, details_page=True)
            for errata in ('security', 'bug_fix', 'enhancement'):
                self.assertEqual(result[errata]['value'], 0)
                self.assertEqual(result[errata]['color'], 'black')
            # outdated custom package -> one security erratum (red)
            client.run('yum install -y {0}'.format(FAKE_1_CUSTOM_PACKAGE))
            result = self.contenthost.fetch_errata_counts(
                client.hostname, details_page=True)
            self.assertEqual(result['security']['value'], 1)
            self.assertEqual(result['security']['color'], 'red')
            # outdated RH package -> bugfix + enhancement errata (yellow)
            client.run('yum install -y {0}'.format(REAL_0_RH_PACKAGE))
            result = self.contenthost.fetch_errata_counts(
                client.hostname, details_page=True)
            for errata in ('bug_fix', 'enhancement'):
                self.assertEqual(result[errata]['value'], 1)
                self.assertEqual(result[errata]['color'], 'yellow')
def test_positive_get_applicable_for_host(module_org, rhel6_contenthost):
    """Get applicable errata ids for a host

    :id: 51d44d51-eb3f-4ee4-a1df-869629d427ac

    :Setup:

        1. Errata synced on satellite server.
        2. Some Content hosts present.

    :Steps: GET /api/v2/hosts/:id/errata

    :expectedresults: The available errata is retrieved.

    :CaseLevel: System
    """
    org = entities.Organization().create()
    env = entities.LifecycleEnvironment(organization=org).create()
    content_view = entities.ContentView(organization=org).create()
    activation_key = entities.ActivationKey(
        environment=env, organization=org).create()
    setup_org_for_a_rh_repo(
        {
            'product': constants.PRDS['rhel'],
            'repository-set': constants.REPOSET['rhst6'],
            'repository': constants.REPOS['rhst6']['name'],
            'organization-id': org.id,
            'content-view-id': content_view.id,
            'lifecycle-environment-id': env.id,
            'activationkey-id': activation_key.id,
        },
        force_manifest_upload=True,
    )
    setup_org_for_a_custom_repo(
        {
            'url': CUSTOM_REPO_URL,
            'organization-id': org.id,
            'content-view-id': content_view.id,
            'lifecycle-environment-id': env.id,
            'activationkey-id': activation_key.id,
        }
    )
    # RHVA repo supplies the RH-published errata checked at the end
    repo_id = enable_rhrepo_and_fetchid(
        basearch=constants.DEFAULT_ARCHITECTURE,
        org_id=org.id,
        product=constants.PRDS['rhel'],
        repo=constants.REPOS['rhva6']['name'],
        reposet=constants.REPOSET['rhva6'],
        releasever=constants.DEFAULT_RELEASE_VERSION,
    )
    repo = entities.Repository(id=repo_id)
    assert repo.sync()['result'] == 'success'
    content_view = content_view.read()
    content_view.repository.append(repo)
    content_view = content_view.update(['repository'])
    content_view.publish()
    # promote the newest version (highest id) to the lifecycle env
    versions = sorted(content_view.read().version, key=lambda ver: ver.id)
    cvv = versions[-1].read()
    promote(cvv, env.id)
    rhel6_contenthost.install_katello_ca()
    rhel6_contenthost.register_contenthost(org.label, activation_key.name)
    assert rhel6_contenthost.subscribed
    rhel6_contenthost.enable_repo(constants.REPOS['rhst6']['id'])
    rhel6_contenthost.enable_repo(constants.REPOS['rhva6']['id'])
    rhel6_contenthost.install_katello_agent()
    host = rhel6_contenthost.nailgun_host
    # no outdated packages installed yet -> no applicable errata
    erratum = _fetch_available_errata(module_org, host, expected_amount=0)
    assert len(erratum) == 0
    rhel6_contenthost.run(f'yum install -y {constants.FAKE_1_CUSTOM_PACKAGE}')
    erratum = _fetch_available_errata(module_org, host, 1)
    assert len(erratum) == 1
    assert CUSTOM_REPO_ERRATA_ID in [
        errata['errata_id'] for errata in erratum]
    rhel6_contenthost.run(f'yum install -y {constants.REAL_0_RH_PACKAGE}')
    erratum = _fetch_available_errata(module_org, host, 3)
    assert len(erratum) == 3
    assert {constants.REAL_1_ERRATA_ID, constants.REAL_2_ERRATA_ID}.issubset(
        {errata['errata_id'] for errata in erratum}
    )
def test_positive_uploaded_content_library_sync(self):
    """Ensure custom repo with no upstream url and manually uploaded
    content after publishing to Library is synchronized to capsule
    automatically

    :id: f5406312-dd31-4551-9f03-84eb9c3415f5

    :customerscenario: true

    :BZ: 1340686

    :expectedresults: custom content is present on external capsule

    :CaseLevel: System
    """
    # Create organization, product, repository with no upstream url
    org = entities.Organization(smart_proxy=[self.capsule_id]).create()
    product = entities.Product(organization=org).create()
    repo = entities.Repository(
        product=product,
        url=None,
    ).create()
    capsule = entities.Capsule(id=self.capsule_id).search(
        query={'search': 'name={0}'.format(self.capsule_hostname)})[0]
    # Find "Library" lifecycle env for specific organization
    lce = entities.LifecycleEnvironment(organization=org).search(
        query={'search': 'name={}'.format(ENVIRONMENT)})[0]
    # Associate the lifecycle environment with the capsule
    capsule.content_add_lifecycle_environment(data={
        'environment_id': lce.id,
    })
    result = capsule.content_lifecycle_environments()
    self.assertGreaterEqual(len(result['results']), 1)
    self.assertIn(
        lce.id, [capsule_lce['id'] for capsule_lce in result['results']])
    # Create a content view with the repository
    cv = entities.ContentView(
        organization=org,
        repository=[repo],
    ).create()
    # Upload custom content into the repo
    with open(get_data_file(RPM_TO_UPLOAD), 'rb') as handle:
        repo.upload_content(files={'content': handle})
    self.assertEqual(repo.read().content_counts['package'], 1)
    # Publish new version of the content view
    cv.publish()
    cv = cv.read()
    self.assertEqual(len(cv.version), 1)
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule.content_get_sync()
    self.assertTrue(
        len(sync_status['active_sync_tasks']) >= 1
        or sync_status['last_sync_time'])
    # Wait till capsule sync finishes
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    # Verify previously uploaded content is present on capsule
    lce_repo_path = form_repo_path(
        org=org.label,
        lce=lce.label,
        cv=cv.label,
        prod=product.label,
        repo=repo.label,
    )
    # Poll up to 5 times (5 s apart): the synced file may appear on the
    # capsule filesystem with a short delay after the sync task ends.
    # (Idiom fixes vs. original: `_` for the unused loop index,
    # truthiness instead of `len(...) != 0`, no redundant else/parens.)
    for _ in range(5):
        capsule_rpms = get_repo_files(lce_repo_path, hostname=self.capsule_ip)
        if capsule_rpms:
            break
        sleep(5)
    self.assertEqual(len(capsule_rpms), 1)
    self.assertEqual(capsule_rpms[0], RPM_TO_UPLOAD)
def setUpClass(cls):
    """Steps required to create a real host on libvirt

    1. Creates new Organization and Location.
    2. Search 'Kickstart default' partition table and OS along with
       provisioning/PXE templates.
    3. Associates org, location and OS with provisioning and PXE templates
    4. Search for x86_64 architecture
    5. Associate arch, partition table, provisioning/PXE templates with OS
    6. Find and specify proper Repo URL for OS distribution folder
    7. Creates new life-cycle environment.
    8. Creates new product and OS custom repository.
    9. Creates new content-view and associate with created repository.
    10. Publish and promote the content-view to next environment.
    11. Search for puppet environment and associate location.
    12. Search for smart-proxy and associate organization/location.
    13. Search for existing domain or create new otherwise. Associate org,
        location and dns proxy.
    14. Search for '192.168.100.0' network and associate org, location,
        dns/dhcp/tftp proxy, and if its not there then creates new.
    15. Search for existing compute-resource with 'libvirt' provider and
        associate org.location, and if its not there then creates new.
    16. Create new host group with all required entities
    """
    super(LibvirtHostTestCase, cls).setUpClass()
    # Create a new Organization and Location
    cls.org_ = entities.Organization(name=gen_string('alpha')).create()
    cls.org_name = cls.org_.name
    cls.loc = entities.Location(
        name=gen_string('alpha'),
        organization=[cls.org_]
    ).create()
    cls.loc_name = cls.loc.name
    # Get the Partition table ID
    cls.ptable = entities.PartitionTable().search(
        query={
            u'search': u'name="{0}"'.format(DEFAULT_PTABLE)
        }
    )[0]
    # Get the OS ID (either the RHEL 6 or RHEL 7 "RedHat" entry,
    # whichever exists on the server)
    cls.os = entities.OperatingSystem().search(query={
        u'search': u'name="RedHat" AND (major="{0}" OR major="{1}")'
                   .format(RHEL_6_MAJOR_VERSION, RHEL_7_MAJOR_VERSION)
    })[0].read()
    # Get the templates and update with OS, Org, Location
    cls.templates = []
    for template_name in [
        'Kickstart default PXELinux',
        'Discovery Red Hat kexec',
        'Kickstart default iPXE',
        'Satellite Kickstart Default',
        'Satellite Kickstart Default Finish',
        'Satellite Kickstart Default User Data'
    ]:
        template = entities.ConfigTemplate().search(
            query={
                u'search': u'name="{}"'.format(template_name)
            }
        )[0].read()
        template.operatingsystem.append(cls.os)
        template.organization.append(cls.org_)
        template.location.append(cls.loc)
        template = template.update([
            'location', 'operatingsystem', 'organization'
        ])
        cls.templates.append(template)
    # Get the arch ID
    cls.arch = entities.Architecture().search(
        query={u'search': u'name="x86_64"'}
    )[0]
    # Update the OS to associate arch, ptable, templates
    cls.os.architecture = [cls.arch]
    cls.os.ptable = [cls.ptable]
    cls.os.config_template = cls.templates
    cls.os = cls.os.update([
        'architecture',
        'config_template',
        'ptable',
    ])
    # Check what OS was found to use correct media
    if cls.os.major == str(RHEL_6_MAJOR_VERSION):
        os_distr_url = settings.rhel6_os
    elif cls.os.major == str(RHEL_7_MAJOR_VERSION):
        os_distr_url = settings.rhel7_os
    else:
        raise ValueError('Proposed RHEL version is not supported')
    # Create a new Life-Cycle environment
    cls.lc_env = entities.LifecycleEnvironment(
        name=gen_string('alpha'),
        organization=cls.org_
    ).create()
    # Create a Product and Repository for OS distribution content
    cls.product = entities.Product(
        name=gen_string('alpha'),
        organization=cls.org_
    ).create()
    cls.repo = entities.Repository(
        name=gen_string('alpha'),
        product=cls.product,
        url=os_distr_url
    ).create()
    # Increased timeout value for repo sync; restored below once the
    # content view is published and promoted
    cls.old_task_timeout = entity_mixins.TASK_TIMEOUT
    entity_mixins.TASK_TIMEOUT = 3600
    cls.repo.sync()
    # Create, Publish and promote CV
    cls.content_view = entities.ContentView(
        name=gen_string('alpha'),
        organization=cls.org_
    ).create()
    cls.content_view.repository = [cls.repo]
    cls.content_view = cls.content_view.update(['repository'])
    cls.content_view.publish()
    cls.content_view = cls.content_view.read()
    promote(cls.content_view.version[0], cls.lc_env.id)
    entity_mixins.TASK_TIMEOUT = cls.old_task_timeout
    # Search for puppet environment and associate location
    cls.environment = entities.Environment(
        organization=[cls.org_.id]).search()[0]
    cls.environment.location = [cls.loc]
    cls.environment = cls.environment.update(['location'])
    # Search for SmartProxy, and associate organization/location
    cls.proxy = entities.SmartProxy().search(
        query={
            u'search': u'name={0}'.format(
                settings.server.hostname
            )
        }
    )[0].read()
    cls.proxy.location.append(cls.loc)
    cls.proxy.organization.append(cls.org_)
    cls.proxy = cls.proxy.update(['location', 'organization'])
    # Search for existing domain or create new otherwise. Associate org,
    # location and dns to it
    _, _, domain = settings.server.hostname.partition('.')
    domain = entities.Domain().search(
        query={
            u'search': u'name="{0}"'.format(domain)
        }
    )
    if len(domain) > 0:
        cls.domain = domain[0].read()
        cls.domain.location.append(cls.loc)
        cls.domain.organization.append(cls.org_)
        cls.domain.dns = cls.proxy
        cls.domain = cls.domain.update(['dns', 'location', 'organization'])
    else:
        cls.domain = entities.Domain(
            dns=cls.proxy,
            location=[cls.loc],
            organization=[cls.org_],
        ).create()
    cls.domain_name = cls.domain.name
    # Search if subnet is defined with given network.
    # If so, just update its relevant fields otherwise,
    # Create new subnet
    network = settings.vlan_networking.subnet
    subnet = entities.Subnet().search(
        query={u'search': u'network={0}'.format(network)}
    )
    if len(subnet) > 0:
        cls.subnet = subnet[0].read()
        cls.subnet.domain.append(cls.domain)
        cls.subnet.location.append(cls.loc)
        cls.subnet.organization.append(cls.org_)
        cls.subnet.dns = cls.proxy
        cls.subnet.dhcp = cls.proxy
        cls.subnet.ipam = 'DHCP'
        cls.subnet.tftp = cls.proxy
        cls.subnet.discovery = cls.proxy
        cls.subnet = cls.subnet.update([
            'domain',
            'discovery',
            'dhcp',
            'dns',
            'ipam',
            'location',
            'organization',
            'tftp',
        ])
    else:
        # Create new subnet
        cls.subnet = entities.Subnet(
            name=gen_string('alpha'),
            network=network,
            mask=settings.vlan_networking.netmask,
            domain=[cls.domain],
            ipam='DHCP',
            location=[cls.loc],
            organization=[cls.org_],
            dns=cls.proxy,
            dhcp=cls.proxy,
            tftp=cls.proxy,
            discovery=cls.proxy
        ).create()
    # Search if Libvirt compute-resource already exists
    # If so, just update its relevant fields otherwise,
    # Create new compute-resource with 'libvirt' provider.
    resource_url = u'qemu+ssh://root@{0}/system'.format(
        settings.compute_resources.libvirt_hostname
    )
    comp_res = [
        res for res in entities.LibvirtComputeResource().search()
        if (res.provider == FOREMAN_PROVIDERS['libvirt']
            and res.url == resource_url)
    ]
    if len(comp_res) > 0:
        cls.computeresource = entities.LibvirtComputeResource(
            id=comp_res[0].id).read()
        cls.computeresource.location.append(cls.loc)
        cls.computeresource.organization.append(cls.org_)
        cls.computeresource = cls.computeresource.update([
            'location', 'organization'])
    else:
        # Create Libvirt compute-resource
        cls.computeresource = entities.LibvirtComputeResource(
            name=gen_string('alpha'),
            provider=FOREMAN_PROVIDERS['libvirt'],
            url=resource_url,
            set_console_password=False,
            display_type=u'VNC',
            location=[cls.loc.id],
            organization=[cls.org_.id],
        ).create()
    # Display label used by the UI to identify the compute resource
    cls.resource = u'{0} (Libvirt)'.format(cls.computeresource.name)
    cls.puppet_env = entities.Environment(
        location=[cls.loc],
        organization=[cls.org_],
    ).create(True)
    cls.root_pwd = gen_string('alpha', 15)
    # Create Hostgroup tying together all the entities created above
    cls.host_group = entities.HostGroup(
        architecture=cls.arch,
        domain=cls.domain.id,
        subnet=cls.subnet.id,
        lifecycle_environment=cls.lc_env.id,
        content_view=cls.content_view.id,
        location=[cls.loc.id],
        name=gen_string('alpha'),
        environment=cls.environment.id,
        puppet_proxy=cls.proxy,
        puppet_ca_proxy=cls.proxy,
        content_source=cls.proxy,
        operatingsystem=cls.os.id,
        organization=[cls.org_.id],
        ptable=cls.ptable.id,
    ).create()
def test_positive_capsule_sync(self):
    """Create repository, add it to lifecycle environment, assign lifecycle
    environment with a capsule, sync repository, sync it once again, update
    repository (add 1 new package), sync repository once again.

    :id: 35513099-c918-4a8e-90d0-fd4c87ad2f82

    :customerscenario: true

    :BZ: 1394354, 1439691

    :expectedresults:

        1. Repository sync triggers capsule sync
        2. After syncing capsule contains same repo content as satellite
        3. Syncing repository which has no changes for a second time does
           not trigger any new publish task
        4. Repository revision on capsule remains exactly the same after
           second repo sync with no changes
        5. Syncing repository which was updated will update the content on
           capsule

    :CaseLevel: System
    """
    repo_name = gen_string('alphanumeric')
    # Create and publish custom repository with 2 packages in it
    repo_url = create_repo(
        repo_name,
        FAKE_1_YUM_REPO,
        FAKE_1_YUM_REPO_RPMS[0:2],
    )
    # Create organization, product, repository in satellite, and lifecycle
    # environment
    org = entities.Organization(smart_proxy=[self.capsule_id]).create()
    product = entities.Product(organization=org).create()
    repo = entities.Repository(
        product=product,
        url=repo_url,
    ).create()
    lce = entities.LifecycleEnvironment(organization=org).create()
    # Associate the lifecycle environment with the capsule
    capsule = entities.Capsule(id=self.capsule_id).read()
    capsule.content_add_lifecycle_environment(data={
        'environment_id': lce.id,
    })
    result = capsule.content_lifecycle_environments()
    self.assertGreaterEqual(len(result['results']), 1)
    self.assertIn(
        lce.id,
        [capsule_lce['id'] for capsule_lce in result['results']])
    # Create a content view with the repository
    cv = entities.ContentView(
        organization=org,
        repository=[repo],
    ).create()
    # Sync repository
    repo.sync()
    repo = repo.read()
    # Publish new version of the content view
    cv.publish()
    cv = cv.read()
    self.assertEqual(len(cv.version), 1)
    cvv = cv.version[-1].read()
    # Promote content view to lifecycle environment
    promote(cvv, lce.id)
    cvv = cvv.read()
    # CVV lives in Library + the promoted environment
    self.assertEqual(len(cvv.environment), 2)
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule.content_get_sync()
    self.assertTrue(
        len(sync_status['active_sync_tasks']) >= 1
        or sync_status['last_sync_time'])
    # Assert that the content of the published content view in
    # lifecycle environment is exactly the same as content of
    # repository
    lce_repo_path = form_repo_path(
        org=org.label,
        lce=lce.label,
        cv=cv.label,
        prod=product.label,
        repo=repo.label,
    )
    cvv_repo_path = form_repo_path(
        org=org.label,
        cv=cv.label,
        cvv=cvv.version,
        prod=product.label,
        repo=repo.label,
    )
    # Wait till capsule sync finishes
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    sync_status = capsule.content_get_sync()
    last_sync_time = sync_status['last_sync_time']
    # If BZ1439691 is open, need to sync repo once more, as repodata
    # will change on second attempt even with no changes in repo
    if is_open('BZ:1439691'):
        repo.sync()
        repo = repo.read()
        cv.publish()
        cv = cv.read()
        self.assertEqual(len(cv.version), 2)
        cv.version.sort(key=lambda version: version.id)
        cvv = cv.version[-1].read()
        promote(cvv, lce.id)
        cvv = cvv.read()
        self.assertEqual(len(cvv.environment), 2)
        sync_status = capsule.content_get_sync()
        self.assertTrue(
            len(sync_status['active_sync_tasks']) >= 1
            or sync_status['last_sync_time'] != last_sync_time)
        for task in sync_status['active_sync_tasks']:
            entities.ForemanTask(id=task['id']).poll()
        sync_status = capsule.content_get_sync()
        last_sync_time = sync_status['last_sync_time']
    # Assert that the content published on the capsule is exactly the
    # same as in repository on satellite
    lce_revision_capsule = get_repomd_revision(
        lce_repo_path, hostname=self.capsule_ip)
    self.assertEqual(
        get_repo_files(lce_repo_path, hostname=self.capsule_ip),
        get_repo_files(cvv_repo_path))
    # Sync repository for a second time
    result = repo.sync()
    # Assert that the task summary contains a message that says the
    # publish was skipped because content had not changed
    self.assertEqual(result['result'], 'success')
    self.assertTrue(result['output']['post_sync_skipped'])
    self.assertEqual(result['humanized']['output'], 'No new packages.')
    # Publish a new version of content view
    cv.publish()
    cv = cv.read()
    cv.version.sort(key=lambda version: version.id)
    cvv = cv.version[-1].read()
    # Promote new content view version to lifecycle environment
    promote(cvv, lce.id)
    cvv = cvv.read()
    self.assertEqual(len(cvv.environment), 2)
    # Wait till capsule sync finishes
    sync_status = capsule.content_get_sync()
    tasks = []
    if not sync_status['active_sync_tasks']:
        # Sync already finished - just verify a new sync happened
        self.assertNotEqual(
            sync_status['last_sync_time'], last_sync_time)
    else:
        for task in sync_status['active_sync_tasks']:
            tasks.append(entities.ForemanTask(id=task['id']))
            tasks[-1].poll()
    # Assert that the value of repomd revision of repository in
    # lifecycle environment on the capsule has not changed
    new_lce_revision_capsule = get_repomd_revision(
        lce_repo_path, hostname=self.capsule_ip)
    self.assertEqual(lce_revision_capsule, new_lce_revision_capsule)
    # Update a repository with 1 new rpm
    create_repo(
        repo_name,
        FAKE_1_YUM_REPO,
        FAKE_1_YUM_REPO_RPMS[-1:],
    )
    # Sync, publish and promote the repository
    repo.sync()
    repo = repo.read()
    cv.publish()
    cv = cv.read()
    cv.version.sort(key=lambda version: version.id)
    cvv = cv.version[-1].read()
    promote(cvv, lce.id)
    cvv = cvv.read()
    self.assertEqual(len(cvv.environment), 2)
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule.content_get_sync()
    self.assertTrue(
        len(sync_status['active_sync_tasks']) >= 1
        or sync_status['last_sync_time'] != last_sync_time)
    # Assert that packages count in the repository is updated
    self.assertEqual(repo.content_counts['package'], 3)
    # Assert that the content of the published content view in
    # lifecycle environment is exactly the same as content of the
    # repository
    cvv_repo_path = form_repo_path(
        org=org.label,
        cv=cv.label,
        cvv=cvv.version,
        prod=product.label,
        repo=repo.label,
    )
    self.assertEqual(
        repo.content_counts['package'],
        cvv.package_count,
    )
    self.assertEqual(
        get_repo_files(lce_repo_path), get_repo_files(cvv_repo_path))
    # Wait till capsule sync finishes
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    # Assert that the content published on the capsule is exactly the
    # same as in the repository
    self.assertEqual(
        get_repo_files(lce_repo_path, hostname=self.capsule_ip),
        get_repo_files(cvv_repo_path))
def setUpClass(cls):
    """Steps required to create a Atomic host on libvirt

    1. Creates new Organization and Location.
    2. Creates new life-cycle environment.
    3. Creates new product and sync RH Atomic OSTree repository.
    4. Creates new content-view by associating RH Atomic repository.
    5. Publish and promote the content-view to next environment.
    6. Search for smart-proxy and associate location.
    7. Search for existing domain or create new otherwise. Associate org,
       location and dns proxy.
    8. Search for '192.168.100.0' network and associate org, location,
       dns/dhcp/tftp proxy, and if its not there then creates new.
    9. Search for existing compute-resource with 'libvirt' provider and
       associate org.location, and if its not there then creates new.
    10. Search 'Kickstart default' partition table and RH Atomic OS along
        with PXE templates.
    11. Associates org, location and OS with provisioning and PXE templates
    12. Search for x86_64 architecture
    13. Associate arch, partition table, provisioning/PXE templates with OS
    14. Search for existing Atomic media or create new otherwise and
        associate org/location
    15. Create new host group with all required entities
    """
    super(AtomicHostTestCase, cls).setUpClass()
    # Create a new Organization and Location
    cls.org = entities.Organization().create()
    cls.org_name = cls.org.name
    cls.loc = entities.Location(organization=[cls.org]).create()
    cls.loc_name = cls.loc.name
    # Create a new Life-Cycle environment
    cls.lc_env = entities.LifecycleEnvironment(
        organization=cls.org
    ).create()
    # Repository identifiers for the RH Atomic Host OSTree repo
    cls.rh_ah_repo = {
        'name': REPOS['rhaht']['name'],
        'product': PRDS['rhah'],
        'reposet': REPOSET['rhaht'],
        'basearch': None,
        'releasever': None,
    }
    with manifests.clone() as manifest:
        upload_manifest(cls.org.id, manifest.content)
    # Enables the RedHat repo and fetches it's Id.
    cls.repo_id = enable_rhrepo_and_fetchid(
        basearch=cls.rh_ah_repo['basearch'],
        # OrgId is passed as data in API hence str
        org_id=str(cls.org.id),
        product=cls.rh_ah_repo['product'],
        repo=cls.rh_ah_repo['name'],
        reposet=cls.rh_ah_repo['reposet'],
        releasever=cls.rh_ah_repo['releasever'],
    )
    # Sync repository with custom timeout
    call_entity_method_with_timeout(
        entities.Repository(id=cls.repo_id).sync, timeout=1500)
    cls.cv = entities.ContentView(organization=cls.org).create()
    cls.cv.repository = [entities.Repository(id=cls.repo_id)]
    cls.cv = cls.cv.update(['repository'])
    cls.cv.publish()
    cls.cv = cls.cv.read()
    promote(cls.cv.version[0], cls.lc_env.id)
    # Search for SmartProxy, and associate location
    cls.proxy = entities.SmartProxy().search(
        query={
            u'search': u'name={0}'.format(
                settings.server.hostname
            )
        }
    )[0].read()
    cls.proxy.location.append(cls.loc)
    cls.proxy.organization.append(cls.org)
    cls.proxy = cls.proxy.update(['organization', 'location'])
    # Search for existing domain or create new otherwise. Associate org,
    # location and dns to it
    _, _, domain = settings.server.hostname.partition('.')
    cls.domain = entities.Domain().search(
        query={
            u'search': u'name="{0}"'.format(domain)
        }
    )
    if len(cls.domain) > 0:
        cls.domain = cls.domain[0].read()
        cls.domain.location.append(cls.loc)
        cls.domain.organization.append(cls.org)
        cls.domain.dns = cls.proxy
        cls.domain = cls.domain.update(['dns', 'location', 'organization'])
    else:
        cls.domain = entities.Domain(
            dns=cls.proxy,
            location=[cls.loc],
            organization=[cls.org],
        ).create()
    cls.domain_name = cls.domain.name
    # Search if subnet is defined with given network.
    # If so, just update its relevant fields otherwise,
    # Create new subnet
    network = settings.vlan_networking.subnet
    subnet = entities.Subnet().search(
        query={u'search': u'network={0}'.format(network)}
    )
    if len(subnet) > 0:
        cls.subnet = subnet[0].read()
        cls.subnet.domain.append(cls.domain)
        cls.subnet.location.append(cls.loc)
        cls.subnet.organization.append(cls.org)
        cls.subnet.dns = cls.proxy
        cls.subnet.dhcp = cls.proxy
        cls.subnet.ipam = 'DHCP'
        cls.subnet.tftp = cls.proxy
        cls.subnet.discovery = cls.proxy
        cls.subnet = cls.subnet.update([
            'domain',
            'discovery',
            'dhcp',
            'dns',
            'ipam',
            'location',
            'organization',
            'tftp',
        ])
    else:
        # Create new subnet
        cls.subnet = entities.Subnet(
            name=gen_string('alpha'),
            network=network,
            mask=settings.vlan_networking.netmask,
            domain=[cls.domain],
            location=[cls.loc],
            organization=[cls.org],
            dns=cls.proxy,
            dhcp=cls.proxy,
            ipam='DHCP',
            tftp=cls.proxy,
            discovery=cls.proxy
        ).create()
    # Search if Libvirt compute-resource already exists
    # If so, just update its relevant fields otherwise,
    # Create new compute-resource with 'libvirt' provider.
    resource_url = u'qemu+ssh://root@{0}/system'.format(
        settings.compute_resources.libvirt_hostname
    )
    comp_res = [
        res for res in entities.LibvirtComputeResource().search()
        if res.provider == 'Libvirt' and res.url == resource_url
    ]
    if len(comp_res) > 0:
        cls.computeresource = entities.LibvirtComputeResource(
            id=comp_res[0].id).read()
        cls.computeresource.location.append(cls.loc)
        cls.computeresource.organization.append(cls.org)
        cls.computeresource = cls.computeresource.update([
            'location', 'organization'])
    else:
        # Create Libvirt compute-resource
        cls.computeresource = entities.LibvirtComputeResource(
            name=gen_string('alpha'),
            provider=u'libvirt',
            url=resource_url,
            set_console_password=False,
            display_type=u'VNC',
            location=[cls.loc.id],
            organization=[cls.org.id],
        ).create()
    # Get the Partition table ID
    cls.ptable = entities.PartitionTable().search(
        query={
            u'search': u'name="{0}"'.format(DEFAULT_PTABLE)
        }
    )[0].read()
    cls.ptable.location.append(cls.loc)
    cls.ptable.organization.append(cls.org)
    cls.ptable = cls.ptable.update(['location', 'organization'])
    # Get the OS ID; create the Atomic Host OS entry if it is missing
    os = entities.OperatingSystem().search(query={
        u'search': u'name="RedHat_Enterprise_Linux_Atomic_Host"'
    })
    if len(os) > 0:
        cls.os = os[0].read()
    else:
        cls.os = entities.OperatingSystem(
            name='RedHat_Enterprise_Linux_Atomic_Host',
            family='Redhat',
            major=RHEL_7_MAJOR_VERSION,
        ).create()
    # update the provisioning templates with OS, Org and Location
    cls.templates = []
    for template_name in [DEFAULT_ATOMIC_TEMPLATE, DEFAULT_PXE_TEMPLATE]:
        template = entities.ConfigTemplate().search(
            query={
                u'search': u'name="{0}"'.format(template_name)
            }
        )[0].read()
        template.operatingsystem.append(cls.os)
        template.organization.append(cls.org)
        template.location.append(cls.loc)
        template = template.update(
            ['location', 'operatingsystem', 'organization']
        )
        cls.templates.append(template)
    # Get the arch ID
    cls.arch = entities.Architecture().search(
        query={u'search': u'name="x86_64"'}
    )[0]
    # Get the ostree installer URL
    ostree_path = settings.ostree.ostree_installer
    # Get the Media; create it if no media with that path exists yet
    media = entities.Media().search(query={
        u'search': u'path={0}'.format(ostree_path)
    })
    if len(media) > 0:
        cls.media = media[0].read()
        cls.media.location.append(cls.loc)
        cls.media.organization.append(cls.org)
        cls.media = cls.media.update(['location', 'organization'])
    else:
        cls.media = entities.Media(
            organization=[cls.org],
            location=[cls.loc],
            os_family='Redhat',
            path_=ostree_path
        ).create()
    # Update the OS to associate arch, ptable, templates
    cls.os.architecture = [cls.arch]
    cls.os.ptable = [cls.ptable]
    cls.os.config_template = cls.templates
    cls.os.medium = [cls.media]
    cls.os = cls.os.update([
        'architecture',
        'config_template',
        'ptable',
        'medium',
    ])
    # Create Hostgroup tying together all the entities created above
    cls.host_group = entities.HostGroup(
        architecture=cls.arch,
        domain=cls.domain.id,
        subnet=cls.subnet.id,
        lifecycle_environment=cls.lc_env.id,
        content_view=cls.cv.id,
        location=[cls.loc.id],
        name=gen_string('alpha'),
        medium=cls.media,
        operatingsystem=cls.os.id,
        organization=[cls.org.id],
        ptable=cls.ptable.id,
    ).create()
def test_positive_on_demand_sync(self):
    """Create a repository with 'on_demand' sync, add it to lifecycle
    environment with a capsule, sync repository, examine existing packages
    on capsule, download any package, examine packages once more

    :id: ba470269-a7ad-4181-bc7c-8e17a177ca20

    :expectedresults:

        1. After initial syncing only symlinks are present on both
           satellite and capsule, no real packages were fetched.
        2. All the symlinks are pointing to non-existent files.
        3. Attempt to download package is successful
        4. Downloaded package checksum matches checksum of the source
           package

    :CaseLevel: System
    """
    repo_url = FAKE_3_YUM_REPO
    packages_count = FAKE_3_YUM_REPOS_COUNT
    package = FAKE_1_YUM_REPO_RPMS[0]
    # Create organization, product, repository in satellite, and lifecycle
    # environment
    org = entities.Organization().create()
    prod = entities.Product(organization=org).create()
    repo = entities.Repository(
        download_policy='on_demand',
        mirror_on_sync=True,
        product=prod,
        url=repo_url,
    ).create()
    lce = entities.LifecycleEnvironment(organization=org).create()
    # Associate the lifecycle environment with the capsule
    capsule = entities.Capsule(id=self.capsule_id).read()
    capsule.content_add_lifecycle_environment(data={
        'environment_id': lce.id,
    })
    result = capsule.content_lifecycle_environments()
    self.assertGreaterEqual(len(result['results']), 1)
    self.assertIn(
        lce.id,
        [capsule_lce['id'] for capsule_lce in result['results']])
    # Create a content view with the repository
    cv = entities.ContentView(
        organization=org,
        repository=[repo],
    ).create()
    # Sync repository
    repo.sync()
    repo = repo.read()
    # Publish new version of the content view
    cv.publish()
    cv = cv.read()
    self.assertEqual(len(cv.version), 1)
    cvv = cv.version[-1].read()
    # Promote content view to lifecycle environment
    promote(cvv, lce.id)
    cvv = cvv.read()
    # CVV lives in Library + the promoted environment
    self.assertEqual(len(cvv.environment), 2)
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule.content_get_sync()
    self.assertTrue(
        len(sync_status['active_sync_tasks']) >= 1
        or sync_status['last_sync_time'])
    # Check whether the symlinks for all the packages were created on
    # satellite
    cvv_repo_path = form_repo_path(
        org=org.label,
        cv=cv.label,
        cvv=cvv.version,
        prod=prod.label,
        repo=repo.label,
    )
    result = ssh.command('find {}/ -type l'.format(cvv_repo_path))
    self.assertEqual(result.return_code, 0)
    # drop empty lines from `find` output before counting
    links = set(link for link in result.stdout if link)
    self.assertEqual(len(links), packages_count)
    # Ensure all the symlinks on satellite are broken (pointing to
    # nonexistent files)
    result = ssh.command(
        'find {}/ -type l ! -exec test -e {{}} \\; -print'.format(
            cvv_repo_path))
    self.assertEqual(result.return_code, 0)
    broken_links = set(link for link in result.stdout if link)
    self.assertEqual(len(broken_links), packages_count)
    self.assertEqual(broken_links, links)
    # Wait till capsule sync finishes
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    lce_repo_path = form_repo_path(
        org=org.label,
        lce=lce.label,
        cv=cv.label,
        prod=prod.label,
        repo=repo.label,
    )
    # Check whether the symlinks for all the packages were created on
    # capsule
    result = ssh.command(
        'find {}/ -type l'.format(lce_repo_path),
        hostname=self.capsule_ip,
    )
    self.assertEqual(result.return_code, 0)
    links = set(link for link in result.stdout if link)
    self.assertEqual(len(links), packages_count)
    # Ensure all the symlinks on capsule are broken (pointing to
    # nonexistent files)
    result = ssh.command(
        'find {}/ -type l ! -exec test -e {{}} \\; -print'.format(
            lce_repo_path),
        hostname=self.capsule_ip,
    )
    self.assertEqual(result.return_code, 0)
    broken_links = set(link for link in result.stdout if link)
    self.assertEqual(len(broken_links), packages_count)
    self.assertEqual(broken_links, links)
    # Build the public pulp URL of the published repo on satellite
    published_repo_url = 'http://{}{}/pulp/{}/'.format(
        settings.server.hostname,
        ':{}'.format(settings.server.port) if settings.server.port else '',
        lce_repo_path.split('http/')[1])
    # Get md5 checksum of the source package (upstream repo)
    package_md5 = md5_by_url('{}{}'.format(repo_url, package))
    # Download package from satellite and get its md5 checksum; the
    # download itself triggers the on_demand fetch
    published_package_md5 = md5_by_url('{}{}'.format(
        published_repo_url, package))
    # Assert checksums are matching
    self.assertEqual(package_md5, published_package_md5)
def configure_provisioning(org=None, loc=None, compute=False, os=None):
    """Create and configure org, loc, product, repo, cv, env. Update proxy,
    domain, subnet, compute resource, provision templates and medium with
    previously created entities and create a hostgroup using all mentioned
    entities.

    :param str org: Default Organization that should be used in both host
        discovering and host provisioning procedures
    :param str loc: Default Location that should be used in both host
        discovering and host provisioning procedures
    :param bool compute: If False creates a default Libvirt compute resource
    :param str os: Specify the os to be used while provisioning and to
        associate related entities to the specified os.
    :return: List of created entities that can be re-used further in
        provisioning or validation procedure (e.g. hostgroup or domain)
    """
    # Create new organization and location in case they were not passed
    if org is None:
        org = entities.Organization().create()
    if loc is None:
        loc = entities.Location(organization=[org]).create()
    if settings.rhel7_os is None:
        raise ImproperlyConfigured(
            'settings file is not configured for rhel os')
    # Create a new Life-Cycle environment
    lc_env = entities.LifecycleEnvironment(organization=org).create()
    # Create a Product, Repository for custom RHEL6 contents
    product = entities.Product(organization=org).create()
    repo = entities.Repository(
        product=product,
        url=settings.rhel7_os,
        download_policy='immediate'
    ).create()

    # Increased timeout value for repo sync and CV publishing and promotion;
    # always restore the original timeout, even if sync/publish fails
    try:
        old_task_timeout = entity_mixins.TASK_TIMEOUT
        entity_mixins.TASK_TIMEOUT = 3600
        repo.sync()
        # Create, Publish and promote CV
        content_view = entities.ContentView(organization=org).create()
        content_view.repository = [repo]
        content_view = content_view.update(['repository'])
        content_view.publish()
        content_view = content_view.read()
        promote(content_view.version[0], lc_env.id)
    finally:
        entity_mixins.TASK_TIMEOUT = old_task_timeout
    # Search for existing organization puppet environment, otherwise create a
    # new one, associate organization and location where it is appropriate.
    environments = entities.Environment().search(query=dict(
        search='organization_id={0}'.format(org.id)))
    if len(environments) > 0:
        environment = environments[0].read()
        environment.location.append(loc)
        environment = environment.update(['location'])
    else:
        environment = entities.Environment(
            organization=[org], location=[loc]).create()

    # Search for SmartProxy, and associate location
    proxy = entities.SmartProxy().search(
        query={'search': 'name={0}'.format(settings.server.hostname)})
    proxy = proxy[0].read()
    proxy.location.append(loc)
    proxy.organization.append(org)
    proxy = proxy.update(['location', 'organization'])

    # Search for existing domain or create new otherwise. Associate org,
    # location and dns to it
    _, _, domain = settings.server.hostname.partition('.')
    domain = entities.Domain().search(
        query={'search': 'name="{0}"'.format(domain)})
    if len(domain) == 1:
        domain = domain[0].read()
        domain.location.append(loc)
        domain.organization.append(org)
        domain.dns = proxy
        domain = domain.update(['dns', 'location', 'organization'])
    else:
        domain = entities.Domain(
            dns=proxy, location=[loc], organization=[org]).create()

    # Search if subnet is defined with given network.
    # If so, just update its relevant fields otherwise,
    # Create new subnet
    network = settings.vlan_networking.subnet
    subnet = entities.Subnet().search(
        query={'search': 'network={0}'.format(network)})
    if len(subnet) == 1:
        subnet = subnet[0].read()
        subnet.domain = [domain]
        subnet.location.append(loc)
        subnet.organization.append(org)
        subnet.dns = proxy
        subnet.dhcp = proxy
        subnet.tftp = proxy
        subnet.discovery = proxy
        subnet.ipam = 'DHCP'
        subnet = subnet.update([
            'domain',
            'discovery',
            'dhcp',
            'dns',
            'location',
            'organization',
            'tftp',
            'ipam'
        ])
    else:
        # Create new subnet
        subnet = entities.Subnet(
            network=network,
            mask=settings.vlan_networking.netmask,
            domain=[domain],
            location=[loc],
            organization=[org],
            dns=proxy,
            dhcp=proxy,
            tftp=proxy,
            discovery=proxy,
            ipam='DHCP',
        ).create()

    # Search if Libvirt compute-resource already exists
    # If so, just update its relevant fields otherwise,
    # Create new compute-resource with 'libvirt' provider.
    # compute boolean is added to not block existing test's that depend on
    # Libvirt resource and use this same functionality to all CR's.
    if compute is False:
        resource_url = 'qemu+ssh://root@{0}/system'.format(
            settings.compute_resources.libvirt_hostname)
        comp_res = [
            res for res in entities.LibvirtComputeResource().search()
            if res.provider == 'Libvirt' and res.url == resource_url
        ]
        if len(comp_res) > 0:
            computeresource = entities.LibvirtComputeResource(
                id=comp_res[0].id).read()
            computeresource.location.append(loc)
            computeresource.organization.append(org)
            computeresource.update(['location', 'organization'])
        else:
            # Create Libvirt compute-resource
            entities.LibvirtComputeResource(
                provider='libvirt',
                url=resource_url,
                set_console_password=False,
                display_type='VNC',
                location=[loc.id],
                organization=[org.id],
            ).create()

    # Get the Partition table ID
    ptable = (entities.PartitionTable().search(
        query={'search': 'name="{0}"'.format(DEFAULT_PTABLE)})[0].read())
    ptable.location.append(loc)
    ptable.organization.append(org)
    ptable = ptable.update(['location', 'organization'])

    # Get the OS ID
    if os is None:
        os = (entities.OperatingSystem().search(
            query={
                'search':
                    'name="RedHat" AND (major="{0}" OR major="{1}")'.format(
                        RHEL_6_MAJOR_VERSION, RHEL_7_MAJOR_VERSION)
            })[0].read())
    else:
        # `os` arrives as e.g. 'RedHat 7.4'; split out major/minor.
        # NOTE: fixed a stray closing parenthesis that was previously
        # embedded in the minor clause and corrupted the search query.
        os = (entities.OperatingSystem().search(
            query={
                'search': 'family="Redhat" '
                          'AND major="{0}" '
                          'AND minor="{1}"'.format(
                              os.split(' ')[1].split('.')[0],
                              os.split(' ')[1].split('.')[1])
            })[0].read())

    # Get the Provisioning template_ID and update with OS, Org, Location
    provisioning_template = entities.ProvisioningTemplate().search(
        query={'search': 'name="{0}"'.format(DEFAULT_TEMPLATE)})
    provisioning_template = provisioning_template[0].read()
    provisioning_template.operatingsystem.append(os)
    provisioning_template.organization.append(org)
    provisioning_template.location.append(loc)
    provisioning_template = provisioning_template.update(
        ['location', 'operatingsystem', 'organization'])

    # Get the PXE template ID and update with OS, Org, location
    pxe_template = entities.ProvisioningTemplate().search(
        query={'search': 'name="{0}"'.format(DEFAULT_PXE_TEMPLATE)})
    pxe_template = pxe_template[0].read()
    pxe_template.operatingsystem.append(os)
    pxe_template.organization.append(org)
    pxe_template.location.append(loc)
    pxe_template = pxe_template.update(
        ['location', 'operatingsystem', 'organization'])

    # Get the arch ID
    arch = (entities.Architecture().search(
        query={'search': 'name="{0}"'.format(DEFAULT_ARCHITECTURE)})
        [0].read())
    # Update the OS to associate arch, ptable, templates
    os.architecture.append(arch)
    os.ptable.append(ptable)
    os.provisioning_template.append(provisioning_template)
    os.provisioning_template.append(pxe_template)
    os = os.update(['architecture', 'provisioning_template', 'ptable'])
    # kickstart_repository is the content view and lce bind repo
    kickstart_repository = entities.Repository().search(
        query=dict(
            content_view_id=content_view.id,
            environment_id=lc_env.id,
            name=repo.name))[0]
    # Create Hostgroup tying together all the entities created above
    host_group = entities.HostGroup(
        architecture=arch,
        domain=domain.id,
        subnet=subnet.id,
        lifecycle_environment=lc_env.id,
        content_view=content_view.id,
        location=[loc.id],
        environment=environment.id,
        puppet_proxy=proxy,
        puppet_ca_proxy=proxy,
        content_source=proxy,
        kickstart_repository=kickstart_repository,
        root_pass=gen_string('alphanumeric'),
        operatingsystem=os.id,
        organization=[org.id],
        ptable=ptable.id,
    ).create()
    return {
        'host_group': host_group.name,
        'domain': domain.name,
        'environment': environment.name,
        'ptable': ptable.name,
        'subnet': subnet.name,
        'os': os.title,
    }
def test_positive_update_with_immediate_sync(self):
    """Create a repository with on_demand download policy, associate it
    with capsule, sync repo, update download policy to immediate, sync
    once more.

    :id: 511b531d-1fbe-4d64-ae31-0f9eb6625e7f

    :customerscenario: true

    :BZ: 1315752

    :expectedresults: content was successfully synchronized - capsule
        filesystem contains valid links to packages

    :CaseLevel: System
    """
    repo_url = FAKE_1_YUM_REPO
    # Expected number of packages in the fixture repo; used to verify the
    # symlink count on both satellite and capsule filesystems below.
    packages_count = FAKE_1_YUM_REPOS_COUNT
    # Create organization, product, repository in satellite, and lifecycle
    # environment
    org = entities.Organization().create()
    prod = entities.Product(organization=org).create()
    repo = entities.Repository(
        download_policy='on_demand',
        mirror_on_sync=True,
        product=prod,
        url=repo_url,
    ).create()
    lce = entities.LifecycleEnvironment(organization=org).create()
    # Update capsule's download policy to on_demand to match repository's
    # policy
    self.update_capsule_download_policy(self.capsule_id, 'on_demand')
    # Associate the lifecycle environment with the capsule so promoted
    # content gets synced to it
    capsule = entities.Capsule(id=self.capsule_id).read()
    capsule.content_add_lifecycle_environment(data={
        'environment_id': lce.id,
    })
    result = capsule.content_lifecycle_environments()
    self.assertGreaterEqual(len(result['results']), 1)
    self.assertIn(
        lce.id,
        [capsule_lce['id'] for capsule_lce in result['results']])
    # Create a content view with the repository
    cv = entities.ContentView(
        organization=org,
        repository=[repo],
    ).create()
    # Sync repository
    repo.sync()
    repo = repo.read()
    # Publish new version of the content view
    cv.publish()
    cv = cv.read()
    self.assertEqual(len(cv.version), 1)
    cvv = cv.version[-1].read()
    # Promote content view to lifecycle environment
    promote(cvv, lce.id)
    cvv = cvv.read()
    # CVV is now in 2 environments (Library + lce, presumably)
    self.assertEqual(len(cvv.environment), 2)
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule.content_get_sync()
    self.assertTrue(
        len(sync_status['active_sync_tasks']) >= 1 or
        sync_status['last_sync_time'])
    # Wait till capsule sync finishes
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    # Update download policy to 'immediate'
    repo.download_policy = 'immediate'
    repo = repo.update(['download_policy'])
    self.assertEqual(repo.download_policy, 'immediate')
    # Update capsule's download policy as well
    self.update_capsule_download_policy(self.capsule_id, 'immediate')
    # Make sure to revert capsule's download policy after the test as the
    # capsule is shared among other tests
    self.addCleanup(
        self.update_capsule_download_policy, self.capsule_id, 'on_demand')
    # Sync repository once again; with 'immediate' policy the packages
    # should now be fully downloaded instead of lazily fetched
    repo.sync()
    repo = repo.read()
    # Publish new version of the content view
    cv.publish()
    cv = cv.read()
    self.assertEqual(len(cv.version), 2)
    # Sort so version[-1] is the most recently published CVV
    cv.version.sort(key=lambda version: version.id)
    cvv = cv.version[-1].read()
    # Promote content view to lifecycle environment
    promote(cvv, lce.id)
    cvv = cvv.read()
    self.assertEqual(len(cvv.environment), 2)
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule.content_get_sync()
    self.assertTrue(
        len(sync_status['active_sync_tasks']) >= 1 or
        sync_status['last_sync_time'])
    # Check whether the symlinks for all the packages were created on
    # satellite
    cvv_repo_path = form_repo_path(
        org=org.label,
        cv=cv.label,
        cvv=cvv.version,
        prod=prod.label,
        repo=repo.label,
    )
    result = ssh.command('find {}/ -type l'.format(cvv_repo_path))
    self.assertEqual(result.return_code, 0)
    # Filter out empty lines from the command output before counting
    links = set(link for link in result.stdout if link)
    self.assertEqual(len(links), packages_count)
    # Ensure there're no broken symlinks (pointing to nonexistent files) on
    # satellite
    result = ssh.command(
        'find {}/ -type l ! -exec test -e {{}} \\; -print'.format(
            cvv_repo_path))
    self.assertEqual(result.return_code, 0)
    broken_links = set(link for link in result.stdout if link)
    self.assertEqual(len(broken_links), 0)
    # Wait till capsule sync finishes (tasks from the second
    # content_get_sync call above)
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    lce_repo_path = form_repo_path(
        org=org.label,
        lce=lce.label,
        cv=cv.label,
        prod=prod.label,
        repo=repo.label,
    )
    # Check whether the symlinks for all the packages were created on
    # capsule
    result = ssh.command(
        'find {}/ -type l'.format(lce_repo_path),
        hostname=self.capsule_ip)
    self.assertEqual(result.return_code, 0)
    links = set(link for link in result.stdout if link)
    self.assertEqual(len(links), packages_count)
    # Ensure there're no broken symlinks (pointing to nonexistent files) on
    # capsule
    result = ssh.command(
        'find {}/ -type l ! -exec test -e {{}} \\; -print'.format(
            lce_repo_path),
        hostname=self.capsule_ip)
    self.assertEqual(result.return_code, 0)
    broken_links = set(link for link in result.stdout if link)
    self.assertEqual(len(broken_links), 0)
def test_positive_get_diff_for_cv_envs(self):
    """Generate a difference in errata between a set of environments
    for a content view

    :id: 96732506-4a89-408c-8d7e-f30c8d469769

    :Setup:

        1. Errata synced on satellite server.
        2. Multiple environments present.

    :Steps: GET /katello/api/compare

    :expectedresults: Difference in errata between a set of
        environments for a content view is retrieved.

    :CaseLevel: System
    """
    organization = entities.Organization().create()
    lce = entities.LifecycleEnvironment(organization=organization).create()
    cv = entities.ContentView(organization=organization).create()
    ak = entities.ActivationKey(
        environment=lce,
        organization=organization,
    ).create()
    # Populate the content view with a RH repo and then a custom repo;
    # each setup call publishes content into the CV.
    setup_org_for_a_rh_repo(
        {
            'product': PRDS['rhel'],
            'repository-set': REPOSET['rhst7'],
            'repository': REPOS['rhst7']['name'],
            'organization-id': organization.id,
            'content-view-id': cv.id,
            'lifecycle-environment-id': lce.id,
            'activationkey-id': ak.id,
        },
        force_use_cdn=True)
    setup_org_for_a_custom_repo({
        'url': CUSTOM_REPO_URL,
        'organization-id': organization.id,
        'content-view-id': cv.id,
        'lifecycle-environment-id': lce.id,
        'activationkey-id': ak.id,
    })
    follow_up_env = entities.LifecycleEnvironment(
        organization=organization,
        prior=lce,
    ).create()
    # Take the two most recent CV versions and promote only the newest
    # one into the freshly created environment.
    versions = cv.read().version[-2:]
    promote(versions[-1], follow_up_env.id)
    comparison = entities.Errata().compare(data={
        'content_view_version_ids': [version.id for version in versions],
        'per_page': 9999,
    })

    def first_match(errata_id):
        # Pull the single comparison record for the given erratum id.
        return next(
            record for record in comparison['results']
            if record['errata_id'] == errata_id
        )

    # The custom-repo erratum should appear only in the newest version...
    self.assertEqual(
        [versions[-1].id],
        first_match(CUSTOM_REPO_ERRATA_ID)['comparison'],
    )
    # ...while the RH erratum is present in both compared versions.
    self.assertEqual(
        {version.id for version in versions},
        set(first_match(REAL_0_ERRATA_ID)['comparison']),
    )