def test_positive_delete_cv_promoted_to_multi_env(self):
    """Delete a published content view whose version was promoted to
    multiple lifecycle environments.

    :id: c164bd97-e710-4a5a-9c9f-657e6bed804b

    :Steps:
        1. Create a content view
        2. Add a yum repo and a puppet module to the content view
        3. Publish the content view
        4. Promote the content view to multiple environments
           Library -> DEV -> QE -> STAGE -> PROD
        5. Delete the content view; this should delete the content with
           all its published/promoted versions from all environments

    :expectedresults: The content view doesn't exist

    :CaseLevel: Integration

    :CaseImportance: Critical
    """
    org = entities.Organization().create()
    # Build a chained lifecycle path: Library -> DEV -> QE -> STAGE -> PROD
    lce_dev = entities.LifecycleEnvironment(organization=org).create()
    lce_qe = entities.LifecycleEnvironment(organization=org, prior=lce_dev).create()
    lce_stage = entities.LifecycleEnvironment(organization=org, prior=lce_qe).create()
    lce_prod = entities.LifecycleEnvironment(organization=org, prior=lce_stage).create()
    product = entities.Product(organization=org).create()
    yum_repo = entities.Repository(url=FAKE_1_YUM_REPO, product=product).create()
    yum_repo.sync()
    puppet_repo = entities.Repository(url=FAKE_0_PUPPET_REPO, content_type='puppet', product=product).create()
    puppet_repo.sync()
    # create a content view and add to it the yum repo
    content_view = entities.ContentView(organization=org).create()
    content_view.repository = [yum_repo]
    content_view = content_view.update(['repository'])
    # get a random puppet module and add it to content view
    puppet_module = random.choice(
        content_view.available_puppet_modules()['results'])
    entities.ContentViewPuppetModule(author=puppet_module['author'],
                                     name=puppet_module['name'],
                                     content_view=content_view).create()
    # publish the content view; re-read to refresh the `version` list
    content_view.publish()
    content_view = content_view.read()
    self.assertEqual(len(content_view.version), 1)
    content_view_version = content_view.version[0].read()
    # A freshly published version lives only in Library
    self.assertEqual(len(content_view_version.environment), 1)
    lce_library = entities.LifecycleEnvironment(
        id=content_view_version.environment[0].id).read()
    self.assertEqual(lce_library.name, ENVIRONMENT)
    # promote the content view version to DEV, QE, STAGE and PROD
    for lce in [lce_dev, lce_qe, lce_stage, lce_prod]:
        promote(content_view_version, lce.id)
    content_view_version = content_view_version.read()
    # Version must now be present in Library plus all four promoted LCEs
    self.assertEqual(
        {lce_library.id, lce_dev.id, lce_qe.id, lce_stage.id, lce_prod.id},
        {lce.id for lce in content_view_version.environment},
    )
    # remove content view version from all lifecycle environments
    # (a CV cannot be deleted while any version is still in an LCE)
    for lce in content_view_version.environment:
        content_view.delete_from_environment(lce.id)
    # delete the content view
    content_view.delete()
    # Reading a deleted entity must raise HTTPError (404)
    with self.assertRaises(HTTPError):
        content_view.read()
def test_positive_mirror_on_sync(self, capsule_vm):
    """Create 2 repositories with 'on_demand' download policy and mirror
    on sync option, associate them with capsule, sync first repo, move
    package from first repo to second one, sync it, attempt to install
    package on some host.

    :id: 39149642-1e7e-4ef8-8762-bec295913014

    :BZ: 1426408

    :expectedresults: host, subscribed to second repo only, can
        successfully install package

    :CaseLevel: System
    """
    repo1_name = gen_string('alphanumeric')
    repo2_name = gen_string('alphanumeric')
    # Create and publish first custom repository with 2 packages in it
    repo1_url = create_repo(repo1_name, FAKE_1_YUM_REPO, FAKE_1_YUM_REPO_RPMS[1:3])
    # Create and publish second repo with no packages in it
    repo2_url = create_repo(repo2_name)
    # Create organization, product, repository in satellite, and lifecycle
    # environment
    org = entities.Organization().create()
    prod1 = entities.Product(organization=org).create()
    repo1 = entities.Repository(
        download_policy='on_demand', mirror_on_sync=True, product=prod1, url=repo1_url
    ).create()
    prod2 = entities.Product(organization=org).create()
    repo2 = entities.Repository(
        download_policy='on_demand', mirror_on_sync=True, product=prod2, url=repo2_url
    ).create()
    lce1 = entities.LifecycleEnvironment(organization=org).create()
    lce2 = entities.LifecycleEnvironment(organization=org).create()
    # Associate the lifecycle environments with the capsule
    capsule = entities.Capsule(id=capsule_vm._capsule.id).read()
    for lce_id in (lce1.id, lce2.id):
        capsule.content_add_lifecycle_environment(data={'environment_id': lce_id})
    result = capsule.content_lifecycle_environments()
    assert len(result['results']) >= 2
    assert {lce1.id, lce2.id}.issubset(
        [capsule_lce['id'] for capsule_lce in result['results']]
    )
    # Create content views with the repositories
    cv1 = entities.ContentView(organization=org, repository=[repo1]).create()
    cv2 = entities.ContentView(organization=org, repository=[repo2]).create()
    # Sync first repository
    repo1.sync()
    repo1 = repo1.read()
    # Publish new version of the content view
    cv1.publish()
    cv1 = cv1.read()
    assert len(cv1.version) == 1
    cvv1 = cv1.version[-1].read()
    # Promote content view to lifecycle environment (Library + lce1 => 2)
    promote(cvv1, lce1.id)
    cvv1 = cvv1.read()
    assert len(cvv1.environment) == 2
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule.content_get_sync()
    assert len(sync_status['active_sync_tasks']) >= 1 or sync_status['last_sync_time']
    # Wait till capsule sync finishes
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    # Move one package from the first repo to second one
    ssh.command(
        'mv {} {}'.format(
            os.path.join(PULP_PUBLISHED_YUM_REPOS_PATH, repo1_name, FAKE_1_YUM_REPO_RPMS[2]),
            os.path.join(PULP_PUBLISHED_YUM_REPOS_PATH, repo2_name, FAKE_1_YUM_REPO_RPMS[2]),
        )
    )
    # Update repositories (re-trigger 'createrepo' command)
    create_repo(repo1_name)
    create_repo(repo2_name)
    # Synchronize first repository
    repo1.sync()
    cv1.publish()
    cv1 = cv1.read()
    assert len(cv1.version) == 2
    cv1.version.sort(key=lambda version: version.id)
    cvv1 = cv1.version[-1].read()
    # Promote content view to lifecycle environment
    promote(cvv1, lce1.id)
    cvv1 = cvv1.read()
    assert len(cvv1.environment) == 2
    # Synchronize second repository; it should now contain the moved rpm
    repo2.sync()
    repo2 = repo2.read()
    assert repo2.content_counts['package'] == 1
    cv2.publish()
    cv2 = cv2.read()
    assert len(cv2.version) == 1
    cvv2 = cv2.version[-1].read()
    # Promote content view to lifecycle environment
    promote(cvv2, lce2.id)
    cvv2 = cvv2.read()
    assert len(cvv2.environment) == 2
    # Create activation key, add subscription to second repo only
    activation_key = entities.ActivationKey(
        content_view=cv2, environment=lce2, organization=org
    ).create()
    subscription = entities.Subscription(organization=org).search(
        query={'search': f'name={prod2.name}'}
    )[0]
    activation_key.add_subscriptions(data={'subscription_id': subscription.id})
    # Subscribe a host with activation key
    with VirtualMachine(distro=DISTRO_RHEL7) as client:
        client.install_katello_ca()
        client.register_contenthost(org.label, activation_key.name)
        # Install the package.
        # BUGFIX: str.rstrip('.rpm') strips any trailing run of the
        # characters '.', 'r', 'p', 'm' — not the '.rpm' suffix — and
        # would corrupt any package name ending in one of those letters.
        # os.path.splitext drops exactly the final extension.
        package_name = os.path.splitext(FAKE_1_YUM_REPO_RPMS[2])[0]
        result = client.run(f'yum install -y {package_name}')
        assert result.return_code == 0
        # Ensure package installed
        result = client.run(f'rpm -qa | grep {package_name}')
        assert result.return_code == 0
        assert package_name in result.stdout[0]
def test_positive_incremental_update_propagate_composite(self):
    """Incrementally update a CVV in composite CV with
    `propagate_all_composites` flag set.

    :BZ: 1288148

    :id: 1ddcb2ef-3819-442e-b070-cf44aba58dcd

    :customerscenario: true

    :Steps:
        1. Create and publish CV with some content
        2. Create composite CV, add previously created CV inside it
        3. Publish composite CV
        4. Create a puppet repository and upload a puppet module into it
        5. Incrementally update the CVV with the puppet module with
           `propagate_all_composites` flag set to `True`

    :expectedresults:
        1. The incremental update succeeds with no errors
        2. New incremental CVV contains new puppet module
        3. New incremental composite CVV contains new puppet module

    :CaseLevel: Integration

    :CaseImportance: Medium
    """
    product = entities.Product().create()
    yum_repo = entities.Repository(content_type='yum', product=product).create()
    yum_repo.sync()
    content_view = entities.ContentView(
        organization=product.organization, repository=[yum_repo]
    ).create()
    content_view.publish()
    content_view = content_view.read()
    # Baseline: one version, no puppet modules yet
    self.assertEqual(len(content_view.version), 1)
    self.assertEqual(len(content_view.version[0].read().puppet_module), 0)
    # Composite CV wrapping the first CV's version
    comp_content_view = entities.ContentView(
        component=[content_view.version[0].id],
        composite=True,
        organization=product.organization,
    ).create()
    comp_content_view.publish()
    comp_content_view = comp_content_view.read()
    self.assertEqual(len(comp_content_view.version), 1)
    self.assertEqual(len(comp_content_view.version[0].read().puppet_module), 0)
    # Upload a puppet module so it becomes available to the CV
    puppet_repo = entities.Repository(content_type='puppet', product=product).create()
    with open(get_data_file(PUPPET_MODULE_NTP_PUPPETLABS), 'rb') as handle:
        puppet_repo.upload_content(files={'content': handle})
    puppet_modules = content_view.available_puppet_modules()['results']
    self.assertGreater(len(puppet_modules), 0)
    puppet_module = entities.PuppetModule(id=puppet_modules[0]['id'])
    # Incremental update adding the module; propagate_all_composites
    # should also roll a new version of the composite CV
    content_view.version[0].incremental_update(
        data={
            'content_view_version_environments': [
                {
                    'content_view_version_id': content_view.version[0].id,
                    'environment_ids': [
                        environment.id
                        for environment in content_view.version[0].read().environment
                    ],
                }
            ],
            'add_content': {'puppet_module_ids': [puppet_module.id]},
            'propagate_all_composites': True,
        }
    )
    content_view = content_view.read()
    # New incremental version on the component CV carries the module
    self.assertEqual(len(content_view.version), 2)
    cvv = content_view.version[-1].read()
    self.assertEqual(len(cvv.puppet_module), 1)
    self.assertEqual(cvv.puppet_module[0].id, puppet_module.id)
    # ...and the composite CV got a propagated version with the module too
    comp_content_view = comp_content_view.read()
    self.assertEqual(len(comp_content_view.version), 2)
    comp_cvv = comp_content_view.version[-1].read()
    self.assertEqual(len(comp_cvv.puppet_module), 1)
    self.assertEqual(comp_cvv.puppet_module[0].id, puppet_module.id)
def test_positive_uploaded_content_library_sync(self, capsule_vm):
    """Ensure custom repo with no upstream url and manually uploaded
    content after publishing to Library is synchronized to capsule
    automatically.

    :id: f5406312-dd31-4551-9f03-84eb9c3415f5

    :customerscenario: true

    :BZ: 1340686

    :expectedresults: custom content is present on external capsule

    :CaseLevel: System
    """
    org = entities.Organization(smart_proxy=[capsule_vm._capsule.id]).create()
    product = entities.Product(organization=org).create()
    # url=None: repo has no upstream, content will be uploaded manually
    repo = entities.Repository(product=product, url=None).create()
    capsule = entities.Capsule(id=capsule_vm._capsule.id).search(
        query={'search': f'name={capsule_vm.hostname}'}
    )[0]
    # Find "Library" lifecycle env for specific organization
    lce = entities.LifecycleEnvironment(organization=org).search(
        query={'search': f'name={ENVIRONMENT}'}
    )[0]
    # Associate the lifecycle environment with the capsule
    capsule.content_add_lifecycle_environment(data={'environment_id': lce.id})
    result = capsule.content_lifecycle_environments()
    assert len(result['results']) >= 1
    assert lce.id in [capsule_lce['id'] for capsule_lce in result['results']]
    # Create a content view with the repository
    cv = entities.ContentView(organization=org, repository=[repo]).create()
    # Upload custom content into the repo
    with open(get_data_file(RPM_TO_UPLOAD), 'rb') as handle:
        repo.upload_content(files={'content': handle})
    assert repo.read().content_counts['package'] == 1
    # Publish new version of the content view; publishing to Library is
    # expected to trigger the capsule sync automatically
    cv.publish()
    cv = cv.read()
    assert len(cv.version) == 1
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule.content_get_sync()
    assert len(sync_status['active_sync_tasks']) >= 1 or sync_status['last_sync_time']
    # Wait till capsule sync finishes
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    # Verify previously uploaded content is present on capsule
    lce_repo_path = form_repo_path(
        org=org.label,
        lce=lce.label,
        cv=cv.label,
        prod=product.label,
        repo=repo.label,
        capsule=True,
    )
    # Poll up to 5 times (5s apart) for the file to land on the capsule
    for _ in range(5):
        capsule_rpms = get_repo_files(lce_repo_path, hostname=capsule_vm.ip_addr)
        if len(capsule_rpms) != 0:
            break
        else:
            sleep(5)
    assert len(capsule_rpms) == 1
    assert capsule_rpms[0] == RPM_TO_UPLOAD
def test_positive_capsule_sync(self, capsule_vm):
    """Create repository, add it to lifecycle environment, assign
    lifecycle environment with a capsule, sync repository, sync it once
    again, update repository (add 1 new package), sync repository once
    again.

    :id: 35513099-c918-4a8e-90d0-fd4c87ad2f82

    :customerscenario: true

    :BZ: 1394354, 1439691

    :expectedresults:
        1. Repository sync triggers capsule sync
        2. After syncing capsule contains same repo content as satellite
        3. Syncing repository which has no changes for a second time
           does not trigger any new publish task
        4. Repository revision on capsule remains exactly the same after
           second repo sync with no changes
        5. Syncing repository which was updated will update the content
           on capsule

    :CaseLevel: System
    """
    repo_name = gen_string('alphanumeric')
    # Create and publish custom repository with 2 packages in it
    repo_url = create_repo(repo_name, FAKE_1_YUM_REPO, FAKE_1_YUM_REPO_RPMS[0:2])
    # Create organization, product, repository in satellite, and lifecycle
    # environment
    org = entities.Organization(smart_proxy=[capsule_vm._capsule.id]).create()
    product = entities.Product(organization=org).create()
    repo = entities.Repository(product=product, url=repo_url).create()
    lce = entities.LifecycleEnvironment(organization=org).create()
    # Associate the lifecycle environment with the capsule
    capsule = entities.Capsule(id=capsule_vm._capsule.id).read()
    capsule.content_add_lifecycle_environment(data={'environment_id': lce.id})
    result = capsule.content_lifecycle_environments()
    assert len(result['results']) >= 1
    assert lce.id in [capsule_lce['id'] for capsule_lce in result['results']]
    # Create a content view with the repository
    cv = entities.ContentView(organization=org, repository=[repo]).create()
    # Sync repository
    repo.sync()
    repo = repo.read()
    # Publish new version of the content view
    cv.publish()
    cv = cv.read()
    assert len(cv.version) == 1
    cvv = cv.version[-1].read()
    # Promote content view to lifecycle environment (Library + lce => 2)
    promote(cvv, lce.id)
    cvv = cvv.read()
    assert len(cvv.environment) == 2
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule.content_get_sync()
    assert len(sync_status['active_sync_tasks']) >= 1 or sync_status['last_sync_time']
    # Content of the published content view in
    # lifecycle environment should equal content of the
    # repository
    lce_repo_path = form_repo_path(
        org=org.label, lce=lce.label, cv=cv.label, prod=product.label, repo=repo.label
    )
    cvv_repo_path = form_repo_path(
        org=org.label, cv=cv.label, cvv=cvv.version, prod=product.label, repo=repo.label
    )
    # Wait till capsule sync finishes
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    sync_status = capsule.content_get_sync()
    last_sync_time = sync_status['last_sync_time']
    # If BZ1439691 is open, need to sync repo once more, as repodata
    # will change on second attempt even with no changes in repo
    if is_open('BZ:1439691'):
        repo.sync()
        repo = repo.read()
        cv.publish()
        cv = cv.read()
        assert len(cv.version) == 2
        cv.version.sort(key=lambda version: version.id)
        cvv = cv.version[-1].read()
        promote(cvv, lce.id)
        cvv = cvv.read()
        assert len(cvv.environment) == 2
        sync_status = capsule.content_get_sync()
        assert (
            len(sync_status['active_sync_tasks']) >= 1
            or sync_status['last_sync_time'] != last_sync_time
        )
        for task in sync_status['active_sync_tasks']:
            entities.ForemanTask(id=task['id']).poll()
        sync_status = capsule.content_get_sync()
        last_sync_time = sync_status['last_sync_time']
    # Assert that the content published on the capsule is exactly the
    # same as in repository on satellite
    lce_revision_capsule = get_repomd_revision(lce_repo_path, hostname=capsule_vm.ip_addr)
    assert get_repo_files(lce_repo_path, hostname=capsule_vm.ip_addr) == get_repo_files(
        cvv_repo_path
    )
    # Sync repository for a second time
    result = repo.sync()
    # Assert that the task summary contains a message that says the
    # publish was skipped because content had not changed
    assert result['result'] == 'success'
    assert result['output']['post_sync_skipped']
    assert result['humanized']['output'] == 'No new packages.'
    # Publish a new version of content view
    cv.publish()
    cv = cv.read()
    cv.version.sort(key=lambda version: version.id)
    cvv = cv.version[-1].read()
    # Promote new content view version to lifecycle environment
    promote(cvv, lce.id)
    cvv = cvv.read()
    assert len(cvv.environment) == 2
    # Wait till capsule sync finishes; if no task was spawned, the sync
    # timestamp must still have advanced
    sync_status = capsule.content_get_sync()
    tasks = []
    if not sync_status['active_sync_tasks']:
        assert sync_status['last_sync_time'] != last_sync_time
    else:
        for task in sync_status['active_sync_tasks']:
            tasks.append(entities.ForemanTask(id=task['id']))
            tasks[-1].poll()
    # Assert that the value of repomd revision of repository in
    # lifecycle environment on the capsule has not changed
    new_lce_revision_capsule = get_repomd_revision(lce_repo_path, hostname=capsule_vm.ip_addr)
    assert lce_revision_capsule == new_lce_revision_capsule
    # Update a repository with 1 new rpm
    create_repo(repo_name, FAKE_1_YUM_REPO, FAKE_1_YUM_REPO_RPMS[-1:])
    # Sync, publish and promote the repository
    repo.sync()
    repo = repo.read()
    cv.publish()
    cv = cv.read()
    cv.version.sort(key=lambda version: version.id)
    cvv = cv.version[-1].read()
    promote(cvv, lce.id)
    cvv = cvv.read()
    assert len(cvv.environment) == 2
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule.content_get_sync()
    assert (
        len(sync_status['active_sync_tasks']) >= 1
        or sync_status['last_sync_time'] != last_sync_time
    )
    # Assert that packages count in the repository is updated (2 + 1 new)
    assert repo.content_counts['package'] == 3
    # Assert that the content of the published content view in
    # lifecycle environment is exactly the same as content of the
    # repository
    cvv_repo_path = form_repo_path(
        org=org.label, cv=cv.label, cvv=cvv.version, prod=product.label, repo=repo.label
    )
    assert repo.content_counts['package'] == cvv.package_count
    assert get_repo_files(lce_repo_path) == get_repo_files(cvv_repo_path)
    # Wait till capsule sync finishes
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    # Assert that the content published on the capsule is exactly the
    # same as in the repository
    assert get_repo_files(lce_repo_path, hostname=capsule_vm.ip_addr) == get_repo_files(
        cvv_repo_path
    )
def module_product(module_org):
    """Create and return a Product belonging to the module-scoped org."""
    new_product = entities.Product(organization=module_org)
    return new_product.create()
def test_positive_run_packages_and_services_job(self, fixture_vmsetup, module_org):
    """Tests Ansible REX job can install packages and start services.

    :id: 47ed82fb-77ca-43d6-a52e-f62bae5d3a42

    :Steps:
        0. Create a VM and register to SAT and prepare for REX (ssh key)
        1. Run Ansible Package job for the host to install a package
        2. Check the package is present at the host
        3. Run Ansible Service job for the host to start a service
        4. Check the service is started on the host

    :expectedresults: multiple asserts along the code

    :CaseAutomation: Automated

    :CaseLevel: System

    :parametrized: yes
    """
    self.org = module_org
    client = fixture_vmsetup
    packages = ["cow"]
    # Create a custom repo
    repo = entities.Repository(
        content_type='yum',
        product=entities.Product(organization=self.org).create(),
        url=FAKE_0_YUM_REPO,
    ).create()
    repo.sync()
    prod = repo.product.read()
    subs = entities.Subscription().search(query={'search': f'name={prod.name}'})
    assert len(subs), 'No subscriptions matching the product returned'
    ak = entities.ActivationKey(
        organization=self.org,
        content_view=self.org.default_content_view,
        environment=self.org.library,
    ).create()
    ak.add_subscriptions(data={'subscriptions': [{'id': subs[0].id}]})
    client.register_contenthost(org=self.org.label, activation_key=ak.name)
    # install package via the Ansible REX Package Action template
    # NOTE: 'state=latest, name={}'.format(*packages) only formats the
    # first package; works for the single-entry list used here
    invocation_command = make_job_invocation(
        {
            'job-template': 'Package Action - Ansible Default',
            'inputs': 'state=latest, name={}'.format(*packages),
            'search-query': f'name ~ {client.hostname}',
        }
    )
    # On failure, surface the job's host output in the assertion message
    try:
        assert invocation_command['success'] == '1'
    except AssertionError:
        result = 'host output: {}'.format(
            ' '.join(
                JobInvocation.get_output(
                    {'id': invocation_command['id'], 'host': client.hostname}
                )
            )
        )
        raise AssertionError(result)
    result = client.run(f'rpm -q {" ".join(packages)}')
    # NOTE(review): this block checks `result.status` while the ssh
    # results below use `result.return_code` — presumably two different
    # result object types; confirm both attribute names are correct
    assert result.status == 0
    # start a service via the Ansible REX Service Action template;
    # force postfix to IPv4 so it can start in IPv6-less environments
    service = "postfix"
    ssh.command(
        "sed -i 's/^inet_protocols.*/inet_protocols = ipv4/' /etc/postfix/main.cf",
        hostname=client.ip_addr,
    )
    invocation_command = make_job_invocation(
        {
            'job-template': 'Service Action - Ansible Default',
            'inputs': f'state=started, name={service}',
            'search-query': f"name ~ {client.hostname}",
        }
    )
    try:
        assert invocation_command['success'] == '1'
    except AssertionError:
        result = 'host output: {}'.format(
            ' '.join(
                JobInvocation.get_output(
                    {'id': invocation_command['id'], 'host': client.hostname}
                )
            )
        )
        raise AssertionError(result)
    result = ssh.command(f"systemctl status {service}", hostname=client.ip_addr)
    assert result.return_code == 0
def product(manifest_org):
    """Find and return the product matching PRODUCT_NAME."""
    matches = entities.Product(name=PRODUCT_NAME, organization=manifest_org).search()
    return matches[0]
def test_positive_sync_puppet_module_with_versions(self):
    """Ensure it's possible to sync multiple versions of the same puppet
    module to the capsule.

    :id: 83a0ddd6-8a6a-43a0-b169-094a2556dd28

    :customerscenario: true

    :BZ: 1365952

    :Steps:
        1. Register a capsule
        2. Associate LCE with the capsule
        3. Sync a puppet module with multiple versions
        4. Publish a CV with one version of puppet module and promote
           it to capsule's LCE
        5. Wait for capsule synchronization to finish
        6. Publish another CV with different version of puppet module
           and promote it to capsule's LCE
        7. Wait for capsule synchronization to finish once more

    :expectedresults: Capsule was successfully synchronized, new version
        of puppet module is present on capsule

    :CaseLevel: System
    """
    module_name = 'versioned'
    module_versions = ['2.2.2', '3.3.3']
    org = entities.Organization().create()
    lce = entities.LifecycleEnvironment(organization=org).create()
    content_view = entities.ContentView(organization=org).create()
    prod = entities.Product(organization=org).create()
    puppet_repository = entities.Repository(
        content_type=REPO_TYPE['puppet'],
        product=prod,
        url=CUSTOM_PUPPET_REPO,
    ).create()
    capsule = entities.Capsule(id=self.capsule_id).read()
    capsule.content_add_lifecycle_environment(data={
        'environment_id': lce.id,
    })
    result = capsule.content_lifecycle_environments()
    self.assertGreaterEqual(len(result['results']), 1)
    self.assertIn(lce.id,
                  [capsule_lce['id'] for capsule_lce in result['results']])
    puppet_repository.sync()
    # Locate the older of the two synced module versions
    puppet_module_old = entities.PuppetModule().search(
        query={
            'search': 'name={} and version={}'.format(module_name,
                                                      module_versions[0])
        })[0]
    # Add puppet module to the CV
    entities.ContentViewPuppetModule(
        content_view=content_view,
        id=puppet_module_old.id,
    ).create()
    content_view = content_view.read()
    self.assertGreater(len(content_view.puppet_module), 0)
    # Publish and promote CVV
    content_view.publish()
    content_view = content_view.read()
    self.assertEqual(len(content_view.version), 1)
    cvv = content_view.version[-1].read()
    promote(cvv, lce.id)
    cvv = cvv.read()
    self.assertEqual(len(cvv.environment), 2)
    # Wait till capsule sync finishes
    sync_status = capsule.content_get_sync()
    self.assertTrue(
        len(sync_status['active_sync_tasks']) >= 1
        or sync_status['last_sync_time'])
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    sync_status = capsule.content_get_sync()
    last_sync_time = sync_status['last_sync_time']
    # Unassign old puppet module version from CV
    entities.ContentViewPuppetModule(
        content_view=content_view,
        id=content_view.puppet_module[0].id,
    ).delete()
    # Assign new puppet module version
    puppet_module_new = entities.PuppetModule().search(
        query={
            'search': 'name={} and version={}'.format(module_name,
                                                      module_versions[1])
        })[0]
    entities.ContentViewPuppetModule(
        content_view=content_view,
        id=puppet_module_new.id,
    ).create()
    # NOTE(review): `content_view` is not re-read after the module swap,
    # so this asserts against the pre-swap module list — presumably
    # intentional as a non-empty check only; confirm
    self.assertGreater(len(content_view.puppet_module), 0)
    # Publish and promote CVV
    content_view.publish()
    content_view = content_view.read()
    self.assertEqual(len(content_view.version), 2)
    cvv = content_view.version[-1].read()
    promote(cvv, lce.id)
    cvv = cvv.read()
    self.assertEqual(len(cvv.environment), 2)
    # Wait till capsule sync finishes; if no task was spawned the sync
    # timestamp must still have advanced
    sync_status = capsule.content_get_sync()
    if sync_status['active_sync_tasks']:
        for task in sync_status['active_sync_tasks']:
            entities.ForemanTask(id=task['id']).poll()
    else:
        self.assertNotEqual(sync_status['last_sync_time'], last_sync_time)
    # Verify the new module version archive landed on the capsule
    stored_modules = get_repo_files(
        PULP_PUBLISHED_PUPPET_REPOS_PATH, 'gz', self.capsule_hostname)
    with self.assertNotRaises(StopIteration):
        next(
            filename for filename in stored_modules
            if '{}-{}'.format(module_name, module_versions[1]) in filename)
def class_product():
    """Yield a freshly created Product and delete it on teardown."""
    created = entities.Product().create()
    yield created
    created.delete()
def test_positive_task_status(session):
    """Check if the Task Status is working in the Dashboard UI and
    filter from Tasks index page is working correctly.

    :id: fb667d6a-7255-4341-9f79-2f03d19e8e0f

    :Steps:
        1. Navigate to Monitor -> Dashboard
        2. Review the Latest Warning/Error Tasks widget
        3. Review the Running Chart widget
        4. Review the Task Status widget
        5. Review the Stopped Chart widget
        6. Click few links from the widget

    :expectedresults: Each link shows the right info and filter can be
        set from Tasks dashboard

    :BZ: 1718889

    :CaseLevel: Integration
    """
    # Deliberately unreachable URL so the sync task fails with a warning
    url = 'http://www.non_existent_repo_url.org/repos'
    org = entities.Organization().create()
    product = entities.Product(organization=org).create()
    repo = entities.Repository(url=url, product=product, content_type='puppet').create()
    with pytest.raises(TaskFailedError):
        repo.sync()
    with session:
        session.organization.select(org_name=org.name)
        # Clicking the widget should pre-fill the Tasks search filter
        session.dashboard.action(
            {'TaskStatus': {
                'state': 'stopped',
                'result': 'warning'
            }})
        searchbox = session.task.read_all('searchbox')
        assert searchbox['searchbox'] == 'state=stopped&result=warning'
        # Scheduled chart total must match the paginated item count
        session.task.set_chart_filter('ScheduledChart')
        tasks = session.task.read_all(['pagination', 'ScheduledChart'])
        assert tasks['pagination']['total_items'] == tasks['ScheduledChart'][
            'total'].split()[0]
        # Stopped chart row total must match the paginated item count
        session.task.set_chart_filter('StoppedChart', {
            'row': 1,
            'focus': 'Total'
        })
        tasks = session.task.read_all()
        assert tasks['pagination']['total_items'] == tasks['StoppedChart'][
            'table'][1]['Total']
        # The failed sync task must show up with state/result from above
        task_name = "Synchronize repository '{}'; product '{}'; organization '{}'".format(
            repo.name, product.name, org.name)
        assert tasks['table'][0]['Action'] == task_name
        assert tasks['table'][0]['State'] == 'stopped'
        assert tasks['table'][0]['Result'] == 'warning'
        # Drill into the task from the Latest Failed Tasks widget
        session.dashboard.action(
            {'LatestFailedTasks': {
                'name': 'Synchronize'
            }})
        values = session.task.read(task_name)
        assert values['task']['result'] == 'warning'
        assert values['task'][
            'errors'] == 'PLP0000: Importer indicated a failed response'
def create_activation_key_for_client_registration(
        ak_name, client_os, org, environment, sat_state):
    """Creates Activation key for client registration.

    Builds (or reuses) a RHEL product/repo and a tools product/repo,
    wraps both in a content view, publishes and promotes it, then
    creates an activation key carrying both subscriptions.

    :param str ak_name: Activation key name
    :param str client_os: rhel6/rhel7
    :param nailgun.entity.Organization org: Organization
    :param nailgun.entity.Environment environment: Environment
    :param str sat_state: pre or post
    :return nailgun.entity.ActivationKey: Activation key
    :raises ValueError: if the required repo URL environment variables
        are missing
    """
    client_os = client_os.upper()
    from_ver = os.environ.get('FROM_VERSION')
    rhel_prod_name = 'scenarios_rhel{}_prod'.format(client_os[-1])
    rhel_repo_name = '{}_repo'.format(rhel_prod_name)
    rhel_url = os.environ.get('{}_CUSTOM_REPO'.format(client_os))
    if rhel_url is None:
        raise ValueError('The RHEL Repo URL environment variable for OS {} '
                         'is not provided!'.format(client_os))
    rhel_prod = entities.Product(
        name=rhel_prod_name, organization=org.id).create()
    # Satellite 6.1/6.2 repositories do not support verify_ssl_on_sync
    if sat_state.lower() == 'pre' and from_ver in ['6.1', '6.2']:
        rhel_repo = entities.Repository(
            name=rhel_repo_name,
            product=rhel_prod,
            url=rhel_url,
            content_type='yum'
        ).create()
    else:
        rhel_repo = entities.Repository(
            name=rhel_repo_name,
            product=rhel_prod,
            url=rhel_url,
            content_type='yum',
            verify_ssl_on_sync=False
        ).create()
    # RHEL repo syncs are large; allow an extended timeout
    call_entity_method_with_timeout(rhel_repo.sync, timeout=1400)
    if sat_state.lower() == 'pre':
        # Pre-upgrade: reuse the existing Red Hat tools product/repo
        product_name = 'Red Hat Enterprise Linux Server'
        repo_name = 'Red Hat Satellite Tools {0} for RHEL ' \
                    '{1} Server RPMs x86_64'.format(from_ver, client_os[-1])
        tools_prod = entities.Product(
            organization=org.id
        ).search(
            query={
                'per_page': 1000,
                'search': 'name="{}"'.format(product_name)
            }
        )[0]
        tools_repo = entities.Repository(
            organization=org.id, product=tools_prod
        ).search(
            query={
                'per_page': 1000,
                'search': 'name="{}"'.format(repo_name)
            }
        )[0]
    elif sat_state.lower() == 'post':
        # Post-upgrade: create (or reuse) a custom tools product/repo
        product_name = 'scenarios_tools_product'
        tools_repo_url = os.environ.get(
            'TOOLS_{}'.format(client_os.upper()))
        if tools_repo_url is None:
            raise ValueError('The Tools Repo URL environment variable for '
                             'OS {} is not provided!'.format(client_os))
        repo_name = '{}_repo'.format(product_name)
        # BUGFIX: search() returns a list — the original bound the list
        # itself to tools_prod and never indexed the repo search result,
        # so tools_prod.name / tools_cv.repository below received lists.
        tools_prod_results = entities.Product(
            organization=org.id
        ).search(query={'search': 'name={}'.format(product_name)})
        if not tools_prod_results:
            tools_prod = entities.Product(
                name=product_name, organization=org.id).create()
            tools_repo = entities.Repository(
                name=repo_name,
                product=tools_prod,
                url=tools_repo_url,
                content_type='yum'
            ).create()
            tools_repo.sync()
        else:
            tools_prod = tools_prod_results[0]
            tools_repo = entities.Repository(
                organization=org.id, product=tools_prod
            ).search(query={'search': 'name={}'.format(repo_name)})[0]
    # NOTE(review): sat_state other than pre/post leaves tools_prod and
    # tools_repo undefined (NameError below) — presumably callers only
    # ever pass 'pre' or 'post'; confirm
    tools_cv = entities.ContentView(
        name=ak_name + '_cv',
        label=ak_name + '_cv',
        organization=org.id
    ).create()
    tools_cv.repository = [tools_repo, rhel_repo]
    tools_cv = tools_cv.update(['repository'])
    tools_cv.publish()
    tools_cv = tools_cv.read()  # Published CV with new version
    # Promote the newest CV version to the target environment
    cvv = entities.ContentViewVersion(
        id=max(cvv.id for cvv in tools_cv.version)
    ).read()
    cvv.promote(
        data={
            'environment_id': environment.id,
            'force': False
        }
    )
    tools_ak = entities.ActivationKey(
        name=ak_name,
        content_view=tools_cv,
        organization=org.id,
        environment=environment
    ).create()
    if sat_state == 'pre':
        tools_sub = 'Red Hat Satellite Employee Subscription'
        tools_content = 'rhel-{0}-server-satellite-tools-{1}-rpms'.format(
            client_os[-1], from_ver)
    else:
        tools_sub = tools_prod.name
    tools_subscription = entities.Subscription(organization=org.id).search(
        query={
            'search': 'name="{}"'.format(tools_sub),
            'per_page': 1000
        }
    )[0]
    rhel_subscription = entities.Subscription(organization=org.id).search(
        query={
            'search': 'name={}'.format(rhel_prod.name),
            'per_page': 1000
        }
    )[0]
    tools_ak.add_subscriptions(data={
        'subscription_id': tools_subscription.id})
    if sat_state == 'pre':
        # Red Hat tools content is disabled by default; enable it on the AK
        tools_ak.content_override(data={
            'content_override': {
                'content_label': tools_content,
                'value': '1'
            }}
        )
    tools_ak.add_subscriptions(data={
        'subscription_id': rhel_subscription.id})
    return tools_ak
def test_positive_add_remove_subscription(module_org, module_ak_cv_lce):
    """Try to bulk add and remove a subscription to members of a host collection.

    :id: c4ec5727-eb25-452e-a91f-87cafb16666b

    :steps:
        1. Create HC, add AK to HC
        2. Create product so we can use it's subscription
        3. Create some VMs and register them with AK so they are in HC
        4. Add the subscription to the members of the Host Collection
        5. Assert subscription is added
        6. Bulk remove subscription
        7. Assert it is removed

    :expectedresults: subscription added to, and removed from, members of host collection

    :CaseImportance: Critical
    """
    # this command creates a host collection and "appends", makes available, to the AK
    module_ak_cv_lce.host_collection.append(
        entities.HostCollection(organization=module_org).create()
    )
    # Move HC from Add tab to List tab on AK view
    module_ak_cv_lce = module_ak_cv_lce.update(['host_collection'])
    # Create a product so we have a subscription to use
    product = entities.Product(organization=module_org).create()
    prod_name = product.name
    # The product subscription is looked up by the product's name
    product_subscription = entities.Subscription(organization=module_org).search(
        query={'search': f'name={prod_name}'}
    )[0]
    # Create and register VMs as members of Host Collection
    with VMBroker(nick='rhel7', host_classes={'host': ContentHost}, _count=2) as hosts:
        for client in hosts:
            client.install_katello_ca()
            # Registering with the AK places the hosts into the HC
            client.register_contenthost(module_org.label, module_ak_cv_lce.name)
        # Read host_collection back from Satellite to get host_ids
        host_collection = module_ak_cv_lce.host_collection[0].read()
        host_ids = [host.id for host in host_collection.host]
        # Add subscription
        # Call nailgun to make the API PUT to members of Host Collection
        entities.Host().bulk_add_subscriptions(
            data={
                "organization_id": module_org.id,
                "included": {"ids": host_ids},
                "subscriptions": [{"id": product_subscription.id, "quantity": 1}],
            }
        )
        # GET the subscriptions from hosts and assert they are there
        for host_id in host_ids:
            req = entities.HostSubscription(host=host_id).subscriptions()
            assert (
                prod_name in req['results'][0]['product_name']
            ), 'Subscription not applied to HC members'
        # Remove the subscription
        # Call nailgun to make the API PUT to members of Host Collection
        entities.Host().bulk_remove_subscriptions(
            data={
                "organization_id": module_org.id,
                "included": {"ids": host_ids},
                "subscriptions": [{"id": product_subscription.id, "quantity": 1}],
            }
        )
        # GET the subscriptions from hosts and assert they are gone
        for host_id in host_ids:
            req = entities.HostSubscription(host=host_id).subscriptions()
            assert not req['results'], 'Subscription not removed from HC members'
def test_positive_incremental_update_puppet(self): """Incrementally update a CVV with a puppet module. :id: 19b2fe3b-6c91-4713-9910-17517fba661f :expectedresults: The incremental update succeeds with no errors, and the content view is given an additional version. :CaseLevel: Integration """ # Create a content view and add a yum repository to it. Publish the CV. product = entities.Product().create() yum_repo = entities.Repository(content_type='yum', product=product).create() content_view = entities.ContentView(organization=product.organization, repository=[yum_repo]).create() content_view.publish() content_view = content_view.read() # Create a puppet repository and upload a puppet module into it. puppet_repo = entities.Repository(content_type='puppet', product=product).create() with open(get_data_file(PUPPET_MODULE_NTP_PUPPETLABS), 'rb') as handle: puppet_repo.upload_content(files={'content': handle}) # Extract all the available puppet modules. puppet_modules = content_view.available_puppet_modules()['results'] # Make sure that we have results. Uploading content does not # seem to create a task so we cannot pool it for status. We # should then check that we have some results back before # proceeding. self.assertGreater(len(puppet_modules), 0) puppet_module = entities.PuppetModule(id=puppet_modules[0]['id']) # Incrementally update the CVV with the puppet module. payload = { 'content_view_version_environments': [{ 'content_view_version_id': content_view.version[0].id, 'environment_ids': [ environment.id for environment in content_view.version[0].read().environment ], }], 'add_content': { 'puppet_module_ids': [puppet_module.id] }, } content_view.version[0].incremental_update(data=payload) content_view = content_view.read() # The CV now has two versions. The first version has no puppet modules, # and the second version has one puppet module. Let's verify this. 
# NOTE: The `read_json` lines should be refactored after the 'minor' # attribute is added to the ContentViewVersion entity class. self.assertEqual(len(content_view.version), 2) for i in range(len(content_view.version)): content_view.version[i] = content_view.version[i].read() content_view.version.sort(key=lambda cvv: cvv.read_json()['minor']) self.assertEqual(len(content_view.version[0].puppet_module), 0) self.assertEqual(len(content_view.version[1].puppet_module), 1) self.assertEqual(content_view.version[1].puppet_module[0].id, puppet_module.id)
def test_positive_create_as_non_admin_user_with_cv_published(
        module_org, test_name):
    """Create a repository as a non admin user in a product that already
    contain a repository that is used in a published content view.

    :id: 407864eb-50b8-4bc8-bbc7-0e6f8136d89f

    :expectedresults: New repository successfully created by non admin
        user

    :BZ: 1447829

    :CaseLevel: Integration
    """
    user_login = gen_string('alpha')
    user_password = gen_string('alphanumeric')
    repo_name = gen_string('alpha')
    # Minimal permission set: dashboard access plus full product rights
    user_permissions = {
        None: ['access_dashboard'],
        'Katello::Product': [
            'view_products',
            'create_products',
            'edit_products',
            'destroy_products',
            'sync_products',
            'export_products',
        ],
    }
    role = entities.Role().create()
    create_role_permissions(role, user_permissions)
    entities.User(
        login=user_login,
        password=user_password,
        role=[role],
        admin=False,
        default_organization=module_org,
        organization=[module_org],
    ).create()
    # Pre-create a repo in the product and use it in a published CV so the
    # new repo is added alongside content that is already in use
    prod = entities.Product(organization=module_org).create()
    repo = entities.Repository(product=prod,
                               url=settings.repos.yum_2.url).create()
    repo.sync()
    content_view = entities.ContentView(organization=module_org).create()
    content_view.repository = [repo]
    content_view = content_view.update(['repository'])
    content_view.publish()
    with Session(test_name, user_login, user_password) as session:
        # ensure that the created user is not a global admin user
        # check administer->users page
        with pytest.raises(NavigationTriesExceeded):
            pswd = gen_string('alphanumeric')
            session.user.create({
                'user.login': gen_string('alphanumeric'),
                'user.auth': 'INTERNAL',
                'user.password': pswd,
                'user.confirm': pswd,
            })
        # ensure that the created user has only the assigned permissions
        # check that host collections menu tab does not exist
        with pytest.raises(NavigationTriesExceeded):
            session.hostcollection.create(
                {'name': gen_string('alphanumeric')})
        session.repository.create(
            prod.name,
            {
                'name': repo_name,
                'repo_type': REPO_TYPE['yum'],
                'repo_content.upstream_url': settings.repos.yum_1.url,
            },
        )
        # BUG FIX: assert on the repository that was just created
        # (repo_name), not on the pre-existing `repo` — otherwise the test
        # never verifies that the non-admin creation succeeded.
        assert session.repository.search(
            prod.name, repo_name)[0]['Name'] == repo_name
def test_positive_checksum_sync(self):
    """Synchronize repository to capsule, update repository's checksum
    type, trigger capsule sync and make sure checksum type was updated on
    capsule

    :id: eb07bdf3-6cd8-4a2f-919b-8dfc84e16115

    :customerscenario: true

    :BZ: 1288656

    :expectedresults: checksum type is updated in repodata of
        corresponding repository on capsule

    :CaseLevel: System
    """
    repomd_path = 'repodata/repomd.xml'
    # Create organization, product, lce and repository with sha256 checksum
    # type
    org = entities.Organization(smart_proxy=[self.capsule_id]).create()
    product = entities.Product(organization=org).create()
    repo = entities.Repository(
        checksum_type='sha256',
        product=product,
    ).create()
    lce = entities.LifecycleEnvironment(organization=org).create()
    # Associate the lifecycle environment with the capsule
    capsule = entities.Capsule(id=self.capsule_id).read()
    capsule.content_add_lifecycle_environment(data={
        'environment_id': lce.id,
    })
    result = capsule.content_lifecycle_environments()
    self.assertGreaterEqual(len(result['results']), 1)
    self.assertIn(lce.id,
                  [capsule_lce['id'] for capsule_lce in result['results']])
    # Sync, publish and promote a repo
    cv = entities.ContentView(
        organization=org,
        repository=[repo],
    ).create()
    repo.sync()
    repo = repo.read()
    cv.publish()
    cv = cv.read()
    self.assertEqual(len(cv.version), 1)
    cvv = cv.version[-1].read()
    promote(cvv, lce.id)
    cvv = cvv.read()
    # Version lives in Library + promoted LCE
    self.assertEqual(len(cvv.environment), 2)
    # Wait till capsule sync finishes; promotion either leaves active sync
    # tasks to poll, or has already finished (last_sync_time set)
    sync_status = capsule.content_get_sync()
    self.assertTrue(
        len(sync_status['active_sync_tasks']) >= 1
        or sync_status['last_sync_time'])
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    sync_status = capsule.content_get_sync()
    # Remember the sync time so the second sync can be detected below
    last_sync_time = sync_status['last_sync_time']
    # Verify repodata's checksum type is sha256, not sha1 on capsule
    lce_repo_path = form_repo_path(
        org=org.label,
        lce=lce.label,
        cv=cv.label,
        prod=product.label,
        repo=repo.label,
    )
    result = ssh.command('grep -o \'checksum type="sha1"\' {}/{}'.format(
        lce_repo_path, repomd_path),
        hostname=self.capsule_hostname)
    # grep exits non-zero with no output when the pattern is absent
    self.assertNotEqual(result.return_code, 0)
    self.assertEqual(len(result.stdout), 0)
    result = ssh.command('grep -o \'checksum type="sha256"\' {}/{}'.format(
        lce_repo_path, repomd_path),
        hostname=self.capsule_hostname)
    self.assertEqual(result.return_code, 0)
    self.assertGreater(len(result.stdout), 0)
    # Update repo's checksum type to sha1
    repo.checksum_type = 'sha1'
    repo = repo.update(['checksum_type'])
    # Sync, publish and promote repo
    repo.sync()
    cv.publish()
    cv = cv.read()
    self.assertEqual(len(cv.version), 2)
    cv.version.sort(key=lambda version: version.id)
    cvv = cv.version[-1].read()
    promote(cvv, lce.id)
    cvv = cvv.read()
    self.assertEqual(len(cvv.environment), 2)
    # Wait till capsule sync finishes; a changed last_sync_time means the
    # second sync already completed
    sync_status = capsule.content_get_sync()
    self.assertTrue(
        len(sync_status['active_sync_tasks']) >= 1
        or sync_status['last_sync_time'] != last_sync_time)
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    # Verify repodata's checksum type has updated to sha1 on capsule
    result = ssh.command('grep -o \'checksum type="sha256"\' {}/{}'.format(
        lce_repo_path, repomd_path),
        hostname=self.capsule_hostname)
    self.assertNotEqual(result.return_code, 0)
    self.assertEqual(len(result.stdout), 0)
    result = ssh.command('grep -o \'checksum type="sha1"\' {}/{}'.format(
        lce_repo_path, repomd_path),
        hostname=self.capsule_hostname)
    self.assertEqual(result.return_code, 0)
    self.assertGreater(len(result.stdout), 0)
def test_inherit_puppetclass(self):
    """Host that created from HostGroup entity with PuppetClass assigned to
    it should inherit such puppet class information under
    'all_puppetclasses' field

    :id: 7b840f3d-413c-40bb-9a7d-cd9dad3c0737

    :expectedresults: Host inherited 'all_puppetclasses' details from
        HostGroup that was used for such Host create procedure

    :BZ: 1107708, 1222118, 1487586

    :CaseLevel: System
    """
    # Creating entities like organization, content view and lifecycle_env
    # with not utf-8 names for easier interaction with puppet environment
    # further in test
    org = entities.Organization(name=gen_string('alpha')).create()
    location = entities.Location(organization=[org]).create()
    # Creating puppet repository with puppet module assigned to it
    product = entities.Product(organization=org).create()
    puppet_repo = entities.Repository(content_type='puppet',
                                      product=product).create()
    # Working with 'ntp' module as we know for sure that it contains at
    # least few puppet classes
    with open(get_data_file(PUPPET_MODULE_NTP_PUPPETLABS), 'rb') as handle:
        puppet_repo.upload_content(files={'content': handle})
    content_view = entities.ContentView(name=gen_string('alpha'),
                                        organization=org).create()
    result = content_view.available_puppet_modules()['results']
    assert len(result) == 1
    entities.ContentViewPuppetModule(author=result[0]['author'],
                                     name=result[0]['name'],
                                     content_view=content_view).create()
    content_view.publish()
    content_view = content_view.read()
    lc_env = entities.LifecycleEnvironment(name=gen_string('alpha'),
                                           organization=org).create()
    promote(content_view.version[0], lc_env.id)
    content_view = content_view.read()
    assert len(content_view.version) == 1
    assert len(content_view.puppet_module) == 1
    # Form environment name variable for our test
    # (Katello naming scheme: KT_<org>_<lce>_<cv>_<cv_id>)
    env_name = f'KT_{org.name}_{lc_env.name}_{content_view.name}_{content_view.id}'
    # Get all environments for current organization.
    # We have two environments (one created after publishing and one more
    # was created after promotion), so we need to select promoted one
    environments = entities.Environment().search(
        query={'organization_id': org.id})
    assert len(environments) == 2
    environments = [
        environment for environment in environments
        if environment.name == env_name
    ]
    assert len(environments) == 1
    environment = environments[0].read()
    environment.location = [location]
    environment.update()
    # Create a host group and it dependencies.
    mac = entity_fields.MACAddressField().gen_value()
    root_pass = entity_fields.StringField(length=(8, 30)).gen_value()
    domain = entities.Domain().create()
    architecture = entities.Architecture().create()
    ptable = entities.PartitionTable().create()
    operatingsystem = entities.OperatingSystem(architecture=[architecture],
                                               ptable=[ptable]).create()
    medium = entities.Media(operatingsystem=[operatingsystem]).create()
    hostgroup = entities.HostGroup(
        architecture=architecture,
        domain=domain,
        environment=environment,
        location=[location.id],
        medium=medium,
        name=gen_string('alpha'),
        operatingsystem=operatingsystem,
        organization=[org.id],
        ptable=ptable,
    ).create()
    # Hostgroup starts out with no puppet classes attached
    assert len(hostgroup.read_json()['all_puppetclasses']) == 0
    # Get puppet class id for ntp module
    response = client.get(
        environment.path('self') + '/puppetclasses',
        auth=get_credentials(),
        verify=False,
    )
    response.raise_for_status()
    results = response.json()['results']
    puppet_class_id = results['ntp'][0]['id']
    # Assign puppet class to the hostgroup via raw API call (no nailgun
    # helper exists for this endpoint)
    client.post(
        hostgroup.path('self') + '/puppetclass_ids',
        data={
            'puppetclass_id': puppet_class_id
        },
        auth=get_credentials(),
        verify=False,
    ).raise_for_status()
    hostgroup_attrs = hostgroup.read_json()
    assert len(hostgroup_attrs['all_puppetclasses']) == 1
    assert hostgroup_attrs['all_puppetclasses'][0]['name'] == 'ntp'
    # Create Host entity using HostGroup; create(False) skips the implicit
    # read-back of the created entity
    host = entities.Host(
        hostgroup=hostgroup,
        mac=mac,
        root_pass=root_pass,
        environment=environment,
        location=location,
        organization=org,
        content_facet_attributes={
            'content_view_id': content_view.id,
            'lifecycle_environment_id': lc_env.id,
        },
        name=gen_string('alpha'),
    ).create(False)
    host_attrs = host.read_json()
    # The host must inherit the hostgroup's puppet class
    assert len(host_attrs['all_puppetclasses']) == 1
    assert host_attrs['all_puppetclasses'][0]['name'] == 'ntp'
def configure_provisioning(org=None, loc=None):
    """Create and configure org, loc, product, repo, cv, env. Update proxy,
    domain, subnet, compute resource, provision templates and medium with
    previously created entities and create a hostgroup using all mentioned
    entities.

    :param org: Default Organization that should be used in both host
        discovering and host provisioning procedures
    :param loc: Default Location that should be used in both host
        discovering and host provisioning procedures
    :return: Dict with the names of created entities that can be re-used
        further in provisioning or validation procedure, under the keys
        'host_group' and 'domain'
    """
    # Create new organization and location in case they were not passed
    if org is None:
        org = entities.Organization().create()
    if loc is None:
        loc = entities.Location(organization=[org]).create()
    # Create a new Life-Cycle environment
    lc_env = entities.LifecycleEnvironment(organization=org).create()
    # Create a Product, Repository for custom RHEL6 contents
    product = entities.Product(organization=org).create()
    repo = entities.Repository(product=product,
                               url=settings.rhel7_os).create()
    # Increased timeout value for repo sync; the original value is restored
    # in the finally clause even if the sync/publish fails
    try:
        old_task_timeout = entity_mixins.TASK_TIMEOUT
        entity_mixins.TASK_TIMEOUT = 3600
        repo.sync()
        # Create, Publish and promote CV
        content_view = entities.ContentView(organization=org).create()
        content_view.repository = [repo]
        content_view = content_view.update(['repository'])
        content_view.publish()
        content_view = content_view.read()
        promote(content_view.version[0], lc_env.id)
    finally:
        entity_mixins.TASK_TIMEOUT = old_task_timeout
    # Search for puppet environment and associate location
    environment = entities.Environment(
        organization=[org.id]).search()[0].read()
    environment.location.append(loc)
    environment = environment.update(['location'])
    # Search for SmartProxy, and associate location
    proxy = entities.SmartProxy().search(
        query={u'search': u'name={0}'.format(settings.server.hostname)})
    proxy = proxy[0].read()
    proxy.location.append(loc)
    proxy = proxy.update(['location'])
    proxy.organization.append(org)
    proxy = proxy.update(['organization'])
    # Search for existing domain or create new otherwise. Associate org,
    # location and dns to it
    _, _, domain = settings.server.hostname.partition('.')
    domain = entities.Domain().search(
        query={u'search': u'name="{0}"'.format(domain)})
    if len(domain) == 1:
        domain = domain[0].read()
        domain.location.append(loc)
        domain.organization.append(org)
        domain.dns = proxy
        domain = domain.update(['dns', 'location', 'organization'])
    else:
        domain = entities.Domain(
            dns=proxy,
            location=[loc],
            organization=[org],
        ).create()
    # Search if subnet is defined with given network.
    # If so, just update its relevant fields otherwise,
    # Create new subnet
    network = settings.vlan_networking.subnet
    subnet = entities.Subnet().search(
        query={u'search': u'network={0}'.format(network)})
    if len(subnet) == 1:
        subnet = subnet[0].read()
        subnet.domain = [domain]
        subnet.location.append(loc)
        subnet.organization.append(org)
        subnet.dns = [proxy]
        subnet.dhcp = [proxy]
        subnet.tftp = [proxy]
        subnet.discovery = [proxy]
        subnet = subnet.update([
            'domain',
            'discovery',
            'dhcp',
            'dns',
            'location',
            'organization',
            'tftp',
        ])
    else:
        # Create new subnet
        subnet = entities.Subnet(network=network,
                                 mask=settings.vlan_networking.netmask,
                                 domain=[domain],
                                 location=[loc],
                                 organization=[org],
                                 dns=proxy,
                                 dhcp=proxy,
                                 tftp=proxy,
                                 discovery=proxy).create()
    # Search if Libvirt compute-resource already exists
    # If so, just update its relevant fields otherwise,
    # Create new compute-resource with 'libvirt' provider.
    resource_url = u'qemu+ssh://root@{0}/system'.format(
        settings.compute_resources.libvirt_hostname)
    comp_res = [
        res for res in entities.LibvirtComputeResource().search()
        if res.provider == 'Libvirt' and res.url == resource_url
    ]
    if len(comp_res) >= 1:
        computeresource = entities.LibvirtComputeResource(
            id=comp_res[0].id).read()
        computeresource.location.append(loc)
        computeresource.organization.append(org)
        computeresource = computeresource.update(['location',
                                                  'organization'])
    else:
        # Create Libvirt compute-resource
        computeresource = entities.LibvirtComputeResource(
            provider=u'libvirt',
            url=resource_url,
            set_console_password=False,
            display_type=u'VNC',
            location=[loc.id],
            organization=[org.id],
        ).create()
    # Get the Partition table ID
    ptable = entities.PartitionTable().search(
        query={u'search': u'name="{0}"'.format(DEFAULT_PTABLE)})[0].read()
    # Get the OS ID
    os = entities.OperatingSystem().search(
        query={
            u'search': u'name="RedHat" AND (major="{0}" OR major="{1}")'.format(
                RHEL_6_MAJOR_VERSION, RHEL_7_MAJOR_VERSION)
        })[0].read()
    # Get the Provisioning template_ID and update with OS, Org, Location
    provisioning_template = entities.ConfigTemplate().search(
        query={u'search': u'name="{0}"'.format(DEFAULT_TEMPLATE)})
    provisioning_template = provisioning_template[0].read()
    provisioning_template.operatingsystem.append(os)
    provisioning_template.organization.append(org)
    provisioning_template.location.append(loc)
    provisioning_template = provisioning_template.update(
        ['location', 'operatingsystem', 'organization'])
    # Get the PXE template ID and update with OS, Org, location
    pxe_template = entities.ConfigTemplate().search(
        query={u'search': u'name="{0}"'.format(DEFAULT_PXE_TEMPLATE)})
    pxe_template = pxe_template[0].read()
    pxe_template.operatingsystem.append(os)
    pxe_template.organization.append(org)
    pxe_template.location.append(loc)
    pxe_template = pxe_template.update(
        ['location', 'operatingsystem', 'organization'])
    # Get the arch ID
    arch = entities.Architecture().search(
        query={u'search': u'name="x86_64"'})[0].read()
    # Get the media and update its location
    media = entities.Media(organization=[org]).search()[0].read()
    media.location.append(loc)
    media.organization.append(org)
    media = media.update(['location', 'organization'])
    # Update the OS to associate arch, ptable, templates
    os.architecture.append(arch)
    os.ptable.append(ptable)
    os.config_template.append(provisioning_template)
    os.config_template.append(pxe_template)
    os.medium.append(media)
    os = os.update([
        'architecture',
        'config_template',
        'ptable',
        'medium',
    ])
    # Create Hostgroup
    host_group = entities.HostGroup(
        architecture=arch,
        domain=domain.id,
        subnet=subnet.id,
        lifecycle_environment=lc_env.id,
        content_view=content_view.id,
        location=[loc.id],
        environment=environment.id,
        puppet_proxy=proxy,
        puppet_ca_proxy=proxy,
        content_source=proxy,
        medium=media,
        root_pass=gen_string('alphanumeric'),
        operatingsystem=os.id,
        organization=[org.id],
        ptable=ptable.id,
    ).create()
    return {
        'host_group': host_group.name,
        'domain': domain.name,
    }
def test_positive_synchronize_rh_product_future_sync_date(self):
    """Create a sync plan with sync date in a future and sync one RH
    product with it automatically.

    :id: 6697a00f-2181-4c2b-88eb-2333268d780b

    :expectedresults: Product is synchronized successfully.

    :CaseLevel: System
    """
    delay = 2 * 60  # delay for sync date in seconds
    org = entities.Organization().create()
    with manifests.clone() as manifest:
        entities.Subscription().upload(data={'organization_id': org.id},
                                       files={'content': manifest.content})
    repo_id = enable_rhrepo_and_fetchid(
        basearch='x86_64',
        org_id=org.id,
        product=PRDS['rhel'],
        repo=REPOS['rhst7']['name'],
        reposet=REPOSET['rhst7'],
        releasever=None,
    )
    product = entities.Product(name=PRDS['rhel'],
                               organization=org).search()[0]
    repo = entities.Repository(id=repo_id).read()
    if is_open('BZ:1695733'):
        self.logger.info('Need to set seconds to zero because BZ:1695733')
        sync_date = datetime.utcnow().replace(second=0) + timedelta(
            seconds=delay)
    else:
        # BUG FIX: a stray trailing comma previously turned this into a
        # one-element tuple, which is not a valid sync_date value.
        sync_date = datetime.utcnow() + timedelta(seconds=delay)
    sync_plan = entities.SyncPlan(organization=org,
                                  enabled=True,
                                  interval='hourly',
                                  sync_date=sync_date).create()
    # Create and Associate sync plan with product
    sync_plan.add_products(data={'product_ids': [product.id]})
    # Verify product is not synced and doesn't have any content
    with self.assertRaises(AssertionError):
        self.validate_task_status(repo.id, max_tries=1)
    self.validate_repo_content(repo,
                               ['erratum', 'package', 'package_group'],
                               after_sync=False)
    # Wait quarter of expected time
    self.logger.info('Waiting {0} seconds to check product {1}'
                     ' was not synced'.format(delay / 4, product.name))
    sleep(delay / 4)
    # Verify product has not been synced yet
    with self.assertRaises(AssertionError):
        self.validate_task_status(repo.id, max_tries=1)
    self.validate_repo_content(repo,
                               ['erratum', 'package', 'package_group'],
                               after_sync=False)
    # Wait the rest of expected time
    self.logger.info('Waiting {0} seconds to check product {1}'
                     ' was synced'.format((delay * 3 / 4), product.name))
    sleep(delay * 3 / 4)
    # Verify product was synced successfully
    self.validate_task_status(repo.id,
                              repo_backend_id=repo.backend_identifier)
    self.validate_repo_content(repo,
                               ['erratum', 'package', 'package_group'])
def setUpClass(cls):
    """Steps required to create a real host on libvirt

    1. Creates new Organization and Location.
    2. Creates new life-cycle environment.
    3. Creates new product and rhel67 custom repository.
    4. Creates new content-view by associating rhel67 repository.
    5. Publish and promote the content-view to next environment.
    6. Search for puppet environment and associate location.
    7. Search for smart-proxy and associate location.
    8. Search for existing domain or create new otherwise. Associate org,
       location and dns proxy.
    9. Search for '192.168.100.0' network and associate org, location,
       dns/dhcp/tftp proxy, and if its not there then creates new.
    10. Search for existing compute-resource with 'libvirt' provider and
        associate org.location, and if its not there then creates
        new.
    11. Search 'Kickstart default' partition table and rhel67 OS along with
        provisioning/PXE templates.
    12. Associates org, location and OS with provisioning and PXE templates
    13. Search for x86_64 architecture
    14. Associate arch, partition table, provisioning/PXE templates with OS
    15. Search for media and associate org/location
    16. Create new host group with all required entities
    """
    super(HostTestCase, cls).setUpClass()
    # Create a new Organization and Location
    cls.org_ = entities.Organization(name=gen_string('alpha')).create()
    cls.org_name = cls.org_.name
    cls.loc = entities.Location(name=gen_string('alpha'),
                                organization=[cls.org_]).create()
    cls.loc_name = cls.loc.name
    # Create a new Life-Cycle environment
    cls.lc_env = entities.LifecycleEnvironment(
        name=gen_string('alpha'), organization=cls.org_).create()
    # Create a Product, Repository for custom RHEL6 contents
    cls.product = entities.Product(name=gen_string('alpha'),
                                   organization=cls.org_).create()
    cls.repo = entities.Repository(
        name=gen_string('alpha'),
        product=cls.product,
    ).create()
    # Increased timeout value for repo sync. BUG FIX: restore the original
    # timeout in a finally clause so a failed sync/publish cannot leave the
    # global TASK_TIMEOUT altered for the rest of the run (this mirrors
    # configure_provisioning in this file).
    cls.old_task_timeout = entity_mixins.TASK_TIMEOUT
    entity_mixins.TASK_TIMEOUT = 3600
    try:
        cls.repo.sync()
        # Create, Publish and promote CV
        cls.content_view = entities.ContentView(
            name=gen_string('alpha'), organization=cls.org_).create()
        cls.content_view.repository = [cls.repo]
        cls.content_view = cls.content_view.update(['repository'])
        cls.content_view.publish()
        cls.content_view = cls.content_view.read()
        promote(cls.content_view.version[0], cls.lc_env.id)
    finally:
        entity_mixins.TASK_TIMEOUT = cls.old_task_timeout
    # Search for puppet environment and associate location
    cls.environment = entities.Environment(
        organization=[cls.org_.id]).search()[0]
    cls.environment.location = [cls.loc]
    cls.environment = cls.environment.update(['location'])
    # Search for SmartProxy, and associate location
    cls.proxy = entities.SmartProxy().search(
        query={u'search': u'name={0}'.format(settings.server.hostname)})[0]
    cls.proxy.location = [cls.loc]
    cls.proxy = cls.proxy.update(['location'])
    cls.proxy.organization = [cls.org_]
    cls.proxy = cls.proxy.update(['organization'])
    # Search for existing domain or create new otherwise. Associate org,
    # location and dns to it
    _, _, domain = settings.server.hostname.partition('.')
    cls.domain = entities.Domain().search(
        query={u'search': u'name="{0}"'.format(domain)})
    if len(cls.domain) == 1:
        cls.domain = cls.domain[0].read()
        cls.domain.location.append(cls.loc)
        cls.domain.organization.append(cls.org_)
        cls.domain.dns = cls.proxy
        cls.domain = cls.domain.update(['dns', 'location', 'organization'])
    else:
        cls.domain = entities.Domain(
            dns=cls.proxy,
            location=[cls.loc],
            organization=[cls.org_],
        ).create()
    cls.domain_name = cls.domain.name
    # Search if subnet is defined with given network.
    # If so, just update its relevant fields otherwise,
    # Create new subnet
    network = settings.vlan_networking.subnet
    cls.subnet = entities.Subnet().search(
        query={u'search': u'network={0}'.format(network)})
    if len(cls.subnet) == 1:
        cls.subnet = cls.subnet[0]
        cls.subnet.domain = [cls.domain]
        cls.subnet.location = [cls.loc]
        cls.subnet.organization = [cls.org_]
        cls.subnet.dns = cls.proxy
        cls.subnet.dhcp = cls.proxy
        cls.subnet.ipam = 'DHCP'
        cls.subnet.tftp = cls.proxy
        cls.subnet.discovery = cls.proxy
        cls.subnet = cls.subnet.update([
            'domain',
            'discovery',
            'dhcp',
            'dns',
            'ipam',
            'location',
            'organization',
            'tftp',
        ])
    else:
        # Create new subnet
        cls.subnet = entities.Subnet(
            name=gen_string('alpha'),
            network=network,
            mask=settings.vlan_networking.netmask,
            domain=[cls.domain],
            ipam='DHCP',
            location=[cls.loc],
            organization=[cls.org_],
            dns=cls.proxy,
            dhcp=cls.proxy,
            tftp=cls.proxy,
            discovery=cls.proxy).create()
    # Search if Libvirt compute-resource already exists
    # If so, just update its relevant fields otherwise,
    # Create new compute-resource with 'libvirt' provider.
    resource_url = u'qemu+ssh://root@{0}/system'.format(
        settings.compute_resources.libvirt_hostname)
    comp_res = [
        res for res in entities.LibvirtComputeResource().search()
        if res.provider == 'Libvirt' and res.url == resource_url
    ]
    if len(comp_res) >= 1:
        cls.computeresource = entities.LibvirtComputeResource(
            id=comp_res[0].id).read()
        cls.computeresource.location.append(cls.loc)
        cls.computeresource.organization.append(cls.org_)
        cls.computeresource = cls.computeresource.update(
            ['location', 'organization'])
    else:
        # Create Libvirt compute-resource
        cls.computeresource = entities.LibvirtComputeResource(
            name=gen_string('alpha'),
            provider=u'libvirt',
            url=resource_url,
            set_console_password=False,
            display_type=u'VNC',
            location=[cls.loc.id],
            organization=[cls.org_.id],
        ).create()
    # Get the Partition table ID
    cls.ptable = entities.PartitionTable().search(
        query={u'search': u'name="{0}"'.format(DEFAULT_PTABLE)})[0]
    # Get the OS ID
    cls.os = entities.OperatingSystem().search(
        query={
            u'search': u'name="RedHat" AND (major="{0}" OR major="{1}")'.format(
                RHEL_6_MAJOR_VERSION, RHEL_7_MAJOR_VERSION)
        })[0]
    # Get the Provisioning template_ID and update with OS, Org, Location
    cls.provisioning_template = entities.ConfigTemplate().search(
        query={u'search': u'name="Satellite Kickstart Default"'})[0]
    cls.provisioning_template.operatingsystem = [cls.os]
    cls.provisioning_template.organization = [cls.org_]
    cls.provisioning_template.location = [cls.loc]
    cls.provisioning_template = cls.provisioning_template.update(
        ['location', 'operatingsystem', 'organization'])
    # Get the PXE template ID and update with OS, Org, location
    cls.pxe_template = entities.ConfigTemplate().search(
        query={u'search': u'name="Kickstart default PXELinux"'})[0]
    cls.pxe_template.operatingsystem = [cls.os]
    cls.pxe_template.organization = [cls.org_]
    cls.pxe_template.location = [cls.loc]
    cls.pxe_template = cls.pxe_template.update(
        ['location', 'operatingsystem', 'organization'])
    # Get the arch ID
    cls.arch = entities.Architecture().search(
        query={u'search': u'name="x86_64"'})[0]
    # Get the media and update its location
    cls.media = entities.Media(organization=[cls.org_]).search()[0].read()
    cls.media.location.append(cls.loc)
    cls.media.organization.append(cls.org_)
    cls.media = cls.media.update(['location', 'organization'])
    # Update the OS to associate arch, ptable, templates
    cls.os.architecture = [cls.arch]
    cls.os.ptable = [cls.ptable]
    # BUG FIX: both templates must be associated; previously the second
    # assignment to config_template overwrote the first, so only the PXE
    # template was linked to the OS.
    cls.os.config_template = [cls.provisioning_template, cls.pxe_template]
    cls.os.medium = [cls.media]
    cls.os = cls.os.update([
        'architecture',
        'config_template',
        'ptable',
        'medium',
    ])
    # Create Hostgroup
    cls.host_group = entities.HostGroup(
        architecture=cls.arch,
        domain=cls.domain.id,
        subnet=cls.subnet.id,
        lifecycle_environment=cls.lc_env.id,
        content_view=cls.content_view.id,
        location=[cls.loc.id],
        name=gen_string('alpha'),
        environment=cls.environment.id,
        puppet_proxy=cls.proxy,
        puppet_ca_proxy=cls.proxy,
        content_source=cls.proxy,
        medium=cls.media,
        operatingsystem=cls.os.id,
        organization=[cls.org_.id],
        ptable=cls.ptable.id,
    ).create()
def test_positive_sync_puppet_module_with_versions(self, capsule_vm):
    """Ensure it's possible to sync multiple versions of the same puppet
    module to the capsule

    :id: 83a0ddd6-8a6a-43a0-b169-094a2556dd28

    :customerscenario: true

    :BZ: 1365952, 1655243

    :Steps:

        1. Register a capsule
        2. Associate LCE with the capsule
        3. Sync a puppet module with multiple versions
        4. Publish a CV with one version of puppet module and
           promote it to capsule's LCE
        5. Wait for capsule synchronization to finish
        6. Publish another CV with different version of puppet module
           and promote it to capsule's LCE
        7. Wait for capsule synchronization to finish once more

    :expectedresults: Capsule was successfully synchronized, new version
        of puppet module is present on capsule

    :CaseLevel: System

    :CaseImportance: Medium
    """
    module_name = 'versioned'
    module_versions = ['2.2.2', '3.3.3']
    # Fresh org/LCE/CV/product so the test is isolated from other runs
    org = entities.Organization().create()
    lce = entities.LifecycleEnvironment(organization=org).create()
    content_view = entities.ContentView(organization=org).create()
    prod = entities.Product(organization=org).create()
    puppet_repository = entities.Repository(
        content_type=REPO_TYPE['puppet'], product=prod, url=CUSTOM_PUPPET_REPO
    ).create()
    # Associate the lifecycle environment with the capsule and verify the
    # association is visible on the capsule side
    capsule = entities.Capsule(id=capsule_vm._capsule.id).read()
    capsule.content_add_lifecycle_environment(data={'environment_id': lce.id})
    result = capsule.content_lifecycle_environments()
    assert len(result['results']) >= 1
    assert lce.id in [capsule_lce['id'] for capsule_lce in result['results']]
    puppet_repository.sync()
    # Locate the older of the two module versions on the satellite
    puppet_module_old = entities.PuppetModule().search(
        query={'search': f'name={module_name} and version={module_versions[0]}'}
    )[0]
    # Add puppet module to the CV
    entities.ContentViewPuppetModule(
        content_view=content_view, id=puppet_module_old.id
    ).create()
    content_view = content_view.read()
    assert len(content_view.puppet_module) > 0
    # Publish and promote CVV
    content_view.publish()
    content_view = content_view.read()
    assert len(content_view.version) == 1
    cvv = content_view.version[-1].read()
    promote(cvv, lce.id)
    cvv = cvv.read()
    # Library + the promoted LCE
    assert len(cvv.environment) == 2
    # Wait till capsule sync finishes (either active tasks are running or a
    # previous sync has already completed)
    sync_status = capsule.content_get_sync()
    assert len(sync_status['active_sync_tasks']) >= 1 or sync_status['last_sync_time']
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    sync_status = capsule.content_get_sync()
    last_sync_time = sync_status['last_sync_time']
    # Unassign old puppet module version from CV
    entities.ContentViewPuppetModule(
        content_view=content_view, id=content_view.puppet_module[0].id
    ).delete()
    # Assign new puppet module version
    puppet_module_new = entities.PuppetModule().search(
        query={'search': f'name={module_name} and version={module_versions[1]}'}
    )[0]
    entities.ContentViewPuppetModule(
        content_view=content_view, id=puppet_module_new.id
    ).create()
    # BUGFIX: re-read the CV before asserting — the previous read predates
    # the delete/create above, so without the re-read the assertion would
    # pass trivially against a stale client-side object.
    content_view = content_view.read()
    assert len(content_view.puppet_module) > 0
    # Publish and promote CVV
    content_view.publish()
    content_view = content_view.read()
    assert len(content_view.version) == 2
    cvv = content_view.version[-1].read()
    promote(cvv, lce.id)
    cvv = cvv.read()
    assert len(cvv.environment) == 2
    # Wait till capsule sync finishes; if no task is active, the sync must
    # have already happened (last_sync_time changed)
    sync_status = capsule.content_get_sync()
    if sync_status['active_sync_tasks']:
        for task in sync_status['active_sync_tasks']:
            entities.ForemanTask(id=task['id']).poll()
    else:
        assert sync_status['last_sync_time'] != last_sync_time
    # Verify the new module version landed on the capsule filesystem
    stored_modules = get_repo_files(PULP_PUBLISHED_PUPPET_REPOS_PATH, 'gz', capsule_vm.ip_addr)
    matching_filenames = filter(
        lambda filename: f'{module_name}-{module_versions[1]}' in filename, stored_modules
    )
    assert next(matching_filenames, None)
def test_pre_scenario_generate_errata_for_client(self):
    """Create product and repo from which the errata will be generated
    for the Satellite client or content host.

    :id: 88fd28e6-b4df-46c0-91d6-784859fd1c21

    :steps:

        1. Create Life Cycle Environment, Product and Custom Yum Repo
        2. Create custom tools, rhel repos and sync them
        3. Create content view and publish it
        4. Create activation key and add subscription.
        5. Registering Docker Content Host RHEL7
        6. Check katello agent and goferd service running on host
        7. Generate Errata by Installing Outdated/Older Packages
        8. Collect the Erratum list

    :expectedresults:

        1. The content host is created
        2. errata count, erratum list will be generated to satellite
           client/content host

    """
    org = entities.Organization().create()
    loc = entities.Location(organization=[org]).create()
    # Use the built-in Library environment rather than creating a new one
    environment = entities.LifecycleEnvironment(organization=org).search(
        query={'search': 'name=Library'})[0]
    product = entities.Product(organization=org).create()
    # Custom yum repo containing the outdated packages/errata fixtures
    custom_yum_repo = entities.Repository(product=product,
                                          content_type='yum',
                                          url=FAKE_9_YUM_REPO).create()
    product.sync()
    tools_repo, rhel_repo = self._create_custom_rhel_tools_repos(product)
    repolist = [custom_yum_repo, tools_repo, rhel_repo]
    content_view = self._publish_content_view(org=org, repolist=repolist)
    ak = entities.ActivationKey(content_view=content_view,
                                organization=org.id,
                                environment=environment).create()
    # Attach the product's subscription to the activation key
    subscription = entities.Subscription(organization=org).search(
        query={'search': 'name={}'.format(product.name)})[0]
    ak.add_subscriptions(data={'subscription_id': subscription.id})
    # Register a dockerized RHEL7 content host with the activation key;
    # dockerize() returns a {container_name: container_id} mapping
    rhel7_client = dockerize(ak_name=ak.name, distro='rhel7', org_label=org.label)
    client_container_id = list(rhel7_client.values())[0]
    client_container_name = [key for key in rhel7_client.keys()][0]
    self._host_location_update(client_container_name=client_container_name, loc=loc)
    self._install_or_update_package(client_container_id, 'katello-agent')
    self._run_goferd(client_container_id)
    # Installing outdated packages makes their errata "installable" on the host
    for package in FAKE_9_YUM_OUTDATED_PACKAGES:
        self._install_or_update_package(client_container_id, package)
    host = entities.Host().search(
        query={'search': 'activation_key={0}'.format(ak.name)})[0]
    installable_errata_count = host.content_facet_attributes[
        'errata_counts']['total']
    self.assertGreater(installable_errata_count, 1)
    # Full erratum list generated from the custom repo must match the fixture
    erratum_list = entities.Errata(repository=custom_yum_repo).search(
        query={
            'order': 'updated ASC',
            'per_page': 1000,
        })
    errata_ids = [errata.errata_id for errata in erratum_list]
    self.assertEqual(sorted(errata_ids), sorted(FAKE_9_YUM_ERRATUM))
    # Persist scenario state for the post-upgrade half of this test.
    # NOTE: the 'conten_view_id' key is misspelled but is read back verbatim
    # by test_post_scenario_errata_count_installtion — do not rename one
    # side without the other.
    scenario_dict = {
        self.__class__.__name__: {
            'rhel_client': rhel7_client,
            'activation_key': ak.name,
            'custom_repo_id': custom_yum_repo.id,
            'product_id': product.id,
            'conten_view_id': content_view.id
        }
    }
    create_dict(scenario_dict)
def test_positive_checksum_sync(self, capsule_vm):
    """Synchronize repository to capsule, update repository's checksum
    type, trigger capsule sync and make sure checksum type was updated on
    capsule

    :id: eb07bdf3-6cd8-4a2f-919b-8dfc84e16115

    :customerscenario: true

    :BZ: 1288656, 1664288, 1732066

    :expectedresults: checksum type is updated in repodata of
        corresponding repository on capsule

    :CaseLevel: System

    :CaseImportance: Critical
    """
    # Path of the repodata index relative to a published repo root
    REPOMD_PATH = 'repodata/repomd.xml'
    # Create organization, product, lce and repository with sha256 checksum
    # type
    org = entities.Organization(smart_proxy=[capsule_vm._capsule.id]).create()
    product = entities.Product(organization=org).create()
    repo = entities.Repository(
        product=product, checksum_type='sha256', download_policy='immediate'
    ).create()
    lce = entities.LifecycleEnvironment(organization=org).create()
    # Associate the lifecycle environment with the capsule
    capsule = entities.Capsule(id=capsule_vm._capsule.id).read()
    capsule.content_add_lifecycle_environment(data={'environment_id': lce.id})
    result = capsule.content_lifecycle_environments()
    assert len(result['results']) >= 1
    assert lce.id in [capsule_lce['id'] for capsule_lce in result['results']]
    # Sync, publish and promote a repo
    cv = entities.ContentView(organization=org, repository=[repo]).create()
    repo.sync()
    repo = repo.read()
    cv.publish()
    cv = cv.read()
    assert len(cv.version) == 1
    cvv = cv.version[-1].read()
    promote(cvv, lce.id)
    cvv = cvv.read()
    # Library + the promoted LCE
    assert len(cvv.environment) == 2
    # Wait till capsule sync finishes
    sync_status = capsule.content_get_sync()
    assert len(sync_status['active_sync_tasks']) >= 1 or sync_status['last_sync_time']
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    # Remember when the first sync completed so the second sync can be
    # distinguished from it below
    sync_status = capsule.content_get_sync()
    last_sync_time = sync_status['last_sync_time']
    # Verify repodata's checksum type is sha256, not sha1 on capsule
    lce_repo_path = form_repo_path(
        org=org.label, lce=lce.label, cv=cv.label, prod=product.label, repo=repo.label
    )
    # grep exits non-zero when the pattern is absent — sha1 must NOT appear
    result = ssh.command(
        f'grep -o \'checksum type="sha1"\' {lce_repo_path}/{REPOMD_PATH}',
        hostname=capsule_vm.ip_addr,
    )
    assert result.return_code != 0
    assert len(result.stdout) == 0
    result = ssh.command(
        f'grep -o \'checksum type="sha256"\' {lce_repo_path}/{REPOMD_PATH}',
        hostname=capsule_vm.ip_addr,
    )
    assert result.return_code == 0
    assert len(result.stdout) > 0
    # Update repo's checksum type to sha1
    repo.checksum_type = 'sha1'
    repo = repo.update(['checksum_type'])
    # Sync, publish, and promote repo
    repo.sync()
    cv.publish()
    cv = cv.read()
    assert len(cv.version) == 2
    # Sort by id so version[-1] is deterministically the newest CVV
    cv.version.sort(key=lambda version: version.id)
    cvv = cv.version[-1].read()
    promote(cvv, lce.id)
    cvv = cvv.read()
    assert len(cvv.environment) == 2
    # Wait till capsule sync finishes; either tasks are active or a new
    # sync already completed (last_sync_time moved forward)
    sync_status = capsule.content_get_sync()
    assert (
        len(sync_status['active_sync_tasks']) >= 1
        or sync_status['last_sync_time'] != last_sync_time
    )
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    # Verify repodata's checksum type has updated to sha1 on capsule
    result = ssh.command(
        f'grep -o \'checksum type="sha256"\' {lce_repo_path}/{REPOMD_PATH}',
        hostname=capsule_vm.ip_addr,
    )
    assert result.return_code != 0
    assert len(result.stdout) == 0
    result = ssh.command(
        f'grep -o \'checksum type="sha1"\' {lce_repo_path}/{REPOMD_PATH}',
        hostname=capsule_vm.ip_addr,
    )
    assert result.return_code == 0
    assert len(result.stdout) > 0
def test_post_scenario_errata_count_installtion(self):
    """Post-upgrade scenario that installs the package on pre-upgrade
    client remotely and then verifies if the package installed.

    :id: 88fd28e6-b4df-46c0-91d6-784859fd1c21

    :steps:

        1. Recovered pre_upgrade data for post_upgrade verification
        2. Verifying errata count has not changed on satellite
        3. Update Katello-agent and Restart goferd
        4. Verifying the errata_ids
        5. Verifying installation errata passes successfully
        6. Verifying that package installation passed successfully by remote docker
           exec

    :expectedresults:
        1. errata count, erratum list should same after satellite upgrade
        2. Installation of errata should be pass successfully
    """
    # NOTE: the method name typo ("installtion") is kept — it is part of
    # the test's external identifier.
    # Recover the state persisted by the pre-upgrade scenario; keys
    # (including the misspelled 'conten_view_id') must match the writer.
    entity_data = get_entity_data(self.__class__.__name__)
    client = entity_data.get('rhel_client')
    client_container_id = list(client.values())[0]
    custom_repo_id = entity_data.get('custom_repo_id')
    product_id = entity_data.get('product_id')
    conten_view_id = entity_data.get('conten_view_id')
    product = entities.Product(id=product_id).read()
    content_view = entities.ContentView(id=conten_view_id).read()
    custom_yum_repo = entities.Repository(id=custom_repo_id).read()
    activation_key = entity_data.get('activation_key')
    host = entities.Host().search(
        query={'search': 'activation_key={0}'.format(activation_key)})[0]
    installable_errata_count = host.content_facet_attributes[
        'errata_counts']['total']
    # Recreate tools/rhel repos post-upgrade and fold them into the CV
    tools_repo, rhel_repo = self._create_custom_rhel_tools_repos(product)
    product.sync()
    for repo in (tools_repo, rhel_repo):
        content_view.repository.append(repo)
    content_view = content_view.update(['repository'])
    content_view.publish()
    self._install_or_update_package(client_container_id, "katello-agent", update=True)
    self._run_goferd(client_container_id)
    # Errata count must have survived the upgrade
    self.assertGreater(installable_errata_count, 1)
    erratum_list = entities.Errata(repository=custom_yum_repo).search(
        query={
            'order': 'updated ASC',
            'per_page': 1000,
        })
    errata_ids = [errata.errata_id for errata in erratum_list]
    self.assertEqual(sorted(errata_ids), sorted(FAKE_9_YUM_ERRATUM))
    # Apply each erratum one by one
    for errata in FAKE_9_YUM_ERRATUM:
        host.errata_apply(data={'errata_ids': [errata]})
        installable_errata_count -= 1
    # waiting for errata count to become 0, as profile uploading take some
    # amount of time
    wait_for(lambda: self._errata_count(ak=activation_key) == 0,
             timeout=200, delay=10, logger=self.logger)
    # NOTE(review): `host` was fetched before the errata were applied;
    # this relies on content_facet_attributes reflecting current server
    # state — confirm the entity is not served from a stale cache.
    self.assertEqual(
        host.content_facet_attributes['errata_counts']['total'], 0)
    # Confirm the updated packages actually landed on the client
    for package in FAKE_9_YUM_UPDATED_PACKAGES:
        self._check_package_installed(client_container_id, package)
def test_positive_on_demand_sync(self, capsule_vm):
    """Create a repository with 'on_demand' sync, add it to lifecycle
    environment with a capsule, sync repository, examine existing packages
    on capsule, download any package, examine packages once more

    :id: ba470269-a7ad-4181-bc7c-8e17a177ca20

    :expectedresults:

        1. After initial syncing only symlinks are present on both
           satellite and capsule, no real packages were fetched.
        2. All the symlinks are pointing to non-existent files.
        3. Attempt to download package is successful
        4. Downloaded package checksum matches checksum of the source
           package

    :CaseLevel: System
    """
    repo_url = FAKE_3_YUM_REPO
    packages_count = FAKE_3_YUM_REPOS_COUNT
    # NOTE(review): package comes from FAKE_1_YUM_REPO_RPMS while the repo
    # is FAKE_3_YUM_REPO — presumably the fixtures share this RPM; verify.
    package = FAKE_1_YUM_REPO_RPMS[0]
    # Create organization, product, repository in satellite, and lifecycle
    # environment
    org = entities.Organization().create()
    prod = entities.Product(organization=org).create()
    repo = entities.Repository(
        download_policy='on_demand', mirror_on_sync=True, product=prod, url=repo_url
    ).create()
    lce = entities.LifecycleEnvironment(organization=org).create()
    # Associate the lifecycle environment with the capsule
    capsule = entities.Capsule(id=capsule_vm._capsule.id).read()
    capsule.content_add_lifecycle_environment(data={'environment_id': lce.id})
    result = capsule.content_lifecycle_environments()
    assert len(result['results']) >= 1
    assert lce.id in [capsule_lce['id'] for capsule_lce in result['results']]
    # Create a content view with the repository
    cv = entities.ContentView(organization=org, repository=[repo]).create()
    # Sync repository
    repo.sync()
    repo = repo.read()
    # Publish new version of the content view
    cv.publish()
    cv = cv.read()
    assert len(cv.version) == 1
    cvv = cv.version[-1].read()
    # Promote content view to lifecycle environment
    promote(cvv, lce.id)
    cvv = cvv.read()
    assert len(cvv.environment) == 2
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule.content_get_sync()
    assert len(sync_status['active_sync_tasks']) >= 1 or sync_status['last_sync_time']
    # Check whether the symlinks for all the packages were created on
    # satellite
    cvv_repo_path = form_repo_path(
        org=org.label, cv=cv.label, cvv=cvv.version, prod=prod.label, repo=repo.label
    )
    result = ssh.command(f'find {cvv_repo_path}/ -type l')
    assert result.return_code == 0
    # Drop empty lines from find's output
    links = {link for link in result.stdout if link}
    assert len(links) == packages_count
    # Ensure all the symlinks on satellite are broken (pointing to
    # nonexistent files) — with on_demand no real package was fetched yet
    result = ssh.command(f'find {cvv_repo_path}/ -type l ! -exec test -e {{}} \\; -print')
    assert result.return_code == 0
    broken_links = {link for link in result.stdout if link}
    assert len(broken_links) == packages_count
    assert broken_links == links
    # Wait till capsule sync finishes
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    lce_repo_path = form_repo_path(
        org=org.label, lce=lce.label, cv=cv.label, prod=prod.label, repo=repo.label
    )
    # Check whether the symlinks for all the packages were created on
    # capsule
    result = ssh.command(f'find {lce_repo_path}/ -type l', hostname=capsule_vm.ip_addr)
    assert result.return_code == 0
    links = {link for link in result.stdout if link}
    assert len(links) == packages_count
    # Ensure all the symlinks on capsule are broken (pointing to
    # nonexistent files)
    result = ssh.command(
        f'find {lce_repo_path}/ -type l ! -exec test -e {{}} \\; -print',
        hostname=capsule_vm.ip_addr,
    )
    assert result.return_code == 0
    broken_links = {link for link in result.stdout if link}
    assert len(broken_links) == packages_count
    assert broken_links == links
    # Build the URL of the repo as published on the satellite
    published_repo_url = 'http://{}{}/pulp/{}/'.format(
        settings.server.hostname,
        f':{settings.server.port}' if settings.server.port else '',
        lce_repo_path.split('http/')[1],
    )
    # Get md5 checksum of the source package (upstream repo)
    package_md5 = md5_by_url(f'{repo_url}{package}')
    # Download package from satellite and get its md5 checksum
    published_package_md5 = md5_by_url(f'{published_repo_url}{package}')
    # Assert checksums are matching
    assert package_md5 == published_package_md5
def setUpClass(cls):
    """Create a product and an org which can be re-used in tests.

    Runs once per test class; the entities are shared by all tests.
    """
    # Zero-argument super() — equivalent to the old two-argument form and
    # the idiomatic spelling for Python 3 (the file already uses f-strings).
    super().setUpClass()
    cls.org = entities.Organization().create()
    # Product lives inside the freshly created organization
    cls.product = entities.Product(organization=cls.org).create()
def test_positive_update_with_immediate_sync(self, request, capsule_vm):
    """Create a repository with on_demand download policy, associate it
    with capsule, sync repo, update download policy to immediate, sync once
    more.

    :id: 511b531d-1fbe-4d64-ae31-0f9eb6625e7f

    :customerscenario: true

    :BZ: 1315752

    :expectedresults: content was successfully synchronized - capsule
        filesystem contains valid links to packages

    :CaseLevel: System
    """
    repo_url = FAKE_1_YUM_REPO
    packages_count = FAKE_1_YUM_REPOS_COUNT
    # Create organization, product, repository in satellite, and lifecycle
    # environment
    org = entities.Organization().create()
    prod = entities.Product(organization=org).create()
    repo = entities.Repository(
        download_policy='on_demand', mirror_on_sync=True, product=prod, url=repo_url
    ).create()
    lce = entities.LifecycleEnvironment(organization=org).create()
    # Update capsule's download policy to on_demand to match repository's
    # policy
    self.update_capsule_download_policy(capsule_vm, 'on_demand')
    # Associate the lifecycle environment with the capsule
    capsule = entities.Capsule(id=capsule_vm._capsule.id).read()
    capsule.content_add_lifecycle_environment(data={'environment_id': lce.id})
    result = capsule.content_lifecycle_environments()
    assert len(result['results']) >= 1
    assert lce.id in [capsule_lce['id'] for capsule_lce in result['results']]
    # Create a content view with the repository
    cv = entities.ContentView(organization=org, repository=[repo]).create()
    # Sync repository
    repo.sync()
    repo = repo.read()
    # Publish new version of the content view
    cv.publish()
    cv = cv.read()
    assert len(cv.version) == 1
    cvv = cv.version[-1].read()
    # Promote content view to lifecycle environment
    promote(cvv, lce.id)
    cvv = cvv.read()
    assert len(cvv.environment) == 2
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule.content_get_sync()
    assert len(sync_status['active_sync_tasks']) >= 1 or sync_status['last_sync_time']
    # Wait till capsule sync finishes
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    # Update download policy to 'immediate'
    repo.download_policy = 'immediate'
    repo = repo.update(['download_policy'])
    assert repo.download_policy == 'immediate'
    # Update capsule's download policy as well
    self.update_capsule_download_policy(capsule_vm, 'immediate')
    # Make sure to revert capsule's download policy after the test as the
    # capsule is shared among other tests
    @request.addfinalizer
    def _cleanup():
        self.update_capsule_download_policy(capsule_vm, 'on_demand')
    # Sync repository once again — with 'immediate' the packages are now
    # actually fetched, so no symlink should remain broken afterwards
    repo.sync()
    repo = repo.read()
    # Publish new version of the content view
    cv.publish()
    cv = cv.read()
    assert len(cv.version) == 2
    # Sort by id so version[-1] is deterministically the newest CVV
    cv.version.sort(key=lambda version: version.id)
    cvv = cv.version[-1].read()
    # Promote content view to lifecycle environment
    promote(cvv, lce.id)
    cvv = cvv.read()
    assert len(cvv.environment) == 2
    # Assert that a task to sync lifecycle environment to the capsule
    # is started (or finished already)
    sync_status = capsule.content_get_sync()
    assert len(sync_status['active_sync_tasks']) >= 1 or sync_status['last_sync_time']
    # Check whether the symlinks for all the packages were created on
    # satellite
    cvv_repo_path = form_repo_path(
        org=org.label, cv=cv.label, cvv=cvv.version, prod=prod.label, repo=repo.label
    )
    result = ssh.command(f'find {cvv_repo_path}/ -type l')
    assert result.return_code == 0
    # Drop empty lines from find's output
    links = {link for link in result.stdout if link}
    assert len(links) == packages_count
    # Ensure there're no broken symlinks (pointing to nonexistent files) on
    # satellite
    result = ssh.command(f'find {cvv_repo_path}/ -type l ! -exec test -e {{}} \\; -print')
    assert result.return_code == 0
    broken_links = {link for link in result.stdout if link}
    assert len(broken_links) == 0
    # Wait till capsule sync finishes
    for task in sync_status['active_sync_tasks']:
        entities.ForemanTask(id=task['id']).poll()
    lce_repo_path = form_repo_path(
        org=org.label, lce=lce.label, cv=cv.label, prod=prod.label, repo=repo.label
    )
    # Check whether the symlinks for all the packages were created on
    # capsule
    result = ssh.command(f'find {lce_repo_path}/ -type l', hostname=capsule_vm.ip_addr)
    assert result.return_code == 0
    links = {link for link in result.stdout if link}
    assert len(links) == packages_count
    # Ensure there're no broken symlinks (pointing to nonexistent files) on
    # capsule
    result = ssh.command(
        f'find {lce_repo_path}/ -type l ! -exec test -e {{}} \\; -print',
        hostname=capsule_vm.ip_addr,
    )
    assert result.return_code == 0
    broken_links = {link for link in result.stdout if link}
    assert len(broken_links) == 0
def test_post_version_cv_export_import(self, request, set_importing_org, dependent_scenario_name):
    """After upgrade, content view version import and export works on the existing content
     view(that we created before the upgrade).

    :id: postupgrade-f19e4928-94db-4df6-8ce8-b5e4afe34258

    :parametrized: yes

    :steps:
        1: Export the existing content-view version.
        2: Import the existing content-view version.
        3: Delete the imported and exported content-vew, product, repo and organization.

    :expectedresults: After upgrade,
        1: Content view created before upgrade should be imported and exported successfully.
        2: Imported and Exported content view should be deleted successfully
    """
    pre_test_name = dependent_scenario_name
    # NOTE(review): trailing slash plus f'{export_base}/...' below yields a
    # double slash in paths; harmless to the shell but worth confirming.
    export_base = '/var/lib/pulp/katello-export/'
    # Look up the entities created by the pre-upgrade scenario by their
    # conventional "<pre_test_name>_<suffix>" names; register teardown
    # deletes immediately after each lookup
    org = entities.Organization().search(
        query={'search': f'name="{pre_test_name}_org"'})[0]
    request.addfinalizer(org.delete)
    product = entities.Product(organization=org).search(
        query={'search': f'name="{pre_test_name}_prod"'})[0]
    request.addfinalizer(product.delete)
    exporting_cv = entities.ContentView(organization=org).search(
        query={'search': f'name="{pre_test_name}_cv"'})[0]
    request.addfinalizer(exporting_cv.delete)
    # Export the newest version of the pre-upgrade content view
    exporting_cvv_id = max([cvv.id for cvv in exporting_cv.version])
    exporting_cvv_version = entities.ContentViewVersion(
        id=exporting_cvv_id).read().version
    ContentView.version_export({
        'export-dir': f'{export_base}',
        'id': exporting_cvv_id
    })
    # The export tarball name follows hammer's naming convention
    exported_tar = f'{export_base}/export-{exporting_cv.name}-{exporting_cvv_version}.tar'
    result = ssh.command(f"[ -f {exported_tar} ]")
    assert result.return_code == 0
    exported_packages = Package.list(
        {'content-view-version-id': exporting_cvv_id})
    assert len(exported_packages) > 0
    # Import the tarball into the fixture-provided importing organization
    importing_cv, importing_org = set_importing_org
    ContentView.version_import({
        'export-tar': f'{exported_tar}',
        'organization-id': importing_org.id
    })
    importing_cvv = importing_cv.read().version
    assert len(importing_cvv) == 1
    imported_packages = Package.list(
        {'content-view-version-id': importing_cvv[0].id})
    assert len(imported_packages) > 0
    # Imported content must match the exported content package-for-package
    assert len(exported_packages) == len(imported_packages)
    # Clean the export directory on the satellite
    ssh.command(f'rm -rf {export_base}/*')
    # Remove both CVs from their environments so the finalizer deletes
    # (org/product/cv) can succeed
    exporting_cv_json = exporting_cv.read_json()
    importing_cv_json = importing_cv.read_json()
    exporting_cv_env_id = exporting_cv_json['environments'][0]['id']
    importing_cv_env_id = importing_cv_json['environments'][0]['id']
    assert exporting_cv.delete_from_environment(exporting_cv_env_id)
    assert importing_cv.delete_from_environment(importing_cv_env_id)
def test_positive_remove_prod_promoted_cv_version_from_default_env(self):
    """Remove PROD promoted content view version from Library environment

    :id: 24911876-7c2a-4a12-a3aa-98051dfda29d

    :Steps:

        1. Create a content view
        2. Add yum repositories, puppet modules, docker repositories to CV
        3. Publish content view
        4. Promote the content view version to multiple environments
           Library -> DEV -> QE -> PROD
        5. remove the content view version from Library environment

    :expectedresults: Content view version exist only in DEV, QE, PROD and
        not in Library

    :CaseLevel: Integration
    """
    # Build the environment chain Library -> DEV -> QE -> PROD
    organization = entities.Organization().create()
    env_dev = entities.LifecycleEnvironment(organization=organization).create()
    env_qe = entities.LifecycleEnvironment(
        organization=organization, prior=env_dev
    ).create()
    env_prod = entities.LifecycleEnvironment(
        organization=organization, prior=env_qe
    ).create()
    prod = entities.Product(organization=organization).create()
    # One synced repository of each content type
    repo_yum = entities.Repository(url=FAKE_1_YUM_REPO, product=prod).create()
    repo_yum.sync()
    repo_docker = entities.Repository(
        content_type='docker',
        docker_upstream_name='busybox',
        product=prod,
        url=DOCKER_REGISTRY_HUB,
    ).create()
    repo_docker.sync()
    repo_puppet = entities.Repository(
        url=FAKE_0_PUPPET_REPO, content_type='puppet', product=prod
    ).create()
    repo_puppet.sync()
    # Fresh content view carrying the yum and docker repositories
    cv = entities.ContentView(organization=organization).create()
    cv.repository = [repo_yum, repo_docker]
    cv = cv.update(['repository'])
    # Attach an arbitrary puppet module as well
    chosen_module = random.choice(cv.available_puppet_modules()['results'])
    entities.ContentViewPuppetModule(
        author=chosen_module['author'],
        name=chosen_module['name'],
        content_view=cv,
    ).create()
    # Publishing must yield exactly one version, present only in Library
    cv.publish()
    cv = cv.read()
    self.assertEqual(len(cv.version), 1)
    cvv = cv.version[0].read()
    self.assertEqual(len(cvv.environment), 1)
    library_env = entities.LifecycleEnvironment(id=cvv.environment[0].id).read()
    self.assertEqual(library_env.name, ENVIRONMENT)
    # Promote the version through DEV, QE and PROD
    for env in (env_dev, env_qe, env_prod):
        promote(cvv, env.id)
    self.assertEqual(
        {env.id for env in (library_env, env_dev, env_qe, env_prod)},
        {env.id for env in cvv.read().environment},
    )
    # Drop the version from the Library environment only
    cv.delete_from_environment(library_env.id)
    # The version must now live exclusively in DEV, QE and PROD
    self.assertEqual(
        {env.id for env in (env_dev, env_qe, env_prod)},
        {env.id for env in cvv.read().environment},
    )
def test_positive_remove_cv_version_from_multi_env(self):
    """Remove promoted content view version from multiple environment

    :id: 18b86a68-8e6a-43ea-b95e-188fba125a26

    :Steps:

        1. Create a content view
        2. Add a yum repo and a puppet module to the content view
        3. Publish the content view
        4. Promote the content view version to multiple environments
           Library -> DEV -> QE -> STAGE -> PROD
        5. Remove content view version from QE, STAGE and PROD

    :expectedresults: Content view version exists only in Library, DEV

    :CaseLevel: Integration

    :CaseImportance: Low
    """
    # Build the environment chain Library -> DEV -> QE -> STAGE -> PROD
    organization = entities.Organization().create()
    env_dev = entities.LifecycleEnvironment(organization=organization).create()
    env_qe = entities.LifecycleEnvironment(
        organization=organization, prior=env_dev
    ).create()
    env_stage = entities.LifecycleEnvironment(
        organization=organization, prior=env_qe
    ).create()
    env_prod = entities.LifecycleEnvironment(
        organization=organization, prior=env_stage
    ).create()
    prod = entities.Product(organization=organization).create()
    # Synced yum and puppet repositories
    repo_yum = entities.Repository(url=FAKE_1_YUM_REPO, product=prod).create()
    repo_yum.sync()
    repo_puppet = entities.Repository(
        url=FAKE_0_PUPPET_REPO, content_type='puppet', product=prod
    ).create()
    repo_puppet.sync()
    # Fresh content view carrying the yum repository
    cv = entities.ContentView(organization=organization).create()
    cv.repository = [repo_yum]
    cv = cv.update(['repository'])
    # Attach an arbitrary puppet module as well
    chosen_module = random.choice(cv.available_puppet_modules()['results'])
    entities.ContentViewPuppetModule(
        author=chosen_module['author'],
        name=chosen_module['name'],
        content_view=cv,
    ).create()
    # Publishing must yield exactly one version, present only in Library
    cv.publish()
    cv = cv.read()
    self.assertEqual(len(cv.version), 1)
    cvv = cv.version[0].read()
    self.assertEqual(len(cvv.environment), 1)
    library_env = entities.LifecycleEnvironment(id=cvv.environment[0].id).read()
    self.assertEqual(library_env.name, ENVIRONMENT)
    # Promote the version through DEV, QE, STAGE and PROD
    for env in (env_dev, env_qe, env_stage, env_prod):
        promote(cvv, env.id)
    self.assertEqual(
        {env.id for env in (library_env, env_dev, env_qe, env_stage, env_prod)},
        {env.id for env in cvv.read().environment},
    )
    # Pull the version back out of QE, STAGE and PROD
    for env in (env_qe, env_stage, env_prod):
        cv.delete_from_environment(env.id)
    # Only Library and DEV must still hold the version
    self.assertEqual(
        {env.id for env in (library_env, env_dev)},
        {env.id for env in cvv.read().environment},
    )