def test_positive_update_ptable(session):
    """Add/Remove partition table from/to organization.

    :id: 75662a83-0921-45fd-a4b5-012c48bb003a

    :expectedresults: Partition table is added and then removed.

    :CaseLevel: Integration
    """
    ptable = entities.PartitionTable().create()
    org = entities.Organization().create()
    with session:
        session.organization.update(
            org.name, {'partition_tables.resources.assigned': [ptable.name]})
        org_values = session.organization.read(org.name)
        assert ptable.name in org_values['partition_tables']['resources']['assigned']
        session.organization.update(
            org.name, {'partition_tables.resources.unassigned': [ptable.name]})
        org_values = session.organization.read(org.name)
        assert ptable.name in org_values['partition_tables']['resources']['unassigned']
def setupScenario(self):
    """Create hostgroup and its dependent entities"""
    self.org = entities.Organization().create()
    self.loc = entities.Location(organization=[self.org]).create()
    self.parent_hostgroup = entities.HostGroup(
        location=[self.loc.id],
        organization=[self.org.id],
        name=self.parent_name,
    ).create()
    self.lc_env = entities.LifecycleEnvironment(
        name=gen_string('alpha'), organization=self.org).create()
    self.domain = entities.Domain(name=self.domain_name).create()
    self.architecture = entities.Architecture().create()
    self.ptable = entities.PartitionTable().create()
    self.operatingsystem = entities.OperatingSystem(
        architecture=[self.architecture],
        ptable=[self.ptable],
        name=self.os_name,
    ).create()
    self.medium = entities.Media(operatingsystem=[self.operatingsystem]).create()
    self.subnet = entities.Subnet(
        location=[self.loc], organization=[self.org], name=self.subnet_name).create()
def test_positive_create_with_ptable(self):
    """Create a hostgroup with partition table specified

    @id: f161fd59-fa38-4c4e-a641-489f754d5977

    @assert: A hostgroup is created with expected partition table assigned

    @CaseLevel: Integration
    """
    arch = entities.Architecture().create()
    ptable = entities.PartitionTable().create()
    os = entities.OperatingSystem(
        architecture=[arch],
        ptable=[ptable],
    ).create()
    hostgroup = entities.HostGroup(
        architecture=arch,
        location=[self.loc],
        operatingsystem=os,
        organization=[self.org],
        ptable=ptable,
    ).create()
    self.assertEqual(hostgroup.ptable.read().name, ptable.name)
def test_positive_create_with_os(self):
    """Create a hostgroup with operating system specified

    @id: ca443d3f-2b99-4f0e-b92e-37c3e9fcc460

    @assert: A hostgroup is created with expected operating system assigned

    @CaseLevel: Integration
    """
    arch = entities.Architecture().create()
    ptable = entities.PartitionTable().create()
    os = entities.OperatingSystem(
        architecture=[arch],
        ptable=[ptable],
    ).create()
    hostgroup = entities.HostGroup(
        architecture=arch,
        location=[self.loc],
        operatingsystem=os,
        organization=[self.org],
        ptable=ptable,
    ).create()
    self.assertEqual(hostgroup.operatingsystem.read().name, os.name)
def test_positive_update_media(self):
    """Update a hostgroup with a new media

    :id: 9b6ffbb8-0518-4900-95fd-49fc1d90a4be

    :expectedresults: A hostgroup is updated with expected media

    :CaseLevel: Integration
    """
    arch = entities.Architecture().create()
    ptable = entities.PartitionTable().create()
    os = entities.OperatingSystem(
        architecture=[arch],
        ptable=[ptable],
    ).create()
    media = entities.Media(
        operatingsystem=[os],
        location=[self.loc],
        organization=[self.org],
    ).create()
    hostgroup = entities.HostGroup(
        architecture=arch,
        location=[self.loc],
        medium=media,
        operatingsystem=os,
        organization=[self.org],
        ptable=ptable,
    ).create()
    new_media = entities.Media(
        operatingsystem=[os],
        location=[self.loc],
        organization=[self.org],
    ).create()
    hostgroup.medium = new_media
    hostgroup = hostgroup.update(['medium'])
    self.assertEqual(hostgroup.medium.read().name, new_media.name)
def test_positive_update_ptable(self):
    """Update OS partition table

    :id: 08ddbc40-dcc1-4695-b209-ba72a6a458df

    :expectedresults: OS is updated

    :CaseImportance: Critical
    """
    ptable = gen_string('alpha', 4)
    script_file = get_data_file(PARTITION_SCRIPT_DATA_FILE)
    with open(script_file, 'r') as file_contents:
        layout = file_contents.read()
    entities.PartitionTable(
        name=ptable,
        layout=layout,
        organization=[self.organization],
    ).create()
    os_name = entities.OperatingSystem().create().name
    with Session(self) as session:
        session.nav.go_to_select_org(self.organization.name)
        self.operatingsys.update(os_name, new_ptables=[ptable])
        result_obj = self.operatingsys.get_os_entities(os_name, 'ptable')
        self.assertEqual(ptable, result_obj['ptable'])
def test_positive_update_ptable(self):
    """Add and then remove partition table from organization.

    :id: 75662a83-0921-45fd-a4b5-012c48bb003a

    :expectedresults: Partition table is added and then removed.

    :CaseLevel: Integration
    """
    with Session(self.browser) as session:
        org_name = gen_string('alpha')
        make_org(session, org_name=org_name)
        for ptable_name in generate_strings_list():
            with self.subTest(ptable_name):
                # Create partition table using nailgun
                ptable = entities.PartitionTable(name=ptable_name).create()
                self.assertEqual(ptable.name, ptable_name)
                kwargs = {
                    'org_name': org_name,
                    'entity_type': 'ptables',
                    'entity_name': ptable_name,
                }
                self.assertIsNotNone(self.org.add_entity(**kwargs))
                self.assertIsNotNone(self.org.remove_entity(**kwargs))
def test_import_filtered_templates_from_git_with_negate(self, module_org):
    """Assure templates with a given filter regex are NOT pulled from git repo.

    :id: a6857454-249b-4a2e-9b53-b5d7b4eb34e3

    :Steps:
        1. Using nailgun or direct API call import the templates NOT matching
           with regex e.g: `^freebsd.*` refer to: `/apidoc/v2/template/import.html`
           using the {'negate': true} in POST body to negate the filter regex.

    :expectedresults:
        1. Assert result is {'message': 'success'}
        2. Assert templates matching the regex were not pulled.

    :CaseImportance: Medium
    """
    prefix = gen_string('alpha')
    filtered_imported_templates = entities.Template().imports(
        data={
            'repo': FOREMAN_TEMPLATE_IMPORT_URL,
            'branch': 'automation',
            'filter': 'robottelo',
            'organization_ids': [module_org.id],
            'prefix': prefix,
            'negate': True,
        })
    not_imported_count = [
        template['imported']
        for template in filtered_imported_templates['message']['templates']
    ].count(False)
    assert not_imported_count == 8
    ptemplates = entities.ProvisioningTemplate().search(
        query={'per_page': 100, 'search': 'name~jenkins', 'organization_id': module_org.id})
    assert len(ptemplates) == 6
    ptables = entities.PartitionTable().search(
        query={'per_page': 100, 'search': 'name~jenkins', 'organization_id': module_org.id})
    assert len(ptables) == 1
    jtemplates = entities.JobTemplate().search(
        query={'per_page': 100, 'search': 'name~jenkins', 'organization_id': module_org.id})
    assert len(jtemplates) == 1
    rtemplates = entities.ReportTemplate().search(
        query={'per_page': 100, 'search': 'name~jenkins', 'organization_id': module_org.id})
    assert len(rtemplates) == 1
def test_positive_end_to_end(session):
    """Perform end to end testing for organization component

    :id: 91003f52-63a6-4b0d-9b68-2b5717fd200e

    :expectedresults: All expected CRUD actions finished successfully

    :CaseLevel: Integration

    :CaseImportance: Critical
    """
    name = gen_string('alpha')
    new_name = gen_string('alpha')
    label = gen_string('alphanumeric')
    description = gen_string('alpha')
    # entities to be added and removed
    user = entities.User().create()
    media = entities.Media(
        path_=INSTALL_MEDIUM_URL % gen_string('alpha', 6), os_family='Redhat').create()
    template = entities.ProvisioningTemplate().create()
    ptable = entities.PartitionTable().create()
    domain = entities.Domain().create()
    env = entities.Environment().create()
    hostgroup = entities.HostGroup().create()
    location = entities.Location().create()
    widget_list = [
        'primary',
        'users',
        'media',
        'provisioning_templates',
        'partition_tables',
        'domains',
        'environments',
        'host_groups',
        'locations',
    ]
    with session:
        session.organization.create(
            {'name': name, 'label': label, 'description': description})
        assert session.organization.search(name)[0]['Name'] == name
        org_values = session.organization.read(name, widget_names='primary')
        assert org_values['primary']['name'] == name
        assert org_values['primary']['label'] == label
        assert org_values['primary']['description'] == description
        # add attributes
        session.organization.update(
            name,
            {
                'primary.name': new_name,
                'users.resources.assigned': [user.login],
                'media.resources.assigned': [media.name],
                'provisioning_templates.resources.assigned': [template.name],
                'partition_tables.resources.assigned': [ptable.name],
                'domains.resources.assigned': [domain.name],
                'environments.resources.assigned': [env.name],
                'host_groups.resources.assigned': [hostgroup.name],
                'locations.resources.assigned': [location.name],
            },
        )
        org_values = session.organization.read(new_name, widget_names=widget_list)
        with pytest.raises(AssertionError):
            session.organization.delete(new_name)
        assert user.login in org_values['users']['resources']['assigned']
        assert media.name in org_values['media']['resources']['assigned']
        assert template.name in org_values['provisioning_templates']['resources']['assigned']
        assert ptable.name in org_values['partition_tables']['resources']['assigned']
        assert domain.name in org_values['domains']['resources']['assigned']
        assert env.name in org_values['environments']['resources']['assigned']
        assert hostgroup.name in org_values['host_groups']['resources']['assigned']
        assert location.name in org_values['locations']['resources']['assigned']
        ptables_before_remove = len(
            org_values['partition_tables']['resources']['assigned'])
        templates_before_remove = len(
            org_values['provisioning_templates']['resources']['assigned'])
        # remove attributes
        session.organization.update(
            new_name,
            {
                'users.resources.unassigned': [user.login],
                'media.resources.unassigned': [media.name],
                'provisioning_templates.resources.unassigned': [template.name],
                'partition_tables.resources.unassigned': [ptable.name],
                'domains.resources.unassigned': [domain.name],
                'environments.resources.unassigned': [env.name],
                'host_groups.resources.unassigned': [hostgroup.name],
                'locations.resources.unassigned': [location.name],
            },
        )
        org_values = session.organization.read(new_name, widget_names=widget_list)
        assert len(org_values['users']['resources']['assigned']) == 0
        assert user.login in org_values['users']['resources']['unassigned']
        assert len(org_values['media']['resources']['assigned']) == 0
        assert media.name in org_values['media']['resources']['unassigned']
        assert len(
            org_values['partition_tables']['resources']['assigned']
        ) < ptables_before_remove
        assert len(
            org_values['provisioning_templates']['resources']['assigned']
        ) < templates_before_remove
        assert len(org_values['domains']['resources']['assigned']) == 0
        assert domain.name in org_values['domains']['resources']['unassigned']
        assert len(org_values['environments']['resources']['assigned']) == 0
        assert env.name in org_values['environments']['resources']['unassigned']
        assert len(org_values['host_groups']['resources']['assigned']) == 0
        assert hostgroup.name in org_values['host_groups']['resources']['unassigned']
        assert len(org_values['locations']['resources']['assigned']) == 0
        assert location.name in org_values['locations']['resources']['unassigned']
        # delete org
        session.organization.select(DEFAULT_ORG)
        session.organization.delete(new_name)
        assert not session.organization.search(new_name)
def setUpClass(cls):
    """Steps required to create an Atomic host on libvirt

    1. Creates new Organization and Location.
    2. Creates new life-cycle environment.
    3. Creates new product and sync RH Atomic OSTree repository.
    4. Creates new content-view by associating RH Atomic repository.
    5. Publish and promote the content-view to next environment.
    6. Search for smart-proxy and associate location.
    7. Search for existing domain or create new otherwise. Associate org,
       location and dns proxy.
    8. Search for '192.168.100.0' network and associate org, location,
       dns/dhcp/tftp proxy, and if its not there then creates new.
    9. Search for existing compute-resource with 'libvirt' provider and
       associate org.location, and if its not there then creates new.
    10. Search 'Kickstart default' partition table and RH Atomic OS along
        with PXE templates.
    11. Associates org, location and OS with provisioning and PXE templates
    12. Search for x86_64 architecture
    13. Associate arch, partition table, provisioning/PXE templates with OS
    14. Search for existing Atomic media or create new otherwise and
        associate org/location
    15. Create new host group with all required entities
    """
    super(AtomicHostTestCase, cls).setUpClass()
    # Create a new Organization and Location
    cls.org = entities.Organization().create()
    cls.org_name = cls.org.name
    cls.loc = entities.Location(organization=[cls.org]).create()
    cls.loc_name = cls.loc.name
    # Create a new Life-Cycle environment
    cls.lc_env = entities.LifecycleEnvironment(organization=cls.org).create()
    cls.rh_ah_repo = {
        'name': REPOS['rhaht']['name'],
        'product': PRDS['rhah'],
        'reposet': REPOSET['rhaht'],
        'basearch': None,
        'releasever': None,
    }
    with manifests.clone() as manifest:
        upload_manifest(cls.org.id, manifest.content)
    # Enable the RedHat repo and fetch its ID.
    cls.repo_id = enable_rhrepo_and_fetchid(
        basearch=cls.rh_ah_repo['basearch'],
        # OrgId is passed as data in API hence str
        org_id=str(cls.org.id),
        product=cls.rh_ah_repo['product'],
        repo=cls.rh_ah_repo['name'],
        reposet=cls.rh_ah_repo['reposet'],
        releasever=cls.rh_ah_repo['releasever'],
    )
    # Sync repository with custom timeout
    call_entity_method_with_timeout(
        entities.Repository(id=cls.repo_id).sync, timeout=1500)
    cls.cv = entities.ContentView(organization=cls.org).create()
    cls.cv.repository = [entities.Repository(id=cls.repo_id)]
    cls.cv = cls.cv.update(['repository'])
    cls.cv.publish()
    cls.cv = cls.cv.read()
    promote(cls.cv.version[0], cls.lc_env.id)
    # Search for SmartProxy, and associate location
    cls.proxy = entities.SmartProxy().search(
        query={u'search': u'name={0}'.format(settings.server.hostname)})[0].read()
    cls.proxy.location.append(cls.loc)
    cls.proxy.organization.append(cls.org)
    cls.proxy = cls.proxy.update(['organization', 'location'])
    # Search for existing domain or create new otherwise. Associate org,
    # location and dns to it
    _, _, domain = settings.server.hostname.partition('.')
    cls.domain = entities.Domain().search(
        query={u'search': u'name="{0}"'.format(domain)})
    if len(cls.domain) > 0:
        cls.domain = cls.domain[0].read()
        cls.domain.location.append(cls.loc)
        cls.domain.organization.append(cls.org)
        cls.domain.dns = cls.proxy
        cls.domain = cls.domain.update(['dns', 'location', 'organization'])
    else:
        cls.domain = entities.Domain(
            dns=cls.proxy,
            location=[cls.loc],
            organization=[cls.org],
        ).create()
    cls.domain_name = cls.domain.name
    # Search if subnet is defined with given network.
    # If so, just update its relevant fields otherwise,
    # Create new subnet
    network = settings.vlan_networking.subnet
    subnet = entities.Subnet().search(
        query={u'search': u'network={0}'.format(network)})
    if len(subnet) > 0:
        cls.subnet = subnet[0].read()
        cls.subnet.domain.append(cls.domain)
        cls.subnet.location.append(cls.loc)
        cls.subnet.organization.append(cls.org)
        cls.subnet.dns = cls.proxy
        cls.subnet.dhcp = cls.proxy
        cls.subnet.ipam = 'DHCP'
        cls.subnet.tftp = cls.proxy
        cls.subnet.discovery = cls.proxy
        cls.subnet = cls.subnet.update([
            'domain', 'discovery', 'dhcp', 'dns', 'ipam',
            'location', 'organization', 'tftp',
        ])
    else:
        # Create new subnet
        cls.subnet = entities.Subnet(
            name=gen_string('alpha'),
            network=network,
            mask=settings.vlan_networking.netmask,
            domain=[cls.domain],
            location=[cls.loc],
            organization=[cls.org],
            dns=cls.proxy,
            dhcp=cls.proxy,
            ipam='DHCP',
            tftp=cls.proxy,
            discovery=cls.proxy,
        ).create()
    # Search if Libvirt compute-resource already exists
    # If so, just update its relevant fields otherwise,
    # Create new compute-resource with 'libvirt' provider.
    resource_url = u'qemu+ssh://root@{0}/system'.format(
        settings.compute_resources.libvirt_hostname)
    comp_res = [
        res for res in entities.LibvirtComputeResource().search()
        if res.provider == 'Libvirt' and res.url == resource_url
    ]
    if len(comp_res) > 0:
        cls.computeresource = entities.LibvirtComputeResource(
            id=comp_res[0].id).read()
        cls.computeresource.location.append(cls.loc)
        cls.computeresource.organization.append(cls.org)
        cls.computeresource = cls.computeresource.update(
            ['location', 'organization'])
    else:
        # Create Libvirt compute-resource
        cls.computeresource = entities.LibvirtComputeResource(
            name=gen_string('alpha'),
            provider=u'libvirt',
            url=resource_url,
            set_console_password=False,
            display_type=u'VNC',
            location=[cls.loc.id],
            organization=[cls.org.id],
        ).create()
    # Get the Partition table ID
    cls.ptable = entities.PartitionTable().search(
        query={u'search': u'name="{0}"'.format(DEFAULT_PTABLE)})[0].read()
    cls.ptable.location.append(cls.loc)
    cls.ptable.organization.append(cls.org)
    cls.ptable = cls.ptable.update(['location', 'organization'])
    # Get the OS ID
    os = entities.OperatingSystem().search(query={
        u'search': u'name="RedHat_Enterprise_Linux_Atomic_Host"'})
    if len(os) > 0:
        cls.os = os[0].read()
    else:
        cls.os = entities.OperatingSystem(
            name='RedHat_Enterprise_Linux_Atomic_Host',
            family='Redhat',
            major=RHEL_7_MAJOR_VERSION,
        ).create()
    # update the provisioning templates with OS, Org and Location
    cls.templates = []
    for template_name in [DEFAULT_ATOMIC_TEMPLATE, DEFAULT_PXE_TEMPLATE]:
        template = entities.ConfigTemplate().search(
            query={u'search': u'name="{0}"'.format(template_name)})[0].read()
        template.operatingsystem.append(cls.os)
        template.organization.append(cls.org)
        template.location.append(cls.loc)
        template = template.update(
            ['location', 'operatingsystem', 'organization'])
        cls.templates.append(template)
    # Get the arch ID
    cls.arch = entities.Architecture().search(
        query={u'search': u'name="x86_64"'})[0]
    # Get the ostree installer URL
    ostree_path = settings.ostree.ostree_installer
    # Get the Media
    media = entities.Media().search(query={
        u'search': u'path={0}'.format(ostree_path)})
    if len(media) > 0:
        cls.media = media[0].read()
        cls.media.location.append(cls.loc)
        cls.media.organization.append(cls.org)
        cls.media = cls.media.update(['location', 'organization'])
    else:
        cls.media = entities.Media(
            organization=[cls.org],
            location=[cls.loc],
            os_family='Redhat',
            path_=ostree_path,
        ).create()
    # Update the OS to associate arch, ptable, templates
    cls.os.architecture = [cls.arch]
    cls.os.ptable = [cls.ptable]
    cls.os.config_template = cls.templates
    cls.os.medium = [cls.media]
    cls.os = cls.os.update([
        'architecture', 'config_template', 'ptable', 'medium',
    ])
    # Create Hostgroup
    cls.host_group = entities.HostGroup(
        architecture=cls.arch,
        domain=cls.domain.id,
        subnet=cls.subnet.id,
        lifecycle_environment=cls.lc_env.id,
        content_view=cls.cv.id,
        location=[cls.loc.id],
        name=gen_string('alpha'),
        medium=cls.media,
        operatingsystem=cls.os.id,
        organization=[cls.org.id],
        ptable=cls.ptable.id,
    ).create()
def setUpClass(cls):
    """Steps required to create a real host on libvirt

    1. Creates new Organization and Location.
    2. Search 'Kickstart default' partition table and OS along with
       provisioning/PXE templates.
    3. Associates org, location and OS with provisioning and PXE templates
    4. Search for x86_64 architecture
    5. Associate arch, partition table, provisioning/PXE templates with OS
    6. Find and specify proper Repo URL for OS distribution folder
    7. Creates new life-cycle environment.
    8. Creates new product and OS custom repository.
    9. Creates new content-view and associate with created repository.
    10. Publish and promote the content-view to next environment.
    11. Search for puppet environment and associate location.
    12. Search for smart-proxy and associate organization/location.
    13. Search for existing domain or create new otherwise. Associate org,
        location and dns proxy.
    14. Search for '192.168.100.0' network and associate org, location,
        dns/dhcp/tftp proxy, and if its not there then creates new.
    15. Search for existing compute-resource with 'libvirt' provider and
        associate org.location, and if its not there then creates new.
    16. Create new host group with all required entities
    """
    super(LibvirtHostTestCase, cls).setUpClass()
    # Create a new Organization and Location
    cls.org_ = entities.Organization(name=gen_string('alpha')).create()
    cls.org_name = cls.org_.name
    cls.loc = entities.Location(
        name=gen_string('alpha'), organization=[cls.org_]).create()
    cls.loc_name = cls.loc.name
    # Get the Partition table ID
    cls.ptable = entities.PartitionTable().search(
        query={u'search': u'name="{0}"'.format(DEFAULT_PTABLE)})[0]
    # Get the OS ID
    cls.os = entities.OperatingSystem().search(query={
        u'search': u'name="RedHat" AND (major="{0}" OR major="{1}")'.format(
            RHEL_6_MAJOR_VERSION, RHEL_7_MAJOR_VERSION)
    })[0].read()
    # Get the templates and update with OS, Org, Location
    cls.templates = []
    for template_name in [
        'Kickstart default PXELinux',
        'Discovery Red Hat kexec',
        'Kickstart default iPXE',
        'Satellite Kickstart Default',
        'Satellite Kickstart Default Finish',
        'Satellite Kickstart Default User Data',
    ]:
        template = entities.ConfigTemplate().search(
            query={u'search': u'name="{}"'.format(template_name)})[0].read()
        template.operatingsystem.append(cls.os)
        template.organization.append(cls.org_)
        template.location.append(cls.loc)
        template = template.update(
            ['location', 'operatingsystem', 'organization'])
        cls.templates.append(template)
    # Get the arch ID
    cls.arch = entities.Architecture().search(
        query={u'search': u'name="x86_64"'})[0]
    # Update the OS to associate arch, ptable, templates
    cls.os.architecture = [cls.arch]
    cls.os.ptable = [cls.ptable]
    cls.os.config_template = cls.templates
    cls.os = cls.os.update(['architecture', 'config_template', 'ptable'])
    # Check what OS was found to use correct media
    if cls.os.major == str(RHEL_6_MAJOR_VERSION):
        os_distr_url = settings.rhel6_os
    elif cls.os.major == str(RHEL_7_MAJOR_VERSION):
        os_distr_url = settings.rhel7_os
    else:
        raise ValueError('Proposed RHEL version is not supported')
    # Create a new Life-Cycle environment
    cls.lc_env = entities.LifecycleEnvironment(
        name=gen_string('alpha'), organization=cls.org_).create()
    # Create a Product and Repository for OS distribution content
    cls.product = entities.Product(
        name=gen_string('alpha'), organization=cls.org_).create()
    cls.repo = entities.Repository(
        name=gen_string('alpha'), product=cls.product, url=os_distr_url).create()
    # Increased timeout value for repo sync
    cls.old_task_timeout = entity_mixins.TASK_TIMEOUT
    entity_mixins.TASK_TIMEOUT = 3600
    cls.repo.sync()
    # Create, Publish and promote CV
    cls.content_view = entities.ContentView(
        name=gen_string('alpha'), organization=cls.org_).create()
    cls.content_view.repository = [cls.repo]
    cls.content_view = cls.content_view.update(['repository'])
    cls.content_view.publish()
    cls.content_view = cls.content_view.read()
    promote(cls.content_view.version[0], cls.lc_env.id)
    entity_mixins.TASK_TIMEOUT = cls.old_task_timeout
    # Search for puppet environment and associate location
    cls.environment = entities.Environment(
        organization=[cls.org_.id]).search()[0]
    cls.environment.location = [cls.loc]
    cls.environment = cls.environment.update(['location'])
    # Search for SmartProxy, and associate organization/location
    cls.proxy = entities.SmartProxy().search(
        query={u'search': u'name={0}'.format(settings.server.hostname)})[0].read()
    cls.proxy.location.append(cls.loc)
    cls.proxy.organization.append(cls.org_)
    cls.proxy = cls.proxy.update(['location', 'organization'])
    # Search for existing domain or create new otherwise. Associate org,
    # location and dns to it
    _, _, domain = settings.server.hostname.partition('.')
    domain = entities.Domain().search(
        query={u'search': u'name="{0}"'.format(domain)})
    if len(domain) > 0:
        cls.domain = domain[0].read()
        cls.domain.location.append(cls.loc)
        cls.domain.organization.append(cls.org_)
        cls.domain.dns = cls.proxy
        cls.domain = cls.domain.update(['dns', 'location', 'organization'])
    else:
        cls.domain = entities.Domain(
            dns=cls.proxy, location=[cls.loc], organization=[cls.org_]).create()
    cls.domain_name = cls.domain.name
    # Search if subnet is defined with given network.
    # If so, just update its relevant fields otherwise,
    # Create new subnet
    network = settings.vlan_networking.subnet
    subnet = entities.Subnet().search(
        query={u'search': u'network={0}'.format(network)})
    if len(subnet) > 0:
        cls.subnet = subnet[0].read()
        cls.subnet.domain.append(cls.domain)
        cls.subnet.location.append(cls.loc)
        cls.subnet.organization.append(cls.org_)
        cls.subnet.dns = cls.proxy
        cls.subnet.dhcp = cls.proxy
        cls.subnet.ipam = 'DHCP'
        cls.subnet.tftp = cls.proxy
        cls.subnet.discovery = cls.proxy
        cls.subnet = cls.subnet.update([
            'domain', 'discovery', 'dhcp', 'dns', 'ipam',
            'location', 'organization', 'tftp',
        ])
    else:
        # Create new subnet
        cls.subnet = entities.Subnet(
            name=gen_string('alpha'),
            network=network,
            mask=settings.vlan_networking.netmask,
            domain=[cls.domain],
            ipam='DHCP',
            location=[cls.loc],
            organization=[cls.org_],
            dns=cls.proxy,
            dhcp=cls.proxy,
            tftp=cls.proxy,
            discovery=cls.proxy,
        ).create()
    # Search if Libvirt compute-resource already exists
    # If so, just update its relevant fields otherwise,
    # Create new compute-resource with 'libvirt' provider.
    resource_url = u'qemu+ssh://root@{0}/system'.format(
        settings.compute_resources.libvirt_hostname)
    comp_res = [
        res for res in entities.LibvirtComputeResource().search()
        if (res.provider == FOREMAN_PROVIDERS['libvirt']
            and res.url == resource_url)
    ]
    if len(comp_res) > 0:
        cls.computeresource = entities.LibvirtComputeResource(
            id=comp_res[0].id).read()
        cls.computeresource.location.append(cls.loc)
        cls.computeresource.organization.append(cls.org_)
        cls.computeresource = cls.computeresource.update(
            ['location', 'organization'])
    else:
        # Create Libvirt compute-resource
        cls.computeresource = entities.LibvirtComputeResource(
            name=gen_string('alpha'),
            provider=FOREMAN_PROVIDERS['libvirt'],
            url=resource_url,
            set_console_password=False,
            display_type=u'VNC',
            location=[cls.loc.id],
            organization=[cls.org_.id],
        ).create()
    cls.resource = u'{0} (Libvirt)'.format(cls.computeresource.name)
    cls.puppet_env = entities.Environment(
        location=[cls.loc], organization=[cls.org_]).create(True)
    cls.root_pwd = gen_string('alpha', 15)
    # Create Hostgroup
    cls.host_group = entities.HostGroup(
        architecture=cls.arch,
        domain=cls.domain.id,
        subnet=cls.subnet.id,
        lifecycle_environment=cls.lc_env.id,
        content_view=cls.content_view.id,
        location=[cls.loc.id],
        name=gen_string('alpha'),
        environment=cls.environment.id,
        puppet_proxy=cls.proxy,
        puppet_ca_proxy=cls.proxy,
        content_source=cls.proxy,
        operatingsystem=cls.os.id,
        organization=[cls.org_.id],
        ptable=cls.ptable.id,
    ).create()
def test_positive_create_by_type():
    """Create entities of different types and check audit logs for these
    events using entity type as search criteria

    :id: 6c7ea7fc-6728-447f-9655-26fe0a2881bc

    :customerscenario: true

    :expectedresults: Audit logs contain corresponding entries per each
        create event

    :BZ: 1426742, 1492668, 1492696

    :CaseImportance: Medium
    """
    for entity_item in [
        {'entity': entities.Architecture()},
        {
            'entity': entities.AuthSourceLDAP(),
            'entity_type': 'auth_source',
            'value_template': 'LDAP-{entity.name}',
        },
        {'entity': entities.ComputeProfile(), 'entity_type': 'compute_profile'},
        {
            'entity': entities.LibvirtComputeResource(),
            'entity_type': 'compute_resource',
            'value_template': '{entity.name} (Libvirt)',
        },
        {'entity': entities.ConfigGroup(), 'entity_type': 'config_group'},
        {'entity': entities.Domain()},
        {'entity': entities.Host()},
        {'entity': entities.HostGroup()},
        {
            'entity': entities.Image(
                compute_resource=entities.LibvirtComputeResource().create())
        },
        {'entity': entities.Location()},
        {'entity': entities.Media(), 'entity_type': 'medium'},
        {'entity': entities.Organization()},
        {
            'entity': entities.OperatingSystem(),
            'entity_type': 'os',
            'value_template': '{entity.name} {entity.major}',
        },
        {'entity': entities.PartitionTable(), 'entity_type': 'ptable'},
        {'entity': entities.PuppetClass()},
        {'entity': entities.Role()},
        {
            'entity': entities.Subnet(),
            'value_template': '{entity.name} ({entity.network}/{entity.cidr})',
        },
        {
            'entity': entities.ProvisioningTemplate(),
            'entity_type': 'provisioning_template',
        },
        {'entity': entities.User(), 'value_template': '{entity.login}'},
        {'entity': entities.UserGroup()},
        {'entity': entities.ContentView(), 'entity_type': 'katello/content_view'},
        {
            'entity': entities.LifecycleEnvironment(),
            'entity_type': 'katello/kt_environment',
        },
        {'entity': entities.ActivationKey(), 'entity_type': 'katello/activation_key'},
        {'entity': entities.HostCollection(), 'entity_type': 'katello/host_collection'},
        {'entity': entities.Product(), 'entity_type': 'katello/product'},
        {
            'entity': entities.GPGKey(),
            'entity_type': 'katello/gpg_key',
            'value_template': 'content credential (gpg_key - {entity.name})',
        },
        {
            'entity': entities.SyncPlan(organization=entities.Organization(id=1)),
            'entity_type': 'katello/sync_plan',
        },
    ]:
        created_entity = entity_item['entity'].create()
        entity_type = entity_item.get(
            'entity_type', created_entity.__class__.__name__.lower())
        value_template = entity_item.get('value_template', '{entity.name}')
        entity_value = value_template.format(entity=created_entity)
        audits = entities.Audit().search(query={'search': f'type={entity_type}'})
        entity_audits = [
            entry for entry in audits if entry.auditable_name == entity_value
        ]
        assert entity_audits, (
            f'audit not found by name "{entity_value}" for entity: '
            f'{created_entity.__class__.__name__.lower()}'
        )
        audit = entity_audits[0]
        assert audit.auditable_id == created_entity.id
        assert audit.action == 'create'
        assert audit.version == 1
def test_positive_create_by_type(self):
    """Create entities of different types and check audit logs for these
    events using entity type as search criteria

    :id: 6c7ea7fc-6728-447f-9655-26fe0a2881bc

    :customerscenario: true

    :expectedresults: Audit logs contain corresponding entries per each
        create event

    :BZ: 1426742, 1492668, 1492696

    :CaseImportance: Critical
    """
    for entity_item in [
        {'entity': entities.Architecture()},
        {
            'entity': entities.AuthSourceLDAP(),
            'entity_type': 'auth_source',
            'value_template': 'LDAP-{entity.name}',
        },
        {'entity': entities.ComputeProfile(), 'entity_type': 'compute_profile'},
        {
            'entity': entities.LibvirtComputeResource(),
            'entity_type': 'compute_resource',
            'value_template': '{entity.name} (Libvirt)',
        },
        {'entity': entities.ConfigGroup(), 'entity_type': 'config_group'},
        {'entity': entities.Domain()},
        {'entity': entities.Host()},
        {'entity': entities.HostGroup()},
        {'entity': entities.Image(
            compute_resource=entities.LibvirtComputeResource().create())},
        {'entity': entities.Location()},
        {'entity': entities.Media(), 'entity_type': 'medium'},
        {'entity': entities.Organization()},
        {
            'entity': entities.OperatingSystem(),
            'entity_type': 'os',
            'value_template': '{entity.name} {entity.major}',
        },
        {'entity': entities.PartitionTable(), 'entity_type': 'ptable'},
        {'entity': entities.PuppetClass()},
        {'entity': entities.Role()},
        {
            'entity': entities.Subnet(),
            'value_template': '{entity.name} ({entity.network}/{entity.cidr})',
        },
        {'entity': entities.ProvisioningTemplate(), 'entity_type': 'template'},
        {'entity': entities.User(), 'value_template': '{entity.login}'},
        {'entity': entities.UserGroup()},
    ]:
        created_entity = entity_item['entity'].create()
        entity_type = entity_item.get(
            'entity_type', created_entity.__class__.__name__.lower())
        value_template = entity_item.get('value_template', '{entity.name}')
        entity_value = value_template.format(entity=created_entity)
        audits = entities.Audit().search(
            query={'search': 'type={0}'.format(entity_type)})
        entity_audits = [
            entry for entry in audits if entry.auditable_name == entity_value
        ]
        if not entity_audits:
            self.fail('audit not found by name "{}"'.format(entity_value))
        audit = entity_audits[0]
        self.assertEqual(audit.auditable_id, created_entity.id)
        self.assertEqual(audit.action, 'create')
        self.assertEqual(audit.version, 1)
def configure_provisioning(org=None, loc=None, compute=False):
    """Create and configure org, loc, product, repo, cv, env. Update proxy,
    domain, subnet, compute resource, provision templates and medium with
    previously created entities and create a hostgroup using all mentioned
    entities.

    :param org: Default Organization that should be used in both host
        discovering and host provisioning procedures
    :param loc: Default Location that should be used in both host
        discovering and host provisioning procedures
    :return: Dictionary with names of created entities that can be re-used
        further in provisioning or validation procedure (e.g. hostgroup or
        domain)
    """
    # Create new organization and location in case they were not passed
    if org is None:
        org = entities.Organization().create()
    if loc is None:
        loc = entities.Location(organization=[org]).create()
    # Create a new Life-Cycle environment
    lc_env = entities.LifecycleEnvironment(organization=org).create()
    # Create a Product, Repository for custom RHEL6 contents
    product = entities.Product(organization=org).create()
    repo = entities.Repository(product=product, url=settings.rhel7_os).create()
    # Increased timeout value for repo sync
    try:
        old_task_timeout = entity_mixins.TASK_TIMEOUT
        entity_mixins.TASK_TIMEOUT = 3600
        repo.sync()
        # Create, Publish and promote CV
        content_view = entities.ContentView(organization=org).create()
        content_view.repository = [repo]
        content_view = content_view.update(['repository'])
        content_view.publish()
        content_view = content_view.read()
        promote(content_view.version[0], lc_env.id)
    finally:
        entity_mixins.TASK_TIMEOUT = old_task_timeout
    # Search for puppet environment and associate location
    environment = entities.Environment(organization=[org.id]).search()[0].read()
    environment.location.append(loc)
    environment = environment.update(['location'])
    # Search for SmartProxy, and associate location
    proxy = entities.SmartProxy().search(
        query={u'search': u'name={0}'.format(settings.server.hostname)})
    proxy = proxy[0].read()
    proxy.location.append(loc)
    proxy = proxy.update(['location'])
    proxy.organization.append(org)
    proxy = proxy.update(['organization'])
    # Search for existing domain or create new otherwise. Associate org,
    # location and dns to it
    _, _, domain = settings.server.hostname.partition('.')
    domain = entities.Domain().search(
        query={u'search': u'name="{0}"'.format(domain)})
    if len(domain) == 1:
        domain = domain[0].read()
        domain.location.append(loc)
        domain.organization.append(org)
        domain.dns = proxy
        domain = domain.update(['dns', 'location', 'organization'])
    else:
        domain = entities.Domain(
            dns=proxy, location=[loc], organization=[org]).create()
    # Search if subnet is defined with given network.
    # If so, just update its relevant fields otherwise,
    # Create new subnet
    network = settings.vlan_networking.subnet
    subnet = entities.Subnet().search(
        query={u'search': u'network={0}'.format(network)})
    if len(subnet) == 1:
        subnet = subnet[0].read()
        subnet.domain = [domain]
        subnet.location.append(loc)
        subnet.organization.append(org)
        subnet.dns = proxy
        subnet.dhcp = proxy
        subnet.tftp = proxy
        subnet.discovery = proxy
        subnet = subnet.update([
            'domain', 'dhcp', 'tftp', 'dns', 'discovery',
            'location', 'organization',
        ])
    else:
        # Create new subnet
        subnet = entities.Subnet(
            network=network,
            mask=settings.vlan_networking.netmask,
            domain=[domain],
            location=[loc],
            organization=[org],
            dns=proxy,
            dhcp=proxy,
            tftp=proxy,
            discovery=proxy,
        ).create()
    # Search if Libvirt compute-resource already exists
    # If so, just update its relevant fields otherwise,
    # Create new compute-resource with 'libvirt' provider.
    # compute boolean is added to not block existing tests that depend on
    # Libvirt resource and use this same functionality for all CR's.
    if compute is False:
        resource_url = u'qemu+ssh://root@{0}/system'.format(
            settings.compute_resources.libvirt_hostname)
        comp_res = [
            res for res in entities.LibvirtComputeResource().search()
            if res.provider == 'Libvirt' and res.url == resource_url
        ]
        if len(comp_res) > 0:
            computeresource = entities.LibvirtComputeResource(
                id=comp_res[0].id).read()
            computeresource.location.append(loc)
            computeresource.organization.append(org)
            computeresource = computeresource.update(
                ['location', 'organization'])
        else:
            # Create Libvirt compute-resource
            computeresource = entities.LibvirtComputeResource(
                provider=u'libvirt',
                url=resource_url,
                set_console_password=False,
                display_type=u'VNC',
                location=[loc.id],
                organization=[org.id],
            ).create()
    # Get the Partition table ID
    ptable = entities.PartitionTable().search(
        query={u'search': u'name="{0}"'.format(DEFAULT_PTABLE)})[0].read()
    # Get the OS ID
    os = entities.OperatingSystem().search(query={
        u'search': u'name="RedHat" AND (major="{0}" OR major="{1}")'.format(
            RHEL_6_MAJOR_VERSION, RHEL_7_MAJOR_VERSION)
    })[0].read()
    # Get the Provisioning template_ID and update with OS, Org, Location
    provisioning_template = entities.ConfigTemplate().search(
        query={u'search': u'name="{0}"'.format(DEFAULT_TEMPLATE)})
    provisioning_template = provisioning_template[0].read()
    provisioning_template.operatingsystem.append(os)
    provisioning_template.organization.append(org)
    provisioning_template.location.append(loc)
    provisioning_template = provisioning_template.update(
        ['location', 'operatingsystem', 'organization'])
    # Get the PXE template ID and update with OS, Org, location
    pxe_template = entities.ConfigTemplate().search(
        query={u'search': u'name="{0}"'.format(DEFAULT_PXE_TEMPLATE)})
    pxe_template = pxe_template[0].read()
    pxe_template.operatingsystem.append(os)
    pxe_template.organization.append(org)
    pxe_template.location.append(loc)
    pxe_template = pxe_template.update(
        ['location', 'operatingsystem', 'organization'])
    # Get the arch ID
    arch = entities.Architecture().search(
        query={u'search': u'name="x86_64"'})[0].read()
    # Update the OS to associate arch, ptable, templates
    os.architecture.append(arch)
    os.ptable.append(ptable)
    os.config_template.append(provisioning_template)
    os.config_template.append(pxe_template)
    os = os.update(['architecture', 'config_template', 'ptable'])
    # Create Hostgroup
    host_group = entities.HostGroup(
        architecture=arch,
        domain=domain.id,
        subnet=subnet.id,
        lifecycle_environment=lc_env.id,
        content_view=content_view.id,
        location=[loc.id],
        environment=environment.id,
        puppet_proxy=proxy,
        puppet_ca_proxy=proxy,
        content_source=proxy,
        root_pass=gen_string('alphanumeric'),
        operatingsystem=os.id,
        organization=[org.id],
        ptable=ptable.id,
    ).create()
    return {
        'host_group': host_group.name,
        'domain': domain.name,
        'environment': environment.name,
        'ptable': ptable.name,
    }
def test_positive_update_by_type(self):
    """Update entities of different types and check audit logs for these
    events using entity type and performed action as search criteria

    :id: fef54686-4c13-4f36-a616-51dc9b58be19

    :expectedresults: Audit logs contain corresponding entries per each
        update event
    """
    with Session(self):
        for entity_item in [
            {'entity': entities.Architecture(), 'entity_type': 'architecture'},
            {'entity': entities.ConfigGroup(), 'entity_type': 'config_group'},
            {'entity': entities.Domain(), 'entity_type': 'domain'},
            {'entity': entities.HostGroup(), 'entity_type': 'hostgroup'},
            {'entity': entities.Location(), 'entity_type': 'location'},
            {'entity': entities.PartitionTable(), 'entity_type': 'ptable'},
            {'entity': entities.Role(), 'entity_type': 'role'},
            {'entity': entities.ProvisioningTemplate(), 'entity_type': 'template'},
            {'entity': entities.UserGroup(), 'entity_type': 'usergroup'},
        ]:
            entity = entity_item['entity'].create()
            name = entity.name
            new_name = gen_string('alpha')
            entity.name = new_name
            entity.update(['name'])
            self.audit.filter('type={} and action=update'.format(
                entity_item['entity_type']))
            result = self.audit.get_last_entry()
            self.assertIn('updated', result['full_statement'])
            self.assertEqual(result['entity_name'], name)
            self.assertEqual(
                result['update_list'][0],
                'Name changed from {} to {}'.format(name, new_name),
            )
def test_positive_VM_import(session, module_ca_cert, module_org, module_loc, rhev_data, version):
    """Import an existing VM as a Host

    :id: 47aea4b7-9258-4863-8966-9a0bc9e94116

    :parametrized: yes

    :expectedresults: VM is shown as Host in Foreman

    :CaseLevel: Integration

    :CaseImportance: Medium

    :BZ: 1636067
    """
    # create entities for hostgroup
    default_loc_id = (
        entities.Location().search(query={'search': 'name="{}"'.format(DEFAULT_LOC)})[0].id
    )
    entities.SmartProxy(id=1, location=[default_loc_id, module_loc.id]).update()
    domain = entities.Domain(organization=[module_org.id], location=[module_loc]).create()
    subnet = entities.Subnet(
        organization=[module_org.id], location=[module_loc], domain=[domain]
    ).create()
    architecture = entities.Architecture().create()
    ptable = entities.PartitionTable(organization=[module_org.id], location=[module_loc]).create()
    operatingsystem = entities.OperatingSystem(
        architecture=[architecture], ptable=[ptable]
    ).create()
    medium = entities.Media(
        organization=[module_org.id], location=[module_loc], operatingsystem=[operatingsystem]
    ).create()
    le = (
        entities.LifecycleEnvironment(name="Library", organization=module_org.id)
        .search()[0]
        .read()
        .id
    )
    cv = entities.ContentView(organization=[module_org.id]).create()
    cv.publish()
    # create hostgroup
    hostgroup_name = gen_string('alpha')
    entities.HostGroup(
        name=hostgroup_name,
        architecture=architecture,
        domain=domain,
        subnet=subnet,
        location=[module_loc.id],
        medium=medium,
        operatingsystem=operatingsystem,
        organization=[module_org],
        ptable=ptable,
        lifecycle_environment=le,
        content_view=cv,
        content_source=1,
    ).create()
    name = gen_string('alpha')
    with session:
        session.computeresource.create(
            {
                'name': name,
                'provider': FOREMAN_PROVIDERS['rhev'],
                'provider_content.url': rhev_data['rhev_url'],
                'provider_content.user': rhev_data['username'],
                'provider_content.password': rhev_data['password'],
                'provider_content.api4': version,
                'provider_content.datacenter.value': rhev_data['datacenter'],
                'provider_content.certification_authorities': module_ca_cert,
                'locations.resources.assigned': [module_loc.name],
            }
        )
        session.hostgroup.update(hostgroup_name, {'host_group.deploy': name + " (RHV)"})
        session.computeresource.vm_import(
            name, rhev_data['vm_name'], hostgroup_name, module_loc.name
        )
        assert session.host.search(rhev_data['vm_name']) is not None
    # disassociate the host so the corresponding VM doesn't get removed from the CR on host delete
    entities.Host().search(query={'search': 'name~{}'.format(rhev_data['vm_name'])})[
        0
    ].disassociate()
    entities.Host(name=rhev_data['vm_name']).search()[0].delete()
def test_inherit_puppetclass(self):
    """A Host created from a HostGroup entity with a PuppetClass assigned to
    it should inherit such puppet class information under the
    'all_puppetclasses' field

    :id: 7b840f3d-413c-40bb-9a7d-cd9dad3c0737

    :expectedresults: Host inherited 'all_puppetclasses' details from the
        HostGroup that was used for such Host create procedure

    :BZ: 1107708, 1222118, 1487586

    :CaseLevel: System
    """
    # Creating entities like organization, content view and lifecycle_env
    # with plain alpha (not UTF-8) names for easier interaction with the
    # puppet environment further in the test
    org = entities.Organization(name=gen_string('alpha')).create()
    location = entities.Location(organization=[org]).create()
    # Creating puppet repository with puppet module assigned to it
    product = entities.Product(organization=org).create()
    puppet_repo = entities.Repository(content_type='puppet', product=product).create()
    # Working with 'ntp' module as we know for sure that it contains at
    # least a few puppet classes
    with open(get_data_file(PUPPET_MODULE_NTP_PUPPETLABS), 'rb') as handle:
        puppet_repo.upload_content(files={'content': handle})
    content_view = entities.ContentView(name=gen_string('alpha'), organization=org).create()
    result = content_view.available_puppet_modules()['results']
    assert len(result) == 1
    entities.ContentViewPuppetModule(
        author=result[0]['author'], name=result[0]['name'], content_view=content_view
    ).create()
    content_view.publish()
    content_view = content_view.read()
    lc_env = entities.LifecycleEnvironment(name=gen_string('alpha'), organization=org).create()
    promote(content_view.version[0], lc_env.id)
    content_view = content_view.read()
    assert len(content_view.version) == 1
    assert len(content_view.puppet_module) == 1
    # Form environment name variable for our test
    env_name = f'KT_{org.name}_{lc_env.name}_{content_view.name}_{content_view.id}'
    # Get all environments for current organization.
    # We have two environments (one created after publishing and one more
    # was created after promotion), so we need to select promoted one
    environments = entities.Environment().search(query={'organization_id': org.id})
    assert len(environments) == 2
    environments = [
        environment for environment in environments if environment.name == env_name
    ]
    assert len(environments) == 1
    environment = environments[0].read()
    environment.location = [location]
    environment.update()
    # Create a host group and its dependencies.
    mac = entity_fields.MACAddressField().gen_value()
    root_pass = entity_fields.StringField(length=(8, 30)).gen_value()
    domain = entities.Domain().create()
    architecture = entities.Architecture().create()
    ptable = entities.PartitionTable().create()
    operatingsystem = entities.OperatingSystem(
        architecture=[architecture], ptable=[ptable]).create()
    medium = entities.Media(operatingsystem=[operatingsystem]).create()
    hostgroup = entities.HostGroup(
        architecture=architecture,
        domain=domain,
        environment=environment,
        location=[location.id],
        medium=medium,
        name=gen_string('alpha'),
        operatingsystem=operatingsystem,
        organization=[org.id],
        ptable=ptable,
    ).create()
    assert len(hostgroup.read_json()['all_puppetclasses']) == 0
    # Get puppet class id for ntp module
    response = client.get(
        environment.path('self') + '/puppetclasses',
        auth=get_credentials(),
        verify=False,
    )
    response.raise_for_status()
    results = response.json()['results']
    puppet_class_id = results['ntp'][0]['id']
    # Assign puppet class
    client.post(
        hostgroup.path('self') + '/puppetclass_ids',
        data={'puppetclass_id': puppet_class_id},
        auth=get_credentials(),
        verify=False,
    ).raise_for_status()
    hostgroup_attrs = hostgroup.read_json()
    assert len(hostgroup_attrs['all_puppetclasses']) == 1
    assert hostgroup_attrs['all_puppetclasses'][0]['name'] == 'ntp'
    # Create Host entity using HostGroup
    host = entities.Host(
        hostgroup=hostgroup,
        mac=mac,
        root_pass=root_pass,
        environment=environment,
        location=location,
        organization=org,
        content_facet_attributes={
            'content_view_id': content_view.id,
            'lifecycle_environment_id': lc_env.id,
        },
        name=gen_string('alpha'),
    ).create(False)
    host_attrs = host.read_json()
    assert len(host_attrs['all_puppetclasses']) == 1
    assert host_attrs['all_puppetclasses'][0]['name'] == 'ntp'
def test_positive_create_with_properties(self, module_org, module_location):
    """Create a hostgroup with properties

    :id: 528afd01-356a-4082-9e88-a5b2a715a792

    :expectedresults: A hostgroup is created with expected properties,
        updated and deleted

    :CaseLevel: Integration

    :CaseImportance: High
    """
    env = entities.Environment(
        location=[module_location], organization=[module_org]).create()
    parent_hostgroup = entities.HostGroup(
        location=[module_location], organization=[module_org]).create()
    arch = entities.Architecture().create()
    ptable = entities.PartitionTable().create()
    os = entities.OperatingSystem(architecture=[arch], ptable=[ptable]).create()
    media = entities.Media(
        operatingsystem=[os], location=[module_location], organization=[module_org]).create()
    proxy = entities.SmartProxy().search(
        query={'search': f'url = https://{settings.server.hostname}:9090'})[0]
    subnet = entities.Subnet(
        location=[module_location], organization=[module_org]).create()
    domain = entities.Domain(
        location=[module_location], organization=[module_org]).create()
    content_view = entities.ContentView(organization=module_org).create()
    content_view.publish()
    content_view = content_view.read()
    lce = entities.LifecycleEnvironment(organization=module_org).create()
    promote(content_view.version[0], lce.id)
    hostgroup = entities.HostGroup(
        architecture=arch,
        content_source=proxy,
        content_view=content_view,
        domain=domain,
        environment=env,
        lifecycle_environment=lce,
        location=[module_location],
        medium=media,
        operatingsystem=os,
        organization=[module_org],
        parent=parent_hostgroup,
        ptable=ptable,
        puppet_ca_proxy=proxy,
        puppet_proxy=proxy,
        subnet=subnet,
    ).create()
    assert hostgroup.environment.read().name == env.name
    assert hostgroup.parent.read().name == parent_hostgroup.name
    assert hostgroup.architecture.read().name == arch.name
    assert hostgroup.operatingsystem.read().name == os.name
    assert hostgroup.medium.read().name == media.name
    assert hostgroup.ptable.read().name == ptable.name
    assert hostgroup.puppet_ca_proxy.read().name == proxy.name
    assert hostgroup.subnet.read().name == subnet.name
    assert hostgroup.domain.read().name == domain.name
    assert hostgroup.puppet_proxy.read().name == proxy.name
    assert hostgroup.content_source.read().name == proxy.name
    assert hostgroup.content_view.read().name == content_view.name
    assert hostgroup.lifecycle_environment.read().name == lce.name
    # create new properties for update
    new_org = entities.Organization().create()
    new_loc = entities.Location(organization=[new_org]).create()
    new_arch = entities.Architecture().create()
    new_ptable = entities.PartitionTable().create()
    new_parent = entities.HostGroup(
        location=[new_loc], organization=[new_org]).create()
    new_env = entities.Environment(
        location=[new_loc], organization=[new_org]).create()
    new_os = entities.OperatingSystem(
        architecture=[new_arch], ptable=[new_ptable]).create()
    new_subnet = entities.Subnet(
        location=[new_loc], organization=[new_org]).create()
    new_domain = entities.Domain(
        location=[new_loc], organization=[new_org]).create()
    new_cv = entities.ContentView(organization=new_org).create()
    new_cv.publish()
    new_cv = new_cv.read()
    new_lce = entities.LifecycleEnvironment(organization=new_org).create()
    promote(new_cv.version[0], new_lce.id)
    new_media = entities.Media(
        operatingsystem=[os], location=[new_loc], organization=[new_org]).create()
    # update itself
    hostgroup.organization = [new_org]
    hostgroup.location = [new_loc]
    hostgroup.lifecycle_environment = new_lce
    hostgroup.content_view = new_cv
    hostgroup.domain = new_domain
    hostgroup.architecture = new_arch
    hostgroup.operatingsystem = new_os
    hostgroup.environment = new_env
    hostgroup.parent = new_parent
    hostgroup.ptable = new_ptable
    hostgroup.subnet = new_subnet
    hostgroup.medium = new_media
    hostgroup = hostgroup.update([
        'parent',
        'environment',
        'operatingsystem',
        'architecture',
        'ptable',
        'subnet',
        'domain',
        'content_view',
        'lifecycle_environment',
        'location',
        'organization',
        'medium',
    ])
    assert hostgroup.parent.read().name == new_parent.name
    assert hostgroup.environment.read().name == new_env.name
    assert hostgroup.operatingsystem.read().name == new_os.name
    assert hostgroup.architecture.read().name == new_arch.name
    assert hostgroup.ptable.read().name == new_ptable.name
    assert hostgroup.subnet.read().name == new_subnet.name
    assert hostgroup.domain.read().name == new_domain.name
    assert hostgroup.content_view.read().name == new_cv.name
    assert hostgroup.lifecycle_environment.read().name == new_lce.name
    assert hostgroup.location[0].read().name == new_loc.name
    assert hostgroup.organization[0].read().name == new_org.name
    assert hostgroup.medium.read().name == new_media.name
    # delete
    hostgroup.delete()
    with pytest.raises(HTTPError):
        hostgroup.read()
def setUpClass(cls): """Steps required to create a real host on libvirt 1. Creates new Organization and Location. 2. Creates new life-cycle environment. 3. Creates new product and rhel67 custom repository. 4. Creates new content-view by associating rhel67 repository. 5. Publish and promote the content-view to next environment. 6. Search for puppet environment and associate location. 7. Search for smart-proxy and associate location. 8. Search for existing domain or create new otherwise. Associate org, location and dns proxy. 9. Search for '192.168.100.0' network and associate org, location, dns/dhcp/tftp proxy, and if its not there then creates new. 10. Search for existing compute-resource with 'libvirt' provider and associate org.location, and if its not there then creates new. 11. Search 'Kickstart default' partition table and rhel67 OS along with provisioning/PXE templates. 12. Associates org, location and OS with provisioning and PXE templates 13. Search for x86_64 architecture 14. Associate arch, partition table, provisioning/PXE templates with OS 15. Search for media and associate org/location 16. Create new host group with all required entities """ super(HostTestCase, cls).setUpClass() # Create a new Organization and Location cls.org_ = entities.Organization(name=gen_string('alpha')).create() cls.org_name = cls.org_.name cls.loc = entities.Location(name=gen_string('alpha'), organization=[cls.org_]).create() cls.loc_name = cls.loc.name # Create a new Life-Cycle environment cls.lc_env = entities.LifecycleEnvironment( name=gen_string('alpha'), organization=cls.org_).create() # Create a Product, Repository for custom RHEL6 contents cls.product = entities.Product(name=gen_string('alpha'), organization=cls.org_).create() cls.repo = entities.Repository( name=gen_string('alpha'), product=cls.product, ).create() # Increased timeout value for repo sync cls.old_task_timeout = entity_mixins.TASK_TIMEOUT entity_mixins.TASK_TIMEOUT = 3600 cls.repo.sync() # Create, Publish and promote CV cls.content_view = entities.ContentView( name=gen_string('alpha'), organization=cls.org_).create() cls.content_view.repository = [cls.repo] cls.content_view = cls.content_view.update(['repository']) cls.content_view.publish() cls.content_view = cls.content_view.read() promote(cls.content_view.version[0], cls.lc_env.id) entity_mixins.TASK_TIMEOUT = cls.old_task_timeout # Search for puppet environment and associate location cls.environment = entities.Environment( organization=[cls.org_.id]).search()[0] cls.environment.location = [cls.loc] cls.environment = cls.environment.update(['location']) # Search for SmartProxy, and associate location cls.proxy = entities.SmartProxy().search( query={u'search': u'name={0}'.format(settings.server.hostname)})[0] cls.proxy.location = [cls.loc] cls.proxy = cls.proxy.update(['location']) cls.proxy.organization = [cls.org_] cls.proxy = cls.proxy.update(['organization']) # Search for existing domain or create new otherwise. 
Associate org, # location and dns to it _, _, domain = settings.server.hostname.partition('.') cls.domain = entities.Domain().search( query={u'search': u'name="{0}"'.format(domain)}) if len(cls.domain) == 1: cls.domain = cls.domain[0].read() cls.domain.location.append(cls.loc) cls.domain.organization.append(cls.org_) cls.domain.dns = cls.proxy cls.domain = cls.domain.update(['dns', 'location', 'organization']) else: cls.domain = entities.Domain( dns=cls.proxy, location=[cls.loc], organization=[cls.org_], ).create() cls.domain_name = cls.domain.name # Search if subnet is defined with given network. # If so, just update its relevant fields otherwise, # Create new subnet network = settings.vlan_networking.subnet cls.subnet = entities.Subnet().search( query={u'search': u'network={0}'.format(network)}) if len(cls.subnet) == 1: cls.subnet = cls.subnet[0] cls.subnet.domain = [cls.domain] cls.subnet.location = [cls.loc] cls.subnet.organization = [cls.org_] cls.subnet.dns = cls.proxy cls.subnet.dhcp = cls.proxy cls.subnet.ipam = 'DHCP' cls.subnet.tftp = cls.proxy cls.subnet.discovery = cls.proxy cls.subnet = cls.subnet.update([ 'domain', 'discovery', 'dhcp', 'dns', 'ipam', 'location', 'organization', 'tftp', ]) else: # Create new subnet cls.subnet = entities.Subnet(name=gen_string('alpha'), network=network, mask=settings.vlan_networking.netmask, domain=[cls.domain], ipam='DHCP', location=[cls.loc], organization=[cls.org_], dns=cls.proxy, dhcp=cls.proxy, tftp=cls.proxy, discovery=cls.proxy).create() # Search if Libvirt compute-resource already exists # If so, just update its relevant fields otherwise, # Create new compute-resource with 'libvirt' provider. resource_url = u'qemu+ssh://root@{0}/system'.format( settings.compute_resources.libvirt_hostname) comp_res = [ res for res in entities.LibvirtComputeResource().search() if res.provider == 'Libvirt' and res.url == resource_url ] if len(comp_res) >= 1: cls.computeresource = entities.LibvirtComputeResource( id=comp_res[0].id).read() cls.computeresource.location.append(cls.loc) cls.computeresource.organization.append(cls.org_) cls.computeresource = cls.computeresource.update( ['location', 'organization']) else: # Create Libvirt compute-resource cls.computeresource = entities.LibvirtComputeResource( name=gen_string('alpha'), provider=u'libvirt', url=resource_url, set_console_password=False, display_type=u'VNC', location=[cls.loc.id], organization=[cls.org_.id], ).create() # Get the Partition table ID cls.ptable = entities.PartitionTable().search( query={u'search': u'name="{0}"'.format(DEFAULT_PTABLE)})[0] # Get the OS ID cls.os = entities.OperatingSystem().search( query={ u'search': u'name="RedHat" AND (major="{0}" OR major="{1}")'.format( RHEL_6_MAJOR_VERSION, RHEL_7_MAJOR_VERSION) })[0] # Get the Provisioning template_ID and update with OS, Org, Location cls.provisioning_template = entities.ConfigTemplate().search( query={u'search': u'name="Satellite Kickstart Default"'})[0] cls.provisioning_template.operatingsystem = [cls.os] cls.provisioning_template.organization = [cls.org_] cls.provisioning_template.location = [cls.loc] cls.provisioning_template = cls.provisioning_template.update( ['location', 'operatingsystem', 'organization']) # Get the PXE template ID and update with OS, Org, location cls.pxe_template = entities.ConfigTemplate().search( query={u'search': u'name="Kickstart default PXELinux"'})[0] cls.pxe_template.operatingsystem = [cls.os] cls.pxe_template.organization = [cls.org_] cls.pxe_template.location = [cls.loc] cls.pxe_template = 
cls.pxe_template.update( ['location', 'operatingsystem', 'organization']) # Get the arch ID cls.arch = entities.Architecture().search( query={u'search': u'name="x86_64"'})[0] # Get the media and update its location cls.media = entities.Media(organization=[cls.org_]).search()[0].read() cls.media.location.append(cls.loc) cls.media.organization.append(cls.org_) cls.media = cls.media.update(['location', 'organization']) # Update the OS to associate arch, ptable, templates # Assign both templates at once; assigning them in two separate statements # would overwrite the provisioning template with the PXE template cls.os.architecture = [cls.arch] cls.os.ptable = [cls.ptable] cls.os.config_template = [cls.provisioning_template, cls.pxe_template] cls.os.medium = [cls.media] cls.os = cls.os.update([ 'architecture', 'config_template', 'ptable', 'medium', ]) # Create Hostgroup cls.host_group = entities.HostGroup( architecture=cls.arch, domain=cls.domain.id, subnet=cls.subnet.id, lifecycle_environment=cls.lc_env.id, content_view=cls.content_view.id, location=[cls.loc.id], name=gen_string('alpha'), environment=cls.environment.id, puppet_proxy=cls.proxy, puppet_ca_proxy=cls.proxy, content_source=cls.proxy, medium=cls.media, operatingsystem=cls.os.id, organization=[cls.org_.id], ptable=cls.ptable.id, ).create()
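# A minimal sketch (not part of the original suite) of how the entities prepared in
# setUpClass above might be consumed by a test method added to HostTestCase, reusing the
# module-level `entities` and `gen_string` imports. The method name is hypothetical, and
# the compute-resource/interface attributes needed for an actual libvirt provisioning run
# are intentionally omitted; whether the server accepts this minimal payload depends on
# the host group and compute settings.
def test_positive_create_host_with_hostgroup(self):
    """Create a host that inherits provisioning data from the prepared host group."""
    host = entities.Host(
        hostgroup=self.host_group,
        location=self.loc,
        organization=self.org_,
        root_pass=gen_string('alphanumeric'),
    ).create()
    # The host should report the same host group it was created with
    self.assertEqual(host.hostgroup.read().name, self.host_group.name)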
def test_positive_import_filtered_templates_from_git( self, module_org, module_location): """Assure that only templates matching a given filter regex are pulled from the git repo. :id: 628a95d6-7a4e-4e56-ad7b-d9fecd34f765 :Steps: 1. Using nailgun or a direct API call, import only the templates matching the regex, e.g. `^atomic.*`; refer to `/apidoc/v2/template/import.html` :expectedresults: 1. Assert result is {'message': 'success'} and templates are imported 2. Assert that only templates matching the specified regex have been imported. NOTE: Templates are always imported with a prefix that defaults to `community` unless it is specified as an empty string 3. Assert the JSON output doesn't have the 'Name is not matching filter condition, skipping' info message for imported templates :CaseImportance: High """ prefix = gen_string('alpha') filtered_imported_templates = entities.Template().imports( data={ 'repo': FOREMAN_TEMPLATE_IMPORT_URL, 'branch': 'automation', 'filter': 'robottelo', 'organization_ids': [module_org.id], 'location_ids': [module_location.id], 'prefix': prefix, }) imported_count = [ template['imported'] for template in filtered_imported_templates['message']['templates'] ].count(True) assert imported_count == 8 ptemplates = entities.ProvisioningTemplate().search( query={ 'per_page': 100, 'search': f'name~{prefix}', 'organization_id': module_org.id, 'location_id': module_location.id, }) assert len(ptemplates) == 5 ptables = entities.PartitionTable().search( query={ 'per_page': 100, 'search': f'name~{prefix}', 'organization_id': module_org.id, 'location_id': module_location.id, }) assert len(ptables) == 1 jtemplates = entities.JobTemplate().search( query={ 'per_page': 100, 'search': f'name~{prefix}', 'organization_id': module_org.id, 'location_id': module_location.id, }) assert len(jtemplates) == 1 rtemplates = entities.ReportTemplate().search( query={ 'per_page': 10, 'search': f'name~{prefix}', 'organization_id': module_org.id, 'location_id': module_location.id, }) assert len(rtemplates) == 1
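# A small helper sketch for the assertion pattern used above: given the JSON returned by
# entities.Template().imports(), count how many templates were actually imported and how
# many were skipped. Only the {'message': {'templates': [{'imported': bool, ...}]}} layout
# shown in the test is assumed; the helper name is hypothetical.
def summarize_template_import(import_response):
    """Return (imported, skipped) counts from a template import API response."""
    results = [
        template['imported']
        for template in import_response['message']['templates']
    ]
    return results.count(True), results.count(False)

# Possible usage in the test above:
#     imported, skipped = summarize_template_import(filtered_imported_templates)
#     assert imported == 8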
def test_positive_create_by_type(self): """Create entities of different types and check audit logs for these events using entity type and performed action as search criteria :id: 26197b39-4d56-4aab-8df8-f0fcedbffdb7 :expectedresults: Audit logs contain corresponding entries per each create event :CaseImportance: Critical """ with Session(self): for entity_item in [ { 'entity': entities.Architecture(), 'entity_type': 'architecture' }, { 'entity': entities.AuthSourceLDAP(), 'entity_type': 'auth_source', 'value_template': 'LDAP-{entity.name}' }, { 'entity': entities.ComputeProfile(), 'entity_type': 'compute_profile' }, { 'entity': entities.LibvirtComputeResource(), 'entity_type': 'compute_resource', 'value_template': '{entity.name} (Libvirt)' }, { 'entity': entities.ConfigGroup(), 'entity_type': 'config_group' }, { 'entity': entities.Domain(), 'entity_type': 'domain' }, { 'entity': entities.Host(), 'entity_type': 'host' }, { 'entity': entities.HostGroup(), 'entity_type': 'hostgroup' }, { 'entity': entities.Image(compute_resource=entities. LibvirtComputeResource().create()), 'entity_type': 'image' }, { 'entity': entities.Location(), 'entity_type': 'location' }, { 'entity': entities.Media(), 'entity_type': 'medium', 'custom_operation': 'added', }, { 'entity': entities.Organization(), 'entity_type': 'organization' }, { 'entity': entities.OperatingSystem(), 'entity_type': 'os', 'value_template': '{entity.name} {entity.major}' }, { 'entity': entities.PartitionTable(), 'entity_type': 'ptable', }, { 'entity': entities.PuppetClass(), 'entity_type': 'puppetclass' }, { 'entity': entities.Role(), 'entity_type': 'role' }, { 'entity': entities.Subnet(), 'entity_type': 'subnet', 'value_template': '{entity.name} ' '({entity.network}/{entity.cidr})' }, { 'entity': entities.ProvisioningTemplate(), 'entity_type': 'template', }, { 'entity': entities.User(), 'value_template': '{entity.login}', 'entity_type': 'user', }, { 'entity': entities.UserGroup(), 'entity_type': 'usergroup' }, ]: created_entity = entity_item['entity'].create() value_template = entity_item.get('value_template', '{entity.name}') operation_type = entity_item.get('custom_operation', 'created') entity_value = value_template.format(entity=created_entity) self.audit.filter('type={} and action=create'.format( entity_item['entity_type'])) result = self.audit.get_last_entry() self.assertIn(operation_type, result['full_statement']) self.assertEqual(entity_value, result['entity_name'])
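# A sketch of the per-entry resolution done inside the loop above, pulled into a
# hypothetical helper so the mapping between an entity item and the expected audit entry
# is easier to reuse; the defaults mirror the dict lookups in the test.
def expected_audit_entry(entity_item, created_entity):
    """Return (operation, entity_value) expected in the audit log for a created entity."""
    value_template = entity_item.get('value_template', '{entity.name}')
    operation = entity_item.get('custom_operation', 'created')
    return operation, value_template.format(entity=created_entity)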
def test_positive_VM_import(session, module_ca_cert, module_org, rhev_data, version): """Import an existing VM as a Host :id: 47aea4b7-9258-4863-8966-9a0bc9e94116 :expectedresults: VM is shown as Host in Foreman :CaseLevel: Integration """ # create entities for hostgroup location = entities.Location().create() entities.SmartProxy(id=1, location=[DEFAULT_LOC_ID, location.id]).update() domain = entities.Domain(organization=[module_org.id], location=[location]).create() subnet = entities.Subnet(organization=[module_org.id], location=[location], domain=[domain]).create() architecture = entities.Architecture().create() ptable = entities.PartitionTable(organization=[module_org.id], location=[location]).create() operatingsystem = entities.OperatingSystem(architecture=[architecture], ptable=[ptable]).create() medium = entities.Media(organization=[module_org.id], location=[location], operatingsystem=[operatingsystem]).create() le = entities.LifecycleEnvironment( name="Library", organization=module_org.id).search()[0].read().id cv = entities.ContentView(organization=[module_org.id]).create() cv.publish() # create hostgroup hostgroup_name = gen_string('alpha') entities.HostGroup( name=hostgroup_name, architecture=architecture, domain=domain, subnet=subnet, location=[location.id], medium=medium, operatingsystem=operatingsystem, organization=[module_org], ptable=ptable, lifecycle_environment=le, content_view=cv, content_source=1, ).create() name = gen_string('alpha') with session: session.computeresource.create({ 'name': name, 'provider': FOREMAN_PROVIDERS['rhev'], 'provider_content.url': rhev_data['rhev_url'], 'provider_content.user': rhev_data['username'], 'provider_content.password': rhev_data['password'], 'provider_content.api4': version, 'provider_content.datacenter.value': rhev_data['datacenter'], 'provider_content.certification_authorities': module_ca_cert, 'locations.resources.assigned': [location.name], }) session.hostgroup.update(hostgroup_name, {'deploy_on': name + " (RHV)"}) session.computeresource.vm_import(name, rhev_data['vm_name'], hostgroup_name, location.name) # search() returns a list, so assert it is non-empty rather than comparing to None assert session.host.search(rhev_data['vm_name']) entities.Host(name=rhev_data['vm_name']).search()[0].delete()
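# A teardown sketch (an assumption, not part of the original test) for the compute
# resource the test above creates through the UI; it is looked up by the generated name
# via nailgun's generic AbstractComputeResource entity and removed so repeated runs do
# not accumulate RHV compute resources. The helper name is hypothetical.
def cleanup_compute_resource(name):
    """Delete a compute resource created by the UI test, if it still exists."""
    results = entities.AbstractComputeResource().search(
        query={'search': f'name="{name}"'})
    if results:
        results[0].delete()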
def configure_provisioning(org=None, loc=None, compute=False, os=None): """Create and configure org, loc, product, repo, cv, env. Update proxy, domain, subnet, compute resource, provisioning templates and medium with previously created entities and create a hostgroup using all mentioned entities. :param org: Default Organization entity that should be used in both host discovery and host provisioning procedures :param loc: Default Location entity that should be used in both host discovery and host provisioning procedures :param bool compute: If False, creates a default Libvirt compute resource :param str os: The os, in '<name> <major>.<minor>' format, to be used while provisioning and to associate related entities to. :return: Dict of names of the created entities that can be re-used further in the provisioning or validation procedure (e.g. hostgroup or domain) """ # Create new organization and location in case they were not passed if org is None: org = entities.Organization().create() if loc is None: loc = entities.Location(organization=[org]).create() if settings.repos.rhel7_os is None: raise ImproperlyConfigured( 'settings file is not configured for rhel os') # Create a new Life-Cycle environment lc_env = entities.LifecycleEnvironment(organization=org).create() # Create a Product, Repository for custom RHEL7 contents product = entities.Product(organization=org).create() repo = entities.Repository(product=product, url=settings.repos.rhel7_os, download_policy='immediate').create() # Increased timeout value for repo sync and CV publishing and promotion try: old_task_timeout = entity_mixins.TASK_TIMEOUT entity_mixins.TASK_TIMEOUT = 3600 repo.sync() # Create, Publish and promote CV content_view = entities.ContentView(organization=org).create() content_view.repository = [repo] content_view = content_view.update(['repository']) content_view.publish() content_view = content_view.read() promote(content_view.version[0], lc_env.id) finally: entity_mixins.TASK_TIMEOUT = old_task_timeout # Search for existing organization puppet environment, otherwise create a # new one, associate organization and location where it is appropriate. environments = entities.Environment().search(query=dict( search=f'organization_id={org.id}')) if len(environments) > 0: environment = environments[0].read() environment.location.append(loc) environment = environment.update(['location']) else: environment = entities.Environment(organization=[org], location=[loc]).create() # Search for SmartProxy, and associate location proxy = entities.SmartProxy().search( query={'search': f'name={settings.server.hostname}'}) proxy = proxy[0].read() if loc.id not in [location.id for location in proxy.location]: proxy.location.append(loc) if org.id not in [organization.id for organization in proxy.organization]: proxy.organization.append(org) proxy = proxy.update(['location', 'organization']) # Search for existing domain or create new otherwise. Associate org, # location and dns to it _, _, domain = settings.server.hostname.partition('.') domain = entities.Domain().search(query={'search': f'name="{domain}"'}) if len(domain) == 1: domain = domain[0].read() domain.location.append(loc) domain.organization.append(org) domain.dns = proxy domain = domain.update(['dns', 'location', 'organization']) else: domain = entities.Domain(dns=proxy, location=[loc], organization=[org]).create() # Search if subnet is defined with given network. 
# If so, just update its relevant fields otherwise, # Create new subnet network = settings.vlan_networking.subnet subnet = entities.Subnet().search(query={'search': f'network={network}'}) if len(subnet) == 1: subnet = subnet[0].read() subnet.domain = [domain] subnet.location.append(loc) subnet.organization.append(org) subnet.dns = proxy subnet.dhcp = proxy subnet.tftp = proxy subnet.discovery = proxy subnet.ipam = 'DHCP' subnet = subnet.update([ 'domain', 'discovery', 'dhcp', 'dns', 'location', 'organization', 'tftp', 'ipam' ]) else: # Create new subnet subnet = entities.Subnet( network=network, mask=settings.vlan_networking.netmask, domain=[domain], location=[loc], organization=[org], dns=proxy, dhcp=proxy, tftp=proxy, discovery=proxy, ipam='DHCP', ).create() # Search if Libvirt compute-resource already exists # If so, just update its relevant fields otherwise, # Create new compute-resource with 'libvirt' provider. # compute boolean is added to not block existing tests that depend on the # Libvirt resource and use this same functionality for all CRs. if compute is False: resource_url = f'qemu+ssh://root@{settings.libvirt.libvirt_hostname}/system' comp_res = [ res for res in entities.LibvirtComputeResource().search() if res.provider == 'Libvirt' and res.url == resource_url ] if len(comp_res) > 0: computeresource = entities.LibvirtComputeResource( id=comp_res[0].id).read() computeresource.location.append(loc) computeresource.organization.append(org) computeresource.update(['location', 'organization']) else: # Create Libvirt compute-resource entities.LibvirtComputeResource( provider='libvirt', url=resource_url, set_console_password=False, display_type='VNC', location=[loc.id], organization=[org.id], ).create() # Get the Partition table ID ptable = (entities.PartitionTable().search( query={'search': f'name="{DEFAULT_PTABLE}"'})[0].read()) if loc.id not in [location.id for location in ptable.location]: ptable.location.append(loc) if org.id not in [organization.id for organization in ptable.organization]: ptable.organization.append(org) ptable = ptable.update(['location', 'organization']) # Get the OS ID if os is None: os = (entities.OperatingSystem().search( query={ 'search': 'name="RedHat" AND (major="{}" OR major="{}")'.format( RHEL_6_MAJOR_VERSION, RHEL_7_MAJOR_VERSION) })[0].read()) else: os_ver = os.split(' ')[1].split('.') os = (entities.OperatingSystem().search( query={ 'search': f'family="Redhat" AND major="{os_ver[0]}" AND minor="{os_ver[1]}"' })[0].read()) # Get the Provisioning template_ID and update with OS, Org, Location provisioning_template = entities.ProvisioningTemplate().search( query={'search': f'name="{DEFAULT_TEMPLATE}"'}) provisioning_template = provisioning_template[0].read() provisioning_template.operatingsystem.append(os) if org.id not in [ organization.id for organization in provisioning_template.organization ]: provisioning_template.organization.append(org) if loc.id not in [ location.id for location in provisioning_template.location ]: provisioning_template.location.append(loc) provisioning_template = provisioning_template.update( ['location', 'operatingsystem', 'organization']) # Get the PXE template ID and update with OS, Org, location pxe_template = entities.ProvisioningTemplate().search( query={'search': f'name="{DEFAULT_PXE_TEMPLATE}"'}) pxe_template = pxe_template[0].read() pxe_template.operatingsystem.append(os) if org.id not in [ organization.id for organization in pxe_template.organization ]: pxe_template.organization.append(org) if loc.id not in [location.id 
for location in pxe_template.location]: pxe_template.location.append(loc) pxe_template = pxe_template.update( ['location', 'operatingsystem', 'organization']) # Get the arch ID arch = (entities.Architecture().search( query={'search': f'name="{DEFAULT_ARCHITECTURE}"'})[0].read()) # Update the OS to associate arch, ptable, templates os.architecture.append(arch) os.ptable.append(ptable) os.provisioning_template.append(provisioning_template) os.provisioning_template.append(pxe_template) os = os.update(['architecture', 'provisioning_template', 'ptable']) # kickstart_repository is the content view and lce bind repo kickstart_repository = entities.Repository().search( query=dict(content_view_id=content_view.id, environment_id=lc_env.id, name=repo.name))[0] # Create Hostgroup host_group = entities.HostGroup( architecture=arch, domain=domain.id, subnet=subnet.id, lifecycle_environment=lc_env.id, content_view=content_view.id, location=[loc.id], environment=environment.id, puppet_proxy=proxy, puppet_ca_proxy=proxy, content_source=proxy, kickstart_repository=kickstart_repository, root_pass=gen_string('alphanumeric'), operatingsystem=os.id, organization=[org.id], ptable=ptable.id, ).create() return { 'host_group': host_group.name, 'domain': domain.name, 'environment': environment.name, 'ptable': ptable.name, 'subnet': subnet.name, 'os': os.title, }
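# A minimal usage sketch for configure_provisioning(), assuming it is called from a
# discovery/provisioning test and reuses the module-level `entities` import. The returned
# dict only carries entity names, so the host group is looked up again by name before
# being attached to a host; the test name below is hypothetical.
def test_host_provisioning_with_configured_env():
    """Usage sketch: consume the entity names returned by configure_provisioning()."""
    org = entities.Organization().create()
    loc = entities.Location(organization=[org]).create()
    # An 'os' string in '<name> <major>.<minor>' form can also be passed here
    config_env = configure_provisioning(org=org, loc=loc)
    host_group = entities.HostGroup().search(
        query={'search': f'name="{config_env["host_group"]}"'})[0]
    # config_env['domain'], config_env['subnet'] and config_env['os'] are plain names
    # that can be fed to CLI/UI provisioning steps or used to look up matching entities
    assert host_group.read().name == config_env['host_group']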
def test_positive_end_to_end(session): """Create all entities required for an operating system and then test all scenarios (create/read/update/delete) for it :id: 280afff3-ebf4-4a54-af11-200327b8957b :expectedresults: All scenario flows work properly :CaseLevel: Integration """ name = gen_string('alpha') major_version = gen_string('numeric', 2) minor_version = gen_string('numeric', 2) description = gen_string('alpha') family = 'Red Hat' hash = HASH_TYPE['md5'] architecture = entities.Architecture().create() org = entities.Organization().create() loc = entities.Location().create() ptable = entities.PartitionTable( organization=[org], location=[loc], os_family='Redhat', ).create() medium = entities.Media( organization=[org], location=[loc], ).create() param_name = gen_string('alpha') param_value = gen_string('alpha') with session: session.organization.select(org_name=org.name) session.location.select(loc_name=loc.name) session.operatingsystem.create({ 'operating_system.name': name, 'operating_system.major': major_version, 'operating_system.minor': minor_version, 'operating_system.description': description, 'operating_system.family': family, 'operating_system.password_hash': hash, 'operating_system.architectures.assigned': [architecture.name], 'partition_table.resources.assigned': [ptable.name], 'installation_media.resources.assigned': [medium.name], 'parameters.os_params': { 'name': param_name, 'value': param_value }, }) assert session.operatingsystem.search( description)[0]['Title'] == description os = session.operatingsystem.read(description) assert os['operating_system']['name'] == name assert os['operating_system']['major'] == major_version assert os['operating_system']['minor'] == minor_version assert os['operating_system']['description'] == description assert os['operating_system']['family'] == family assert os['operating_system']['password_hash'] == hash assert len(os['operating_system']['architectures']['assigned']) == 1 assert os['operating_system']['architectures']['assigned'][ 0] == architecture.name assert ptable.name in os['partition_table']['resources']['assigned'] assert os['installation_media']['resources']['assigned'][ 0] == medium.name assert len(os['parameters']['os_params']) == 1 assert os['parameters']['os_params'][0]['name'] == param_name assert os['parameters']['os_params'][0]['value'] == param_value new_description = gen_string('alpha') session.operatingsystem.update( description, {'operating_system.description': new_description}) assert not session.operatingsystem.search(description) assert session.operatingsystem.search( new_description)[0]['Title'] == new_description assert session.partitiontable.search( ptable.name)[0]['Operating Systems'] == new_description session.operatingsystem.delete(new_description) assert not session.operatingsystem.search(new_description)
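# An optional API-side cross-check sketch for the UI flow above (an assumption, not part
# of the original test): after the operating system is created in the session, nailgun can
# confirm that an OS with the same name and major version exists. The helper name is
# hypothetical and reuses the module-level `entities` import.
def assert_os_created_via_api(name, major_version):
    """Cross-check via the API that the UI-created operating system exists."""
    created_os = entities.OperatingSystem().search(
        query={'search': f'name="{name}" and major="{major_version}"'})
    assert len(created_os) == 1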
def default_partitiontable(): """Return the default partition table entity, or None if it does not exist.""" ptables = entities.PartitionTable().search( query={'search': f'name="{DEFAULT_PTABLE}"'}) if ptables: return ptables[0].read()
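# A minimal usage sketch for the helper above, assuming it is registered as a pytest
# fixture (e.g. decorated with @pytest.fixture in the real module); the test name and
# assertions are illustrative only.
def test_positive_default_ptable_present(default_partitiontable):
    """The default partition table should exist and carry the expected name."""
    assert default_partitiontable is not None
    assert default_partitiontable.name == DEFAULT_PTABLE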