def test_1_format(self, capsys):
    """Test the format of the HMC credentials file."""
    # A missing credentials file is tolerated; report and skip the check.
    items = self.hmc_creds.get_cpc_items()
    if items is None:
        info(capsys, "HMC credentials file not found: %r - Skipping "
             "format check of HMC credentials file", self.hmc_creds.filepath)
        return
    # The file must define at least one CPC entry.
    assert len(items) > 0
def test_2_hmcs(self, capsys):
    """
    Check out the HMCs specified in the HMC credentials file.

    Skip HMCs that cannot be contacted.
    """
    cpc_items = self.hmc_creds.get_cpc_items()
    if cpc_items is None:
        # pytest.skip() raises Skipped, so no 'return' is needed after it
        # (the original had an unreachable 'return' here).
        pytest.skip("HMC credentials file not found: %r" %
                    self.hmc_creds.filepath)

    rt_config = zhmcclient.RetryTimeoutConfig(
        connect_timeout=10,
        connect_retries=1,
    )

    # Check HMCs and their CPCs
    for cpc_name in cpc_items:
        cpc_item = cpc_items[cpc_name]
        hmc_host = cpc_item['hmc_host']

        info(capsys, "Checking HMC %r for CPC %r", (hmc_host, cpc_name))

        session = zhmcclient.Session(
            hmc_host, cpc_item['hmc_userid'], cpc_item['hmc_password'],
            retry_timeout_config=rt_config)
        client = zhmcclient.Client(session)

        try:
            session.logon()
        except zhmcclient.ConnectionError as exc:
            # Unreachable HMCs are tolerated; report and move on.
            info(capsys, "Skipping HMC %r for CPC %r: %s",
                 (hmc_host, cpc_name, exc))
            continue

        try:
            cpcs = client.cpcs.list()
            cpc_names = [cpc.name for cpc in cpcs]
            if cpc_name not in cpc_names:
                raise AssertionError("CPC {!r} not found in HMC {!r}.\n"
                                     "Existing CPCs: {!r}".format(
                                         cpc_name, hmc_host, cpc_names))
        finally:
            # Log off even when the CPC check fails, so a failing HMC does
            # not leak its logged-on session (the original skipped logoff
            # on the AssertionError path).
            session.logoff()
def test_crud(self, capsys):
    """Create, read, update and delete a partition."""

    def assert_props(res, exp_props):
        # Compare each expected property against the resource's cache.
        for prop_name, exp_value in exp_props.items():
            assert res.properties[prop_name] == exp_value, \
                "Unexpected value for property {!r}".format(prop_name)

    cpc_name, session, client, cpc, faked_cpc = \
        setup_cpc(capsys, self.hmc_creds, self.fake_data)
    part_name = self.NAME_PREFIX + 'test_crud.part1'

    # Ensure a clean starting point for this test
    try:
        old_part = cpc.partitions.find(name=part_name)
    except zhmcclient.NotFound:
        pass
    else:
        info(capsys,
             "Cleaning up partition from previous run: {!r}".format(
                 old_part))
        if old_part.get_property('status') != 'stopped':
            old_part.stop()
        old_part.delete()

    # Test creating the partition
    part_input_props = {
        'name': part_name,
        'description': 'Dummy partition description.',
        'ifl-processors': 2,
        'initial-memory': 1024,
        'maximum-memory': 2048,
        'processor-mode': 'shared',  # used for filtering
        'type': 'linux',  # used for filtering
    }
    part_auto_props = {
        'status': 'stopped',
    }
    part = cpc.partitions.create(part_input_props)

    # Input properties are reflected in the local cache right away ...
    assert_props(part, part_input_props)

    # ... and survive a full refresh from the HMC, which also populates
    # the automatically set properties.
    part.pull_full_properties()
    assert_props(part, part_input_props)
    assert_props(part, part_auto_props)

    # Test finding the partition based on its (cached) name
    found_part = cpc.partitions.find(name=part_name)
    assert found_part.name == part_name

    # Test finding the partition based on a server-side filtered prop
    parts = cpc.partitions.findall(type='linux')
    assert part_name in [p.name for p in parts]  # noqa: F812

    # Test finding the partition based on a client-side filtered prop
    parts = cpc.partitions.findall(**{'processor-mode': 'shared'})
    assert part_name in [p.name for p in parts]  # noqa: F812

    # Test updating a property of the partition
    new_desc = "Updated partition description."
    part.update_properties(dict(description=new_desc))
    assert part.properties['description'] == new_desc
    part.pull_full_properties()
    assert part.properties['description'] == new_desc

    # Test deleting the partition
    part.delete()
    with pytest.raises(zhmcclient.NotFound):
        cpc.partitions.find(name=part_name)

    # Cleanup
    session.logoff()
def test_stogrp_crud(self, capsys):
    """Create, read, update and delete a storage group."""

    def assert_props(grp, exp_props):
        # Compare each expected property against the resource's cache.
        for prop_name, exp_value in exp_props.items():
            assert grp.properties[prop_name] == exp_value, \
                "Unexpected value for property {!r} of storage group:\n" \
                "{!r}".format(prop_name, sorted(grp.properties))

    cpc_name, session, client, cpc, faked_cpc = \
        setup_cpc(capsys, self.hmc_creds, self.fake_data)

    if not self.dpm_storage_management_enabled(cpc):
        info(
            capsys,
            "DPM Storage feature not enabled or not supported; "
            "Skipping test_stogrp_crud() test case")
        return

    console = client.consoles.console
    stogrp_name = self.NAME_PREFIX + 'test_stogrp_crud.stogrp1'

    # Ensure clean starting point
    try:
        old_stogrp = console.storage_groups.find(name=stogrp_name)
    except zhmcclient.NotFound:
        pass
    else:
        info(
            capsys,
            "Cleaning up storage group from previous run: {!r}".format(
                old_stogrp))
        old_stogrp.delete()

    # Test creating the storage group
    stogrp_input_props = {
        'name': stogrp_name,
        'description': 'Dummy storage group description.',
        'type': 'fcp',
    }
    stogrp_auto_props = {
        'shared': False,
        'active': False,
        'fulfillment-state': 'creating',
        'adapter-count': 1,
    }
    stogrp = console.storage_groups.create(stogrp_input_props)

    # Input properties are reflected in the local cache right away ...
    assert_props(stogrp, stogrp_input_props)

    # ... and survive a full refresh from the HMC.
    stogrp.pull_full_properties()
    assert_props(stogrp, stogrp_input_props)

    # Auto-set properties are only checked against a real HMC; the faked
    # environment does not maintain them.
    if not faked_cpc:
        assert_props(stogrp, stogrp_auto_props)

    # Test finding the storage group based on its (cached) name
    found_stogrp = console.storage_groups.find(name=stogrp_name)
    assert found_stogrp.name == stogrp_name

    # Test finding the storage group based on a server-side filtered prop
    stogrps = console.storage_groups.findall(type='fcp')
    assert stogrp_name in [sg.name for sg in stogrps]  # noqa: F812

    # Test finding the storage group based on a client-side filtered prop
    stogrps = console.storage_groups.findall(active=False)
    assert stogrp_name in [sg.name for sg in stogrps]

    # Test updating a property of the storage group
    new_desc = "Updated storage group description."
    stogrp.update_properties(dict(description=new_desc))
    assert stogrp.properties['description'] == new_desc
    stogrp.pull_full_properties()
    assert stogrp.properties['description'] == new_desc

    # Test deleting the storage group
    stogrp.delete()
    with pytest.raises(zhmcclient.NotFound):
        console.storage_groups.find(name=stogrp_name)

    # Cleanup
    session.logoff()
def test_stovol_crud(self, capsys):
    """Create, read, update and delete a storage volume in a sto.grp."""
    cpc_name, session, client, cpc, faked_cpc = \
        setup_cpc(capsys, self.hmc_creds, self.fake_data)

    if not self.dpm_storage_management_enabled(cpc):
        info(
            capsys,
            "DPM Storage feature not enabled or not supported; "
            "Skipping test_stovol_crud() test case")
        return

    console = client.consoles.console
    stogrp_name = self.NAME_PREFIX + 'test_stovol_crud.stogrp1'
    stovol_name = self.NAME_PREFIX + 'test_stovol_crud.stovol1'

    # Ensure clean starting point
    try:
        stogrp = console.storage_groups.find(name=stogrp_name)
    except zhmcclient.NotFound:
        pass
    else:
        info(
            capsys,
            "Cleaning up storage group from previous run: {!r}".format(
                stogrp))
        stogrp.delete()

    # Create a storage group for the volume
    stogrp = console.storage_groups.create(
        dict(name=stogrp_name, type='fcp'))

    # Test creating a volume
    stovol_input_props = {
        'name': stovol_name,
        'description': 'Dummy storage volume description.',
        'size': 100,  # MB
    }
    stovol_auto_props = {
        'fulfillment-state': 'creating',
        'usage': 'data',
    }
    # TODO: Remove this tempfix when fixed:
    if True:
        info(
            capsys,
            "Tempfix: Volume does not support 'cpc-uri' "
            "property; Omitting it in Create Volume.")
    else:
        stovol_input_props['cpc-uri'] = cpc.uri
    stovol = stogrp.storage_volumes.create(stovol_input_props)

    # Input properties are reflected in the local cache right away ...
    for pn in stovol_input_props:
        exp_value = stovol_input_props[pn]
        assert stovol.properties[pn] == exp_value, \
            "Unexpected value for property {!r} of storage volume:\n" \
            "{!r}".format(pn, sorted(stovol.properties))

    # ... and survive a full refresh from the HMC.
    stovol.pull_full_properties()
    for pn in stovol_input_props:
        # TODO: Remove this tempfix when fixed:
        if pn == 'name':
            info(
                capsys,
                "Tempfix: Create Volume does not honor name; "
                "Skipping assertion of name:\n"
                "  provided name: %r\n"
                "  created name: %r" %
                (stovol_input_props[pn], stovol.properties[pn]))
            continue
        exp_value = stovol_input_props[pn]
        assert stovol.properties[pn] == exp_value, \
            "Unexpected value for property {!r} of storage volume:\n" \
            "{!r}".format(pn, sorted(stovol.properties))

    # Auto-set properties are only checked against a real HMC.
    if not faked_cpc:
        for pn in stovol_auto_props:
            exp_value = stovol_auto_props[pn]
            assert stovol.properties[pn] == exp_value, \
                "Unexpected value for property {!r} of storage volume:\n" \
                "{!r}".format(pn, sorted(stovol.properties))

    # Test finding the storage volume based on its (cached) name
    sv = stogrp.storage_volumes.find(name=stovol_name)
    assert sv.name == stovol_name

    # Test finding the storage volume based on a server-side filtered prop
    # TODO: Remove this tempfix when fixed:
    try:
        # Bug fix: findall() must be used here (not find()), since the
        # result is iterated as a list in the assertion below, matching
        # the sibling tests.
        stovols = stogrp.storage_volumes.findall(usage='data')
    except zhmcclient.HTTPError as exc:
        if exc.http_status == 500:
            info(
                capsys,
                "Tempfix: List Volumes filtered by usage raises "
                "%s,%s %r; Skipping this test." %
                (exc.http_status, exc.reason, exc.message))
        else:
            # Only HTTP 500 is covered by the tempfix; anything else is
            # a real error and must not be silently swallowed.
            raise
    else:
        assert stovol_name in [sv.name for sv in stovols]  # noqa: F812

    # Test finding the storage group based on a client-side filtered prop
    # TODO: Remove this tempfix when fixed:
    try:
        stovols = stogrp.storage_volumes.findall(active=False)
    except zhmcclient.HTTPError as exc:
        if exc.http_status == 500:
            info(
                capsys,
                "Tempfix: List Volumes raises "
                "%s,%s %r; Skipping this test." %
                (exc.http_status, exc.reason, exc.message))
        else:
            # Re-raise anything the tempfix does not cover.
            raise
    else:
        assert stovol_name in [sv.name for sv in stovols]

    # Test updating a property of the storage volume
    new_desc = "Updated storage volume description."
    stovol.update_properties(dict(description=new_desc))
    assert stovol.properties['description'] == new_desc
    stovol.pull_full_properties()
    assert stovol.properties['description'] == new_desc

    # Test deleting the storage volume
    # TODO: Remove this tempfix when fixed:
    try:
        stovol.delete()
    except zhmcclient.HTTPError as exc:
        if exc.http_status == 500:
            info(
                capsys,
                "Tempfix: Delete Volume raises "
                "%s,%s %r; Skipping this test." %
                (exc.http_status, exc.reason, exc.message))
        else:
            # Re-raise anything the tempfix does not cover.
            raise
    else:
        with pytest.raises(zhmcclient.NotFound):
            stogrp.storage_volumes.find(name=stovol_name)

    # Cleanup
    stogrp.delete()
    session.logoff()