Example #1
0
    def mc_list_songs_updated_after(self):
        songs_last_minute = self.mc.get_all_songs(
            updated_after=datetime.datetime.now() - datetime.timedelta(minutes=1))
        assert_not_equal(len(songs_last_minute), 0)

        all_songs = self.mc.get_all_songs()
        assert_not_equal(len(songs_last_minute), len(all_songs))
Example #2
0
    def ensure_domain_specified_in_flags_exists(self):
        """Make sure the domain in the FLAGS exists."""
        self.driver = RsDnsDriver(raise_if_zone_missing=False)
        assert_not_equal(None, self.driver.default_dns_zone)

        def zone_found():
            zones = self.driver.get_dns_zones()
            print("Retrieving zones.")
            for zone in zones:
                print("zone %s" % zone)
                if zone.name == self.driver.default_dns_zone.name:
                    self.driver.default_dns_zone.id = zone.id
                    global DNS_DOMAIN_ID
                    DNS_DOMAIN_ID = zone.id
                    return True
            return False
        if zone_found():
            return
        self.create_domain_if_needed()
        for i in range(5):
            if zone_found():
                return
        self.fail("""Could not find the default DNS zone.
                  This happens when the staging DNS service is cleared of data.
                  To fix it, manually run the tests as follows:
                  $ ADD_DOMAINS=True python int_tests.py
                  and, if all goes well, the tests will create a new domain
                  record.""")
Example #3
0
 def test_nodes(self):
     """ Testing GET:/nodes """
     Nodes().api1_1_nodes_get()
     nodes = dumps(self.__client.last_response.data)
     LOG.info(nodes)
     assert_equal(200, self.__client.last_response.status)
     assert_not_equal(0, len(nodes), message="Node list was empty!")
Example #4
0
 def test_list_chassis(self):
     """ Testing GET /Chassis """
     redfish().list_chassis()
     chassis = self.__get_data()
     LOG.debug(chassis,json=True)
     assert_not_equal(0, len(chassis), message='Chassis list was empty!')
     self.__chassisList = chassis
Example #5
0
    def post_workflows(self, graph_name,
                       timeout_sec=300, nodes=None, data=None,
                       tasks=None, callback=None, run_now=True):
        # use fresh containers per call instead of shared mutable defaults
        data = data if data is not None else {}
        tasks = tasks if tasks is not None else []
        self.__graph_name = graph_name
        self.__graph_status = []

        Api().nodes_get_all()
        nodes = loads(self.__client.last_response.data)

        if callback is None:
            callback = self.handle_graph_finish

        for n in nodes:
            if n.get('type') == 'compute':
                id = n.get('id')
                assert_not_equal(id,None)
                LOG.info('starting amqp listener for node {0}'.format(id))
                worker = AMQPWorker(queue=QUEUE_GRAPH_FINISH, callbacks=[callback])
                thread = WorkerThread(worker, id)
                self.__tasks.append(thread)
                tasks.append(thread)
                try:
                    Api().nodes_workflow_action_by_id(id, {'command': 'cancel'})
                except ApiException as e:
                    assert_equal(404,e.status, message='status should be 404')
                except (TypeError, ValueError) as e:
                    assert(e.message)
                Api().nodes_post_workflow_by_id(id, name=self.__graph_name, body=data)

        if run_now:
            self.run_workflow_tasks(self.__tasks, timeout_sec)
Example #6
0
 def test_list_schemas(self):
     """ Testing GET /Schemas """
     redfish().list_schemas()
     schemas = self.__get_data()
     LOG.debug(schemas,json=True)
     assert_not_equal(0, len(schemas), message='Schema list was empty!')
     self.__schemaList = schemas
Example #7
0
    def test_node_pollers(self):
        """ Test /nodes/:id/pollers are running """
        Nodes().nodes_get()
        nodes = loads(self.__client.last_response.data)
        LOG.debug(nodes, json=True)
        samples = []
        valid = False

        for n in nodes:
            LOG.info(n)
            if n.get('type') == 'compute':
                uuid = n.get('id')
                Nodes().nodes_identifier_pollers_get(uuid)
                rsp = self.__client.last_response
                data = loads(rsp.data)
                assert_equal(200, rsp.status, message=rsp.reason)
                assert_not_equal(0, len(data), \
                        message='Failed to find poller for nodes {0}'.format(n.get('id')))
                samples.append(data[0])

        for sample in samples:
            count = 18  # wait up to 3 minutes (poller interval is 1 minute)
            valid = False  # reset for each poller sample
            while not valid:
                try:
                    Templates().pollers_identifier_data_get(sample.get('id'))
                    valid = True
                except Exception:
                    LOG.warning('Poller {0} doesn\'t work normally'.format(sample.get('id')))
                    time.sleep(10)
                    count -= 1
                    assert_not_equal(0, count, \
                            message='Poller {0} failed to get data'.format(sample.get('id')))
Example #8
0
    def test_backup_get(self):
        """Test get backup."""
        backup = instance_info.dbaas.backups.get(backup_info.id)
        assert_equal(backup_info.id, backup.id)
        assert_equal(backup_info.name, backup.name)
        assert_equal(backup_info.description, backup.description)
        assert_equal(instance_info.id, backup.instance_id)
        assert_not_equal(0.0, backup.size)
        assert_equal('COMPLETED', backup.status)
        assert_equal(instance_info.dbaas_datastore,
                     backup.datastore['type'])
        assert_equal(instance_info.dbaas_datastore_version,
                     backup.datastore['version'])

        datastore_version = instance_info.dbaas.datastore_versions.get(
            instance_info.dbaas_datastore,
            instance_info.dbaas_datastore_version)
        assert_equal(datastore_version.id, backup.datastore['version_id'])

        # Test to make sure that user in other tenant is not able
        # to GET this backup
        reqs = Requirements(is_admin=False)
        other_user = CONFIG.users.find_user(
            reqs,
            black_list=[instance_info.user.auth_user])
        other_client = create_dbaas_client(other_user)
        assert_raises(exceptions.NotFound, other_client.backups.get,
                      backup_info.id)
Example #9
0
 def test_list_registry(self):
     """ Testing GET /Registries """
     redfish().list_registry()
     registry = self.__get_data()
     LOG.debug(registry,json=True)
     assert_not_equal(0, len(registry), message='Registry list was empty!')
     self.__registryList = registry
Example #10
0
 def test_basic(self):
     """Test the basic operators of the Split class."""
     assert_equal(Split(1, 2), Split(1, 2))
     assert_not_equal(Split(2, 1), Split(1, 2))
     assert_equal(hash(Split(1, 2)), hash(Split(1, 2)))
     assert_equal(str(Split(2, 1)), "2:1")
     assert_equal(repr(Split(2, 1)), "Split(2,1)")
Example #11
0
    def __post_workflows(self, graph_name, body):
        # POST workflows without listening to AMQP about status
        Nodes().nodes_get()
        nodes = loads(self.client.last_response.data)

        for n in nodes:
            if n.get('type') == 'compute':
                id = n.get('id')
                assert_not_equal(id,None)
                try:
                    Nodes().nodes_identifier_workflows_active_delete(id)
                except Exception as e:
                    assert_equal(404, e.status, message='status should be 404')

                # Verify the active workflow has been deleted.
                # If the post-workflow API is called immediately after deleting the active
                # workflow, the first call can fail and be retried, effectively issuing the
                # workflow twice in quick succession, which disrupts the vBMC.
                retries = 5
                Nodes().nodes_identifier_workflows_active_get(id)
                status = self.client.last_response.status
                while status != 204 and retries != 0:
                    LOG.warning('Workflow status for Node {0} (status={1},retries={2})'.format(id,status,retries))
                    sleep(1)
                    retries -= 1
                    Nodes().nodes_identifier_workflows_active_get(id)
                    status = self.client.last_response.status

                assert_equal(204, status, message = 'status should be 204')

                Nodes().nodes_identifier_workflows_post(id,name=graph_name,body=body)
Example #12
0
    def prepare_upgrade_smoke(self):
        self.backup_name = "backup_smoke.tar.gz"
        self.repos_backup_name = "repos_backup_smoke.tar.gz"

        self.check_run("upgrade_smoke_backup")
        self.env.revert_snapshot("ready", skip_timesync=True)
        intermediate_snapshot = "prepare_upgrade_smoke_before_backup"

        assert_not_equal(
            settings.KEYSTONE_CREDS['password'], 'admin',
            "Admin password was not changed, aborting execution")

        cluster_settings = {
            'net_provider': settings.NEUTRON,
            'net_segment_type': settings.NEUTRON_SEGMENT['vlan']
        }
        cluster_settings.update(self.cluster_creds)

        if not self.env.d_env.has_snapshot(intermediate_snapshot):
            self.deploy_cluster(
                {'name': self.prepare_upgrade_smoke.__name__,
                 'settings': cluster_settings,
                 'nodes': {'slave-01': ['controller'],
                           'slave-02': ['compute', 'cinder']}
                 }
            )
            self.env.make_snapshot(intermediate_snapshot)

        # revert_snapshot will do nothing if there is no snapshot
        self.env.revert_snapshot(intermediate_snapshot)

        self.do_backup(self.backup_path, self.local_path,
                       self.repos_backup_path, self.repos_local_path)
        self.env.make_snapshot("upgrade_smoke_backup", is_make=True)
Example #13
0
 def redfish_discovery_test(self):
     """ Testing Redfish Service Discovery """
     user, passwd = get_cred('redfish')
     assert_is_not_none(user)
     assert_is_not_none(passwd)
     body = {
         'options': {
             'defaults': {
                 'username': user,
                 'password': passwd,
                 'uri': URI
             }
         }
     }
         
     if IS_EMC:
         body['options']['when-catalog-emc'] = { 'autoCatalogEmc': 'true' }
         body['options']['when-pollers-emc'] = { 'autoCreatePollerEmc': 'true' }
         
     self.__post_unbound_workflow('Graph.Redfish.Discovery', body)
     Nodes().nodes_get()
     nodes = self.__get_data()
     
     settings = []
     for node in nodes:
         if node.get('type') == 'enclosure':
             for obm in node.get('obmSettings', []):
                 if obm.get('service') == 'redfish-obm-service':
                     self.__nodes.append(node)
                     config = obm.get('config')
                     assert_equal(URI, config.get('uri'), \
                         message = "Unexpected Redfish URI")
     assert_not_equal(len(self.__nodes), 0, message='Missing Redfish Enclosures')
Example #14
0
 def test_root_now_enabled_details(self):
     """Use instance details to test that root is now enabled."""
     instance = self.dbaas.instances.get(instance_info.id)
     assert_true(hasattr(instance, "rootEnabled"), "Instance has no rootEnabled property.")
     assert_true(instance.rootEnabled, "Root SHOULD be enabled.")
     assert_not_equal(self.root_enabled_timestamp, "Never")
     self._verify_root_timestamp(instance_info.id)
Example #15
0
 def test_reset_root(self):
     if test_config.values['root_timestamp_disabled']:
         raise SkipTest("Enabled timestamp not enabled yet")
     old_ts = self.root_enabled_timestamp
     self._root()
     assert_not_equal(self.root_enabled_timestamp, 'Never')
     assert_equal(self.root_enabled_timestamp, old_ts)
Example #16
0
 def test_node_patch(self):
     """ Verify PATCH:/nodes/:id """
     data = {"name": "fake_name_test"}
     Nodes().api1_1_nodes_get()
     nodes = loads(self.__client.last_response.data)
     codes = []
     for n in nodes:
         if n.get("name") == "test_compute_node":
             uuid = n.get("id")
             Nodes().api1_1_nodes_identifier_patch(uuid, data)
             rsp = self.__client.last_response
             test_nodes = loads(self.__client.last_response.data)
             assert_equal(test_nodes.get("name"), "fake_name_test", "Oops patch failed")
             codes.append(rsp)
             LOG.info('Restoring name to "test_compute_node"')
             correct_data = {"name": "test_compute_node"}
             Nodes().api1_1_nodes_identifier_patch(uuid, correct_data)
             rsp = self.__client.last_response
             restored_nodes = loads(self.__client.last_response.data)
             assert_equal(restored_nodes.get("name"), "test_compute_node", "Oops restoring failed")
             codes.append(rsp)
     assert_not_equal(0, len(codes), message="Failed to find compute node Ids")
     for c in codes:
         assert_equal(200, c.status, message=c.reason)
     assert_raises(rest.ApiException, Nodes().api1_1_nodes_identifier_patch, "fooey", data)
Example #17
0
 def check_lookups(self):
     """ Testing GET:/lookups """
     Api().lookups_get()
     rsp = self.__client.last_response
     LOG.info("\nLookup list: {}\n".format(rsp.data, json=True))
     assert_equal(200, rsp.status, message=rsp.reason)
     assert_not_equal(0, len(rsp.data))
Example #18
0
 def test_create_security_group_rule(self):
     if len(self.testSecurityGroup.rules) == 0:
         self.testSecurityGroupRule = \
             dbaas.security_group_rules.create(
                 group_id=self.testSecurityGroup.id,
                 protocol="tcp",
                 from_port=3306,
                 to_port=3306,
                 cidr="0.0.0.0/0")
         assert_is_not_none(self.testSecurityGroupRule)
         with TypeCheck('SecurityGroupRule',
                        self.testSecurityGroupRule) as secGrpRule:
             secGrpRule.has_field('id', basestring)
             secGrpRule.has_field('security_group_id', basestring)
             secGrpRule.has_field('protocol', basestring)
             secGrpRule.has_field('cidr', basestring)
             secGrpRule.has_field('from_port', int)
             secGrpRule.has_field('to_port', int)
             secGrpRule.has_field('created', basestring)
         assert_equal(self.testSecurityGroupRule.security_group_id,
                      self.testSecurityGroup.id)
         assert_equal(self.testSecurityGroupRule.protocol, "tcp")
         assert_equal(int(self.testSecurityGroupRule.from_port), 3306)
         assert_equal(int(self.testSecurityGroupRule.to_port), 3306)
         assert_equal(self.testSecurityGroupRule.cidr, "0.0.0.0/0")
     else:
         assert_not_equal(len(self.testSecurityGroup.rules), 0)
Example #19
0
 def test_deep_list_security_group_with_rules(self):
     securityGroupList = dbaas.security_groups.list()
     assert_is_not_none(securityGroupList)
     securityGroup = [x for x in securityGroupList
                      if x.name in self.secGroupName]
     assert_is_not_none(securityGroup[0])
     assert_not_equal(len(securityGroup[0].rules), 0)
Example #20
0
    def upgrade_env_code(self, release_id):
        self.show_step(self.next_step)
        seed_id = int(
            self.ssh_manager.check_call(
                ip=self.env.get_admin_node_ip(),
                command="octane upgrade-env {0} {1}".format(self.orig_cluster_id, release_id),
                error_info="'upgrade-env' command failed, " "inspect logs for details",
            ).stdout_str
        )

        new_cluster_id = int(self.fuel_web.get_last_created_cluster())

        assert_not_equal(
            self.orig_cluster_id,
            seed_id,
            "Cluster IDs are the same: old={} and new={}".format(self.orig_cluster_id, seed_id),
        )

        assert_equal(
            seed_id,
            new_cluster_id,
            "Cluster ID was changed, but it's not the last:"
            " abnormal activity or configuration error presents!\n"
            "\tSEED ID: {}\n"
            "\tLAST ID: {}".format(seed_id, new_cluster_id),
        )

        cluster_release_id = int(self.fuel_web.get_cluster_release_id(seed_id))

        assert_equal(
            cluster_release_id,
            release_id,
            "Release ID {} is not equals to expected {}".format(cluster_release_id, release_id),
        )
Example #21
0
 def test_get_configuration_details_from_instance_validation(self):
     # validate that the configuration was applied correctly to the instance
     inst = instance_info.dbaas.instances.get(configuration_instance.id)
     configuration_id = inst.configuration['id']
     assert_not_equal(None, inst.configuration['id'])
     _test_configuration_is_applied_to_instance(configuration_instance,
                                                configuration_id)
Example #22
0
 def test_node_patch(self):
     """ Testing PATCH:/api/2.0/nodes/:id """
     data = {"name": 'fake_name_test'}
     Api().nodes_get_all()
     nodes = self.__get_data()
     codes = []
     for n in nodes:
         if n.get('name') == 'test_compute_node':
             uuid = n.get('id')
             Api().nodes_patch_by_id(identifier=uuid,body=data)
             rsp = self.__client.last_response
             test_nodes = self.__get_data()
             assert_equal(test_nodes.get('name'), 'fake_name_test', 'Oops patch failed')
             codes.append(rsp)
             LOG.info('Restoring name to "test_compute_node"')
             correct_data = {"name": 'test_compute_node'}
             Api().nodes_patch_by_id(identifier=uuid,body=correct_data)
             rsp = self.__client.last_response
             restored_nodes = self.__get_data()
             assert_equal(restored_nodes.get('name'), 'test_compute_node', 'Oops restoring failed')
             codes.append(rsp)
     assert_not_equal(0, len(codes), message='Failed to find compute node Ids')
     for c in codes:
         assert_equal(200, c.status, message=c.reason)
     assert_raises(rest.ApiException, Api().nodes_patch_by_id, 'fooey', data)
Example #23
0
 def test_node_patch(self):
     """ Verify PATCH:/nodes/:id """
     data = {"name": 'fake_name_test'}
     Nodes().nodes_get()
     nodes = loads(self.__client.last_response.data)
     codes = []
     for n in nodes:
         if n.get('name') == 'test_compute_node':
             uuid = n.get('id')
             Nodes().nodes_identifier_patch(uuid, data)
             rsp = self.__client.last_response
             test_nodes = loads(self.__client.last_response.data)
             assert_equal(test_nodes.get('name'), 'fake_name_test', 'Oops patch failed')
             codes.append(rsp)
             LOG.info('Restoring name to "test_compute_node"')
             correct_data = {"name": 'test_compute_node'}
             Nodes().nodes_identifier_patch(uuid, correct_data)
             rsp = self.__client.last_response
             restored_nodes = loads(self.__client.last_response.data)
             assert_equal(restored_nodes.get('name'), 'test_compute_node', 'Oops restoring failed')
             codes.append(rsp)
     assert_not_equal(0, len(codes), message='Failed to find compute node Ids')
     for c in codes:
         assert_equal(200, c.status, message=c.reason)
     assert_raises(rest.ApiException, Nodes().nodes_identifier_patch, 'fooey', data)
Example #24
0
 def test_validate_user_readOnly(self):
     """ Testing validate read Only privilege  """
     user = {
         'username': '******',
         'password': '******'
     }
     Api().get_user('funtest-name')
     found_user = self.__get_data()
     LOG.info(user,json=True)
     save_admin_token = config.api_client.default_headers['authorization']
     config.api_client.default_headers['authorization'] = 'JWT ' + self.get_auth_token(user)
     newuser = {
         'username': '******',
         'password': '******',
         'role': 'Administrator'
     }
     LOG.info('should fail to create user')
     try :
         Api().add_user(body=newuser)
     except ApiException as e:
         assert_equal(403, e.status)
     LOG.info('should be able to display users list') 
     Api().list_users()
     users = self.__get_data()
     LOG.debug(users,json=True)
     assert_not_equal(0,len(users))
     #Restore config token
     config.api_client.default_headers['authorization'] = save_admin_token
Example #25
0
 def test_empty_index_host_list_single(self):
     self.host.name = self.host.name.replace(".", "\\.")
     result = self.client.hosts.get(self.host)
     assert_not_equal(result, None,
                      "Get host should not be empty for: %s" % self.host)
     with Check() as check:
         used_ram = int(result.usedRAM)
         total_ram = int(result.totalRAM)
         percent_used = int(result.percentUsed)
         lower, upper = percent_boundary(used_ram, total_ram)
         check.true(percent_used > lower,
                    "percentUsed %r is below the lower boundary %r"
                    % (percent_used, lower))
         check.true(percent_used < upper,
                    "percentUsed %r is above the upper boundary %r"
                    % (percent_used, upper))
         check.true(used_ram < total_ram,
                    "usedRAM %r should be less than totalRAM %r"
                    % (used_ram, total_ram))
         check.true(percent_used < 100,
                    "percentUsed should be less than 100 but was %r"
                    % percent_used)
         check.true(total_ram > 0,
                    "totalRAM should be greater than 0 but was %r"
                    % total_ram)
         check.true(used_ram < total_ram,
                    "usedRAM %r should be less than totalRAM %r"
                    % (used_ram, total_ram))
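This example and the later host-list tests rely on a percent_boundary(used_ram, total_ram) helper that returns lower and upper bounds for the used-RAM percentage; another host-list test in this collection computes equivalent bounds inline as plus or minus 25% of the calculated percentage. A minimal sketch consistent with that inline calculation (an inference, not the source implementation):

    def percent_boundary(used_ram, total_ram, margin=0.25):
        # Lower/upper bounds around the used-RAM percentage, +/- `margin` of its value.
        calc = float(used_ram) / float(total_ram) * 100.0
        return calc - calc * margin, calc + calc * margin
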
Example #26
0
 def test_obm_library(self):
     """ Testing GET:/obms/library """
     Obms().obms_library_get()
     obms = loads(self.__client.last_response.data)
     services = [t.get('service') for t in obms]
     assert_equal(200, self.__client.last_response.status)
     assert_not_equal(0, len(obms), message='OBM list was empty!')
Example #27
0
    def ha_pacemaker_configuration(self):
        """Verify resources are configured

        Scenario:
            1. SSH to controller node
            2. Verify resources are configured
            3. Go to next controller

        Snapshot deploy_ha

        """
        self.env.revert_snapshot("deploy_ha")

        devops_ctrls = self.env.nodes().slaves[:3]
        for devops_node in devops_ctrls:
            config = self.fuel_web.get_pacemaker_config(devops_node.name)
            for n in devops_ctrls:
                fqdn = self.fuel_web.fqdn(n)
                assert_true(
                    'node {0}'.format(fqdn) in config,
                    'node {0} exists'.format(fqdn))
            assert_not_equal(
                re.search('primitive (openstack-)?heat-engine', config), None,
                'heat engine')
            assert_true('primitive p_haproxy' in config, 'haproxy')
            assert_true('primitive p_mysql' in config, 'mysql')
            assert_true(
                'primitive vip__management_old' in config, 'vip management')
            assert_true(
                'primitive vip__public_old' in config, 'vip public')
Example #28
0
 def result_is_active():
     backup = instance_info.dbaas.backups.get(backup_info.id)
     if backup.status == "COMPLETED":
         return True
     else:
         assert_not_equal("FAILED", backup.status)
         return False
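result_is_active is written as a polling predicate: it returns True once the backup reaches COMPLETED, returns False while the backup is still in progress, and asserts if the backup has FAILED. A sketch of the kind of poll loop such a predicate is typically handed to (poll_until here is a hypothetical stand-in, not necessarily the helper used by the source suite):

    import time

    def poll_until(predicate, sleep_time=5, time_out=300):
        # Re-evaluate predicate() until it returns truthy or time_out seconds elapse.
        deadline = time.time() + time_out
        while time.time() < deadline:
            if predicate():
                return
            time.sleep(sleep_time)
        raise AssertionError("Timed out waiting for the predicate to become true")
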
Example #29
0
 def handle_graph_finish(self,body,message):
     routeId = message.delivery_info.get('routing_key').split('graph.finished.')[1]
     assert_not_equal(routeId,None)
     Workflows().workflows_get()
     workflows = loads(self.__client.last_response.data)
     message.ack()
     for w in workflows:
         injectableName = w['definition'].get('injectableName')
         if injectableName == self.__graph_name:
             graphId = w['context'].get('graphId')
             if graphId == routeId:
                 nodeid = w['context'].get('target', injectableName)
                 status = body['status']
                 if status == 'succeeded' or status == 'failed':
                     self.__graph_status.append(status)
                     for task in self.__tasks:
                         if task.id == nodeid:
                             task.worker.stop()
                             task.running = False
                     msg = {
                         'graph_name': injectableName,
                         'target': nodeid,
                         'status': status,
                         'route_id': routeId
                     }
                     if status == 'failed':
                         msg['active_task'] = w['tasks']
                         LOG.error(msg, json=True)
                     else:
                         LOG.info(msg, json=True)
                     break
Example #30
0
 def test_index_host_list_single(self):
     self.host.name = self.host.name.replace(".", "\\.")
     myresult = self.client.hosts.get(self.host)
     assert_not_equal(myresult, None,
                      "list hosts should not be empty: %s" % str(myresult))
     assert_true(len(myresult.instances) > 0,
                 "instance list on the host should not be empty: %r" %
                 myresult.instances)
     with Check() as check:
         check.true(myresult.totalRAM == instance_info.host_info.totalRAM,
                    "totalRAM should be the same as before : %r == %r" %
                    (myresult.totalRAM, instance_info.host_info.totalRAM))
         diff = instance_info.host_info.usedRAM\
                + instance_info.dbaas_flavor.ram
         check.true(myresult.usedRAM == diff,
                    "usedRAM should be : %r == %r" %
                    (myresult.usedRAM, diff))
         calc = float((1.0 * myresult.usedRAM / myresult.totalRAM) * 100)
         low_bound = calc - (calc * 0.25)
         high_bound = calc + (calc * 0.25)
         check.true(myresult.percentUsed > low_bound,
                    "percentUsed should be : %r > %r" %
                    (myresult.percentUsed, low_bound))
         check.true(myresult.percentUsed < high_bound,
                    "percentUsed should be : %r < %r" %
                    (myresult.percentUsed, high_bound))
         print("test_index_host_list_single result instances: %s" %
             str(myresult.instances))
         for index, instance in enumerate(myresult.instances, start=1):
             print("%d instance: %s" % (index, instance))
             check.equal(sorted(['id', 'name', 'status', 'server_id',
                                 'tenant_id']),
                         sorted(instance.keys()))
             for key in instance.keys():
                 check.is_not_none(instance[key])
Example #31
0
 def test_get_sel_log_services_entries(self):
     """ Testing GET /Systems/{identifier}/LogServices/sel/Entries """
     assert_is_not_none(self.__membersList)
     membersList = self.__logServicesList.get('Members')
     assert_is_not_none(membersList,
                        message='missing log services members field!')
     for member in membersList:
         dataId = member.get('@odata.id')
         assert_is_not_none(dataId)
         id = re.compile(r'/LogServices').split(dataId)[0]\
                 .split('/redfish/v1/Systems/')[1]
         redfish().list_sel_log_service_entries(id)
         entries = self.__get_data()
         assert_not_equal({}, entries)
         # Should validate the SEL entries here
         # Leaving as TODO until a 'add_sel' task is available
         LOG.debug(entries, json=True)
Example #32
0
    def test_node_workflows_get(self):
        """ Testing GET:/api/2.0/nodes/:id/workflows """
        resps = []
        Api().nodes_get_all()
        nodes = self.__get_data()
        for n in nodes:
            if n.get('type') == 'compute':
                Api().nodes_get_workflow_by_id(identifier=n.get('id'))
                resps.append(self.__get_data())
        for resp in resps:
            assert_not_equal(0,
                             len(resp),
                             message='No Workflows found for Node')

        Api().nodes_get_workflow_by_id('fooey')
        resps_fooey = self.__get_data()
        assert_equal(len(resps_fooey), 0, message='Should be empty')
Example #33
0
    def check_ovs_firewall_functionality(self,
                                         cluster_id,
                                         compute_ip,
                                         dpdk=False):
        """Check firewall functionality

        :param cluster_id: int, cluster id
        :param compute_ip: str, compute ip
        :param dpdk: bool, is DPDK enabled
        """
        flows = self.get_flows(compute_ip)
        if dpdk:
            ifaces = self.get_ovs_bridge_ifaces(compute_ip)
        else:
            ifaces = self.get_ifaces(compute_ip)
        net_name = self.fuel_web.get_cluster_predefined_networks_name(
            cluster_id)['private_net']
        os_conn = os_actions.OpenStackActions(
            self.fuel_web.get_public_vip(cluster_id))
        if dpdk:
            server = self.boot_dpdk_instance(os_conn, cluster_id)
            current_ifaces = self.get_ovs_bridge_ifaces(compute_ip)
        else:
            server = os_conn.create_server_for_migration(label=net_name)
            current_ifaces = self.get_ifaces(compute_ip)
        current_flows = self.get_flows(compute_ip)
        assert_equal(
            len(current_ifaces.stdout) - len(ifaces.stdout), 1,
            "Check failed:"
            " {}\n\n{}".format(ifaces, current_ifaces))
        assert_not_equal(
            set(flows.stdout), set(current_flows.stdout),
            "Check failed, flow sets are equal:"
            " {}\n\n{}".format(flows, current_flows))
        float_ip = os_conn.assign_floating_ip(server)
        logger.info("Floating address {0} associated with instance {1}".format(
            float_ip.ip, server.id))

        logger.info("Wait for ping from instance {} "
                    "by floating ip".format(server.id))
        devops_helpers.wait(
            lambda: devops_helpers.tcp_ping(float_ip.ip, 22),
            timeout=300,
            timeout_msg=("Instance {0} is unreachable for {1} seconds".format(
                server.id, 300)))
        os_conn.delete_instance(server)
Example #34
0
 def test_node_id(self):
     """ Testing GET:/nodes/:id """
     Nodes().api1_1_nodes_get()
     nodes = loads(self.__client.last_response.data)
     LOG.debug(nodes,json=True)
     codes = []
     for n in nodes:
         LOG.info(n)
         if n.get('type') == 'compute':
             uuid = n.get('id')
             Nodes().api1_1_nodes_identifier_get(uuid)
             rsp = self.__client.last_response
             codes.append(rsp)
     assert_not_equal(0, len(codes), message='Failed to find compute node Ids')
     for c in codes:
         assert_equal(200, c.status, message=c.reason)
     assert_raises(rest.ApiException, Nodes().api1_1_nodes_identifier_get, 'fooey')
Example #35
0
    def test_index_host_list_single(self):
        self.host.name = self.host.name.replace(".", "\\.")
        result = self.client.hosts.get(self.host)
        assert_not_equal(result, None,
                         "list hosts should not be empty: %s" % str(result))
        assert_true(len(result.instances) > 0,
                    "instance list on the host should not be empty: %r"
                    % result.instances)
        with Check() as check:
            used_ram = int(result.usedRAM)
            total_ram = int(result.totalRAM)
            percent_used = int(result.percentUsed)
            lower, upper = percent_boundary(used_ram, total_ram)
            check.true(percent_used > lower,
                       "percentUsed %r is below the lower boundary %r"
                       % (percent_used, lower))
            check.true(percent_used < upper,
                       "percentUsed %r is above the upper boundary %r"
                       % (percent_used, upper))
            check.true(used_ram < total_ram,
                       "usedRAM %r should be less than totalRAM %r"
                       % (used_ram, total_ram))
            check.true(percent_used < 100,
                       "percentUsed should be less than 100 but was %r"
                       % percent_used)
            check.true(total_ram > 0,
                       "totalRAM should be greater than 0 but was %r"
                       % total_ram)
            check.true(used_ram < total_ram,
                       "usedRAM %r should be less than totalRAM %r"
                       % (used_ram, total_ram))

            # Check all active instances and validate all the fields exist
            active_instance = None
            for instance in result.instances:
                print("instance: %s" % instance)
                if instance['status'] != 'ACTIVE':
                    continue
                active_instance = instance
                check.is_not_none(instance['id'])
                check.is_not_none(instance['name'])
                check.is_not_none(instance['status'])
                check.is_not_none(instance['server_id'])
                check.is_not_none(instance['tenant_id'])
            check.true(active_instance is not None, "No active instances")
Example #36
0
    def test_node_id_obm(self):
        """ Testing GET:/api/2.0/nodes/:id/obm """
        Api().nodes_get_all()
        nodes = self.__get_data()
        LOG.debug(nodes,json=True)
        codes = []
        for n in nodes:
            if n.get('name') == 'test_compute_node':
                uuid = n.get('id')
                Api().nodes_get_obm_by_id(identifier=uuid)
                rsp = self.__client.last_response
                LOG.info('OBM setting for node ID {0} is {1}'.format(uuid, rsp.data))
                codes.append(rsp)

        assert_not_equal(0, len(codes), message='Failed to find compute node Ids')
        for c in codes:
            assert_equal(200, c.status, message=c.reason)
        assert_raises(rest.ApiException, Api().nodes_get_obm_by_id, 'fooey')
Example #37
0
 def test_get_schema_invalid(self):
     """ Testing GET /Schemas/{identifier} 404s properly """
     self.__membersList = self.__schemaList.get('Members')
     assert_not_equal(None, self.__membersList)
     for member in self.__membersList:
         dataId = member.get('@odata.id')
         assert_not_equal(None, dataId)
         dataId = dataId.split('/redfish/v1/Schemas/')[1]
         try:
             redfish().get_schema(dataId + '-invalid')
             fail(message='did not raise exception')
         except rest.ApiException as e:
             assert_equal(
                 404,
                 e.status,
                 message='unexpected response {0}, expected 404'.format(
                     e.status))
         break
Example #38
0
    def test_backup_get(self):
        """test get backup"""
        backup = instance_info.dbaas.backups.get(backup_info.id)
        assert_equal(backup_info.id, backup.id)
        assert_equal(backup_info.name, backup.name)
        assert_equal(backup_info.description, backup.description)
        assert_equal(instance_info.id, backup.instance_id)
        assert_not_equal(0.0, backup.size)
        assert_equal('COMPLETED', backup.status)

        # Test to make sure that user in other tenant is not able
        # to GET this backup
        reqs = Requirements(is_admin=False)
        other_user = CONFIG.users.find_user(
            reqs, black_list=[instance_info.user.auth_user])
        other_client = create_dbaas_client(other_user)
        assert_raises(exceptions.NotFound, other_client.backups.get,
                      backup_info.id)
Example #39
0
    def test_do_logout_session(self):
        """ Testing DELETE /SessionService/Sessions/{identifier} """
        redfish().do_logout_session(self.__session)
        try:
            redfish().get_session_info(self.__session)
            fail(message='did not raise exception')
        except rest.ApiException as e:
            assert_equal(
                404,
                e.status,
                message='unexpected response {0}, expected 404'.format(
                    e.status))

        self.test_get_sessions()
        for member in self.__sessionList:
            dataId = member.get('@odata.id')
            dataId = dataId.split('/redfish/v1/SessionService/Sessions/')[1]
            assert_not_equal(self.__session, dataId)
Example #40
0
 def test_node_id(self):
     """ Testing GET:/api/2.0/nodes/:id """
     Api().nodes_get_all()
     nodes = self.__get_data()
     LOG.debug(nodes, json=True)
     codes = []
     for n in nodes:
         LOG.info(n, json=True)
         if n.get('type') == 'compute':
             uuid = n.get('id')
             Api().nodes_get_by_id(identifier=uuid)
             rsp = self.__client.last_response
             codes.append(rsp)
     assert_not_equal(0,
                      len(codes),
                      message='Failed to find compute node Ids')
     for c in codes:
         assert_equal(200, c.status, message=c.reason)
     assert_raises(rest.ApiException, Api().nodes_get_by_id, 'fooey')
Example #41
0
 def test_tags(self):
     """ Testing GET:/api/2.0/tags """
     Api().nodes_get_all()
     nodes = self.__get_data()
     tagsArray = []
     for n in nodes:
         if n.get('type') == 'compute':
             tagsWithRules = self.__create_tag_rule(n.get('id'))
             assert_not_equal(len(tagsWithRules), 0,
                              "Failed to create tag rules")
             tagsArray.append(tagsWithRules)
     Api().get_all_tags()
     rsp = self.__client.last_response
     updated_tags = self.__get_data()
     assert_equal(200, rsp.status, message=rsp.reason)
     for i in range(len(updated_tags)):
         assert_equal(updated_tags[i]['rules'][0]['path'],
                      'dmi.System Information.Manufacturer',
                      message='Could not find the tag')
Example #42
0
    def prepare_upgrade_ceph_ha(self):
        self.backup_name = "backup_ceph_ha.tar.gz"
        self.repos_backup_name = "repos_backup_ceph_ha.tar.gz"

        self.check_run("upgrade_ceph_ha_backup")
        self.env.revert_snapshot("ready", skip_timesync=True)
        intermediate_snapshot = "prepare_upgrade_ceph_ha_before_backup"

        assert_not_equal(
            settings.KEYSTONE_CREDS['password'], 'admin',
            "Admin password was not changed, aborting execution")

        cluster_settings = {
            'net_provider': settings.NEUTRON,
            'net_segment_type': settings.NEUTRON_SEGMENT['tun'],
            'volumes_lvm': False,
            'volumes_ceph': True,
            'images_ceph': True,
            'objects_ceph': True,
            'ephemeral_ceph': True,
        }
        cluster_settings.update(self.cluster_creds)

        if not self.env.d_env.has_snapshot(intermediate_snapshot):
            self.deploy_cluster(
                {'name': self.prepare_upgrade_ceph_ha.__name__,
                 'settings': cluster_settings,
                 'nodes':
                     {'slave-01': ['controller'],
                      'slave-02': ['controller'],
                      'slave-03': ['controller'],
                      'slave-04': ['compute', 'ceph-osd'],
                      'slave-05': ['compute', 'ceph-osd']}
                 }
            )
            self.env.make_snapshot(intermediate_snapshot)

        self.env.revert_snapshot(intermediate_snapshot)

        self.do_backup(self.backup_path, self.local_path,
                       self.repos_backup_path, self.repos_local_path)

        self.env.make_snapshot("upgrade_ceph_ha_backup", is_make=True)
Example #43
0
    def test_node_delete(self):
        """ Testing DELETE:/api/2.0/nodes/:id """
        codes = []
        test_names = []
        Api().nodes_get_all()
        nodes = self.__get_data()
        test_names = [t.get('name') for t in self.__test_nodes]
        for n in nodes:
            name = n.get('name')
            if name in test_names:
                uuid = n.get('id')
                LOG.info('Deleting node {0} (name={1})'.format(uuid, name))
                Api().nodes_del_by_id(identifier=uuid)
                codes.append(self.__client.last_response)

        assert_not_equal(0, len(codes), message='Delete node list empty!')
        for c in codes:
            assert_equal(204, c.status, message=c.reason)
        assert_raises(rest.ApiException, Api().nodes_del_by_id, 'fooey')
Example #44
0
    def test_node_delete(self):
        """ Testing DELETE:/nodes/:id """
        codes = []
        test_names = []
        Nodes().api1_1_nodes_get()
        nodes = loads(self.__client.last_response.data)
        test_names = [t.get('name') for t in self.__test_nodes]
        for n in nodes:
            name = n.get('name')
            if name in test_names:
                uuid = n.get('id')
                LOG.info('Deleting node {0} (name={1})'.format(uuid, name))
                Nodes().api1_1_nodes_identifier_delete(uuid)
                codes.append(self.__client.last_response)

        assert_not_equal(0, len(codes), message='Delete node list empty!')
        for c in codes:
            assert_equal(200, c.status, message=c.reason)
        assert_raises(rest.ApiException, Nodes().api1_1_nodes_identifier_delete, 'fooey')
Example #45
0
 def test_empty_index_host_list_single(self):
     self.host.name = self.host.name.replace(".", "\\.")
     single_host = self.client.hosts.get(self.host)
     assert_not_equal(single_host, None,
                      "Get host should not be empty for: %s" % self.host)
     print("test_index_host_list_single result: %r" % single_host.__dict__)
     with Check() as check:
         check.true(single_host.percentUsed < 50,
                    "percentUsed should be around 40 but was  %r"
                    % single_host.percentUsed)
         check.true(single_host.totalRAM,
                    "totalRAM should exist > 0 : %r" % single_host.totalRAM)
         check.true(single_host.usedRAM < 1000, "usedRAM should be less "
                    "than 1000 : %r" % single_host.usedRAM)
         check.true(instance_info.name not in
                    [dbc.name for dbc in single_host.instances])
         instance_info.host_info = single_host
         for index, instance in enumerate(single_host.instances, start=1):
             print("%r instance: %r" % (index, instance))
Example #46
0
 def test_node_get_obm_by_node_id(self):
     """Testing GET:/api/2.0/:id/obm"""
     Api().nodes_get_all()
     rsp = self.__client.last_response
     nodes = loads(rsp.data)
     assert_equal(200, rsp.status, message=rsp.status)
     for n in nodes:
         LOG.info(n, json=True)
         Api().nodes_get_obms_by_node_id(identifier=n.get('id'))
         LOG.info('getting OBMs for node {0}'.format(n.get('id')))
         rsp = self.__client.last_response
         assert_equal(200, rsp.status, message=rsp.status)
         obms = loads(rsp.data)
         assert_not_equal(0, len(obms), message='OBMs list was empty!')
         for obm in obms:
             id = obm.get('id')
             Api().obms_delete_by_id(identifier=id)
             rsp = self.__client.last_response
             assert_equal(204, rsp.status, message=rsp.status)
Example #47
0
 def check_chassis_task(self):
     """ Testing AMQP on.task.ipmi.chassis.result """
     Nodes().api1_1_nodes_get()
     nodes = loads(self.__client.last_response.data)
     self.__threadTasks = []
     for node in nodes:
         id = node.get('id')
         assert_not_equal(id,None)
         type = node.get('type')
         assert_not_equal(type,None)
         if type == 'compute':
             worker = Worker(queue=QUEUE_CHASSIS_RESULT, callbacks=[self.handle_result])
             thread = Thread(target=self.amqp_tasker_thread,args=(worker,id,))
             thread.daemon = True
             self.__threadTasks.append(self.ThreadTask(worker,thread,id))
     for t in self.__threadTasks:
         t.thread.start()
         t.state = True
     self.amqp_tasker_loop()
Example #48
0
 def test_get_accounts(self):
     """ Testing GET /AcountService/Accounts """
     redfish().get_accounts()
     accounts = self.__get_data()
     LOG.debug(accounts, json=True)
     self.__accounts = accounts.get('Members')
     for member in self.__accounts:
         dataId = member.get('@odata.id')
         assert_not_equal(None, dataId)
         dataId = dataId.split('/redfish/v1/AccountService/Accounts/')[1]
         redfish().get_account(dataId)
         account = self.__get_data()
         LOG.debug(account, json=True)
         username = account.get('UserName')
         assert_equal(
             dataId,
             username,
             message='unexpected username {0}, expected {1}'.format(
                 username, dataId))
Example #49
0
    def build_simple_bootstrap(self):
        """Verify than slaves retrieved Ubuntu bootstrap with extra package

        Scenario:
            1. Revert snapshot ready
            2. Build and activate Ubuntu bootstrap with extra package
            3. Bootstrap slaves
            4. Verify Ubuntu bootstrap on slaves

        Duration: 20m
        """
        self.env.revert_snapshot("ready")

        bootstrap_params = {
            "ubuntu-release": "xenial",
            "label": "UbuntuBootstrap",
            "output-dir": "/tmp",
            "package": ["ipython"]
        }

        uuid, bootstrap_location = \
            self.env.fuel_bootstrap_actions.build_bootstrap_image(
                **bootstrap_params)

        self.env.fuel_bootstrap_actions.\
            import_bootstrap_image(bootstrap_location)
        self.env.fuel_bootstrap_actions.\
            activate_bootstrap_image(uuid)

        nodes = self.env.d_env.get_nodes(
            name__in=["slave-01", "slave-02", "slave-03"])
        self.env.bootstrap_nodes(nodes)

        for node in nodes:
            n_node = self.fuel_web.get_nailgun_node_by_devops_node(node)
            checkers.verify_bootstrap_on_node(n_node['ip'],
                                              os_type="ubuntu",
                                              uuid=uuid)

            ipython_version = utils.get_package_versions_from_node(
                n_node['ip'], name="ipython", os_type="Ubuntu")
            assert_not_equal(ipython_version, "")
Example #50
0
 def test_node_workflows_post(self):
     """Testing node POST:id/workflows"""
     resps = []
     Nodes().nodes_get()
     nodes = loads(self.__client.last_response.data)
     for n in nodes:
         if n.get('type') == 'compute':
             Nodes().nodes_identifier_workflows_post(n.get('id'),
                                                     'Graph.Discovery',
                                                     body={})
             resps.append(self.__client.last_response.data)
     for resp in resps:
         assert_not_equal(0,
                          len(loads(resp)),
                          message='No Workflows found for Node')
     assert_raises(rest.ApiException,
                   Nodes().nodes_identifier_workflows_post,
                   'fooey',
                   'Graph.Discovery',
                   body={})
Example #51
0
 def test_get_task_service_root(self):
     """ Testing GET /TaskService """
     redfish().task_service_root()
     taskService = self.__get_data()
     LOG.debug(taskService, json=True)
     self.__oemServiceList = taskService.get('Oem')
     assert_is_not_none(self.__oemServiceList)
     oemMembers = self.__oemServiceList['RackHD'] \
                                       ['SystemTaskCollection'].get('Members')
     assert_is_not_none(oemMembers)
     assert_not_equal(0,
                      len(oemMembers),
                      message='OEM members list was empty!')
     self.__taskServiceList = taskService.get('Tasks')
     assert_is_not_none(self.__taskServiceList)
     taskMembers = self.__taskServiceList.get('Members')
     assert_is_not_none(taskMembers)
     assert_not_equal(0,
                      len(taskMembers),
                      message='Task service members list was empty!')
Example #52
0
 def test_node_workflows_post(self):
     """ Testing POST:/api/2.0/nodes/:id/workflows """
     resps = []
     Api().nodes_get_all()
     nodes = loads(self.__client.last_response.data)
     for n in nodes:
         if n.get('type') == 'compute':
             Api().nodes_post_workflow_by_id(identifier=n.get('id'),
                                             name='Graph.Discovery',
                                             body={})
             resps.append(self.__client.last_response.data)
     for resp in resps:
         assert_not_equal(0,
                          len(loads(resp)),
                          message='No Workflows found for Node')
     assert_raises(rest.ApiException,
                   Api().nodes_post_workflow_by_id,
                   'fooey',
                   name='Graph.Discovery',
                   body={})
Example #53
0
    def test_reset_root_user_enabled(self):
        if test_config.values['root_timestamp_disabled']:
            raise SkipTest("Enabled timestamp not enabled yet")
        created_users = ['root']
        self.system_users.remove('root')
        users = self.dbaas.users.list(instance_info.id)
        found = False
        for user in created_users:
            found = any(result.name == user for result in users)
            assert_true(found, "User '%s' not found in result" % user)
            found = False

        found = False
        for user in self.system_users:
            found = any(result.name == user for result in users)
            msg = "User '%s' SHOULD NOT BE found in result" % user
            assert_false(found, msg)
            found = False
        assert_not_equal(self.root_enabled_timestamp, 'Never')
        self._verify_root_timestamp(instance_info.id)
Example #54
0
    def handle_graph_finish(self, body, message):
        routeId = message.delivery_info.get('routing_key').split(
            'graph.finished.')[1]
        assert_not_equal(routeId, None)
        Api().workflows_get()
        workflows = loads(self.__client.last_response.data)
        message.ack()
        for w in workflows:
            injectableName = w['injectableName']
            if injectableName == self.__graph_name:
                graphId = w['context'].get('graphId')
                if graphId == routeId:
                    if 'target' in w['context']:
                        nodeid = w['context']['target'] or 'none'
                    else:
                        nodeid = 'none'
                    status = body['status']
                    if status == 'succeeded' or status == 'failed':
                        LOG.info('{0} - target: {1}, status: {2}, route: {3}'.
                                 format(injectableName, nodeid, status,
                                        routeId))
                        self.__graph_status.append(status)

                        for task in self.__tasks:
                            if task.id == nodeid:
                                task.worker.stop()
                                task.running = False

                        msg = {
                            'graph_name': injectableName,
                            'target': nodeid,
                            'status': status,
                            'route_id': routeId
                        }

                        if status == 'failed':
                            msg['active_task'] = w['tasks']
                            LOG.error(msg, json=True)
                        else:
                            LOG.info(msg, json=True)
                        break
Example #55
0
 def test_workflows_action(self):
     """ Testing PUT:/api/2.0/nodes/:id/workflows/action """
     Api().nodes_get_all()
     nodes = self.__get_data()
     for n in nodes:
         if n.get('type') == 'compute':
             id = n.get('id')
             timeout = 5
             done = False
             while timeout > 0 and done == False:
                 if 0 == self.__post_workflow(id,'Graph.Discovery'):
                     fail('Timed out waiting for graph to start!')
                 try:
                     Api().nodes_workflow_action_by_id(id, {'command': 'cancel'})
                     done = True
                 except rest.ApiException as e:
                     if e.status != 404:
                         raise e
                     timeout -= 1
             assert_not_equal(timeout, 0, message='Failed to delete an active workflow')
     assert_raises(rest.ApiException, Api().nodes_workflow_action_by_id, 'fooey', {'command': 'test'})
Example #56
0
 def test_list_roles(self):
     """ Testing GET /AcountService/Roles """
     redfish().list_roles()
     roles = self.__get_data()
     LOG.debug(roles, json=True)
     self.__roles = roles.get('Members')
     assert_equal(len(self.__roles),
                  3,
                  message='expected role length to be 3')
     for member in self.__roles:
         dataId = member.get('@odata.id')
         assert_not_equal(None, dataId)
         dataId = dataId.split('/redfish/v1/AccountService/Roles/')[1]
         redfish().get_role(dataId)
         role = self.__get_data()
         LOG.debug(role, json=True)
         name = role.get('Name')
         assert_equal(dataId,
                      name,
                      message='unexpected name {0}, expected {1}'.format(
                          name, dataId))
Example #57
0
    def set_up(self):
        self.client = create_client(is_admin=False)
        self.mgmt_client = create_client(is_admin=True)

        if EPHEMERAL_SUPPORT:
            flavor_name = test_config.values.get('instance_eph_flavor_name',
                                                 'eph.rd-tiny')
            flavor2_name = test_config.values.get(
                'instance_bigger_eph_flavor_name', 'eph.rd-smaller')
        else:
            flavor_name = test_config.values.get('instance_flavor_name',
                                                 'm1.tiny')
            flavor2_name = test_config.values.get(
                'instance_bigger_flavor_name', 'm1.small')
        flavors = self.client.find_flavors_by_name(flavor_name)
        self.flavor_id = flavors[0].id
        self.name = "TEST_" + str(uuid.uuid4())
        # Get the resize to flavor.
        flavors2 = self.client.find_flavors_by_name(flavor2_name)
        self.new_flavor_id = flavors2[0].id
        asserts.assert_not_equal(self.flavor_id, self.new_flavor_id)
Example #58
0
 def test_get_chassis_power(self):
     """ Testing GET /Chassis/{identifier}/Power """
     for member in self.__membersList:
         dataId = member.get('@odata.id')
         assert_not_equal(None,dataId)
         dataId = dataId.split('/redfish/v1/Chassis/')[1]
         redfish().get_power(dataId)
         power = self.__get_data()
         LOG.debug(power,json=True)
         assert_not_equal({}, power, message='power object undefined!')
         name = power.get('Name')
         assert_not_equal('', name, message='empty power name!')
         voltages = power.get('Voltages')
          assert_not_equal(0, len(voltages), message='voltages list was empty!')
Example #59
0
    def redfish_emc_compose_test(self):
        """ Testing EMC Redfish Compose Workflow """
        for node in self.__nodes:
            elements = []
            id = node.get('id')
            assert_is_not_none(id)
            Nodes().nodes_identifier_catalogs_get(id)
            catalog = self.__get_data()
            assert_not_equal(len(catalog),
                             0,
                             message='EMC Redfish Catalog size failure')

            self.__endpoints = []
            self.__endpoints.append('ComputeElement{0}' \
                .format(next_element('ComputeElement', catalog[0].get('data')).get('Id')))
            self.__endpoints.append('StorageElement{0}' \
                .format(next_element('StorageElement', catalog[0].get('data')).get('Id')))
            self.__endpoints.append('StorageElement{0}' \
                .format(next_element('StorageElement', catalog[0].get('data')).get('Id')))
            body = {
                'options': {
                    'defaults': {
                        'endpoints': self.__endpoints,
                        'name': self.__system_name,
                        'action': 'compose'
                    }
                }
            }
            self.__post_node_workflow([id], 'Graph.Emc.Compose.System', body)
        Nodes().nodes_get()
        nodes = self.__get_data()
        for node in nodes:
            if self.__system_name in node.get('identifiers', []):
                for relation in node.get('relations', []):
                    if relation.get('relationType') == 'elementEndpoints':
                        assert_equal(sorted(relation.get('targets', [])), sorted(self.__endpoints), \
                            message='failure composed system endpoints')
                        return
        # test failure if we get here
        fail('Failed to find composed system')
Example #60
0
 def test_node_workflows_post(self):
     """Testing node POST:id/workflows"""
     resps = []
     Nodes().nodes_get()
     nodes = self.__get_data()
     for n in nodes:
         if n.get('type') == 'compute':
             id = n.get('id')
             timeout = self.__post_workflow(id, 'Graph.Discovery', {})
             if timeout > 0:
                 data = self.__get_data()
             resps.append({'data': data, 'id': id})
     for resp in resps:
         assert_not_equal(0,
                          len(resp['data']),
                          message='No Workflows found for Node {0}'.format(
                              resp['id']))
     assert_raises(rest.ApiException,
                   Nodes().nodes_identifier_workflows_post,
                   'fooey',
                   'Graph.Discovery',
                   body={})