Example #1
    def add_vmware_nova_compute(self, nova_computes):
        vmware_attr = \
            self.fuel_web.client.get_cluster_vmware_attributes(self.cluster_id)
        vcenter_data = vmware_attr['editable']['value']['availability_zones'][
            0]["nova_computes"]

        comp_vmware_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ['compute-vmware'], role_status='pending_roles')

        for instance in nova_computes:
            cluster_name = instance['cluster']
            srv_name = instance['srv_name']
            datastore = instance['datastore']
            if instance['target_node'] == 'compute-vmware':
                node = comp_vmware_nodes.pop()
                target_node = node['hostname']
            else:
                target_node = instance['target_node']

            vcenter_data.append(
                {"vsphere_cluster": cluster_name,
                 "service_name": srv_name,
                 "datastore_regex": datastore,
                 "target_node": {
                     "current": {"id": target_node,
                                 "label": target_node},
                     "options": [{"id": target_node,
                                  "label": target_node}, ]},
                 }
            )

        logger.debug("Try to update cluster with next "
                     "vmware_attributes {0}".format(vmware_attr))
        self.fuel_web.client.update_cluster_vmware_attributes(
            self.cluster_id, vmware_attr)
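For context, add_vmware_nova_compute only reads four keys from each item of nova_computes, so a call might look like the sketch below; the cluster, service and datastore values are placeholders, not values taken from the source.

    nova_computes = [
        {'cluster': 'Cluster1',          # vSphere cluster name (placeholder)
         'srv_name': 'srv_cluster1',     # nova-compute service name (placeholder)
         'datastore': '.*',              # datastore regex (placeholder)
         'target_node': 'controllers'},  # or 'compute-vmware'
    ]
    self.add_vmware_nova_compute(nova_computes)  # called from another test step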
Example #2
 def wrapper(*args, **kwargs):
     try:
         result = func(*args, **kwargs)
     except SkipTest:
         raise SkipTest()
     except Exception as test_exception:
         exc_trace = sys.exc_traceback
         name = 'error_%s' % func.__name__
         description = "Failed in method '%s'." % func.__name__
         if args[0].env is not None:
             try:
                 create_diagnostic_snapshot(args[0].env,
                                            "fail", name)
             except:
                 logger.error("Fetching of diagnostic snapshot failed: {0}".
                              format(traceback.format_exc()))
                 try:
                     with args[0].env.d_env.get_admin_remote()\
                             as admin_remote:
                         pull_out_logs_via_ssh(admin_remote, name)
                 except:
                     logger.error("Fetching of raw logs failed: {0}".
                                  format(traceback.format_exc()))
             finally:
                 logger.debug(args)
                 try:
                     args[0].env.make_snapshot(snapshot_name=name[-50:],
                                               description=description,
                                               is_make=True)
                 except:
                     logger.error("Error making the environment snapshot:"
                                  " {0}".format(traceback.format_exc()))
         raise test_exception, None, exc_trace
     return result
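The snippet above is only the inner function of a decorator; a minimal sketch of the surrounding pieces it relies on, where the decorator name and the functools.wraps call are assumptions rather than details from the source:

import functools

def snapshot_on_error(func):  # hypothetical decorator name
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        pass  # body as in the example above
    return wrapper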
Example #3
    def create_instance_with_vmxnet3_adapter(self):
        """Create instance with vmxnet3 adapter."""
        public_ip = self.fuel_web.get_public_vip(self.cluster_id)
        os_conn = OpenStackActions(public_ip)

        image = os_conn.get_image(self.image_name)
        os_conn.update_image(image,
                             properties={"hw_vif_model": "VirtualVmxnet3"})
        flavor = os_conn.get_flavor_by_name('m1.small')
        sg = os_conn.get_security_group(self.sg_name)
        net = os_conn.get_network(self.net_name)
        vm = os_conn.create_server(image=image,
                                   availability_zone=self.vcenter_az,
                                   net_id=net['id'], security_groups=[sg],
                                   flavor_id=flavor.id, timeout=900)
        floating_ip = os_conn.assign_floating_ip(vm)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22), timeout=210,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])[0]
        with self.fuel_web.get_ssh_for_nailgun_node(controller) as remote:
            cmd = '/usr/bin/lshw -class network | grep vmxnet3'
            res = remote.execute_through_host(
                hostname=floating_ip.ip,
                cmd=cmd,
                auth=self.image_creds
            )
            logger.debug('OUTPUT: {}'.format(res['stdout_str']))
            assert_equal(res['exit_code'], 0, "VMxnet3 driver is not found")

        os_conn.delete_instance(vm)
        os_conn.verify_srv_deleted(vm)
Example #4
    def create_instance_with_vmxnet3_adapter(self):
        """Create instance with vmxnet3 adapter."""
        public_ip = self.fuel_web.get_public_vip(self.cluster_id)
        os_conn = OpenStackActions(public_ip)

        image = os_conn.get_image(self.image_name)
        os_conn.update_image(image,
                             properties={"hw_vif_model": "VirtualVmxnet3"})
        flavor = os_conn.get_flavor_by_name('m1.small')
        sg = os_conn.get_security_group(self.sg_name)
        net = os_conn.get_network(self.net_name)
        vm = os_conn.create_server(image=image,
                                   availability_zone=self.vcenter_az,
                                   net_id=net['id'],
                                   security_groups=[sg],
                                   flavor_id=flavor.id,
                                   timeout=900)
        floating_ip = os_conn.assign_floating_ip(vm)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22),
                     timeout=210,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])[0]
        with self.fuel_web.get_ssh_for_nailgun_node(controller) as remote:
            cmd = '/usr/bin/lshw -class network | grep vmxnet3'
            res = remote.execute_through_host(hostname=floating_ip.ip,
                                              cmd=cmd,
                                              auth=self.image_creds)
            logger.debug('OUTPUT: {}'.format(res['stdout_str']))
            assert_equal(res['exit_code'], 0, "VMxnet3 driver is not found")

        os_conn.delete_instance(vm)
        os_conn.verify_srv_deleted(vm)
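The hw_vif_model property can also be set from a controller shell with the stock OpenStack client; a hedged sketch in the style of the other examples on this page, reusing the controller node and the SSHManager helper shown elsewhere (the image name is a placeholder):

cmd = ('. openrc; openstack image set '
       '--property hw_vif_model=VirtualVmxnet3 <image-name>')
SSHManager().execute_on_remote(controller['ip'], cmd)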
Example #5
 def check_cinder_vmware_srv(self):
     """Verify cinder-vmware service."""
     ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
         self.cluster_id, ["controller"])
     cmd = '. openrc; cinder-manage service list | grep vcenter | ' \
           'grep ":-)"'
     logger.debug('CMD: {}'.format(cmd))
     SSHManager().execute_on_remote(ctrl_nodes[0]['ip'], cmd)
Example #6
 def check_cinder_vmware_srv(self):
     """Verify cinder-vmware service."""
     ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
         self.cluster_id, ["controller"])
     cmd = '. openrc; cinder-manage service list | grep vcenter | ' \
           'grep ":-)"'
     logger.debug('CMD: {}'.format(cmd))
     SSHManager().execute_on_remote(ctrl_nodes[0]['ip'], cmd)
Example #7
    def create_and_attach_empty_volume(self):
        """Create and attach to instance empty volume."""
        mount_point = '/dev/sdb'

        public_ip = self.fuel_web.get_public_vip(self.cluster_id)
        os_conn = OpenStackActions(public_ip)

        vol = os_conn.create_volume(availability_zone=self.cinder_az)
        image = os_conn.get_image(self.vmware_image)
        net = os_conn.get_network(self.net_name)
        sg = os_conn.get_security_group(self.sg_name)
        vm = os_conn.create_server(image=image,
                                   availability_zone=self.vcenter_az,
                                   security_groups=[sg],
                                   net_id=net['id'],
                                   timeout=210)
        floating_ip = os_conn.assign_floating_ip(vm)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22),
                     timeout=180,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        logger.info("Attaching volume via cli")
        ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])
        cmd = '. openrc; nova volume-attach {srv_id} {volume_id} {mount}' \
              ''.format(srv_id=vm.id, volume_id=vol.id, mount=mount_point)
        logger.debug('CMD: {}'.format(cmd))
        SSHManager().execute_on_remote(ctrl_nodes[0]['ip'], cmd)

        helpers.wait(lambda: os_conn.get_volume_status(vol) == "in-use",
                     timeout=30,
                     timeout_msg="Volume doesn't reach 'in-use' state")

        vm.reboot()
        sleep(10)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22),
                     timeout=180,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])[0]
        with self.fuel_web.get_ssh_for_nailgun_node(controller) as remote:
            cmd = 'sudo /sbin/fdisk -l | grep {}'.format(mount_point)
            res = remote.execute_through_host(hostname=floating_ip.ip,
                                              cmd=cmd,
                                              auth=cirros_auth)
            logger.debug('OUTPUT: {}'.format(res['stdout_str']))
            assert_equal(res['exit_code'], 0, "Attached volume is not found")

        os_conn.delete_instance(vm)
        os_conn.verify_srv_deleted(vm)
        os_conn.delete_volume(vol)
Example #8
    def create_and_attach_empty_volume(self):
        """Create and attach to instance empty volume."""
        mount_point = '/dev/sdb'

        public_ip = self.fuel_web.get_public_vip(self.cluster_id)
        os_conn = OpenStackActions(public_ip)

        vol = os_conn.create_volume(availability_zone=self.cinder_az)
        image = os_conn.get_image(self.vmware_image)
        net = os_conn.get_network(self.net_name)
        sg = os_conn.get_security_group(self.sg_name)
        vm = os_conn.create_server(image=image,
                                   availability_zone=self.vcenter_az,
                                   security_groups=[sg],
                                   net_id=net['id'],
                                   timeout=210)
        floating_ip = os_conn.assign_floating_ip(vm)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22), timeout=180,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        logger.info("Attaching volume via cli")
        ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])
        cmd = '. openrc; nova volume-attach {srv_id} {volume_id} {mount}' \
              ''.format(srv_id=vm.id, volume_id=vol.id, mount=mount_point)
        logger.debug('CMD: {}'.format(cmd))
        SSHManager().execute_on_remote(ctrl_nodes[0]['ip'], cmd)

        helpers.wait(
            lambda: os_conn.get_volume_status(vol) == "in-use",
            timeout=30, timeout_msg="Volume doesn't reach 'in-use' state")

        vm.reboot()
        sleep(10)
        helpers.wait(lambda: helpers.tcp_ping(floating_ip.ip, 22), timeout=180,
                     timeout_msg="Node {ip} is not accessible by SSH.".format(
                         ip=floating_ip.ip))

        controller = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])[0]
        with self.fuel_web.get_ssh_for_nailgun_node(controller) as remote:
            cmd = 'sudo /sbin/fdisk -l | grep {}'.format(mount_point)
            res = remote.execute_through_host(
                hostname=floating_ip.ip,
                cmd=cmd,
                auth=cirros_auth
            )
            logger.debug('OUTPUT: {}'.format(res['stdout_str']))
            assert_equal(res['exit_code'], 0, "Attached volume is not found")

        os_conn.delete_instance(vm)
        os_conn.verify_srv_deleted(vm)
        os_conn.delete_volume(vol)
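A hedged cleanup sketch: lines that could precede os_conn.delete_volume(vol) in the method above, detaching the volume through the same CLI path and waiting until cinder reports it as available again (the timeout value is an assumption):

        cmd = '. openrc; nova volume-detach {srv_id} {volume_id}'.format(
            srv_id=vm.id, volume_id=vol.id)
        SSHManager().execute_on_remote(ctrl_nodes[0]['ip'], cmd)
        helpers.wait(lambda: os_conn.get_volume_status(vol) == "available",
                     timeout=30,
                     timeout_msg="Volume doesn't reach 'available' state")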
Example #9
    def check_config(host, path, settings):
        """Return vmware glance backend conf_dict.

        :param host:     host url or ip, string
        :param path:     config path, string
        :param settings: settings, dict
        """
        for key in settings.keys():
            cmd = 'grep {1} {0} | grep -i "{2}"'.format(
                path, key, settings[key])
            logger.debug('CMD: {}'.format(cmd))
            SSHManager().check_call(host, cmd)
Example #10
    def check_config(host, path, settings):
        """Return vmware glance backend conf_dict.

        :param host:     host url or ip, string
        :param path:     config path, string
        :param settings: settings, dict
        """
        for key in settings.keys():
            cmd = 'grep {1} {0} | grep -i "{2}"'.format(path, key,
                                                        settings[key])
            logger.debug('CMD: {}'.format(cmd))
            SSHManager().check_call(host, cmd)
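A usage sketch for check_config; /etc/glance/glance-api.conf is the stock glance config location, while the host and the option names are illustrative placeholders rather than values from the source:

settings = {'vmware_server_host': '<vcenter-ip>',
            'vmware_datastores': '<datacenter:datastore>'}
check_config('<controller-ip>', '/etc/glance/glance-api.conf', settings)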
Example #11
 def check_gw_on_vmware_nodes(self):
     """Check that default gw != fuel node ip."""
     vmware_nodes = []
     vmware_nodes.extend(self.fuel_web.get_nailgun_cluster_nodes_by_roles(
         self.cluster_id, ["compute-vmware"]))
     vmware_nodes.extend(self.fuel_web.get_nailgun_cluster_nodes_by_roles(
         self.cluster_id, ["cinder-vmware"]))
     logger.debug('Fuel ip is {0}'.format(self.fuel_web.admin_node_ip))
     for node in vmware_nodes:
         cmd = "ip route | grep default | awk '{print $3}'"
         gw_ip = SSHManager().execute_on_remote(node['ip'], cmd)
         logger.debug('Default gw for node {0} is {1}'.format(
             node['name'], gw_ip['stdout_str']))
         assert_not_equal(gw_ip['stdout_str'], self.fuel_web.admin_node_ip)
Example #12
    def check_nova_srv(self):
        """Verify nova-compute service for each vSphere cluster."""
        vmware_attr = self.fuel_web.client.get_cluster_vmware_attributes(
            self.cluster_id)
        az = vmware_attr['editable']['value']['availability_zones'][0]
        nova_computes = az['nova_computes']

        ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])
        for nova in nova_computes:
            srv_name = nova['service_name']
            cmd = '. openrc; nova-manage service describe_resource ' \
                  'vcenter-{}'.format(srv_name)
            logger.debug('CMD: {}'.format(cmd))
            SSHManager().execute_on_remote(ctrl_nodes[0]['ip'], cmd)
Example #13
 def mcollective_nodes_online(self):
     nodes_uids = set([
         str(node['id']) for node in
         self.fuel_web.client.list_cluster_nodes(self.cluster_id)
     ])
     ssh_manager = SSHManager()
     out = ssh_manager.execute_on_remote(ip=ssh_manager.admin_ip,
                                         cmd='mco find',
                                         assert_ec_equal=[0,
                                                          1])['stdout_str']
     ready_nodes_uids = set(out.split('\n'))
     unavailable_nodes = nodes_uids - ready_nodes_uids
      logger.debug('Nodes {0} are not reachable via'
                  ' mcollective'.format(unavailable_nodes))
     return not unavailable_nodes
Example #14
 def mcollective_nodes_online(self):
     nodes_uids = set(
         [str(node['id']) for node in
          self.fuel_web.client.list_cluster_nodes(self.cluster_id)]
     )
     ssh_manager = SSHManager()
     out = ssh_manager.execute_on_remote(
         ip=ssh_manager.admin_ip,
         cmd='mco find',
         assert_ec_equal=[0, 1]
     )['stdout_str']
     ready_nodes_uids = set(out.split('\n'))
     unavailable_nodes = nodes_uids - ready_nodes_uids
      logger.debug('Nodes {0} are not reachable via'
                  ' mcollective'.format(unavailable_nodes))
     return not unavailable_nodes
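Because the method returns a boolean, it plugs directly into the same wait helper used by the other examples; a sketch where the timeout value is an assumption:

helpers.wait(lambda: self.mcollective_nodes_online(), timeout=60 * 10,
             timeout_msg='Not all nodes are reachable via mcollective')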
Example #15
 def check_gw_on_vmware_nodes(self):
     """Check that default gw != fuel node ip."""
     vmware_nodes = []
     vmware_nodes.extend(
         self.fuel_web.get_nailgun_cluster_nodes_by_roles(
             self.cluster_id, ["compute-vmware"]))
     vmware_nodes.extend(
         self.fuel_web.get_nailgun_cluster_nodes_by_roles(
             self.cluster_id, ["cinder-vmware"]))
     logger.debug('Fuel ip is {0}'.format(self.fuel_web.admin_node_ip))
     for node in vmware_nodes:
         cmd = "ip route | grep default | awk '{print $3}'"
         gw_ip = SSHManager().execute_on_remote(node['ip'], cmd)
         logger.debug('Default gw for node {0} is {1}'.format(
             node['name'], gw_ip['stdout_str']))
         assert_not_equal(gw_ip['stdout_str'], self.fuel_web.admin_node_ip)
Example #16
    def check_nova_srv(self):
        """Verify nova-compute service for each vSphere cluster."""
        vmware_attr = self.fuel_web.client.get_cluster_vmware_attributes(
            self.cluster_id)
        az = vmware_attr['editable']['value']['availability_zones'][0]
        nova_computes = az['nova_computes']

        ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])
        for nova in nova_computes:
            srv_name = nova['service_name']
            cmd = '. openrc; nova-manage service describe_resource ' \
                  'vcenter-{}'.format(srv_name)
            logger.debug('CMD: {}'.format(cmd))
            SSHManager().execute_on_remote(ctrl_nodes[0]['ip'],
                                           cmd)
Example #17
 def wrapper(*args, **kwargs):
     try:
         result = func(*args, **kwargs)
     except SkipTest:
         raise SkipTest()
     except Exception as test_exception:
         exc_trace = sys.exc_traceback
         name = 'error_%s' % func.__name__
         case_name = getattr(func, '_base_class', None)
         step_num = getattr(func, '_step_num', None)
         config_name = getattr(func, '_config_case_group', None)
         description = "Failed in method '%s'." % func.__name__
         if args[0].env is not None:
             try:
                 create_diagnostic_snapshot(args[0].env,
                                            "fail", name)
             except:
                 logger.error("Fetching of diagnostic snapshot failed: {0}".
                              format(traceback.format_exc()))
                 try:
                     with args[0].env.d_env.get_admin_remote()\
                             as admin_remote:
                         pull_out_logs_via_ssh(admin_remote, name)
                 except:
                     logger.error("Fetching of raw logs failed: {0}".
                                  format(traceback.format_exc()))
             finally:
                 logger.debug(args)
                 try:
                     if all([case_name, step_num, config_name]):
                         _hash = hashlib.sha256(config_name)
                         _hash = _hash.hexdigest()[:8]
                         snapshot_name = "{case}_{config}_{step}".format(
                             case=case_name,
                             config=_hash,
                             step="Step{:03d}".format(step_num)
                         )
                     else:
                         snapshot_name = name[-50:]
                     args[0].env.make_snapshot(snapshot_name=snapshot_name,
                                               description=description,
                                               is_make=True)
                 except:
                     logger.error("Error making the environment snapshot:"
                                  " {0}".format(traceback.format_exc()))
         raise test_exception, None, exc_trace
     return result
Example #18
 def wrapper(*args, **kwargs):
     try:
         result = func(*args, **kwargs)
     except SkipTest:
         raise SkipTest()
     except Exception as test_exception:
         exc_trace = sys.exc_traceback
         name = 'error_%s' % func.__name__
         case_name = getattr(func, '_base_class', None)
         step_num = getattr(func, '_step_num', None)
         config_name = getattr(func, '_config_case_group', None)
         description = "Failed in method '%s'." % func.__name__
         if args[0].env is not None:
             try:
                 create_diagnostic_snapshot(args[0].env, "fail", name)
             except:
                 logger.error(
                     "Fetching of diagnostic snapshot failed: {0}".format(
                         traceback.format_exc()))
                 try:
                     with args[0].env.d_env.get_admin_remote()\
                             as admin_remote:
                         pull_out_logs_via_ssh(admin_remote, name)
                 except:
                     logger.error("Fetching of raw logs failed: {0}".format(
                         traceback.format_exc()))
             finally:
                 logger.debug(args)
                 try:
                     if all([case_name, step_num, config_name]):
                         _hash = hashlib.sha256(config_name)
                         _hash = _hash.hexdigest()[:8]
                         snapshot_name = "{case}_{config}_{step}".format(
                             case=case_name,
                             config=_hash,
                             step="Step{:03d}".format(step_num))
                     else:
                         snapshot_name = name[-50:]
                     args[0].env.make_snapshot(snapshot_name=snapshot_name,
                                               description=description,
                                               is_make=True)
                 except:
                     logger.error("Error making the environment snapshot:"
                                  " {0}".format(traceback.format_exc()))
         raise test_exception, None, exc_trace
     return result
Example #19
    def check_nova_conf(self):
        """Verify nova-compute vmware configuration"""

        nodes = self.fuel_web.client.list_cluster_nodes(self.cluster_id)
        vmware_attr = self.fuel_web.client.get_cluster_vmware_attributes(
            self.cluster_id)
        az = vmware_attr['editable']['value']['availability_zones'][0]
        nova_computes = az['nova_computes']

        data = []
        ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])
        for nova in nova_computes:
            target_node = nova['target_node']['current']['id']
            if target_node == 'controllers':
                conf_path = '/etc/nova/nova-compute.d/vmware-vcenter_{0}.' \
                            'conf'.format(nova['service_name'])
                for node in ctrl_nodes:
                    hostname = node['hostname']
                    ip = node['ip']
                    conf_dict = self.get_nova_conf_dict(az, nova)
                    params = (hostname, ip, conf_path, conf_dict)
                    data.append(params)
            else:
                conf_path = '/etc/nova/nova-compute.conf'
                for node in nodes:
                    if node['hostname'] == target_node:
                        hostname = node['hostname']
                        ip = node['ip']
                        conf_dict = self.get_nova_conf_dict(az, nova)
                        params = (hostname, ip, conf_path, conf_dict)
                        data.append(params)

        for hostname, ip, conf_path, conf_dict in data:
            logger.info("Check nova conf of {0}".format(hostname))
            for key in conf_dict.keys():
                cmd = 'cat {0} | grep {1}={2}'.format(conf_path, key,
                                                      conf_dict[key])
                logger.debug('CMD: {}'.format(cmd))
                SSHManager().execute_on_remote(ip, cmd)
Example #20
    def check_nova_conf(self):
        """Verify nova-compute vmware configuration"""

        nodes = self.fuel_web.client.list_cluster_nodes(self.cluster_id)
        vmware_attr = self.fuel_web.client.get_cluster_vmware_attributes(
            self.cluster_id)
        az = vmware_attr['editable']['value']['availability_zones'][0]
        nova_computes = az['nova_computes']

        data = []
        ctrl_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
            self.cluster_id, ["controller"])
        for nova in nova_computes:
            target_node = nova['target_node']['current']['id']
            if target_node == 'controllers':
                conf_path = '/etc/nova/nova-compute.d/vmware-vcenter_{0}.' \
                            'conf'.format(nova['service_name'])
                for node in ctrl_nodes:
                    hostname = node['hostname']
                    ip = node['ip']
                    conf_dict = self.get_nova_conf_dict(az, nova)
                    params = (hostname, ip, conf_path, conf_dict)
                    data.append(params)
            else:
                conf_path = '/etc/nova/nova-compute.conf'
                for node in nodes:
                    if node['hostname'] == target_node:
                        hostname = node['hostname']
                        ip = node['ip']
                        conf_dict = self.get_nova_conf_dict(az, nova)
                        params = (hostname, ip, conf_path, conf_dict)
                        data.append(params)

        for hostname, ip, conf_path, conf_dict in data:
            logger.info("Check nova conf of {0}".format(hostname))
            for key in conf_dict.keys():
                cmd = 'cat {0} | grep {1}={2}'.format(conf_path, key,
                                                      conf_dict[key])
                logger.debug('CMD: {}'.format(cmd))
                SSHManager().execute_on_remote(ip, cmd)
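get_nova_conf_dict is not shown in these examples; a hedged sketch of the kind of mapping it is expected to return, built from the availability-zone and nova_computes fields used elsewhere on this page (the option names follow nova's VMware driver settings and are an assumption here):

    def get_nova_conf_dict(self, az, nova):
        # Sketch only: map Fuel's vCenter attributes onto nova-compute
        # [vmware] options; option names are assumed, values come from
        # the structures shown above.
        return {'host_ip': az['vcenter_host'],
                'host_username': az['vcenter_username'],
                'cluster_name': nova['vsphere_cluster'],
                'datastore_regex': nova['datastore_regex']}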
Example #21
    def configure_vcenter(self):
        """Configure vCenter settings"""

        vmware_vcenter = self.env_settings['vmware_vcenter']

        vcenter_value = {
            "glance": {
                "vcenter_username": "",
                "datacenter": "",
                "vcenter_host": "",
                "vcenter_password": "",
                "datastore": ""
            },
            "availability_zones": [{
                "vcenter_username":
                vmware_vcenter['settings']['user'],
                "nova_computes": [],
                "vcenter_host":
                vmware_vcenter['settings']['host'],
                "az_name":
                vmware_vcenter['settings']['az'],
                "vcenter_password":
                vmware_vcenter['settings']['pwd']
            }]
        }

        clusters = vmware_vcenter['nova-compute']
        nodes = self.fuel_web.client.list_cluster_nodes(self.cluster_id)
        roles = ['compute-vmware']
        comp_vmware_nodes = [
            n for n in nodes if set(roles) <= set(n['pending_roles'])
        ]

        for cluster in clusters:
            cluster_name = cluster['cluster']
            srv_name = cluster['srv_name']
            datastore = cluster['datastore']
            if cluster['target_node'] == 'compute-vmware':
                node = comp_vmware_nodes.pop()
                target_node = node['hostname']
            else:
                target_node = cluster['target_node']

            vcenter_value["availability_zones"][0]["nova_computes"].append({
                "vsphere_cluster":
                cluster_name,
                "service_name":
                srv_name,
                "datastore_regex":
                datastore,
                "target_node": {
                    "current": {
                        "id": target_node,
                        "label": target_node
                    },
                    "options": [
                        {
                            "id": target_node,
                            "label": target_node
                        },
                    ]
                },
            })

        if vmware_vcenter['glance']['enable']:
            attributes = self.fuel_web.client.get_cluster_attributes(
                self.cluster_id)
            attributes['editable']['storage']['images_vcenter']['value'] =\
                vmware_vcenter['glance']['enable']
            self.fuel_web.client.update_cluster_attributes(
                self.cluster_id, attributes)

            vcenter_value["glance"]["vcenter_host"] = vmware_vcenter['glance'][
                'host']
            vcenter_value["glance"]["vcenter_username"] = vmware_vcenter[
                'glance']['user']
            vcenter_value["glance"]["vcenter_password"] = vmware_vcenter[
                'glance']['pwd']
            vcenter_value["glance"]["datacenter"] = vmware_vcenter['glance'][
                'datacenter']
            vcenter_value["glance"]["datastore"] = vmware_vcenter['glance'][
                'datastore']

        logger.info('Configuring vCenter...')

        vmware_attr = \
            self.fuel_web.client.get_cluster_vmware_attributes(self.cluster_id)
        vcenter_data = vmware_attr['editable']
        vcenter_data['value'] = vcenter_value
        logger.debug("Try to update cluster with next "
                     "vmware_attributes {0}".format(vmware_attr))
        self.fuel_web.client.update_cluster_vmware_attributes(
            self.cluster_id, vmware_attr)

        logger.debug("Attributes of cluster have been updated")
Example #22
    def configure_vcenter(self):
        """Configure vCenter settings"""

        vmware_vcenter = self.env_settings['vmware_vcenter']

        vcenter_value = {
            "glance": {"vcenter_username": "",
                       "datacenter": "",
                       "vcenter_host": "",
                       "vcenter_password": "",
                       "datastore": ""
                       },
            "availability_zones": [
                {"vcenter_username": vmware_vcenter['settings']['user'],
                 "nova_computes": [],
                 "vcenter_host": vmware_vcenter['settings']['host'],
                 "az_name": vmware_vcenter['settings']['az'],
                 "vcenter_password": vmware_vcenter['settings']['pwd']
                 }]
        }

        clusters = vmware_vcenter['nova-compute']
        nodes = self.fuel_web.client.list_cluster_nodes(self.cluster_id)
        roles = ['compute-vmware']
        comp_vmware_nodes = [n for n in nodes if set(roles) <=
                             set(n['pending_roles'])]

        for cluster in clusters:
            cluster_name = cluster['cluster']
            srv_name = cluster['srv_name']
            datastore = cluster['datastore']
            if cluster['target_node'] == 'compute-vmware':
                node = comp_vmware_nodes.pop()
                target_node = node['hostname']
            else:
                target_node = cluster['target_node']

            vcenter_value["availability_zones"][0]["nova_computes"].append(
                {"vsphere_cluster": cluster_name,
                 "service_name": srv_name,
                 "datastore_regex": datastore,
                 "target_node": {
                     "current": {"id": target_node,
                                 "label": target_node},
                     "options": [{"id": target_node,
                                  "label": target_node}, ]},
                 }
            )

        if vmware_vcenter['glance']['enable']:
            vcenter_value["glance"]["vcenter_host"] = vmware_vcenter[
                'glance']['host']
            vcenter_value["glance"]["vcenter_username"] = vmware_vcenter[
                'glance']['user']
            vcenter_value["glance"]["vcenter_password"] = vmware_vcenter[
                'glance']['pwd']
            vcenter_value["glance"]["datacenter"] = vmware_vcenter[
                'glance']['datacenter']
            vcenter_value["glance"]["datastore"] = vmware_vcenter[
                'glance']['datastore']

        logger.info('Configuring vCenter...')

        vmware_attr = \
            self.fuel_web.client.get_cluster_vmware_attributes(self.cluster_id)
        vcenter_data = vmware_attr['editable']
        vcenter_data['value'] = vcenter_value
        logger.debug("Try to update cluster with next "
                     "vmware_attributes {0}".format(vmware_attr))
        self.fuel_web.client.update_cluster_vmware_attributes(self.cluster_id,
                                                              vmware_attr)

        logger.debug("Attributes of cluster have been updated")