def _get_hostname_from_bridge_if(self):
    """Resolve and store the host name to be used for the deployment.

    In the ansible flow the plain socket hostname is used for both
    NetworkEnv.HOST_NAME and EngineEnv.APP_HOST_NAME (when unset).
    Otherwise the IP address of the selected nic/bond/vlan (when this
    plugin is enabled) or of the already-existing management bridge is
    obtained from vdsm capabilities and reverse-resolved; the resulting
    hostname is stored as NetworkEnv.HOST_NAME.

    Raises:
        RuntimeError: when no address can be acquired, or when the
            resolved hostname also maps to addresses other than the
            selected interface's one.
    """
    if self.environment[ohostedcons.CoreEnv.ANSIBLE_DEPLOYMENT]:
        # TODO: properly handle it without vdsm
        if not self.environment[ohostedcons.NetworkEnv.HOST_NAME]:
            self.environment[
                ohostedcons.NetworkEnv.HOST_NAME] = socket.gethostname()
        if not self.environment[ohostedcons.EngineEnv.APP_HOST_NAME]:
            self.environment[ohostedcons.EngineEnv.
                             APP_HOST_NAME] = socket.gethostname()
    else:
        ipaddr = None
        if self._enabled:
            # acquiring interface address
            configuration, status = vds_info.network(
                vds_info.capabilities(
                    self.environment[ohostedcons.VDSMEnv.VDS_CLI]),
                self.environment[ohostedcons.NetworkEnv.BRIDGE_IF],
            )
            self.logger.debug('Network info: {info}'.format(info=status))
            if 'ipaddr' not in status:
                raise RuntimeError(
                    _('Cannot acquire nic/bond/vlan address'))
            ipaddr = status['ipaddr']
        else:
            # acquiring bridge address
            caps = vds_info.capabilities(
                self.environment[ohostedcons.VDSMEnv.VDS_CLI])
            if 'networks' in caps:
                networks = caps['networks']
                if self.environment[
                    ohostedcons.NetworkEnv.BRIDGE_NAME
                ] in networks:
                    bridge = networks[self.environment[
                        ohostedcons.NetworkEnv.BRIDGE_NAME]]
                    if 'addr' in bridge:
                        ipaddr = bridge['addr']
        if not ipaddr:
            raise RuntimeError(_('Cannot acquire bridge address'))
        hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ipaddr)
        self.logger.debug(
            "hostname: '{h}', aliaslist: '{a}', ipaddrlist: '{i}'".format(
                h=hostname,
                a=aliaslist,
                i=ipaddrlist,
            ))
        if len(ipaddrlist) > 1:
            # The hostname must resolve only on the chosen interface,
            # otherwise engine/host PKI and routing may go elsewhere.
            other_ip = set(ipaddrlist) - set([ipaddr])
            raise RuntimeError(
                _("hostname '{h}' doesn't uniquely match the interface "
                  "'{i}' selected for the management bridge; "
                  "it matches also interface with IP {o}. "
                  "Please make sure that the hostname got from "
                  "the interface for the management network resolves "
                  "only there.").format(
                    h=hostname,
                    i=self.environment[ohostedcons.NetworkEnv.BRIDGE_IF],
                    o=other_ip,
                ))
        self.environment[ohostedcons.NetworkEnv.HOST_NAME] = hostname
def _getCompatibleCpuModels(self):
    """Return a (cpuModel, compatible-model-flags) pair from vdsm caps.

    The second element lists every CPU flag starting with 'model_',
    i.e. the models the host CPU is compatible with.
    """
    capabilities = vds_info.capabilities(
        self.environment[ohostedcons.VDSMEnv.VDS_CLI])
    compatible_models = []
    for flag in capabilities['cpuFlags'].split(','):
        if flag.startswith('model_'):
            compatible_models.append(flag)
    return (capabilities['cpuModel'], compatible_models)
def _misc(self):
    """Create the management bridge through vdsm and persist the config."""
    self.logger.info(_('Configuring the management bridge'))
    vdsm_cli = self.environment[ohostedcons.VDSMEnv.VDS_CLI]
    bridge_name = self.environment[ohostedcons.NetworkEnv.BRIDGE_NAME]
    bridge_config = vds_info.network(
        vds_info.capabilities(vdsm_cli),
        self.environment[ohostedcons.NetworkEnv.BRIDGE_IF])
    # No bonds to define; skip the connectivity check since the engine
    # is not reachable yet at this stage.
    _setupNetworks(
        vdsm_cli,
        {bridge_name: bridge_config},
        {},
        {'connectivityCheck': False},
    )
    _setSafeNetworkConfig(vdsm_cli)
def _misc(self):
    """Set up the management bridge network and make it persistent."""
    self.logger.info(_('Configuring the management bridge'))
    conn = self.environment[ohostedcons.VDSMEnv.VDS_CLI]
    caps = vds_info.capabilities(conn)
    net_config = vds_info.network(
        caps,
        self.environment[ohostedcons.NetworkEnv.BRIDGE_IF],
    )
    networks = {
        self.environment[ohostedcons.NetworkEnv.BRIDGE_NAME]: net_config,
    }
    # Engine is not up yet, so the connectivity check must stay off.
    options = {'connectivityCheck': False}
    _setupNetworks(conn, networks, {}, options)
    _setSafeNetworkConfig(conn)
def _customization(self):
    """Choose the nic/bond/vlan to build the management bridge on.

    Collects candidate devices from vdsm capabilities, drops interfaces
    already enslaved to a bridge and bonds without slaves, and prompts
    the user for the bridge interface when it was not preconfigured.

    Raises:
        RuntimeError: when no suitable network interface is available.
    """
    info = netinfo.NetInfo(
        vds_info.capabilities(
            self.environment[ohostedcons.VDSMEnv.VDS_CLI]))
    # dict.keys() concatenation is Python-2 only (dict_keys views do not
    # support '+' on Python 3); set union is equivalent and portable.
    interfaces = set(info.nics) | set(info.bondings) | set(info.vlans)
    enslaved = set()
    inv_bond = set()
    for bridge in info.bridges:
        enslaved.update(info.bridges[bridge]['ports'])
    for bond in info.bondings:
        slaves = set(info.bondings[bond]['slaves'])
        if slaves:
            enslaved.update(slaves)
        else:
            # A bond with no slaves cannot carry the management bridge.
            self.logger.debug('Detected bond device %s without slaves'
                              % bond)
            inv_bond.add(bond)
    validValues = list(interfaces - enslaved - inv_bond)
    self.logger.debug('Nics detected: %s' % ','.join(interfaces))
    self.logger.debug('Nics enslaved: %s' % ','.join(enslaved))
    self.logger.debug('Nics valid: %s' % ','.join(validValues))
    if not validValues:
        if enslaved:
            raise RuntimeError(
                _('The following existing interfaces are not suitable '
                  'for vdsm: {enslaved}. You might want to pull out an '
                  'interface out of a bridge to be able to use it').format(
                    enslaved=','.join(enslaved)))
        else:
            raise RuntimeError(_('A Network interface is required'))
    interactive = self.environment[
        ohostedcons.NetworkEnv.BRIDGE_IF] is None
    if interactive:
        default = ohostedcons.Defaults.DEFAULT_BRIDGE_IF
        if default not in validValues:
            default = validValues[0]
        self.environment[
            ohostedcons.NetworkEnv.BRIDGE_IF] = self.dialog.queryString(
                name='ovehosted_bridge_if',
                note=_(
                    'Please indicate a nic to set '
                    '{bridge} bridge on: (@VALUES@) [@DEFAULT@]: ').format(
                        bridge=self.environment[
                            ohostedcons.NetworkEnv.BRIDGE_NAME]),
                prompt=True,
                caseSensitive=True,
                default=default,
                validValues=validValues,
            )
def _get_existing_bridge_interface(self):
    """Detect which interface the management bridge was created on.

    Scans nics, bondings and vlans for a 'cfg' whose BRIDGE entry equals
    the configured bridge name; BRIDGE_IF is stored only when exactly
    one candidate is found, otherwise a warning is logged.
    """
    info = CachingNetInfo(
        vds_info.capabilities(
            self.environment[ohostedcons.VDSMEnv.VDS_CLI]
        )
    )
    bridge_name = self.environment[ohostedcons.NetworkEnv.BRIDGE_NAME]
    bridge_ifs = []
    # Same scan order as before: nics, then bondings, then vlans.
    for devices in (info.nics, info.bondings, info.vlans):
        for dev_name in devices.keys():
            if 'cfg' not in devices[dev_name]:
                continue
            cfg = devices[dev_name]['cfg']
            if 'BRIDGE' in cfg and cfg['BRIDGE'] == bridge_name:
                bridge_ifs.append(dev_name)
    if len(bridge_ifs) > 1:
        self.logger.warning(
            _(
                'Unable to uniquely detect the interface where Bridge '
                '{bridge} has been created on, {bridge_ifs} appear to be '
                'valid alternatives'
            ).format(
                bridge=bridge_name,
                bridge_ifs=bridge_ifs,
            )
        )
    elif len(bridge_ifs) < 1:
        self.logger.warning(
            _(
                'Unable to detect the interface where Bridge '
                '{bridge} has been created on'
            ).format(
                bridge=bridge_name,
            )
        )
    else:
        self.environment[
            ohostedcons.NetworkEnv.BRIDGE_IF
        ] = bridge_ifs[0]
def _misc(self):
    """Configure the management bridge through vdsm and save the setup."""
    self.logger.info(_('Configuring the management bridge'))
    vdsm = self.environment[ohostedcons.VDSMEnv.VDS_CLI]
    bridge_conf, bridge_status = vds_info.network(
        vds_info.capabilities(vdsm),
        self.environment[ohostedcons.NetworkEnv.BRIDGE_IF])
    net_map = {
        self.environment[ohostedcons.NetworkEnv.BRIDGE_NAME]: bridge_conf,
    }
    bond_map = {}
    # Engine is unreachable at this point: keep the check disabled.
    setup_options = {'connectivityCheck': False}
    self.logger.debug('networks: {networks}'.format(networks=net_map))
    self.logger.debug('bonds: {bonds}'.format(bonds=bond_map))
    self.logger.debug('options: {options}'.format(options=setup_options))
    _setupNetworks(vdsm, net_map, bond_map, setup_options)
    _setSafeNetworkConfig(vdsm)
def _misc(self):
    """Build the management bridge on the chosen interface and persist it."""
    self.logger.info(_('Configuring the management bridge'))
    conn = self.environment[ohostedcons.VDSMEnv.VDS_CLI]
    caps = vds_info.capabilities(conn)
    nconf, nstatus = vds_info.network(
        caps,
        self.environment[ohostedcons.NetworkEnv.BRIDGE_IF],
    )
    bridge = self.environment[ohostedcons.NetworkEnv.BRIDGE_NAME]
    networks = {bridge: nconf}
    bonds = {}
    options = {'connectivityCheck': False}
    # Trace exactly what is handed to vdsm setupNetworks.
    self.logger.debug('networks: {networks}'.format(networks=networks))
    self.logger.debug('bonds: {bonds}'.format(bonds=bonds))
    self.logger.debug('options: {options}'.format(options=options))
    _setupNetworks(conn, networks, bonds, options)
    _setSafeNetworkConfig(conn)
def _get_existing_bridge_interface(self):
    """Store the bridge's single port as BRIDGE_IF, warning when ambiguous."""
    env = self.environment
    caps = vds_info.capabilities(env[ohostedcons.VDSMEnv.VDS_CLI])
    bridge_name = env[ohostedcons.NetworkEnv.BRIDGE_NAME]
    bridge_network = caps['networks'].get(bridge_name)
    if bridge_network:
        bridge_ifs = bridge_network['ports']
    else:
        bridge_ifs = []
    n_ports = len(bridge_ifs)
    if n_ports == 1:
        # Exactly one port: that is the interface the bridge sits on.
        env[ohostedcons.NetworkEnv.BRIDGE_IF] = bridge_ifs[0]
    elif n_ports > 1:
        self.logger.warning(
            _('Unable to uniquely detect the interface where Bridge '
              '{bridge} has been created on, {bridge_ifs} appear to be '
              'valid alternatives').format(
                bridge=env[
                    ohostedcons.NetworkEnv.BRIDGE_NAME],
                bridge_ifs=bridge_ifs,
            ))
    else:
        self.logger.warning(
            _('Unable to detect the interface where Bridge '
              '{bridge} has been created on').format(
                bridge=env[
                    ohostedcons.NetworkEnv.BRIDGE_NAME],
            ))
def _closeup(self):
    # TODO: refactor into shorter and simpler functions
    """Add this host to the engine and align the cluster configuration.

    Flow: fetch SSH key and host-deploy config, connect to the engine,
    pick/validate the destination cluster (prompting when needed), make
    the management network vlanned when the bridge sits on a vlan, add
    the host, then - once the host is up - set the cluster CPU level.
    Engine request failures are reported and retried after manual
    intervention via check_liveliness.
    """
    self._getSSH()
    self._configureHostDeploy()
    cluster_name = None
    default_cluster_name = 'Default'
    engine_api = engineapi.get_engine_api(self)
    added_to_cluster = False
    while not added_to_cluster:
        try:
            cluster_name = self.environment[
                ohostedcons.EngineEnv.HOST_CLUSTER_NAME]
            if not self.environment[ohostedcons.EngineEnv.APP_HOST_NAME]:
                self.environment[ohostedcons.EngineEnv.
                                 APP_HOST_NAME] = socket.gethostname()
            self.logger.debug(
                "Getting the list of available clusters via engine's APIs")
            if cluster_name is not None:
                # A preconfigured cluster must actually exist.
                if cluster_name not in [
                    c.get_name() for c in engine_api.clusters.list()
                ]:
                    raise RuntimeError(
                        _('Specified cluster does not exist: {cluster}').
                        format(cluster=cluster_name, ))
            else:
                cluster_l = [
                    c.get_name() for c in engine_api.clusters.list()
                ]
                cluster_name = (default_cluster_name
                                if default_cluster_name in cluster_l
                                else cluster_l[0])
                # Only prompt when there is an actual choice to make.
                if len(cluster_l) > 1:
                    cluster_name = self.dialog.queryString(
                        name='cluster_name',
                        note=_('Enter the name of the cluster to which '
                               'you want to add the host (@VALUES@) '
                               '[@DEFAULT@]: '),
                        prompt=True,
                        default=cluster_name,
                        validValues=cluster_l,
                    )
            self.environment[
                ohostedcons.EngineEnv.HOST_CLUSTER_NAME] = cluster_name
            cluster = engine_api.clusters.get(cluster_name)
            conn = self.environment[ohostedcons.VDSMEnv.VDS_CLI]
            caps = vds_info.capabilities(conn)
            bridge_port = self.environment[
                ohostedcons.NetworkEnv.BRIDGE_IF]
            if bridge_port in caps['vlans']:
                # Bridge lives on a vlan: the engine's management
                # network must carry the same vlan id.
                self.logger.debug(
                    "Updating engine's management network to be vlanned")
                vlan_id = caps['vlans'][bridge_port]['vlanid']
                self.logger.debug(
                    "Getting engine's management network via engine's APIs"
                )
                cluster_mgmt_network = cluster.networks.get(
                    name=self.environment[
                        ohostedcons.NetworkEnv.BRIDGE_NAME])
                mgmt_network_id = cluster_mgmt_network.get_id()
                mgmt_network = engine_api.networks.get(id=mgmt_network_id)
                mgmt_network.set_vlan(
                    self._ovirtsdk_xml.params.VLAN(id=vlan_id))
                mgmt_network.update()
                self._wait_network_vlan_ready(
                    engine_api, mgmt_network_id, vlan_id)
            self.logger.debug('Adding the host to the cluster')
            engine_api.hosts.add(
                self._ovirtsdk_xml.params.Host(
                    name=self.environment[
                        ohostedcons.EngineEnv.APP_HOST_NAME],
                    # Note that the below is required for compatibility
                    # with vdsm-generated pki. See bz 1178535.
                    address=self.environment[
                        ohostedcons.NetworkEnv.HOST_NAME],
                    cluster=cluster,
                    ssh=self._ovirtsdk_xml.params.SSH(
                        authentication_method='publickey',
                        port=self.environment[
                            ohostedcons.NetworkEnv.SSHD_PORT],
                    ),
                    override_iptables=self.environment[
                        otopicons.NetEnv.IPTABLES_ENABLE],
                ))
            added_to_cluster = True
        except ovirtsdk.infrastructure.errors.RequestError as e:
            self.logger.debug(
                'Cannot add the host to cluster {cluster}'.format(
                    cluster=cluster_name, ),
                exc_info=True,
            )
            self.logger.error(
                _('Cannot automatically add the host '
                  'to cluster {cluster}:\n{details}\n').format(
                    cluster=cluster_name, details=e.detail))
            # Block until the operator resolves the issue manually,
            # then retry the whole add-to-cluster sequence.
            while not check_liveliness.manualSetupDispatcher(
                    self, check_liveliness.MSD_FURTHER_ACTIONS,
                    self.environment[
                        ohostedcons.NetworkEnv.OVIRT_HOSTED_ENGINE_FQDN]):
                pass
    up = self._wait_host_ready(
        engine_api,
        self.environment[ohostedcons.EngineEnv.APP_HOST_NAME])
    # TODO: host-deploy restarted vdscli so we need to
    # connect again
    if not up:
        self.logger.error(
            _('Unable to add {host} to the manager').format(
                host=self.environment[
                    ohostedcons.EngineEnv.APP_HOST_NAME], ))
    else:
        # This works only if the host is up.
        self.logger.debug('Setting CPU for the cluster')
        try:
            cluster, cpu = self._wait_cluster_cpu_ready(
                engine_api, cluster_name)
            self.logger.debug(cpu.__dict__)
            cpu.set_id(self.environment[ohostedcons.VDSMEnv.ENGINE_CPU])
            cluster.set_cpu(cpu)
            cluster.update()
        except ovirtsdk.infrastructure.errors.RequestError as e:
            # Best-effort: a failed CPU-level update is reported but
            # does not abort the closeup.
            self.logger.debug(
                'Cannot set CPU level of cluster {cluster}'.format(
                    cluster=cluster_name, ),
                exc_info=True,
            )
            self.logger.error(
                _('Cannot automatically set CPU level '
                  'of cluster {cluster}:\n{details}\n').format(
                    cluster=cluster_name, details=e.detail))
    engine_api.disconnect()
def _closeup(self):
    # TODO: refactor into shorter and simpler functions
    """Connect to the engine and register this host in a cluster.

    Flow: fetch CA and SSH key, loop until an engine API connection is
    established (re-prompting for the admin password on HTTP 401), then
    pick/validate the destination cluster, vlan-tag the management
    network when needed, optionally enable the gluster service, add the
    host and finally set the cluster CPU level once the host is up.
    """
    self._getCA()
    self._getSSH()
    cluster_name = None
    default_cluster_name = 'Default'
    valid = False
    fqdn = self.environment[
        ohostedcons.NetworkEnv.OVIRT_HOSTED_ENGINE_FQDN]
    while not valid:
        try:
            self.logger.info(_('Connecting to the Engine'))
            insecure = False
            if self.environment[ohostedcons.EngineEnv.INSECURE_SSL]:
                insecure = True
            engine_api = self._ovirtsdk_api.API(
                url='https://{fqdn}/ovirt-engine/api'.format(fqdn=fqdn, ),
                username='******',
                password=self.environment[
                    ohostedcons.EngineEnv.ADMIN_PASSWORD],
                ca_file=self.environment[
                    ohostedcons.EngineEnv.TEMPORARY_CERT_FILE],
                insecure=insecure,
            )
            # Probe call: proves both connectivity and credentials.
            engine_api.clusters.list()
            valid = True
        except ovirtsdk.infrastructure.errors.RequestError as e:
            if e.status == 401:
                if self._interactive_admin_pwd:
                    # NOTE(review): the adjacent literals below render as
                    # "didnt accepted" - apostrophe and grammar look
                    # broken; confirm the intended message.
                    self.logger.error(
                        _('The engine API didn' 't accepted '
                          'the administrator password you provided\n'
                          'Please enter it again to retry.'))
                    self.environment[
                        ohostedcons.EngineEnv.
                        ADMIN_PASSWORD] = self.dialog.queryString(
                            name='ENGINE_ADMIN_PASSWORD',
                            note=_('Enter ' 'admin@internal'
                                   ' user password that '
                                   'will be used for accessing '
                                   'the Administrator Portal: '),
                            prompt=True,
                            hidden=True,
                        )
                else:
                    # Non-interactive run: a wrong password is fatal.
                    raise RuntimeError(
                        _('The engine API didn' 't accepted '
                          'the administrator password you provided\n'))
            else:
                self.logger.error(
                    _('Cannot connect to engine APIs on {fqdn}:\n'
                      '{details}\n').format(
                        fqdn=fqdn,
                        details=e.detail,
                    ))
                raise RuntimeError(
                    _('Cannot connect to engine APIs on {fqdn}').format(
                        fqdn=fqdn, ))
    added_to_cluster = False
    while not added_to_cluster:
        try:
            cluster_name = self.environment[
                ohostedcons.EngineEnv.HOST_CLUSTER_NAME]
            self.logger.debug(
                "Getting the list of available clusters via engine's APIs")
            if cluster_name is not None:
                # A preconfigured cluster must actually exist.
                if cluster_name not in [
                    c.get_name() for c in engine_api.clusters.list()
                ]:
                    raise RuntimeError(
                        _('Specified cluster does not exist: {cluster}').
                        format(cluster=cluster_name, ))
            else:
                cluster_l = [
                    c.get_name() for c in engine_api.clusters.list()
                ]
                cluster_name = (default_cluster_name
                                if default_cluster_name in cluster_l
                                else cluster_l[0])
                cluster_name = self.dialog.queryString(
                    name='cluster_name',
                    note=_('Enter the name of the cluster to which '
                           'you want to add the host (@VALUES@) '
                           '[@DEFAULT@]: '),
                    prompt=True,
                    default=cluster_name,
                    validValues=cluster_l,
                )
            self.environment[
                ohostedcons.EngineEnv.HOST_CLUSTER_NAME] = cluster_name
            cluster = engine_api.clusters.get(cluster_name)
            conn = self.environment[ohostedcons.VDSMEnv.VDS_CLI]
            net_info = netinfo.NetInfo(vds_info.capabilities(conn))
            bridge_port = self.environment[
                ohostedcons.NetworkEnv.BRIDGE_IF]
            if bridge_port in net_info.vlans:
                # Bridge lives on a vlan: propagate the id to the
                # engine's management network.
                self.logger.debug(
                    "Updating engine's management network to be vlanned")
                vlan_id = net_info.vlans[bridge_port]['vlanid']
                self.logger.debug(
                    "Getting engine's management network via engine's APIs"
                )
                mgmt_network = cluster.networks.get(name=self.environment[
                    ohostedcons.NetworkEnv.BRIDGE_NAME])
                mgmt_network.set_vlan(
                    self._ovirtsdk_xml.params.VLAN(id=vlan_id))
                mgmt_network.update()
            # Configuring the cluster for Hyper Converged support if
            # enabled
            if self.environment[
                ohostedcons.StorageEnv.GLUSTER_PROVISIONING_ENABLED
            ]:
                cluster.set_gluster_service(True)
                cluster.update()
                # Re-fetch so the Host is added with the updated cluster.
                cluster = engine_api.clusters.get(cluster_name)
            self.logger.debug('Adding the host to the cluster')
            engine_api.hosts.add(
                self._ovirtsdk_xml.params.Host(
                    name=self.environment[
                        ohostedcons.EngineEnv.APP_HOST_NAME],
                    # Note that the below is required for compatibility
                    # with vdsm-generated pki. See bz 1178535.
                    # TODO: Make it configurable like engine fqdn.
                    address=socket.gethostname(),
                    reboot_after_installation=False,
                    cluster=cluster,
                    ssh=self._ovirtsdk_xml.params.SSH(
                        authentication_method='publickey',
                        port=self.environment[
                            ohostedcons.NetworkEnv.SSHD_PORT],
                    ),
                    override_iptables=self.environment[
                        otopicons.NetEnv.IPTABLES_ENABLE],
                ))
            added_to_cluster = True
        except ovirtsdk.infrastructure.errors.RequestError as e:
            self.logger.debug(
                'Cannot add the host to cluster {cluster}'.format(
                    cluster=cluster_name, ),
                exc_info=True,
            )
            self.logger.error(
                _('Cannot automatically add the host '
                  'to cluster {cluster}:\n{details}\n').format(
                    cluster=cluster_name, details=e.detail))
            # Block until the operator resolves the issue, then retry.
            while not check_liveliness.manualSetupDispatcher(
                    self, check_liveliness.MSD_FURTHER_ACTIONS, fqdn):
                pass
    up = self._wait_host_ready(
        engine_api,
        self.environment[ohostedcons.EngineEnv.APP_HOST_NAME])
    # TODO: host-deploy restarted vdscli so we need to
    # connect again
    if not up:
        self.logger.error(
            _('Unable to add {host} to the manager').format(
                host=self.environment[
                    ohostedcons.EngineEnv.APP_HOST_NAME], ))
    else:
        # This works only if the host is up.
        self.logger.debug('Setting CPU for the cluster')
        try:
            cluster, cpu = self._wait_cluster_cpu_ready(
                engine_api, cluster_name)
            self.logger.debug(cpu.__dict__)
            cpu.set_id(self.environment[ohostedcons.VDSMEnv.ENGINE_CPU])
            cluster.set_cpu(cpu)
            cluster.update()
        except ovirtsdk.infrastructure.errors.RequestError as e:
            # Best-effort: report the failure and continue the closeup.
            self.logger.debug(
                'Cannot set CPU level of cluster {cluster}'.format(
                    cluster=cluster_name, ),
                exc_info=True,
            )
            self.logger.error(
                _('Cannot automatically set CPU level '
                  'of cluster {cluster}:\n{details}\n').format(
                    cluster=cluster_name, details=e.detail))
    engine_api.disconnect()
def _closeup(self):
    """Register this host with the engine and finalize cluster setup.

    Flow: fetch PKI cert and SSH key, connect to the engine API,
    vlan-tag the management network when the bridge sits on a vlan,
    pick/validate the destination cluster and add the host. Additional
    hosts only get a network-configuration check; for the first host
    the cluster CPU level is set once the host is up. Any engine
    request failure aborts with RuntimeError.
    """
    self._getPKICert()
    self._getSSHkey()
    cluster_name = None
    default_cluster_name = 'Default'
    try:
        self.logger.debug('Connecting to the Engine')
        engine_api = self._ovirtsdk_api.API(
            url='https://{fqdn}/ovirt-engine/api'.format(
                fqdn=self.environment[
                    ohostedcons.NetworkEnv.OVIRT_HOSTED_ENGINE_FQDN
                ],
            ),
            username='******',
            password=self.environment[
                ohostedcons.EngineEnv.ADMIN_PASSWORD
            ],
            ca_file=self.environment[
                ohostedcons.EngineEnv.TEMPORARY_CERT_FILE
            ],
        )
        # Fresh local vdsm connection to inspect the network layout.
        conn = vdscli.connect()
        net_info = netinfo.NetInfo(vds_info.capabilities(conn))
        bridge_port = self.environment[ohostedcons.NetworkEnv.BRIDGE_IF]
        if bridge_port in net_info.vlans:
            # Bridge lives on a vlan: the engine's management network
            # must carry the same vlan id.
            self.logger.debug(
                'Updating engine\'s management network to be vlanned'
            )
            vlan_id = net_info.vlans[bridge_port]['vlanid']
            mgmt_network = engine_api.networks.get(
                name=self.environment[ohostedcons.NetworkEnv.BRIDGE_NAME]
            )
            mgmt_network.set_vlan(
                self._ovirtsdk_xml.params.VLAN(id=vlan_id)
            )
            mgmt_network.update()
        self.logger.debug('Adding the host to the cluster')
        cluster_name = self.environment[
            ohostedcons.EngineEnv.HOST_CLUSTER_NAME
        ]
        if cluster_name is not None:
            # A preconfigured cluster must actually exist.
            if cluster_name not in [
                c.get_name() for c in engine_api.clusters.list()
            ]:
                raise RuntimeError(
                    _(
                        'Specified cluster does not exist: {cluster}'
                    ).format(
                        cluster=cluster_name,
                    )
                )
        else:
            cluster_l = [c.get_name() for c in engine_api.clusters.list()]
            cluster_name = (
                default_cluster_name if default_cluster_name in cluster_l
                else cluster_l[0]
            )
            cluster_name = self.dialog.queryString(
                name='cluster_name',
                note=_(
                    'Enter the name of the cluster to which you want to '
                    'add the host (@VALUES@) [@DEFAULT@]: '
                ),
                prompt=True,
                default=cluster_name,
                validValues=cluster_l,
            )
        self.environment[
            ohostedcons.EngineEnv.HOST_CLUSTER_NAME
        ] = cluster_name
        engine_api.hosts.add(
            self._ovirtsdk_xml.params.Host(
                name=self.environment[
                    ohostedcons.EngineEnv.APP_HOST_NAME
                ],
                address=self._getIPAddress(),
                reboot_after_installation=False,
                cluster=engine_api.clusters.get(cluster_name),
                ssh=self._ovirtsdk_xml.params.SSH(
                    authentication_method='publickey',
                    port=self.environment[
                        ohostedcons.NetworkEnv.SSHD_PORT
                    ],
                ),
                override_iptables=True,
            )
        )
    except ovirtsdk.infrastructure.errors.RequestError as e:
        self.logger.debug(
            'Cannot add the host to cluster {cluster}'.format(
                cluster=cluster_name,
            ),
            exc_info=True,
        )
        self.logger.error(
            _(
                'Cannot automatically add the host '
                'to cluster {cluster}:\n{details}\n'
            ).format(
                cluster=cluster_name,
                details=e.detail
            )
        )
        raise RuntimeError(
            _(
                'Cannot add the host to cluster {cluster}'
            ).format(
                cluster=cluster_name,
            )
        )
    if self.environment[
        ohostedcons.CoreEnv.IS_ADDITIONAL_HOST
    ]:
        # Additional hosts: only verify their network configuration.
        self._check_network_configuration(
            engine_api,
            self.environment[ohostedcons.EngineEnv.APP_HOST_NAME],
        )
    else:
        up = self._wait_host_ready(
            engine_api,
            self.environment[ohostedcons.EngineEnv.APP_HOST_NAME]
        )
        if not up:
            self.logger.error(
                _(
                    'Unable to add {host} to the manager'
                ).format(
                    host=self.environment[
                        ohostedcons.EngineEnv.APP_HOST_NAME
                    ],
                )
            )
        else:
            # This works only if the host is up.
            self.logger.debug('Setting CPU for the cluster')
            try:
                cluster, cpu = self._wait_cluster_cpu_ready(
                    engine_api,
                    cluster_name
                )
                self.logger.debug(cpu.__dict__)
                cpu.set_id(
                    self.environment[ohostedcons.VDSMEnv.ENGINE_CPU]
                )
                cluster.set_cpu(cpu)
                cluster.update()
            except ovirtsdk.infrastructure.errors.RequestError as e:
                # Best-effort: report the failure and keep going.
                self.logger.debug(
                    'Cannot set CPU level of cluster {cluster}'.format(
                        cluster=cluster_name,
                    ),
                    exc_info=True,
                )
                self.logger.error(
                    _(
                        'Cannot automatically set CPU level '
                        'of cluster {cluster}:\n{details}\n'
                    ).format(
                        cluster=cluster_name,
                        details=e.detail
                    )
                )
    engine_api.disconnect()
def _customization(self):
    """Select the network interface for the management bridge.

    In the ansible flow the candidate interfaces come from the
    HE_AP_NETWORK_INTERFACES playbook results; otherwise they are taken
    from vdsm capabilities, excluding interfaces already enslaved to a
    bridge, bonds without slaves, and bonds in unsupported modes
    (overridable via ALLOW_INVALID_BOND_MODES). When BRIDGE_IF was not
    preset, the user is prompted.

    Raises:
        RuntimeError: when no suitable interface is available.
    """
    validValues = []
    if self.environment[ohostedcons.CoreEnv.ANSIBLE_DEPLOYMENT]:
        playbook = ohostedcons.FileLocations.HE_AP_NETWORK_INTERFACES
        ah = ansible_utils.AnsibleHelper(
            playbook_name=playbook,
            extra_vars={},
        )
        r = ah.run()
        self.logger.debug(r)
        if 'otopi_host_net' in r:
            # Only entries that produced ansible_facts are usable.
            for network_interface in r['otopi_host_net']['results']:
                if 'ansible_facts' in network_interface:
                    validValues.append(
                        network_interface['item']['item'])
        else:
            raise RuntimeError(
                _('No suitable network interfaces were found'))
    else:
        INVALID_BOND_MODES = ('0', '5', '6')
        ALLOW_INVALID_BOND_MODES = \
            ohostedcons.NetworkEnv.ALLOW_INVALID_BOND_MODES
        caps = vds_info.capabilities(
            self.environment[ohostedcons.VDSMEnv.VDS_CLI])
        interfaces = set(
            caps['nics'].keys() +
            caps['bondings'].keys() +
            caps['vlans'].keys()
        )
        validValues = []
        enslaved = set()
        inv_bond = set()
        for bridge in caps['bridges'].keys():
            enslaved.update(set(caps['bridges'][bridge]['ports']))
        for bond in caps['bondings'].keys():
            bondMode = caps['bondings'][bond]['opts']['mode']
            if (bondMode in INVALID_BOND_MODES):
                # Unsupported bond mode: reject unless explicitly
                # allowed by configuration.
                self.logger.warning(
                    _("Bond {bondname} is on mode {bondmode}, "
                      "modes {invalid} are not supported").format(
                        bondname=bond,
                        bondmode=bondMode,
                        invalid=INVALID_BOND_MODES))
                if not self.environment[ALLOW_INVALID_BOND_MODES]:
                    inv_bond.update(set([bond]))
                else:
                    self.logger.warning(
                        _("Allowing anyway, as enforced by {key}={val}").
                        format(
                            key=ALLOW_INVALID_BOND_MODES,
                            val=self.environment[ALLOW_INVALID_BOND_MODES]
                        ))
            slaves = set(caps['bondings'][bond]['slaves'])
            if slaves:
                enslaved.update(slaves)
            else:
                # A bond with no slaves cannot carry the bridge.
                self.logger.debug(
                    'Detected bond device %s without slaves' % bond)
                inv_bond.update(set([bond]))
        validValues = list(interfaces - enslaved - inv_bond)
        self.logger.debug('Nics detected: %s' % ','.join(interfaces))
        self.logger.debug('Nics enslaved: %s' % ','.join(enslaved))
        self.logger.debug('Nics valid: %s' % ','.join(validValues))
        if not validValues:
            if enslaved:
                raise RuntimeError(
                    _('The following existing interfaces are not suitable '
                      'for vdsm: {enslaved}. You might want to pull out an '
                      'interface out of a bridge to be able to use '
                      'it').format(
                        enslaved=','.join(enslaved)))
            else:
                raise RuntimeError(_('A Network interface is required'))
    interactive = self.environment[
        ohostedcons.NetworkEnv.BRIDGE_IF] is None
    if interactive:
        default = self._get_active_interface(validValues)
        self.environment[
            ohostedcons.NetworkEnv.BRIDGE_IF] = self.dialog.queryString(
                name='ovehosted_bridge_if',
                note=_(
                    'Please indicate a nic to set '
                    '{bridge} bridge on: (@VALUES@) [@DEFAULT@]: ').format(
                        bridge=self.environment[
                            ohostedcons.NetworkEnv.BRIDGE_NAME]),
                prompt=True,
                caseSensitive=True,
                default=default,
                validValues=validValues,
            )
def _closeup(self):
    # TODO: refactor into shorter and simpler functions
    """Connect to the engine and add this host to a cluster.

    Flow: fetch CA and SSH key, loop until an engine API session is
    established (re-prompting for the admin password on HTTP 401), then
    pick/validate the cluster, vlan-tag the management network when the
    bridge sits on a vlan, optionally enable gluster service, add the
    host and finally set the cluster CPU level once the host is up.
    """
    self._getCA()
    self._getSSH()
    cluster_name = None
    default_cluster_name = 'Default'
    valid = False
    fqdn = self.environment[
        ohostedcons.NetworkEnv.OVIRT_HOSTED_ENGINE_FQDN
    ]
    while not valid:
        try:
            self.logger.info(_('Connecting to the Engine'))
            insecure = False
            if self.environment[
                ohostedcons.EngineEnv.INSECURE_SSL
            ]:
                insecure = True
            engine_api = self._ovirtsdk_api.API(
                url='https://{fqdn}/ovirt-engine/api'.format(
                    fqdn=fqdn,
                ),
                username='******',
                password=self.environment[
                    ohostedcons.EngineEnv.ADMIN_PASSWORD
                ],
                ca_file=self.environment[
                    ohostedcons.EngineEnv.TEMPORARY_CERT_FILE
                ],
                insecure=insecure,
            )
            # Probe call: proves both connectivity and credentials.
            engine_api.clusters.list()
            valid = True
        except ovirtsdk.infrastructure.errors.RequestError as e:
            if e.status == 401:
                if self._interactive_admin_pwd:
                    # NOTE(review): the adjacent literals below collapse
                    # to "didnt accepted" - the apostrophe appears lost;
                    # confirm the intended wording.
                    self.logger.error(
                        _(
                            'The engine API didn' 't accepted '
                            'the administrator password you provided\n'
                            'Please enter it again to retry.'
                        )
                    )
                    self.environment[
                        ohostedcons.EngineEnv.ADMIN_PASSWORD
                    ] = self.dialog.queryString(
                        name='ENGINE_ADMIN_PASSWORD',
                        note=_(
                            'Enter ' 'admin@internal'
                            ' user password that '
                            'will be used for accessing '
                            'the Administrator Portal: '
                        ),
                        prompt=True,
                        hidden=True,
                    )
                else:
                    # Non-interactive run: a wrong password is fatal.
                    raise RuntimeError(
                        _(
                            'The engine API didn' 't accepted '
                            'the administrator password you provided\n'
                        )
                    )
            else:
                self.logger.error(
                    _(
                        'Cannot connect to engine APIs on {fqdn}:\n'
                        '{details}\n'
                    ).format(
                        fqdn=fqdn,
                        details=e.detail,
                    )
                )
                raise RuntimeError(
                    _(
                        'Cannot connect to engine APIs on {fqdn}'
                    ).format(
                        fqdn=fqdn,
                    )
                )
    added_to_cluster = False
    while not added_to_cluster:
        try:
            cluster_name = self.environment[
                ohostedcons.EngineEnv.HOST_CLUSTER_NAME
            ]
            self.logger.debug(
                "Getting the list of available clusters via engine's APIs"
            )
            if cluster_name is not None:
                # A preconfigured cluster must actually exist.
                if cluster_name not in [
                    c.get_name() for c in engine_api.clusters.list()
                ]:
                    raise RuntimeError(
                        _(
                            'Specified cluster does not exist: {cluster}'
                        ).format(
                            cluster=cluster_name,
                        )
                    )
            else:
                cluster_l = [
                    c.get_name() for c in engine_api.clusters.list()
                ]
                cluster_name = (
                    default_cluster_name
                    if default_cluster_name in cluster_l
                    else cluster_l[0]
                )
                cluster_name = self.dialog.queryString(
                    name='cluster_name',
                    note=_(
                        'Enter the name of the cluster to which '
                        'you want to add the host (@VALUES@) '
                        '[@DEFAULT@]: '
                    ),
                    prompt=True,
                    default=cluster_name,
                    validValues=cluster_l,
                )
            self.environment[
                ohostedcons.EngineEnv.HOST_CLUSTER_NAME
            ] = cluster_name
            cluster = engine_api.clusters.get(cluster_name)
            conn = self.environment[ohostedcons.VDSMEnv.VDS_CLI]
            net_info = netinfo.NetInfo(vds_info.capabilities(conn))
            bridge_port = self.environment[
                ohostedcons.NetworkEnv.BRIDGE_IF
            ]
            if bridge_port in net_info.vlans:
                # Bridge lives on a vlan: propagate the id to the
                # engine's management network.
                self.logger.debug(
                    "Updating engine's management network to be vlanned"
                )
                vlan_id = net_info.vlans[bridge_port]['vlanid']
                self.logger.debug(
                    "Getting engine's management network via engine's APIs"
                )
                mgmt_network = cluster.networks.get(
                    name=self.environment[
                        ohostedcons.NetworkEnv.BRIDGE_NAME]
                )
                mgmt_network.set_vlan(
                    self._ovirtsdk_xml.params.VLAN(id=vlan_id)
                )
                mgmt_network.update()
            # Configuring the cluster for Hyper Converged support if
            # enabled
            if self.environment[
                ohostedcons.StorageEnv.GLUSTER_PROVISIONING_ENABLED
            ]:
                cluster.set_gluster_service(True)
                cluster.update()
                # Re-fetch so the Host is added with the updated cluster.
                cluster = engine_api.clusters.get(cluster_name)
            self.logger.debug('Adding the host to the cluster')
            engine_api.hosts.add(
                self._ovirtsdk_xml.params.Host(
                    name=self.environment[
                        ohostedcons.EngineEnv.APP_HOST_NAME
                    ],
                    # Note that the below is required for compatibility
                    # with vdsm-generated pki. See bz 1178535.
                    # TODO: Make it configurable like engine fqdn.
                    address=socket.gethostname(),
                    reboot_after_installation=False,
                    cluster=cluster,
                    ssh=self._ovirtsdk_xml.params.SSH(
                        authentication_method='publickey',
                        port=self.environment[
                            ohostedcons.NetworkEnv.SSHD_PORT
                        ],
                    ),
                    override_iptables=self.environment[
                        otopicons.NetEnv.IPTABLES_ENABLE
                    ],
                )
            )
            added_to_cluster = True
        except ovirtsdk.infrastructure.errors.RequestError as e:
            self.logger.debug(
                'Cannot add the host to cluster {cluster}'.format(
                    cluster=cluster_name,
                ),
                exc_info=True,
            )
            self.logger.error(
                _(
                    'Cannot automatically add the host '
                    'to cluster {cluster}:\n{details}\n'
                ).format(
                    cluster=cluster_name,
                    details=e.detail
                )
            )
            # Block until the operator resolves the issue, then retry.
            while not check_liveliness.manualSetupDispatcher(
                self,
                check_liveliness.MSD_FURTHER_ACTIONS,
                fqdn
            ):
                pass
    up = self._wait_host_ready(
        engine_api,
        self.environment[ohostedcons.EngineEnv.APP_HOST_NAME]
    )
    # TODO: host-deploy restarted vdscli so we need to
    # connect again
    if not up:
        self.logger.error(
            _(
                'Unable to add {host} to the manager'
            ).format(
                host=self.environment[
                    ohostedcons.EngineEnv.APP_HOST_NAME
                ],
            )
        )
    else:
        # This works only if the host is up.
        self.logger.debug('Setting CPU for the cluster')
        try:
            cluster, cpu = self._wait_cluster_cpu_ready(
                engine_api,
                cluster_name
            )
            self.logger.debug(cpu.__dict__)
            cpu.set_id(
                self.environment[ohostedcons.VDSMEnv.ENGINE_CPU]
            )
            cluster.set_cpu(cpu)
            cluster.update()
        except ovirtsdk.infrastructure.errors.RequestError as e:
            # Best-effort: report the failure and keep going.
            self.logger.debug(
                'Cannot set CPU level of cluster {cluster}'.format(
                    cluster=cluster_name,
                ),
                exc_info=True,
            )
            self.logger.error(
                _(
                    'Cannot automatically set CPU level '
                    'of cluster {cluster}:\n{details}\n'
                ).format(
                    cluster=cluster_name,
                    details=e.detail
                )
            )
    engine_api.disconnect()
def _closeup(self):
    # TODO: refactor into shorter and simpler functions
    """Connect to the engine, add this host, then set the cluster CPU.

    Flow: fetch PKI cert and SSH key; connect to the engine API (fatal
    RuntimeError on failure); vlan-tag the management network when the
    bridge sits on a vlan; pick/validate the destination cluster
    (prompting when unset); add the host (fatal on failure); once the
    host is up, set the cluster CPU level best-effort.
    """
    self._getPKICert()
    self._getSSHkey()
    cluster_name = None
    default_cluster_name = 'Default'
    try:
        self.logger.debug('Connecting to the Engine')
        engine_api = self._ovirtsdk_api.API(
            url='https://{fqdn}/ovirt-engine/api'.format(
                fqdn=self.environment[
                    ohostedcons.NetworkEnv.OVIRT_HOSTED_ENGINE_FQDN
                ],
            ),
            username='******',
            password=self.environment[
                ohostedcons.EngineEnv.ADMIN_PASSWORD
            ],
            ca_file=self.environment[
                ohostedcons.EngineEnv.TEMPORARY_CERT_FILE
            ],
        )
    except ovirtsdk.infrastructure.errors.RequestError as e:
        self.logger.error(
            _(
                'Cannot connect to engine APIs on {fqdn}:\n '
                '{details}\n'
            ).format(
                fqdn=self.environment[
                    ohostedcons.NetworkEnv.OVIRT_HOSTED_ENGINE_FQDN
                ],
                details=e.detail
            )
        )
        raise RuntimeError(
            _(
                'Cannot connect to engine APIs on {fqdn}'
            ).format(
                fqdn=self.environment[
                    ohostedcons.NetworkEnv.OVIRT_HOSTED_ENGINE_FQDN
                ],
            )
        )
    try:
        conn = self.environment[ohostedcons.VDSMEnv.VDS_CLI]
        net_info = netinfo.NetInfo(vds_info.capabilities(conn))
        bridge_port = self.environment[ohostedcons.NetworkEnv.BRIDGE_IF]
        if bridge_port in net_info.vlans:
            # Bridge lives on a vlan: propagate the id to the engine's
            # management network.
            self.logger.debug(
                "Updating engine's management network to be vlanned"
            )
            vlan_id = net_info.vlans[bridge_port]['vlanid']
            self.logger.debug(
                "Getting engine's management network via engine's APIs"
            )
            mgmt_network = engine_api.networks.get(
                name=self.environment[ohostedcons.NetworkEnv.BRIDGE_NAME]
            )
            mgmt_network.set_vlan(
                self._ovirtsdk_xml.params.VLAN(id=vlan_id)
            )
            mgmt_network.update()
        cluster_name = self.environment[
            ohostedcons.EngineEnv.HOST_CLUSTER_NAME
        ]
        self.logger.debug(
            "Getting the list of available clusters via engine's APIs"
        )
        if cluster_name is not None:
            # A preconfigured cluster must actually exist.
            if cluster_name not in [
                c.get_name() for c in engine_api.clusters.list()
            ]:
                raise RuntimeError(
                    _(
                        'Specified cluster does not exist: {cluster}'
                    ).format(
                        cluster=cluster_name,
                    )
                )
        else:
            cluster_l = [c.get_name() for c in engine_api.clusters.list()]
            cluster_name = (
                default_cluster_name if default_cluster_name in cluster_l
                else cluster_l[0]
            )
            cluster_name = self.dialog.queryString(
                name='cluster_name',
                note=_(
                    'Enter the name of the cluster to which you want to '
                    'add the host (@VALUES@) [@DEFAULT@]: '
                ),
                prompt=True,
                default=cluster_name,
                validValues=cluster_l,
            )
        self.environment[
            ohostedcons.EngineEnv.HOST_CLUSTER_NAME
        ] = cluster_name
        self.logger.debug('Adding the host to the cluster')
        engine_api.hosts.add(
            self._ovirtsdk_xml.params.Host(
                name=self.environment[
                    ohostedcons.EngineEnv.APP_HOST_NAME
                ],
                # Note that the below is required for compatibility
                # with vdsm-generated pki. See bz 1178535.
                # TODO: Make it configurable like engine fqdn.
                address=socket.gethostname(),
                reboot_after_installation=False,
                cluster=engine_api.clusters.get(cluster_name),
                ssh=self._ovirtsdk_xml.params.SSH(
                    authentication_method='publickey',
                    port=self.environment[
                        ohostedcons.NetworkEnv.SSHD_PORT
                    ],
                ),
                override_iptables=self.environment[
                    otopicons.NetEnv.IPTABLES_ENABLE
                ],
            )
        )
    except ovirtsdk.infrastructure.errors.RequestError as e:
        self.logger.debug(
            'Cannot add the host to cluster {cluster}'.format(
                cluster=cluster_name,
            ),
            exc_info=True,
        )
        self.logger.error(
            _(
                'Cannot automatically add the host '
                'to cluster {cluster}:\n{details}\n'
            ).format(
                cluster=cluster_name,
                details=e.detail
            )
        )
        raise RuntimeError(
            _(
                'Cannot add the host to cluster {cluster}'
            ).format(
                cluster=cluster_name,
            )
        )
    up = self._wait_host_ready(
        engine_api,
        self.environment[ohostedcons.EngineEnv.APP_HOST_NAME]
    )
    # TODO: host-deploy restarted vdscli so we need to
    # connect again
    if not up:
        self.logger.error(
            _(
                'Unable to add {host} to the manager'
            ).format(
                host=self.environment[
                    ohostedcons.EngineEnv.APP_HOST_NAME
                ],
            )
        )
    else:
        # This works only if the host is up.
        self.logger.debug('Setting CPU for the cluster')
        try:
            cluster, cpu = self._wait_cluster_cpu_ready(
                engine_api,
                cluster_name
            )
            self.logger.debug(cpu.__dict__)
            cpu.set_id(
                self.environment[ohostedcons.VDSMEnv.ENGINE_CPU]
            )
            cluster.set_cpu(cpu)
            cluster.update()
        except ovirtsdk.infrastructure.errors.RequestError as e:
            # Best-effort: report the failure and keep going.
            self.logger.debug(
                'Cannot set CPU level of cluster {cluster}'.format(
                    cluster=cluster_name,
                ),
                exc_info=True,
            )
            self.logger.error(
                _(
                    'Cannot automatically set CPU level '
                    'of cluster {cluster}:\n{details}\n'
                ).format(
                    cluster=cluster_name,
                    details=e.detail
                )
            )
    engine_api.disconnect()
def _customization(self):
    """Interactively select the NIC to build the management bridge on.

    Reads VDSM capabilities to enumerate nics, bondings and vlans,
    filters out devices that cannot host the bridge (ports already
    enslaved to a bridge, slaves of a bond, and bonds without slaves),
    and stores the chosen interface in NetworkEnv.BRIDGE_IF, prompting
    the user when it is not preset in the environment.

    :raises RuntimeError: if no suitable network interface is found.
    """
    info = netinfo.NetInfo(
        vds_info.capabilities(
            self.environment[ohostedcons.VDSMEnv.VDS_CLI]
        )
    )
    # Use set unions instead of list concatenation: dict.keys()
    # returns a view on python3, so "keys() + keys()" would raise
    # TypeError there, while this form works on both py2 and py3.
    interfaces = (
        set(info.nics) |
        set(info.bondings) |
        set(info.vlans)
    )
    enslaved = set()
    inv_bond = set()
    for bridge in info.bridges:
        # ports already attached to an existing bridge are unusable
        enslaved.update(set(info.bridges[bridge]['ports']))
    for bond in info.bondings:
        slaves = set(info.bondings[bond]['slaves'])
        if slaves:
            # the bond itself stays valid, but its slaves do not
            enslaved.update(slaves)
        else:
            # a bond with no slaves cannot carry traffic
            self.logger.debug(
                'Detected bond device %s without slaves' % bond
            )
            inv_bond.add(bond)
    validValues = list(interfaces - enslaved - inv_bond)
    self.logger.debug('Nics detected: %s' % ','.join(interfaces))
    self.logger.debug('Nics enslaved: %s' % ','.join(enslaved))
    self.logger.debug('Nics valid: %s' % ','.join(validValues))
    if not validValues:
        if enslaved:
            raise RuntimeError(
                _(
                    'The following existing interfaces are not suitable '
                    'for vdsm: {enslaved}. You might want to pull out an '
                    'interface out of a bridge to be able to use it'
                ).format(
                    enslaved=','.join(enslaved)
                )
            )
        else:
            raise RuntimeError(_('A Network interface is required'))
    interactive = self.environment[
        ohostedcons.NetworkEnv.BRIDGE_IF
    ] is None
    if interactive:
        default = ohostedcons.Defaults.DEFAULT_BRIDGE_IF
        if default not in validValues:
            default = validValues[0]
        self.environment[
            ohostedcons.NetworkEnv.BRIDGE_IF
        ] = self.dialog.queryString(
            name='ovehosted_bridge_if',
            note=_(
                'Please indicate a nic to set '
                '{bridge} bridge on: (@VALUES@) [@DEFAULT@]: '
            ).format(
                bridge=self.environment[
                    ohostedcons.NetworkEnv.BRIDGE_NAME
                ]
            ),
            prompt=True,
            caseSensitive=True,
            default=default,
            validValues=validValues,
        )
def _getMaxVCpus(self):
    """Return the host CPU core count used as the max vCPU value.

    On ansible-based deployments this is the local core count as a
    string; otherwise the value reported by VDSM capabilities is
    returned as-is.
    """
    env = self.environment
    if not env[ohostedcons.CoreEnv.ANSIBLE_DEPLOYMENT]:
        # legacy (vdsm-based) flow: query host capabilities
        capabilities = vds_info.capabilities(
            env[ohostedcons.VDSMEnv.VDS_CLI]
        )
        return capabilities['cpuCores']
    return str(multiprocessing.cpu_count())
def _closeup(self):
    """Register this host in the engine and finalize cluster setup.

    Flow: pick (or interactively ask for) the target cluster, make the
    engine's management network vlanned when the selected bridge
    interface is a vlan device, add the host through the engine SDK
    (looping through manual-intervention prompts and retrying on SDK
    request errors), then wait for the host to come up and set the
    cluster CPU level.  Always disconnects from the engine API at the
    end.
    """
    # TODO: refactor into shorter and simpler functions
    self._getSSH()
    self._configureHostDeploy()
    cluster_name = None
    default_cluster_name = 'Default'
    engine_api = engineapi.get_engine_api(self)
    added_to_cluster = False
    while not added_to_cluster:
        try:
            cluster_name = self.environment[
                ohostedcons.EngineEnv.HOST_CLUSTER_NAME
            ]
            self.logger.debug(
                "Getting the list of available clusters via engine's APIs"
            )
            if cluster_name is not None:
                # a cluster was explicitly requested: it must exist
                if cluster_name not in [
                    c.get_name()
                    for c in engine_api.clusters.list()
                ]:
                    raise RuntimeError(
                        _(
                            'Specified cluster does not exist: {cluster}'
                        ).format(
                            cluster=cluster_name,
                        )
                    )
            else:
                cluster_l = [
                    c.get_name()
                    for c in engine_api.clusters.list()
                ]
                # prefer 'Default' when present, else the first cluster
                cluster_name = (
                    default_cluster_name
                    if default_cluster_name in cluster_l
                    else cluster_l[0]
                )
                if len(cluster_l) > 1:
                    # more than one candidate: let the user choose
                    cluster_name = self.dialog.queryString(
                        name='cluster_name',
                        note=_(
                            'Enter the name of the cluster to which '
                            'you want to add the host (@VALUES@) '
                            '[@DEFAULT@]: '
                        ),
                        prompt=True,
                        default=cluster_name,
                        validValues=cluster_l,
                    )
                self.environment[
                    ohostedcons.EngineEnv.HOST_CLUSTER_NAME
                ] = cluster_name
            cluster = engine_api.clusters.get(cluster_name)

            conn = self.environment[ohostedcons.VDSMEnv.VDS_CLI]
            net_info = CachingNetInfo(vds_info.capabilities(conn))
            bridge_port = self.environment[
                ohostedcons.NetworkEnv.BRIDGE_IF
            ]
            if bridge_port in net_info.vlans:
                # the bridge sits on a vlan device: the engine's
                # management network must carry the same vlan id
                self.logger.debug(
                    "Updating engine's management network to be vlanned"
                )
                vlan_id = net_info.vlans[bridge_port]['vlanid']
                self.logger.debug(
                    "Getting engine's management network via engine's APIs"
                )
                cluster_mgmt_network = cluster.networks.get(
                    name=self.environment[
                        ohostedcons.NetworkEnv.BRIDGE_NAME]
                )
                mgmt_network_id = cluster_mgmt_network.get_id()
                mgmt_network = engine_api.networks.get(
                    id=mgmt_network_id
                )
                mgmt_network.set_vlan(
                    self._ovirtsdk_xml.params.VLAN(id=vlan_id)
                )
                mgmt_network.update()
                self._wait_network_vlan_ready(
                    engine_api,
                    mgmt_network_id,
                    vlan_id
                )

            self.logger.debug('Adding the host to the cluster')
            engine_api.hosts.add(
                self._ovirtsdk_xml.params.Host(
                    name=self.environment[
                        ohostedcons.EngineEnv.APP_HOST_NAME
                    ],
                    # Note that the below is required for compatibility
                    # with vdsm-generated pki. See bz 1178535.
                    address=self.environment[
                        ohostedcons.NetworkEnv.HOST_NAME
                    ],
                    cluster=cluster,
                    ssh=self._ovirtsdk_xml.params.SSH(
                        authentication_method='publickey',
                        port=self.environment[
                            ohostedcons.NetworkEnv.SSHD_PORT
                        ],
                    ),
                    override_iptables=self.environment[
                        otopicons.NetEnv.IPTABLES_ENABLE
                    ],
                )
            )
            added_to_cluster = True
        except ovirtsdk.infrastructure.errors.RequestError as e:
            self.logger.debug(
                'Cannot add the host to cluster {cluster}'.format(
                    cluster=cluster_name,
                ),
                exc_info=True,
            )
            self.logger.error(
                _(
                    'Cannot automatically add the host '
                    'to cluster {cluster}:\n{details}\n'
                ).format(
                    cluster=cluster_name,
                    details=e.detail
                )
            )
            # keep the user in a manual-intervention loop until the
            # dispatcher reports success, then retry the whole add
            while not check_liveliness.manualSetupDispatcher(
                self,
                check_liveliness.MSD_FURTHER_ACTIONS,
                self.environment[
                    ohostedcons.NetworkEnv.OVIRT_HOSTED_ENGINE_FQDN
                ]
            ):
                pass

    up = self._wait_host_ready(
        engine_api,
        self.environment[ohostedcons.EngineEnv.APP_HOST_NAME]
    )
    # TODO: host-deploy restarted vdscli so we need to
    # connect again
    if not up:
        self.logger.error(
            _(
                'Unable to add {host} to the manager'
            ).format(
                host=self.environment[
                    ohostedcons.EngineEnv.APP_HOST_NAME
                ],
            )
        )
    else:
        # This works only if the host is up.
        self.logger.debug('Setting CPU for the cluster')
        try:
            cluster, cpu = self._wait_cluster_cpu_ready(
                engine_api,
                cluster_name
            )
            self.logger.debug(cpu.__dict__)
            cpu.set_id(
                self.environment[ohostedcons.VDSMEnv.ENGINE_CPU]
            )
            cluster.set_cpu(cpu)
            cluster.update()
        except ovirtsdk.infrastructure.errors.RequestError as e:
            # CPU-level tuning is best-effort: log and continue
            self.logger.debug(
                'Cannot set CPU level of cluster {cluster}'.format(
                    cluster=cluster_name,
                ),
                exc_info=True,
            )
            self.logger.error(
                _(
                    'Cannot automatically set CPU level '
                    'of cluster {cluster}:\n{details}\n'
                ).format(
                    cluster=cluster_name,
                    details=e.detail
                )
            )
    engine_api.disconnect()
def _get_hostname_from_bridge_if(self):
    """Resolve and store the host name from the management address.

    When self._enabled is set, the address is taken from the selected
    nic/bond/vlan via vds_info; otherwise it is read from the bridge
    entry of the raw VDSM getVdsCapabilities response.  The address is
    then reverse-resolved and the hostname saved into
    NetworkEnv.HOST_NAME.

    :raises RuntimeError: if no address can be determined, the VDSM
        capabilities call fails, or the resolved hostname also maps to
        other addresses (ambiguous reverse resolution).
    """
    ipaddr = None
    if self._enabled:
        # acquiring interface address
        configuration, status = vds_info.network(
            vds_info.capabilities(
                self.environment[ohostedcons.VDSMEnv.VDS_CLI]
            ),
            self.environment[
                ohostedcons.NetworkEnv.BRIDGE_IF
            ],
        )
        self.logger.debug('Network info: {info}'.format(info=status))
        if 'ipaddr' not in status:
            raise RuntimeError(_('Cannot acquire nic/bond/vlan address'))
        ipaddr = status['ipaddr']
    else:
        # acquiring bridge address
        cli = self.environment[ohostedcons.VDSMEnv.VDS_CLI]
        caps = cli.getVdsCapabilities()
        self.logger.debug(caps)
        # raw vdscli response: a non-zero status code means failure
        if caps['status']['code'] != 0:
            raise RuntimeError(
                _('Failed getting VDSM capabilities: {msg}').format(
                    msg=caps['status']['message'],
                )
            )
        if 'networks' in caps:
            networks = caps['networks']
            if self.environment[
                ohostedcons.NetworkEnv.BRIDGE_NAME
            ] in networks:
                bridge = networks[
                    self.environment[
                        ohostedcons.NetworkEnv.BRIDGE_NAME
                    ]
                ]
                if 'addr' in bridge:
                    ipaddr = bridge['addr']
    if not ipaddr:
        raise RuntimeError(_('Cannot acquire bridge address'))

    hostname, aliaslist, ipaddrlist = socket.gethostbyaddr(ipaddr)
    self.logger.debug(
        "hostname: '{h}', aliaslist: '{a}', ipaddrlist: '{i}'".format(
            h=hostname,
            a=aliaslist,
            i=ipaddrlist,
        )
    )
    if len(ipaddrlist) > 1:
        # ambiguous reverse resolution: refuse to guess which
        # interface the hostname really belongs to
        other_ip = set(ipaddrlist) - set([ipaddr])
        raise RuntimeError(_(
            "hostname '{h}' doesn't uniquely match the interface "
            "'{i}' selected for the management bridge; "
            "it matches also interface with IP {o}. "
            "Please make sure that the hostname got from "
            "the interface for the management network resolves "
            "only there."
        ).format(
            h=hostname,
            i=self.environment[
                ohostedcons.NetworkEnv.BRIDGE_IF
            ],
            o=other_ip,
        ))
    self.environment[
        ohostedcons.NetworkEnv.HOST_NAME
    ] = hostname