Example No. 1
    def storage_pool_query(self, request):
        self.required_options(request, 'nodeURI')
        if request.options['nodeURI'] in self.storage_pools:
            self.finished(
                request.id,
                self.storage_pools[request.options['nodeURI']].values())
            return

        def _finished(thread, result, request):
            success, data = result
            if success:
                self.storage_pools[request.options['nodeURI']] = dict(
                    map(lambda p: (p.name, object2dict(p)), data))
                self.finished(
                    request.id,
                    self.storage_pools[request.options['nodeURI']].values())
            else:
                self.finished(request.id,
                              None,
                              message=str(data),
                              status=MODULE_ERR_COMMAND_FAILED)

        self.uvmm.send('STORAGE_POOLS',
                       Callback(_finished, request),
                       uri=request.options['nodeURI'])
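
The handler answers from the self.storage_pools cache when the node was queried before and only then issues an asynchronous STORAGE_POOLS request; the callback fills the cache and replies with plain dicts. The object2dict helper comes from the UVMM library and is not shown in these examples; a minimal sketch of such a conversion, assuming it only has to expose public attributes, could look like this (illustrative, not the actual UVMM implementation):

    def object2dict_sketch(obj):
        # Illustrative stand-in for object2dict(): copy the public, non-callable
        # attributes of an information object into a plain dict so the result
        # can be serialised for the UMC frontend.
        return dict(
            (name, getattr(obj, name))
            for name in dir(obj)
            if not name.startswith('_') and not callable(getattr(obj, name)))
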
Example No. 2
    def storage_volume_query(self, request):
        """Returns a list of volumes located in the given pool.

		options: { 'nodeURI': <node uri>, 'pool' : <pool name>[, 'type' : (disk|cdrom|floppy)] }

		return: [ { <volume description> }, ... ]
		"""
        self.required_options(request, 'nodeURI', 'pool')

        def _finished(thread, result, request):
            success, data = result
            if success:
                volume_list = []
                for vol in data:
                    vol = object2dict(vol)
                    vol['volumeFilename'] = os.path.basename(
                        vol.get('source', ''))
                    volume_list.append(vol)
                self.finished(request.id, volume_list)
            else:
                self.finished(request.id,
                              None,
                              message=str(data),
                              status=MODULE_ERR_COMMAND_FAILED)

        drive_type = request.options.get('type', None)
        if drive_type == 'floppy':  # not yet supported
            drive_type = 'disk'
        self.uvmm.send('STORAGE_VOLUMES',
                       Callback(_finished, request),
                       uri=request.options['nodeURI'],
                       pool=request.options['pool'],
                       type=drive_type)
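
Every volume dict gains a volumeFilename entry derived from its source path; os.path.basename keeps only the last path component, and volumes without a source end up with an empty name (the path below is made up):

    >>> import os.path
    >>> os.path.basename('/var/lib/libvirt/images/master.qcow2')
    'master.qcow2'
    >>> os.path.basename('')
    ''
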
Example No. 3
    def storage_volume_usedby(self, request):
        """Returns a list of domains that use the given volume.

		options: { 'nodeURI' : <node URI>, 'pool' : <pool name>, 'volumeFilename': <filename> }

		return: [ <domain URI>, ... ]
		"""
        self.required_options(request, 'nodeURI', 'pool', 'volumeFilename')

        def _finished(thread, result, request):
            if self._check_thread_error(thread, result, request):
                return

            success, data = result
            if success:
                if isinstance(data, (list, tuple)):
                    data = map(lambda x: '#'.join(x), data)
                self.finished(request.id, data)
            else:
                self.finished(request.id,
                              None,
                              message=str(data),
                              status=MODULE_ERR_COMMAND_FAILED)

        pool_path = self.get_pool_path(request.options['nodeURI'],
                                       request.options['pool'])
        if pool_path is None:
            raise UMC_OptionTypeError(
                _('The given pool could not be found or is not a file pool'))
        volume = os.path.join(pool_path, request.options['volumeFilename'])
        self.uvmm.send('STORAGE_VOLUME_USEDBY',
                       Callback(_finished, request),
                       volume=volume)
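
Judging by the '#'.join(x) call, the STORAGE_VOLUME_USEDBY answer consists of sequences such as (node URI, domain UUID) pairs, which are concatenated into the <node URI>#<UUID> domain URIs used throughout the module. Assuming that shape (the values below are made up):

    >>> data = [('qemu://server.example.com/system', '00000000-0000-0000-0000-000000000001')]
    >>> map(lambda x: '#'.join(x), data)   # Python 2: map() returns a list
    ['qemu://server.example.com/system#00000000-0000-0000-0000-000000000001']
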
Example No. 4
    def node_query(self, request):
        """Searches nodes by the given pattern

		options: { 'nodePattern': <pattern> }

		return: [ { 'id' : <node URI>, 'label' : <node name>, 'group' : 'default', 'type' : 'node', 'virtech' : <virtualization technology>,
					'memUsed' : <used amount of memory in B>, 'memAvailable' : <amount of physical memory in B>, 'cpus' : <number of CPUs>,
					'cpuUsage' : <cpu usage in %>, 'available' : (True|False), 'supports_suspend' : (True|False), 'supports_snapshot' : (True|False) }, ... ]
		"""
        self.required_options(request, 'nodePattern')

        def _finished(thread, result, request):
            if self._check_thread_error(thread, result, request):
                return

            nodes = []
            success, data = result
            if success:
                for node_pd in data:
                    node_uri = urlparse.urlsplit(node_pd.uri)
                    nodes.append({
                        'id': node_pd.uri,
                        'label': node_pd.name,
                        'group': _('Physical servers'),
                        'type': 'node',
                        'virtech': node_uri.scheme,
                        'memUsed': node_pd.curMem,
                        'memAvailable': node_pd.phyMem,
                        'cpuUsage': (node_pd.cpu_usage or 0) / 10.0,
                        'available': node_pd.last_try == node_pd.last_update,
                        'cpus': node_pd.cpus,
                        'supports_suspend': node_pd.supports_suspend,
                        'supports_snapshot': node_pd.supports_snapshot,
                    })

                self.finished(request.id, nodes)
            else:
                self.finished(request.id,
                              None,
                              message=str(data),
                              status=MODULE_ERR_COMMAND_FAILED)

        self.uvmm.send('NODE_LIST',
                       Callback(_finished, request),
                       group='default',
                       pattern=request.options.get('nodePattern', '*'))
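
The virtualization technology reported as virtech is simply the scheme of the node URI, and the node name shown elsewhere comes from its network location; both are produced by urlparse.urlsplit (example URI made up):

    >>> from urlparse import urlsplit   # Python 2; urllib.parse.urlsplit in Python 3
    >>> uri = urlsplit('qemu://server.example.com/system')
    >>> uri.scheme, uri.netloc
    ('qemu', 'server.example.com')
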
Example No. 5
	def __init__(self):
		Simple.__init__(
			self,
			'UVMM_Connection-%d' % (UVMM_ConnectionThread.counter,),
			None,
			Callback(self._finished)
		)
		UVMM_ConnectionThread.counter += 1
		self.busy = False
Example No. 6
	def domain_migrate( self, request ):
		"""Migrates a domain from sourceURI to targetURI.

		options: { 'domainURI': <domain uri>, 'targetNodeURI': <target node uri> }

		return: 
		"""
		self.required_options( request, 'domainURI', 'targetNodeURI' )
		node_uri, domain_uuid = urlparse.urldefrag( request.options[ 'domainURI' ] )
		self.uvmm.send( 'DOMAIN_MIGRATE', Callback( self._thread_finish, request ), uri = node_uri, domain = domain_uuid, target_uri = request.options[ 'targetNodeURI' ] )
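
Domain URIs carry the domain UUID in the URI fragment, so urlparse.urldefrag splits them into the node URI and the UUID expected by the UVMM daemon (example values made up):

    >>> from urlparse import urldefrag   # Python 2; urllib.parse.urldefrag in Python 3
    >>> urldefrag('qemu://server.example.com/system#0a1b2c3d-4e5f-6071-8293-a4b5c6d7e8f9')
    ('qemu://server.example.com/system', '0a1b2c3d-4e5f-6071-8293-a4b5c6d7e8f9')
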
Example No. 7
	def domain_clone( self, request ):
		"""Clones an existing domain.

		options: { 'domainURI': <domain uri>, 'cloneName': <name of clone>, 'macAddress' : (clone|auto) }

		return: 
		"""
		self.required_options( request, 'domainURI', 'cloneName' )
		node_uri, domain_uuid = urlparse.urldefrag( request.options[ 'domainURI' ] )
		self.uvmm.send( 'DOMAIN_CLONE', Callback( self._thread_finish, request ), uri = node_uri, domain = domain_uuid, name = request.options[ 'cloneName' ], subst = { 'mac' : request.options.get( 'macAddress', 'clone' ) } )
Example No. 8
	def __call__(self, callback, command, **kwargs):
		MODULE.info('Starting request thread ...')
		if self.busy:
			MODULE.info('Thread is already busy')
			return False
		self._user_callback = callback
		self._function = Callback(self.request, command, **kwargs)
		self.busy = True
		MODULE.info('Thread is working on a request')
		self.run()
		return True
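
__call__ returns False while the thread is still busy, which suggests the connection threads are kept in a pool whose dispatcher asks one thread after another until one accepts the job. A minimal sketch of such a dispatcher, purely hypothetical and not taken from the UVMM client code:

    def dispatch_sketch(threads, callback, command, **kwargs):
        # Hypothetical dispatcher: hand the command to the first idle thread.
        for thread in threads:
            if thread(callback, command, **kwargs):  # __call__ returns False while busy
                return thread
        return None  # all threads busy; a real pool would queue the request or grow
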
Example No. 9
	def domain_state( self, request ):
		"""Set the state a domain domainUUID on node nodeURI.

		options: { 'domainURI': <domain uri>, 'domainState': (RUN|SHUTDOWN|PAUSE|RESTART|SUSPEND) }

		return: 
		"""
		self.required_options( request, 'domainURI', 'domainState' )
		node_uri, domain_uuid = urlparse.urldefrag( request.options[ 'domainURI' ] )
		MODULE.info( 'nodeURI: %s, domainUUID: %s' % ( node_uri, domain_uuid ) )
		if request.options[ 'domainState' ] not in self.DOMAIN_STATES:
			raise UMC_OptionTypeError( _( 'Invalid domain state' ) )
		self.uvmm.send( 'DOMAIN_STATE', Callback( self._thread_finish, request ), uri = node_uri, domain = domain_uuid, state = request.options[ 'domainState' ] )
Example No. 10
	def profile_query( self, request ):
		"""Returns a list of profiles for the given virtualization technology"""
		self.required_options( request, 'nodeURI' )

		def _finished( thread, result, request ):
			if self._check_thread_error( thread, result, request ):
				return

			success, data = result
			if success:
				profiles = [{'id': dn, 'label': item.name} for pd in data for (dn, item) in self._filter_profiles(pd)]

				self.finished( request.id, profiles )
			else:
				self.finished( request.id, None, message = str( data ), status = MODULE_ERR_COMMAND_FAILED )

		self.uvmm.send( 'NODE_LIST', Callback( _finished, request ), group = 'default', pattern = request.options[ 'nodeURI' ] )
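
The nested comprehension flattens the profiles of every matching node into a single result list; written out as loops it is equivalent to:

    profiles = []
    for pd in data:                                 # one entry per physical server
        for dn, item in self._filter_profiles(pd):  # (LDAP DN, profile object) pairs
            profiles.append({'id': dn, 'label': item.name})
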
Example No. 11
	def domain_remove( self, request ):
		"""Removes a domain. Optional a list of volumes can bes specified that should be removed

		options: { 'domainURI': <domain uri>, 'volumes' : [ { 'pool' : <pool name>, 'volumeFilename' : <filename> }, ... ] }

		return: 
		"""
		self.required_options( request, 'domainURI', 'volumes' )
		volume_list = []
		node_uri, domain_uuid = urlparse.urldefrag( request.options[ 'domainURI' ] )

		for vol in request.options[ 'volumes' ]:
			path = self.get_pool_path( node_uri, vol[ 'pool' ] )
			if not path:
				MODULE.warn( 'Could not find volume %(volumeFilename)s. The pool %(pool)s is not known' % vol )
				continue
			volume_list.append( os.path.join( path, vol[ 'volumeFilename' ] ) )
		self.uvmm.send( 'DOMAIN_UNDEFINE', Callback( self._thread_finish, request ), uri = node_uri, domain = domain_uuid, volumes = volume_list )
Example No. 12
    def storage_volume_remove(self, request):
        """Removes a list of volumes located in the given pool.

		options: { 'nodeURI': <node uri>, 'volumes' : [ { 'pool' : <pool name>, 'volumeFilename' : <filename> }, ... ] }

		return: 
		"""
        self.required_options(request, 'nodeURI', 'volumes')
        volume_list = []
        node_uri = request.options['nodeURI']
        for vol in request.options['volumes']:
            path = self.get_pool_path(node_uri, vol['pool'])
            if not path:
                MODULE.warn(
                    'Could not remove volume %(volumeFilename)s. The pool %(pool)s is not known'
                    % vol)
                continue
            volume_list.append(os.path.join(path, vol['volumeFilename']))
        self.uvmm.send('STORAGE_VOLUMES_DESTROY',
                       Callback(self._thread_finish, request),
                       uri=request.options['nodeURI'],
                       volumes=volume_list)
Example No. 13
	def domain_query( self, request ):
		"""Returns a list of domains matching domainPattern on the nodes matching nodePattern.

		options: { 'nodePattern': <node name pattern>, 'domainPattern' : <domain pattern> }

		return: [ { 'id' : <domain uri>, 'label' : <domain name>, 'nodeName' : <node>, 'state' : <state>, 'type' : 'domain', 'mem' : <ram>, 'cpuUsage' : <percentage>, ... }, ... ]
		"""

		def _finished( thread, result, request ):
			if self._check_thread_error( thread, result, request ):
				return

			success, data = result

			if success:
				domain_list = []
				for node_uri, domains in data.items():
					uri = urlparse.urlsplit( node_uri )
					for domain in domains:
						if domain[ 'uuid' ] == '00000000-0000-0000-0000-000000000000': # ignore domain-0 of Xen
							continue
						domain_uri = '%s#%s' % ( node_uri, domain[ 'uuid' ] )
						domain_list.append( { 'id' : domain_uri,
											'label' : domain[ 'name' ],
											'nodeName' : uri.netloc,
											'state' : domain[ 'state' ],
											'type' : 'domain',
											'mem' : domain[ 'mem' ],
											'cpuUsage' : domain[ 'cpu_usage' ],
											'vnc' : domain[ 'vnc' ],
											'vnc_port' : domain[ 'vnc_port' ],
											'suspended' : bool( domain[ 'suspended' ] ),
											'node_available' : domain[ 'node_available' ] } )
				self.finished( request.id, domain_list )
			else:
				self.finished( request.id, None, str( data ), status = MODULE_ERR_COMMAND_FAILED )

		self.uvmm.send( 'DOMAIN_LIST', Callback( _finished, request ), uri = request.options.get( 'nodePattern', '*' ), pattern = request.options.get( 'domainPattern', '*' ) )
Example No. 14
			profile_dn = json.get( 'profile' )
			profile = None
			if profile_dn:
				for dn, pro in self.profiles:
					if dn == profile_dn:
						profile = pro
						break
				if profile:
					json[ 'profileData' ] = object2dict( profile )

			MODULE.info( 'Got domain description: success: %s, data: %s' % ( success, json ) )
			self.finished( request.id, json )

		self.required_options( request, 'domainURI' )
		node_uri, domain_uuid = urlparse.urldefrag( request.options[ 'domainURI' ] )
		self.uvmm.send( 'DOMAIN_INFO', Callback( _finished, request ), uri = node_uri, domain = domain_uuid )

	def _create_disks( self, node_uri, disks, domain_info, profile = None ):
		drives = []

		uri = urlparse.urlsplit( node_uri )
		for disk in disks:
			drive = Disk()
			# do we create a new disk or just copy data from an already defined drive
			create_new = disk.get( 'source', None ) is None

			drive.device = disk[ 'device' ]
			drive.driver_type = disk[ 'driver_type' ]
			drive.driver_cache = disk.get('driver_cache', 'default')

			# set old values of existing drive
Example No. 15
    def process_uvmm_response(self, request, callback=None):
        return Callback(self._process_uvmm_response, request, callback)
Example No. 16
    def group_query(self, request):
        self.uvmm.send('GROUP_LIST', Callback(self._thread_finish, request))
Example No. 17
	def domain_add( self, request ):
		"""Creates a new domain on nodeURI.

		options: { 'nodeURI': <node uri>, 'domain' : {} }

		return:
		"""
		self.required_options( request, 'nodeURI', 'domain' )

		domain = request.options.get( 'domain' )

		domain_info = Data_Domain()
		# when we edit a domain there must be a UUID
		if 'domainURI' in domain:
			node_uri, domain_uuid = urlparse.urldefrag( domain[ 'domainURI' ] )
			domain_info.uuid = domain_uuid

		# annotations & profile
		profile = None
		if not domain_info.uuid:
			profile_dn = domain.get( 'profile' )
			for dn, pro in self.profiles:
				if dn == profile_dn:
					profile = pro
					break
			else:
				raise UMC_OptionTypeError( _( 'Unknown profile given' ) )
			domain_info.annotations[ 'profile' ] = profile_dn
			MODULE.info( 'Creating new domain using profile %s' % str( object2dict( profile ) ) )
			domain_info.annotations[ 'os' ] = getattr( profile, 'os' )
		else:
			domain_info.annotations[ 'os' ] = domain.get( 'os', '' )
		domain_info.annotations[ 'contact' ] = domain.get( 'contact', '' )
		domain_info.annotations[ 'description' ] = domain.get( 'description', '' )


		domain_info.name = domain[ 'name' ]
		if 'arch' in domain:
			domain_info.arch = domain[ 'arch' ]
		elif profile:
			domain_info.arch = profile.arch
		else:
			raise UMC_CommandError( 'Could not determine architecture for domain' )

		if domain_info.arch == 'automatic':
			success, node_list = self.uvmm.send( 'NODE_LIST', None, group = 'default', pattern = request.options[ 'nodeURI' ] )
			if not success:
				raise UMC_CommandError( _( 'Failed to retrieve details for the server %(nodeURI)s' ) % request.options )
			if not node_list:
				raise UMC_CommandError( _( 'Unknown physical server %(nodeURI)s' ) % request.options )
			archs = set( [ t.arch for t in node_list[ 0 ].capabilities ] )
			if 'x86_64' in archs:
				domain_info.arch = 'x86_64'
			else:
				domain_info.arch = 'i686'

		if 'type' in domain:
			try:
				domain_info.domain_type, domain_info.os_type = domain['type'].split( '-' )
			except ValueError:
				domain_info.domain_type, domain_info.os_type = ( None, None )

		if domain_info.domain_type is None or domain_info.os_type is None:
			if profile:
				domain_info.domain_type, domain_info.os_type = profile.virttech.split( '-' )
			else:
				raise UMC_CommandError( 'Could not determine virtualisation technology for domain' )

		# check configuration for para-virtualized machines
		if domain_info.os_type == 'xen':
			if profile and getattr( profile, 'advkernelconf', None ) != True: # use pyGrub
				domain_info.bootloader = '/usr/bin/pygrub'
				domain_info.bootloader_args = '-q' # Bug #19249: PyGrub timeout
			else:
				domain_info.kernel = domain['kernel']
				domain_info.cmdline = domain['cmdline']
				domain_info.initrd = domain['initrd']
		# memory
		domain_info.maxMem = MemorySize.str2num( domain['maxMem'], unit = 'MB' )

		# CPUs
		try:
			domain_info.vcpus = int( domain[ 'vcpus' ] )
		except ValueError:
			raise UMC_OptionTypeError( 'vcpus must be a number' )

		# boot devices
		if 'boot' in domain:
			domain_info.boot = domain[ 'boot' ]
		elif profile:
			domain_info.boot = getattr( profile, 'bootdev', None )
		else:
			raise UMC_CommandError( 'Could not determine the list of boot devices for domain' )

		# VNC
		if domain[ 'vnc' ]:
			gfx = Graphic()
			if domain.get( 'vnc_remote', False ):
				gfx.listen = '0.0.0.0'
			else:
				gfx.listen = None
			if 'kblayout' in domain:
				gfx.keymap = domain[ 'kblayout' ]
			elif profile:
				gfx.keymap = profile.kblayout
			else:
				raise UMC_CommandError( 'Could not determine the keyboard layout for the VNC access' )
			if domain.get( 'vnc_password', None ):
				gfx.passwd = domain[ 'vnc_password' ]

			domain_info.graphics = [gfx,]

		# RTC offset
		if 'rtc_offset' in domain:
			domain_info.rtc_offset = domain[ 'rtc_offset' ]
		elif profile and getattr( profile, 'rtcoffset' ):
			domain_info.rtc_offset = profile.rtcoffset
		else:
			domain_info.rtc_offset = 'utc'

		# drives
		domain_info.disks = self._create_disks( request.options[ 'nodeURI' ], domain[ 'disks' ], domain_info, profile )
		verify_device_files( domain_info )
		# on _new_ PV machines we should move the CDROM drive to first position
		if domain_info.uuid is None and domain_info.os_type == 'xen':
			non_disks, disks = [], []
			for dev in domain_info.disks:
				if dev.device == Disk.DEVICE_DISK:
					disks.append( dev )
				else:
					non_disks.append( dev )
			domain_info.disks = non_disks + disks

		# network interface
		domain_info.interfaces = []
		for interface in domain[ 'interfaces' ]:
			iface = Interface()
			if interface.get( 'type', '' ).startswith( 'network:' ):
				iface.type = 'network'
				iface.source = interface[ 'type' ].split( ':', 1 )[ 1 ]
			else:
				iface.type = interface.get( 'type', 'bridge' )
				iface.source = interface[ 'source' ]
			iface.model = interface[ 'model' ]
			iface.mac_address = interface.get( 'mac_address', None )
			# if domain_info.os_type == 'hvm':
			# 	if domain_info.domain_type == 'xen':
			# 		iface.model = 'netfront'
			# 	elif domain_info.domain_type in ( 'kvm', 'qemu' ):
			# 		iface.model = 'virtio'
			domain_info.interfaces.append( iface )

		def _finished( thread, result, request ):
			if self._check_thread_error( thread, result, request ):
				return

			success, data = result

			json = object2dict( data )
			MODULE.info( 'New domain: success: %s, data: %s' % ( success, json ) )
			if success:
				self.finished( request.id, json )
			else:
				self.finished( request.id, None, message = str( data ), status = MODULE_ERR_COMMAND_FAILED )

		self.uvmm.send( 'DOMAIN_DEFINE', Callback( _finished, request ), uri = request.options[ 'nodeURI' ], domain = domain_info )
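
Pieced together from the keys this handler reads, a request to define a new domain might look roughly like the following; all names and values are illustrative, not taken from a real setup:

    example_options = {  # assumed shape of request.options for domain_add()
        'nodeURI': 'qemu://server.example.com/system',
        'domain': {
            'name': 'web-server',
            'profile': 'cn=kvm-profile,dc=example,dc=com',  # made-up DN of an entry in self.profiles
            'type': 'kvm-hvm',                              # '<domain type>-<os type>'
            'arch': 'x86_64',                               # or 'automatic'
            'maxMem': '1024 MB',                            # parsed by MemorySize.str2num()
            'vcpus': 2,
            'boot': ['cdrom', 'hd'],
            'vnc': True,
            'vnc_remote': True,
            'kblayout': 'en-us',
            'rtc_offset': 'utc',
            'disks': [],                                    # handed to self._create_disks()
            'interfaces': [{'type': 'bridge', 'source': 'eth0', 'model': 'virtio'}],
            'contact': 'admin@example.com',
            'description': 'illustrative example request',
        },
    }
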