def _create_snapshot(self, context, volume_id, name, description,
                     force=False):
    volume = self.get(context, volume_id)
    if not force and volume['status'] != "available":
        raise exception.ApiError(_("Volume status must be available"))

    options = {
        'volume_id': volume_id,
        'user_id': context.user_id,
        'project_id': context.project_id,
        'status': "creating",
        'progress': '0%',
        'volume_size': volume['size'],
        'display_name': name,
        'display_description': description}

    snapshot = self.db.snapshot_create(context, options)
    rpc.cast(context,
             FLAGS.scheduler_topic,
             {"method": "create_snapshot",
              "args": {"topic": FLAGS.volume_topic,
                       "volume_id": volume_id,
                       "snapshot_id": snapshot['id']}})
    return snapshot

def disassociate_floating_ip(self, context, address,
                             affect_auto_assigned=False):
    """Disassociates a floating ip from the fixed ip it is associated with."""
    rpc.cast(context, FLAGS.network_topic,
             {'method': 'disassociate_floating_ip',
              'args': {'address': address}})

def remove_fixed_ip_from_instance(self, context, instance_id, address):
    """Removes a fixed ip from an instance on the specified network."""
    args = {'instance_id': instance_id,
            'address': address}
    rpc.cast(context, FLAGS.network_topic,
             {'method': 'remove_fixed_ip_from_instance',
              'args': args})

def cast_to_network_host(context, host, method, update_db=False, **kwargs):
    """Cast request to a network host queue"""
    rpc.cast(context,
             db.queue_get_for(context, 'network', host),
             {"method": method,
              "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to network '%(host)s'") % locals())

def create(self, context, size, snapshot_id, name, description,
           volume_type=None, metadata=None, availability_zone=None):
    if snapshot_id is not None:
        snapshot = self.get_snapshot(context, snapshot_id)
        if snapshot['status'] != "available":
            raise exception.ApiError(
                _("Snapshot status must be available"))
        if not size:
            size = snapshot['volume_size']

    if quota.allowed_volumes(context, 1, size) < 1:
        pid = context.project_id
        LOG.warn(_("Quota exceeded for %(pid)s, tried to create"
                   " %(size)sG volume") % locals())
        raise exception.QuotaError(_("Volume quota exceeded. You cannot "
                                     "create a volume of size %sG") % size)

    if availability_zone is None:
        availability_zone = FLAGS.storage_availability_zone

    if volume_type is None:
        volume_type_id = None
    else:
        volume_type_id = volume_type.get('id', None)

    options = {
        'size': size,
        'user_id': context.user_id,
        'project_id': context.project_id,
        'snapshot_id': snapshot_id,
        'availability_zone': availability_zone,
        'status': "creating",
        'attach_status': "detached",
        'display_name': name,
        'display_description': description,
        'volume_type_id': volume_type_id,
        'metadata': metadata,
        }

    volume = self.db.volume_create(context, options)
    rpc.cast(context,
             FLAGS.scheduler_topic,
             {"method": "create_volume",
              "args": {"topic": FLAGS.volume_topic,
                       "volume_id": volume['id'],
                       "snapshot_id": snapshot_id}})
    return volume

def add_network_to_project(self, context, project_id):
    """Force adds another network to a project."""
    rpc.cast(context, FLAGS.network_topic,
             {'method': 'add_network_to_project',
              'args': {'project_id': project_id}})

def deallocate_for_instance(self, context, instance, **kwargs):
    """Deallocates all network structures related to instance."""
    args = kwargs
    args['instance_id'] = instance['id']
    args['project_id'] = instance['project_id']
    rpc.cast(context, FLAGS.network_topic,
             {'method': 'deallocate_for_instance',
              'args': args})

def release_floating_ip(self, context, address,
                        affect_auto_assigned=False):
    """Removes (deallocates) a floating ip with the given address from a project."""
    rpc.cast(context, FLAGS.network_topic,
             {'method': 'deallocate_floating_ip',
              'args': {'address': address,
                       'affect_auto_assigned': affect_auto_assigned}})

def add_fixed_ip_to_instance(self, context, instance_id, host, network_id):
    """Adds a fixed ip to an instance from the specified network."""
    args = {'instance_id': instance_id,
            'host': host,
            'network_id': network_id}
    rpc.cast(context, FLAGS.network_topic,
             {'method': 'add_fixed_ip_to_instance',
              'args': args})

def delete_snapshot(self, context, snapshot_id):
    snapshot = self.get_snapshot(context, snapshot_id)
    if snapshot['status'] != "available":
        raise exception.ApiError(_("Snapshot status must be available"))
    self.db.snapshot_update(context, snapshot_id, {'status': 'deleting'})
    rpc.cast(context,
             FLAGS.scheduler_topic,
             {"method": "delete_snapshot",
              "args": {"topic": FLAGS.volume_topic,
                       "snapshot_id": snapshot_id}})

def create_console(self, context, instance_id):
    #NOTE(mdragon): If we wanted to return the console info
    #               here, we would need to do a call.
    #               They can just do an index later to fetch
    #               console info. I am not sure which is better
    #               here.
    instance = self._get_instance(context, instance_id)
    rpc.cast(context,
             self._get_console_topic(context, instance['host']),
             {'method': 'add_console',
              'args': {'instance_id': instance['id']}})

def _force_volume_delete(self, ctxt, volume):
    """Delete a volume, bypassing the check that it must be available."""
    host = volume['host']
    if not host:
        # Deleting volume from database and skipping rpc.
        self.db.volume_destroy(ctxt, volume['id'])
        return

    rpc.cast(ctxt,
             self.db.queue_get_for(ctxt, FLAGS.volume_topic, host),
             {"method": "delete_volume",
              "args": {"volume_id": volume['id']}})

def delete_console(self, context, instance_id, console_id):
    instance_id = self._translate_uuid_if_necessary(context, instance_id)
    console = self.db.console_get(context, console_id, instance_id)
    pool = console['pool']
    rpc.cast(context,
             self.db.queue_get_for(context, FLAGS.console_topic,
                                   pool['host']),
             {'method': 'remove_console',
              'args': {'console_id': console['id']}})

def delete(self, context, volume_id):
    volume = self.get(context, volume_id)
    if volume['status'] != "available":
        raise exception.ApiError(_("Volume status must be available"))
    now = utils.utcnow()
    self.db.volume_update(context, volume_id, {'status': 'deleting',
                                               'terminated_at': now})
    host = volume['host']
    rpc.cast(context,
             self.db.queue_get_for(context, FLAGS.volume_topic, host),
             {"method": "delete_volume",
              "args": {"volume_id": volume_id}})

def associate_floating_ip(self, context, floating_address, fixed_address,
                          affect_auto_assigned=False):
    """Associates a floating ip with a fixed ip.

    Ensures the floating ip is allocated to the project in the context.
    """
    rpc.cast(context, FLAGS.network_topic,
             {'method': 'associate_floating_ip',
              'args': {'floating_address': floating_address,
                       'fixed_address': fixed_address,
                       'affect_auto_assigned': affect_auto_assigned}})

def cast_to_volume_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a volume host queue"""
    if update_db:
        volume_id = kwargs.get('volume_id', None)
        if volume_id is not None:
            now = utils.utcnow()
            db.volume_update(context, volume_id,
                             {'host': host, 'scheduled_at': now})
    rpc.cast(context,
             db.queue_get_for(context, 'volume', host),
             {"method": method,
              "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to volume '%(host)s'") % locals())

def cast_to_compute_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a compute host queue"""
    if update_db:
        # fall back on the id if the uuid is not present
        instance_id = kwargs.get('instance_id', None)
        instance_uuid = kwargs.get('instance_uuid', instance_id)
        if instance_uuid is not None:
            now = utils.utcnow()
            db.instance_update(context, instance_uuid,
                               {'host': host, 'scheduled_at': now})
    rpc.cast(context,
             db.queue_get_for(context, 'compute', host),
             {"method": method,
              "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to compute '%(host)s'") % locals())

def cast_to_host(context, topic, host, method, update_db=True, **kwargs):
    """Generic cast to host"""
    topic_mapping = {
        'compute': cast_to_compute_host,
        'volume': cast_to_volume_host,
        'network': cast_to_network_host}

    func = topic_mapping.get(topic)
    if func:
        func(context, host, method, update_db=update_db, **kwargs)
    else:
        rpc.cast(context,
                 db.queue_get_for(context, topic, host),
                 {"method": method,
                  "args": kwargs})
        LOG.debug(_("Casted '%(method)s' to %(topic)s '%(host)s'")
                  % locals())

def _notify_vsa(self, context, volume_ref, status):
    if volume_ref['volume_type_id'] is None:
        return

    if volume_types.is_vsa_drive(volume_ref['volume_type_id']):
        vsa_id = None
        for i in volume_ref.get('volume_metadata'):
            if i['key'] == 'to_vsa_id':
                vsa_id = int(i['value'])
                break

        if vsa_id:
            rpc.cast(context,
                     FLAGS.vsa_topic,
                     {"method": "vsa_volume_created",
                      "args": {"vol_id": volume_ref['id'],
                               "vsa_id": vsa_id,
                               "status": status}})

def create(self, context, display_name='', display_description='',
           vc_count=1, instance_type=None, image_name=None,
           availability_zone=None, storage=[], shared=None):
    """
    Provision VSA instance with corresponding compute instances
    and associated volumes

    :param storage: List of dictionaries with the following keys:
                    disk_name, num_disks, size
    :param shared: Specifies if storage is dedicated or shared.
                   For shared storage, disks are split into partitions.
    """
    LOG.info(_("*** Experimental VSA code ***"))

    if vc_count > FLAGS.max_vcs_in_vsa:
        LOG.warning(_("Requested number of VCs (%d) is too high."
                      " Setting to default"), vc_count)
        vc_count = FLAGS.max_vcs_in_vsa

    if instance_type is None:
        instance_type = self._get_default_vsa_instance_type()

    if availability_zone is None:
        availability_zone = FLAGS.storage_availability_zone

    if storage is None:
        storage = []

    if not shared or shared == 'False':
        shared = False
    else:
        shared = True

    # check if image is ready before starting any work
    if image_name is None:
        image_name = FLAGS.vc_image_name
    try:
        image_service = self.compute_api.image_service
        vc_image = image_service.show_by_name(context, image_name)
        vc_image_href = vc_image['id']
    except exception.ImageNotFound:
        raise exception.ApiError(_("Failed to find configured image %s")
                                 % image_name)

    options = {
        'display_name': display_name,
        'display_description': display_description,
        'project_id': context.project_id,
        'availability_zone': availability_zone,
        'instance_type_id': instance_type['id'],
        'image_ref': vc_image_href,
        'vc_count': vc_count,
        'status': VsaState.CREATING,
    }
    LOG.info(_("Creating VSA: %s") % options)

    # create DB entry for VSA instance
    try:
        vsa_ref = self.db.vsa_create(context, options)
    except exception.Error:
        raise exception.ApiError(_(sys.exc_info()[1]))
    vsa_id = vsa_ref['id']
    vsa_name = vsa_ref['name']

    # check storage parameters
    try:
        volume_params = self._check_storage_parameters(context, vsa_name,
                                                       storage, shared)
    except exception.ApiError:
        self.db.vsa_destroy(context, vsa_id)
        raise exception.ApiError(_("Error in storage parameters: %s")
                                 % storage)

    # after creating DB entry, re-check and set some defaults
    updates = {}
    if (not hasattr(vsa_ref, 'display_name') or
            vsa_ref.display_name is None or
            vsa_ref.display_name == ''):
        updates['display_name'] = display_name = vsa_name
    updates['vol_count'] = len(volume_params)
    vsa_ref = self.update(context, vsa_id, **updates)

    # create volumes
    if FLAGS.vsa_multi_vol_creation:
        if len(volume_params) > 0:
            request_spec = {
                'num_volumes': len(volume_params),
                'vsa_id': str(vsa_id),
                'volumes': volume_params,
            }

            rpc.cast(context,
                     FLAGS.scheduler_topic,
                     {"method": "create_volumes",
                      "args": {"topic": FLAGS.volume_topic,
                               "request_spec": request_spec,
                               "availability_zone": availability_zone}})
    else:
        # create BE volumes one-by-one
        for vol in volume_params:
            try:
                vol_name = vol['name']
                vol_size = vol['size']
                vol_type_id = vol['volume_type_id']
                LOG.debug(_("VSA ID %(vsa_id)d %(vsa_name)s: Create "
                            "volume %(vol_name)s, %(vol_size)d GB, "
                            "type %(vol_type_id)s"), locals())

                vol_type = volume_types.get_volume_type(
                    context, vol['volume_type_id'])

                vol_ref = self.volume_api.create(
                    context,
                    vol_size,
                    None,
                    vol_name,
                    vol['description'],
                    volume_type=vol_type,
                    metadata=dict(to_vsa_id=str(vsa_id)),
                    availability_zone=availability_zone)
            except Exception:
                self.update_vsa_status(context, vsa_id,
                                       status=VsaState.PARTIAL)
                raise

    if len(volume_params) == 0:
        # No BE volumes - ask VSA manager to start VCs
        rpc.cast(context,
                 FLAGS.vsa_topic,
                 {"method": "create_vsa",
                  "args": {"vsa_id": str(vsa_id)}})

    return vsa_ref