def _get_default_networks(self):
    project_id = CONF.neutron_default_tenant_id
    ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
    networks = {}
    for n in self.network_api.get_all(ctx):
        networks[n['id']] = n['label']
    return [{'id': k, 'label': v} for k, v in six.iteritems(networks)]
def __call__(self, req):
    if 'X-Auth-Token' not in req.headers:
        user_id = req.headers.get('X-Auth-User', 'admin')
        project_id = req.headers.get('X-Auth-Project-Id', 'admin')
        os_url = os.path.join(req.url, project_id)
        res = webob.Response()
        # NOTE(vish): This is expecting and returning Auth(1.1), whereas
        #             keystone uses 2.0 auth. We should probably allow
        #             2.0 auth here as well.
        res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id)
        res.headers['X-Server-Management-Url'] = os_url
        res.content_type = 'text/plain'
        res.status = '204'
        return res

    token = req.headers['X-Auth-Token']
    user_id, _sep, project_id = token.partition(':')
    project_id = project_id or user_id
    remote_address = getattr(req, 'remote_address', '127.0.0.1')
    if CONF.use_forwarded_for:
        remote_address = req.headers.get('X-Forwarded-For', remote_address)
    ctx = context.RequestContext(user_id,
                                 project_id,
                                 is_admin=True,
                                 remote_address=remote_address)
    req.environ['jacket.context'] = ctx
    req.environ['storage.context'] = ctx
    return self.application
def base_call(self, req, project_id_in_path, always_admin=True):
    if 'X-Auth-Token' not in req.headers:
        user_id = req.headers.get('X-Auth-User', 'admin')
        project_id = req.headers.get('X-Auth-Project-Id', 'admin')
        if project_id_in_path:
            os_url = '/'.join([req.url.rstrip('/'), project_id])
        else:
            os_url = req.url.rstrip('/')
        res = webob.Response()
        # NOTE(vish): This is expecting and returning Auth(1.1), whereas
        #             keystone uses 2.0 auth. We should probably allow
        #             2.0 auth here as well.
        res.headers['X-Auth-Token'] = '%s:%s' % (user_id, project_id)
        res.headers['X-Server-Management-Url'] = os_url
        res.content_type = 'text/plain'
        res.status = '204'
        return res

    token = req.headers['X-Auth-Token']
    user_id, _sep, project_id = token.partition(':')
    project_id = project_id or user_id
    remote_address = getattr(req, 'remote_address', '127.0.0.1')
    if CONF.use_forwarded_for:
        remote_address = req.headers.get('X-Forwarded-For', remote_address)
    is_admin = always_admin or (user_id == 'admin')
    ctx = context.RequestContext(user_id,
                                 project_id,
                                 is_admin=is_admin,
                                 remote_address=remote_address)
    req.environ['compute.context'] = ctx
    return self.application
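def _example_noauth_roundtrip(middleware_factory, fake_wsgi_app):
    """Hedged usage sketch (not from the original source): drive the
    no-auth middleware above with webob, assuming the enclosing class
    follows the usual OpenStack wsgi.Middleware pattern (a
    webob.dec.wsgify ``__call__`` wrapping a WSGI app).
    ``middleware_factory`` and ``fake_wsgi_app`` are hypothetical
    stand-ins, since the middleware class itself is not shown here."""
    import webob
    middleware = middleware_factory(fake_wsgi_app)
    # First request carries no token, so the middleware answers itself
    # with a 204 and hands out 'user:project' credentials.
    res = webob.Request.blank('/v2').get_response(middleware)
    assert res.status_int == 204
    token = res.headers['X-Auth-Token']  # 'admin:admin' by default
    # Second request presents the token and is forwarded to the wrapped
    # app with an admin RequestContext placed in req.environ.
    req = webob.Request.blank('/v2')
    req.headers['X-Auth-Token'] = token
    return req.get_response(middleware)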
def create_volume_from_snapshot(self, volume, snapshot):
    """Create a volume from a snapshot."""
    context = req_context.RequestContext(is_admin=True,
                                         project_id=volume.project_id)
    try:
        provider_snap = self._get_provider_snapshot_id(context, snapshot.id)
        vol = self._create_volume(volume, context, snapshot=provider_snap)
    except Exception as ex:
        LOG.error(_LE('create_volume_from_snapshot failed, '
                      'snapshot:%(id)s, ex:%(ex)s'),
                  {'id': snapshot.id, 'ex': ex})
        msg = (_("create_volume_from_snapshot failed! volume:%s")
               % volume.id)
        raise cinder_ex.VolumeBackendAPIException(data=msg)

    # create local volume mapper
    try:
        values = {'provider_volume_id': vol['VolumeId']}
        self.caa_db_api.volume_mapper_create(context, volume.id,
                                             context.project_id, values)
    except Exception as ex:
        LOG.error(_LE("volume_mapper_create failed! ex = %s"), ex)
        self._aws_client.get_aws_client(context).\
            delete_volume(VolumeId=vol['VolumeId'])
        msg = (_("create_volume_from_snapshot failed! volume:%s")
               % volume.id)
        raise cinder_ex.VolumeBackendAPIException(data=msg)

    LOG.debug('create volume %s success.' % volume.id)
def list_instances(self):
    """List VM instances from all nodes.

    :return: list of instance id. e.g. ['id_001', 'id_002', ...]
    """
    instances = []
    context = req_context.RequestContext(is_admin=True,
                                         project_id='default')
    try:
        servers = self.aws_client.get_aws_client(context)\
                      .describe_instances()
    except botocore.exceptions.ClientError as e:
        reason = e.response.get('Error', {}).get('Message', 'Unknown')
        LOG.warn('List instances failed, the error is: %s' % reason)
        return instances
    for server in servers:
        tags = server.get('Tags') or []
        server_name = None
        for tag in tags:
            # AWS tag dicts use capitalized 'Key'/'Value' keys.
            if tag.get('Key') == 'Name':
                server_name = tag.get('Value')
                break
        if server_name:
            instances.append(server_name)
    LOG.debug('List_instance: %s' % instances)
    return instances
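def _example_name_tag_lookup():
    """Hedged sketch (not from the original source) of the Name-tag scan
    used in list_instances above, run against a hand-written sample
    record; the 'caa_instance_id' tag is illustrative only."""
    server = {'Tags': [{'Key': 'caa_instance_id', 'Value': 'uuid-1'},
                       {'Key': 'Name', 'Value': 'id_001'}]}
    name = next((t['Value'] for t in server.get('Tags') or []
                 if t.get('Key') == 'Name'), None)
    assert name == 'id_001'
    return name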
def create_cell_mapping(**kwargs):
    args = sample_cell_mapping.copy()
    if 'uuid' not in kwargs:
        args['uuid'] = uuidutils.generate_uuid()
    args.update(kwargs)
    ctxt = context.RequestContext('fake-user', 'fake-project')
    return cell_mapping.CellMapping._create_in_db(ctxt, args)
def create_volume(self, volume):
    LOG.debug('start to create volume')
    LOG.debug('volume glance image metadata: %s'
              % volume.volume_glance_metadata)
    volume_args = {}
    volume_args['size'] = volume.size
    volume_args['display_description'] = volume.display_description
    volume_args['display_name'] = self._get_provider_volume_name(
        volume.display_name, volume.id)

    context = req_context.RequestContext(is_admin=True,
                                         project_id=volume.project_id)
    volume_type_id = volume.volume_type_id
    volume_type_name = None
    LOG.debug('volume type id %s ' % volume_type_id)
    if volume_type_id:
        volume_type_name = self._get_sub_type_name(
            req_context.get_admin_context(), volume_type_id)
    if volume_type_name:
        volume_args['volume_type'] = volume_type_name

    optionals = ('shareable', 'metadata', 'multiattach')
    volume_args.update((prop, getattr(volume, prop)) for prop in optionals
                       if getattr(volume, prop, None))
    if 'metadata' not in volume_args:
        volume_args['metadata'] = {}
    volume_args['metadata']['tag:caa_volume_id'] = volume.id

    sub_volume = self.os_cinderclient(context).create_volume(**volume_args)
    LOG.debug('submit create-volume task to sub os. '
              'sub volume id: %s' % sub_volume.id)

    LOG.debug('start to wait for volume %s in status '
              'available' % sub_volume.id)
    try:
        self.os_cinderclient(context).check_create_volume_complete(
            sub_volume)
    except Exception as ex:
        LOG.exception(_LE("volume(%s), check_create_volume_complete "
                          "failed! ex = %s"), volume.id, ex)
        with excutils.save_and_reraise_exception():
            sub_volume.delete()

    try:
        # create volume mapper
        values = {"provider_volume_id": sub_volume.id}
        self.caa_db_api.volume_mapper_create(context, volume.id,
                                             context.project_id, values)
    except Exception as ex:
        LOG.exception(_LE("volume_mapper_create failed! ex = %s"), ex)
        sub_volume.delete()
        raise

    LOG.debug('create volume %s success.' % volume.id)
    return {'provider_location': 'SUB-FusionSphere'}
def get_info(self, instance):
    """Retrieve information from aws for a specific instance name."""
    LOG.debug('Get info of server: %s' % instance.uuid)
    context = req_context.RequestContext(is_admin=True,
                                         project_id=instance.project_id)
    state = power_state.NOSTATE
    aws_instance_id = self._get_provider_instance_id(context,
                                                     instance.uuid)
    if not aws_instance_id:
        LOG.error('Cannot get the aws_instance_id of %s' % instance.uuid)
        raise exception.InstanceNotFound(instance_id=instance.uuid)
    try:
        LOG.debug('Get info the instance %s on aws', aws_instance_id)
        kwargs = {'InstanceIds': [aws_instance_id]}
        instances = self.aws_client.get_aws_client(context)\
                        .describe_instances(**kwargs)
        if not instances:
            LOG.error('Instance %s not found on aws' % instance.uuid)
            raise exception.InstanceNotFound(instance_id=instance.uuid)
        # Use a distinct name for the aws record so it does not shadow
        # the nova instance argument.
        aws_instance = instances[0]
        state = AWS_POWER_STATE.get(aws_instance.get('State').get('Code'))
    except botocore.exceptions.ClientError as e:
        reason = e.response.get('Error', {}).get('Message', 'Unknown')
        with excutils.save_and_reraise_exception():
            LOG.error('Get instance failed on aws, the error is: %s'
                      % reason)
    except KeyError:
        state = power_state.NOSTATE
    return hardware.InstanceInfo(state=state,
                                 max_mem_kb=0,
                                 mem_kb=0,
                                 num_cpu=1)
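# Hedged sketch of the AWS_POWER_STATE table consulted in get_info above;
# the real module defines its own mapping, so this illustrative copy uses
# a distinct name. The EC2 state codes (0/16/32/48/64/80) are documented
# by AWS; the values are nova's power_state constants.
_EXAMPLE_AWS_POWER_STATE = {
    0: power_state.NOSTATE,     # pending
    16: power_state.RUNNING,    # running
    32: power_state.SHUTDOWN,   # shutting-down
    48: power_state.SHUTDOWN,   # terminated
    64: power_state.SHUTDOWN,   # stopping
    80: power_state.SHUTDOWN,   # stopped
}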
def create_mapping(**kwargs):
    args = sample_mapping.copy()
    args.update(kwargs)
    if args["cell_mapping"] is None:
        args["cell_mapping"] = create_cell_mapping()
    args["cell_id"] = args.pop("cell_mapping", {}).get("id")
    ctxt = context.RequestContext('fake-user', 'fake-project')
    return host_mapping.HostMapping._create_in_db(ctxt, args)
def setUp(self):
    super(MigrationsSamplesJsonTestV2_23, self).setUp()
    self.api.microversion = self.microversion
    fake_context = context.RequestContext('fake', 'fake')
    for mig in self.fake_migrations:
        mig_obj = compute.Migration(context=fake_context, **mig)
        mig_obj.create()
def setUp(self):
    super(BuildRequestTestCase, self).setUp()
    # NOTE: This means that we're using a database for this test suite
    # despite inheriting from NoDBTestCase
    self.useFixture(fixtures.Database(database='api'))
    self.context = context.RequestContext('fake-user', 'fake-project')
    self.build_req_obj = build_request.BuildRequest()
    self.instance_uuid = uuidutils.generate_uuid()
    self.project_id = 'fake-project'
def __call__(self, req):
    user_id = req.headers.get('X_USER')
    user_id = req.headers.get('X_USER_ID', user_id)
    if user_id is None:
        LOG.debug("Neither X_USER_ID nor X_USER found in request")
        return webob.exc.HTTPUnauthorized()

    roles = self._get_roles(req)

    if 'X_TENANT_ID' in req.headers:
        # This is the new header since Keystone went to ID/Name
        project_id = req.headers['X_TENANT_ID']
    else:
        # This is for legacy compatibility
        project_id = req.headers['X_TENANT']
    project_name = req.headers.get('X_TENANT_NAME')
    user_name = req.headers.get('X_USER_NAME')
    req_id = req.environ.get(request_id.ENV_REQUEST_ID)

    # Get the auth token
    auth_token = req.headers.get('X_AUTH_TOKEN',
                                 req.headers.get('X_STORAGE_TOKEN'))

    # Build a context, including the auth_token...
    remote_address = req.remote_addr
    if CONF.use_forwarded_for:
        remote_address = req.headers.get('X-Forwarded-For', remote_address)

    service_catalog = None
    if req.headers.get('X_SERVICE_CATALOG') is not None:
        try:
            catalog_header = req.headers.get('X_SERVICE_CATALOG')
            service_catalog = jsonutils.loads(catalog_header)
        except ValueError:
            raise webob.exc.HTTPInternalServerError(
                _('Invalid service catalog json.'))

    # NOTE(jamielennox): This is a full auth plugin set by auth_token
    # middleware in newer versions.
    user_auth_plugin = req.environ.get('keystone.token_auth')

    ctx = context.RequestContext(user_id,
                                 project_id,
                                 user_name=user_name,
                                 project_name=project_name,
                                 roles=roles,
                                 auth_token=auth_token,
                                 remote_address=remote_address,
                                 service_catalog=service_catalog,
                                 request_id=req_id,
                                 user_auth_plugin=user_auth_plugin)

    req.environ['compute.context'] = ctx
    return self.application
def setUp(self): """Initialise variable common to all the test cases.""" super(TestAwsBackupDriver, self).setUp() self.ctx = context.RequestContext('fake', 'fake', is_admin=False) self.volume = fake_volume.fake_volume_obj(self.ctx) self.backup = fake_backup.fake_backup_obj(self.ctx) self.set_delete_flag = False self.set_create_flag = False self.driver = self._get_driver(self.ctx) self.fake_snap = {'SnapshotId': 'fake'} self.fake_ebs = {'VolumeId': 'fake'}
def __call__(self, req):
    user_id = req.headers.get('X_USER')
    user_id = req.headers.get('X_USER_ID', user_id)
    if user_id is None:
        LOG.debug("Neither X_USER_ID nor X_USER found in request")
        return webob.exc.HTTPUnauthorized()

    # get the roles
    roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')]

    if 'X_TENANT_ID' in req.headers:
        # This is the new header since Keystone went to ID/Name
        project_id = req.headers['X_TENANT_ID']
    else:
        # This is for legacy compatibility
        project_id = req.headers['X_TENANT']
    project_name = req.headers.get('X_TENANT_NAME')
    req_id = req.environ.get(request_id.ENV_REQUEST_ID)

    # Get the auth token
    auth_token = req.headers.get('X_AUTH_TOKEN',
                                 req.headers.get('X_STORAGE_TOKEN'))

    # Build a context, including the auth_token...
    remote_address = req.remote_addr

    service_catalog = None
    if req.headers.get('X_SERVICE_CATALOG') is not None:
        try:
            catalog_header = req.headers.get('X_SERVICE_CATALOG')
            service_catalog = jsonutils.loads(catalog_header)
        except ValueError:
            raise webob.exc.HTTPInternalServerError(
                explanation=_('Invalid service catalog json.'))

    if CONF.use_forwarded_for:
        remote_address = req.headers.get('X-Forwarded-For', remote_address)

    ctx = context.RequestContext(user_id,
                                 project_id,
                                 project_name=project_name,
                                 roles=roles,
                                 auth_token=auth_token,
                                 remote_address=remote_address,
                                 service_catalog=service_catalog,
                                 request_id=req_id)

    req.environ['jacket.context'] = ctx
    return self.application
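def _example_keystone_context_request():
    """Hedged sketch (not from the original source): build a webob request
    carrying the headers that keystonemiddleware.auth_token normally
    injects, suitable for driving either context-building ``__call__``
    above. All values are illustrative samples."""
    import webob
    req = webob.Request.blank('/v2/servers')
    req.headers.update({
        'X_USER_ID': 'user-1',
        'X_USER_NAME': 'demo',
        'X_TENANT_ID': 'proj-1',
        'X_TENANT_NAME': 'demo-project',
        'X_ROLE': 'admin,member',
        'X_AUTH_TOKEN': 'sample-token',  # opaque token placeholder
    })
    return req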
def _get_provider_instance(self, context=None, hybrid_instance=None):
    if not context:
        context = req_context.RequestContext(
            is_admin=True, project_id=hybrid_instance.project_id)
    provider_instance_id = self._get_provider_instance_id(
        context, hybrid_instance.uuid)
    if provider_instance_id:
        return self.os_novaclient(context).get_server(provider_instance_id)

    server = self.os_novaclient(context).get_server_by_caa_instance_id(
        hybrid_instance.uuid)
    if server is None:
        raise exception.EntityNotFound(entity='Server',
                                       name=hybrid_instance.uuid)
    return server
def extend_volume(self, volume, new_size):
    """Extend a volume."""
    context = req_context.RequestContext(is_admin=True,
                                         project_id=volume.project_id)
    try:
        sub_volume = self._get_provider_volume(context, volume)
    except exception.EntityNotFound:
        LOG.exception(_LE("volume(%s) not found in provider cloud!"),
                      volume.id)
        raise exception_ex.VolumeNotFoundAtProvider(volume_id=volume.id)

    # NOTE: python-cinderclient's Volume.extend() takes the volume as an
    # explicit first argument, hence the repeated sub_volume here.
    sub_volume.extend(sub_volume, new_size)
    self.os_cinderclient(context).check_extend_volume_complete(sub_volume)
    LOG.info(_LI("extend volume(%s) success!"), sub_volume.id)
def list_instance_uuids(self):
    """List VM instances from all nodes."""
    uuids = []
    try:
        context = req_context.RequestContext(is_admin=True,
                                             project_id='aws_default')
        servers = self.aws_client.get_aws_client(context)\
                      .describe_instances()
    except botocore.exceptions.ClientError as e:
        reason = e.response.get('Error', {}).get('Message', 'Unknown')
        LOG.warn('List instances failed, the error is: %s' % reason)
        return uuids
    for server in servers:
        server_id = server.get('InstanceId')
        uuids.append(server_id)
    LOG.debug('List_instance_uuids: %s' % uuids)
    return uuids
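def _example_clienterror_unpack(ec2_client):
    """Hedged sketch (not from the original source) of the ClientError
    unpacking pattern used throughout these drivers: botocore wraps AWS
    API errors in ClientError, whose .response dict carries the error
    Code and Message. ``ec2_client`` is assumed to be a boto3 EC2 client;
    the snapshot id is a sample value."""
    import botocore.exceptions
    try:
        ec2_client.delete_snapshot(SnapshotId='snap-0123')
    except botocore.exceptions.ClientError as e:
        code = e.response.get('Error', {}).get('Code', 'Unknown')
        message = e.response.get('Error', {}).get('Message', 'Unknown')
        return code, message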
def create_cloned_volume(self, volume, src_vref):
    """Create a clone of the specified volume."""
    context = req_context.RequestContext(is_admin=True,
                                         project_id=volume.project_id)
    snapshot = None
    try:
        src_vol = self._get_provider_volume_id(context, src_vref)
        snapshot = self._aws_client.get_aws_client(context).\
            create_snapshot(VolumeId=src_vol)
        provider_vol = self._create_volume(volume, context,
                                           snapshot=snapshot['SnapshotId'])
    except Exception as ex:
        LOG.error(_LE("create_cloned_volume failed! volume:%(id)s,"
                      "ex: %(ex)s"), {'id': volume.id, 'ex': ex})
        msg = (_("create_cloned_volume failed! volume:%s") % volume.id)
        raise cinder_ex.VolumeBackendAPIException(data=msg)
    finally:
        # The intermediate snapshot is only a staging artifact; drop it
        # whether or not the volume creation succeeded.
        if snapshot:
            self._aws_client.get_aws_client(context).\
                delete_snapshot(SnapshotId=snapshot['SnapshotId'])

    # create local volume mapper
    try:
        values = {'provider_volume_id': provider_vol['VolumeId']}
        self.caa_db_api.volume_mapper_create(context, volume.id,
                                             context.project_id, values)
    except Exception as ex:
        LOG.error(_LE("volume_mapper_create failed! vol:%(id)s,"
                      " ex = %(ex)s"), {'id': volume.id, 'ex': ex})
        self._aws_client.get_aws_client(context).\
            delete_volume(VolumeId=provider_vol['VolumeId'])
        msg = (_("volume_mapper_create failed! volume:%s") % volume.id)
        raise cinder_ex.VolumeBackendAPIException(data=msg)

    LOG.debug('create volume %s success.' % volume.id)
def setUp(self): """setUp method for server usage.""" super(ServerMigrationsSampleJsonTestV2_24, self).setUp() self.api.microversion = '2.24' self.uuid = self._post_server() self.context = context.RequestContext('fake', 'fake') fake_migration = { 'source_node': self.compute.host, 'dest_node': 'node10', 'source_compute': 'compute1', 'dest_compute': 'compute12', 'migration_type': 'live-migration', 'instance_uuid': self.uuid, 'status': 'running' } self.migration = compute.Migration(context=self.context, **fake_migration) self.migration.create()
def delete_volume(self, volume):
    context = req_context.RequestContext(is_admin=True,
                                         project_id=volume.project_id)
    try:
        sub_volume = self._get_provider_volume(context, volume)
    except exception.EntityNotFound:
        LOG.debug('no sub-volume exist, no need to delete sub volume')
        return

    LOG.debug('submit delete-volume task')
    sub_volume.delete()
    LOG.debug('wait for volume delete')
    self.os_cinderclient(context).check_delete_volume_complete(sub_volume)

    try:
        # delete volume mapper
        self.caa_db_api.volume_mapper_delete(context, volume.id,
                                             context.project_id)
    except Exception as ex:
        LOG.error(_LE("volume_mapper_delete failed! ex = %s"), ex)
def delete_snapshot(self, snapshot):
    """Delete a snapshot."""
    context = req_context.RequestContext(is_admin=True,
                                         project_id=snapshot.project_id)
    try:
        provider_snap = self._get_provider_snapshot_id(context, snapshot.id)
        if provider_snap:
            self._aws_client.get_aws_client(context).\
                delete_snapshot(SnapshotId=provider_snap)
        else:
            snapshots = self._get_provider_snapshot(context, snapshot.id)
            for snap in snapshots:
                self._aws_client.get_aws_client(context).\
                    delete_snapshot(SnapshotId=snap)
    except Exception as ex:
        LOG.error(_LE("delete snapshot failed! snapshot:%(id)s,"
                      "ex = %(ex)s"), {'id': snapshot.id, 'ex': ex})
        msg = (_("delete_snapshot failed! snapshot:%s") % snapshot.id)
        raise cinder_ex.VolumeBackendAPIException(data=msg)

    # delete snapshot mapper
    try:
        self.caa_db_api.volume_snapshot_mapper_delete(
            context, snapshot.id, context.project_id)
    except Exception as ex:
        LOG.error(_LE("delete snapshot mapper failed! snapshot:%(id)s,"
                      "ex = %(ex)s"), {'id': snapshot.id, 'ex': ex})
        msg = (_("delete_snapshot failed! snapshot:%s") % snapshot.id)
        raise cinder_ex.VolumeBackendAPIException(data=msg)

    LOG.info(_LI("delete snapshot(%s) success!"), snapshot.id)
def setUp(self):
    super(ServerMigrationsSamplesJsonTestV2_23, self).setUp()
    fake_context = context.RequestContext('fake', 'fake')

    self.mig1 = compute.Migration(context=fake_context,
                                  **self.fake_migrations[0])
    self.mig1.create()
    self.mig2 = compute.Migration(context=fake_context,
                                  **self.fake_migrations[1])
    self.mig2.create()

    fake_ins = fake_instance.fake_db_instance(uuid=self.UUID_1)
    # Strip the joined/read-only fields before building the object.
    fake_ins.pop("pci_devices")
    fake_ins.pop("security_groups")
    fake_ins.pop("services")
    fake_ins.pop("tags")
    fake_ins.pop("info_cache")
    fake_ins.pop("id")
    self.instance = compute.Instance(context=fake_context, **fake_ins)
    self.instance.create()
def delete_volume(self, volume):
    context = req_context.RequestContext(is_admin=True,
                                         project_id=volume.project_id)
    try:
        vol_id = self._get_provider_volume_id(context, volume)
        if not vol_id:
            volumes = self._get_provider_volume(context, volume.id)
            # If len(volumes) > 1 there must have been an error, so
            # delete all of the matching volumes.
            for vol in volumes:
                self._aws_client.get_aws_client(context).\
                    delete_volume(VolumeId=vol)
        else:
            self._aws_client.get_aws_client(context).\
                delete_volume(VolumeId=vol_id)
    except Exception as ex:
        LOG.error(_LE("delete volume failed! vol:%(id)s,ex = %(ex)s"),
                  {'id': volume.id, 'ex': ex})
        msg = (_("delete_volume failed! volume:%s") % volume.id)
        raise cinder_ex.VolumeBackendAPIException(data=msg)

    # delete volume mapper
    try:
        self.caa_db_api.volume_mapper_delete(context, volume.id,
                                             context.project_id)
    except Exception as ex:
        LOG.error(_LE("delete volume mapper failed! vol: %(id)s,"
                      "ex = %(ex)s"), {'id': volume.id, 'ex': ex})
        msg = (_("volume_mapper_delete failed! vol: %(id)s, ex: %(ex)s")
               % {'id': volume.id, 'ex': ex})
        raise cinder_ex.VolumeBackendAPIException(data=msg)
def delete(self, backup):
    """Delete a saved backup."""
    context = req_context.RequestContext(is_admin=True,
                                         project_id=backup.project_id)
    try:
        provider_snap = self._get_provider_backup_id(context, backup)
        if not provider_snap:
            snapshots = self._get_provider_snapshot(context, backup.id)
            # If len(snapshots) > 1 there must have been an error, so
            # delete all of the matching snapshots.
            for snapshot in snapshots:
                self._aws_client.get_aws_client(context).\
                    delete_snapshot(SnapshotId=snapshot)
        else:
            self._aws_client.get_aws_client(context).\
                delete_snapshot(SnapshotId=provider_snap)
    except Exception as ex:
        msg = (_LE("backup delete failed, backup_id: %(id)s, ex: %(ex)s")
               % {'id': backup.id, 'ex': ex})
        LOG.error(msg)
        raise cinder_ex.BackupOperationError(msg)

    # delete backup mapper
    try:
        self.caa_db_api.volume_backup_mapper_delete(
            context, backup.id, context.project_id)
    except Exception as ex:
        msg = (_LE("backup mapper delete failed, backup_id: %(id)s, "
                   "ex: %(ex)s") % {'id': backup.id, 'ex': ex})
        LOG.error(msg)
        raise cinder_ex.BackupOperationError(msg)

    LOG.info(_LI("delete backup(%s) success!"), backup.id)
def restore(self, backup, volume_id, volume_file):
    """Restore a saved backup."""
    context = req_context.RequestContext(is_admin=True,
                                         project_id=backup.project_id)
    volume = self.db.volume_get(context, volume_id)
    try:
        old_vol = self._get_provider_volume_id(context, volume)
        provider_snap = self._get_provider_backup_id(context, backup)
        vol = self._create_volume(volume, context, snapshot=provider_snap)
    except Exception as e:
        msg = (_LE("Restore failed, backup_id: %(id)s, ex: %(e)s")
               % {'id': backup.id, 'e': e})
        LOG.error(msg)
        raise cinder_ex.BackupOperationError(msg)

    # update local volume mapper
    try:
        values = {'provider_volume_id': vol['VolumeId']}
        self.caa_db_api.volume_mapper_update(context, volume.id,
                                             context.project_id, values)
    except Exception as ex:
        msg = (_("volume mapper update failed, backup_id: %(id)s, "
                 "ex: %(ex)s") % {'id': backup.id, 'ex': ex})
        LOG.error(msg)
        # The restored volume is unusable without a mapper entry, so
        # roll it back.
        self._aws_client.get_aws_client(context).\
            delete_volume(VolumeId=vol['VolumeId'])
        raise cinder_ex.BackupOperationError(msg)
    else:
        # Only drop the old provider volume once the mapper points at
        # the restored one.
        self._aws_client.get_aws_client(context).\
            delete_volume(VolumeId=old_vol)

    LOG.debug('restore volume %s success.' % volume.id)
def power_off(self, instance, timeout=0, retry_interval=0):
    """Power off the specified instance."""
    LOG.debug('Start to stop server: %s' % instance.uuid)
    try:
        project_id = instance.project_id
        context = req_context.RequestContext(is_admin=True,
                                             project_id=project_id)
        aws_instance_id = self._get_provider_instance_id(
            context, instance.uuid)
        if aws_instance_id:
            LOG.debug('Power off the instance %s on aws', aws_instance_id)
            instance_ids = [aws_instance_id]
            self.aws_client.get_aws_client(context)\
                .stop_instances(InstanceIds=instance_ids)
            LOG.debug('Stop server: %s success' % instance.uuid)
        else:
            LOG.error('Cannot get the aws_instance_id of %s'
                      % instance.uuid)
            raise exception.InstanceNotFound(instance_id=instance.uuid)
    except botocore.exceptions.ClientError as e:
        reason = e.response.get('Error', {}).get('Message', 'Unknown')
        LOG.error('Power off instance failed, the error is: %s' % reason)
        error_code = e.response.get('Error', {}).get('Code', 'Unknown')
        if error_code == 'InvalidInstanceID.NotFound':
            raise exception.InstanceNotFound(instance_id=instance.uuid)
        else:
            raise exception.InstancePowerOffFailure(reason=reason)
    except botocore.exceptions.WaiterError as e:
        reason = e.message
        LOG.warn('Cannot power_off instance, operation timed out')
        raise exception.InstancePowerOffFailure(reason=reason)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Error from power off instance. '
                          'Error=%(e)s'), {'e': e}, instance=instance)
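def _example_stop_and_wait(instance_id):
    """Hedged sketch (not from the original source) of the boto3 calls
    implied by power_off above: issue stop_instances, then block on the
    standard 'instance_stopped' waiter, which raises WaiterError on
    timeout -- the case power_off maps to InstancePowerOffFailure. The
    waiting is assumed to live inside the get_aws_client plumbing; the
    region and instance id here are illustrative."""
    import boto3
    ec2 = boto3.client('ec2', region_name='us-east-1')
    ec2.stop_instances(InstanceIds=[instance_id])
    ec2.get_waiter('instance_stopped').wait(InstanceIds=[instance_id])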
def _modify_volume(self, volume, new_size=None, new_type=None):
    context = req_context.RequestContext(is_admin=True,
                                         project_id=volume.project_id)
    snapshot = None
    try:
        old_vol = self._get_provider_volume_id(context, volume)
        snapshot = self._aws_client.get_aws_client(context).\
            create_snapshot(VolumeId=old_vol)
        provider_vol = self._create_volume(volume, context,
                                           snapshot=snapshot['SnapshotId'],
                                           new_type=new_type,
                                           new_size=new_size)
    except Exception as ex:
        msg = _("Modify Volume failed! Result: %s.") % ex
        raise cinder_ex.VolumeBackendAPIException(data=msg)
    finally:
        # The intermediate snapshot is only a staging artifact; drop it
        # whether or not the new volume was created.
        if snapshot:
            self._aws_client.get_aws_client(context).\
                delete_snapshot(SnapshotId=snapshot['SnapshotId'])

    # update local volume mapper
    try:
        values = {'provider_volume_id': provider_vol['VolumeId']}
        self.caa_db_api.volume_mapper_update(context, volume.id,
                                             context.project_id, values)
    except Exception as ex:
        LOG.error(_LE("volume_mapper_update failed! ex = %s"), ex)
        self._aws_client.get_aws_client(context).\
            delete_volume(VolumeId=provider_vol['VolumeId'])
        raise

    # The old provider volume is superseded; remove it last.
    self._aws_client.get_aws_client(context).\
        delete_volume(VolumeId=old_vol)
    LOG.debug('modify volume %s success.' % volume.id)
def create_volume(self, volume):
    context = req_context.RequestContext(is_admin=True,
                                         project_id=volume.project_id)
    try:
        provider_vol = self._create_volume(volume, context)
    except Exception:
        msg = (_("create_volume failed! volume:%s") % volume.id)
        raise cinder_ex.VolumeBackendAPIException(data=msg)

    # create local volume mapper
    try:
        values = {'provider_volume_id': provider_vol['VolumeId']}
        self.caa_db_api.volume_mapper_create(context, volume.id,
                                             context.project_id, values)
    except Exception as ex:
        msg = (_("volume_mapper_create failed! vol: %(id)s, ex: %(ex)s")
               % {'id': volume.id, 'ex': ex})
        self._aws_client.get_aws_client(context).\
            delete_volume(VolumeId=provider_vol['VolumeId'])
        raise cinder_ex.VolumeBackendAPIException(data=msg)

    LOG.debug('create volume %s success.' % volume.id)
def create_snapshot(self, snapshot):
    context = req_context.RequestContext(is_admin=True,
                                         project_id=snapshot.project_id)
    volume = snapshot.volume
    try:
        provider_vol = self._get_provider_volume_id(context, volume)
        provider_snap = self._create_snapshot(context, provider_vol,
                                              snapshot.id)
    except Exception as ex:
        LOG.error(_LE("create snapshot %(id)s failed! ex = %(ex)s"),
                  {'id': snapshot.id, 'ex': ex})
        msg = (_("create_snapshot failed! snapshot:%s") % snapshot.id)
        raise cinder_ex.VolumeBackendAPIException(data=msg)

    # create volume snapshot mapper
    try:
        values = {"provider_snapshot_id": provider_snap['SnapshotId']}
        self.caa_db_api.volume_snapshot_mapper_create(
            context, snapshot.id, context.project_id, values)
    except Exception as ex:
        LOG.error(_LE("create snapshot mapper failed! snapshot:%(id)s,"
                      "ex = %(ex)s"), {'id': snapshot.id, 'ex': ex})
        self._aws_client.get_aws_client(context).\
            delete_snapshot(SnapshotId=provider_snap['SnapshotId'])
        msg = (_("create_snapshot failed! snapshot:%s") % snapshot.id)
        raise cinder_ex.VolumeBackendAPIException(data=msg)

    LOG.info(_LI("create snapshot:%s success!"), snapshot.id)
def backup(self, backup, volume_file, backup_metadata=False):
    """Start a backup of a specified volume."""
    context = req_context.RequestContext(is_admin=True,
                                         project_id=backup.project_id)
    volume = self.db.volume_get(context, backup.volume_id)
    try:
        provider_vol = self._get_provider_volume_id(context, volume)
        provider_snap = self._create_snapshot(context, provider_vol,
                                              backup.id)
    except Exception as ex:
        msg = (_("Backup failed, backup_id: %(id)s, ex: %(ex)s")
               % {'id': backup.id, 'ex': ex})
        LOG.error(msg)
        raise cinder_ex.BackupOperationError(msg)

    # create volume backup mapper
    try:
        values = {"provider_backup_id": provider_snap['SnapshotId']}
        self.caa_db_api.volume_backup_mapper_create(
            context, backup.id, context.project_id, values)
    except Exception as ex:
        msg = (_("create backup mapper failed! backup: %(id)s, ex: %(ex)s")
               % {'id': backup.id, 'ex': ex})
        LOG.error(msg)
        self._aws_client.get_aws_client(context).\
            delete_snapshot(SnapshotId=provider_snap['SnapshotId'])
        raise cinder_ex.BackupOperationError(msg)

    LOG.info(_LI("create backup(%s) success!"), backup.id)