def _piped_execute(self, cmd1, cmd2):
    """Pipe output of cmd1 into cmd2.

    :returns: tuple of (cmd2 return code, cmd2 stderr)
    """
    LOG.debug("Piping cmd1='%s' into...", ' '.join(cmd1))
    LOG.debug("cmd2='%s'", ' '.join(cmd2))

    try:
        p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
    except OSError as e:
        LOG.error(_LE("Pipe1 failed - %s "), e)
        raise

    # NOTE(dosaboy): ensure that the pipe is blocking. This is to work
    # around the case where evenlet.green.subprocess is used which seems to
    # use a non-blocking pipe.
    flags = fcntl.fcntl(p1.stdout, fcntl.F_GETFL) & (~os.O_NONBLOCK)
    fcntl.fcntl(p1.stdout, fcntl.F_SETFL, flags)

    try:
        p2 = subprocess.Popen(cmd2, stdin=p1.stdout,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
    except OSError as e:
        LOG.error(_LE("Pipe2 failed - %s "), e)
        raise

    # Close our copy of p1's stdout so p1 gets SIGPIPE if p2 exits early.
    # NOTE(review): p1.stderr is never drained or waited on here -- a very
    # chatty cmd1 could block on a full stderr pipe; confirm acceptable.
    p1.stdout.close()
    stdout, stderr = p2.communicate()
    return p2.returncode, stderr
def _copy_image_to_volume(self, context, volume_ref, image_id,
                          image_location, image_service):
    """Downloads Glance image to the specified volume."""
    copy_image_to_volume = self.driver.copy_image_to_volume
    volume_id = volume_ref['id']
    LOG.debug("Attempting download of %(image_id)s (%(image_location)s)"
              " to volume %(volume_id)s.",
              {'image_id': image_id, 'volume_id': volume_id,
               'image_location': image_location})
    try:
        copy_image_to_volume(context, volume_ref, image_service, image_id)
    except processutils.ProcessExecutionError as ex:
        LOG.exception(_LE("Failed to copy image %(image_id)s to volume: "
                          "%(volume_id)s"),
                      {'volume_id': volume_id, 'image_id': image_id})
        raise exception.ImageCopyFailure(reason=ex.stderr)
    except exception.ImageUnacceptable as ex:
        LOG.exception(_LE("Failed to copy image to volume: %(volume_id)s"),
                      {'volume_id': volume_id})
        raise exception.ImageUnacceptable(ex)
    except Exception as ex:
        LOG.exception(_LE("Failed to copy image %(image_id)s to "
                          "volume: %(volume_id)s"),
                      {'volume_id': volume_id, 'image_id': image_id})
        # Re-raise ImageCopyFailure as-is; wrap anything else in it.
        if isinstance(ex, exception.ImageCopyFailure):
            raise
        raise exception.ImageCopyFailure(reason=ex)
    LOG.debug("Downloaded image %(image_id)s (%(image_location)s)"
              " to volume %(volume_id)s successfully.",
              {'image_id': image_id, 'volume_id': volume_id,
               'image_location': image_location})
def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
    """Creates a snapshot of a logical volume.

    :param name: Name to assign to new snapshot
    :param source_lv_name: Name of Logical Volume to snapshot
    :param lv_type: Type of LV (default or thin)
    """
    source_lvref = self.get_volume(source_lv_name)
    if source_lvref is None:
        LOG.error(_LE("Trying to create snapshot by non-existent LV: %s"),
                  source_lv_name)
        raise exception.VolumeDeviceNotFound(device=source_lv_name)

    cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '--name', name, '--snapshot',
                                '%s/%s' % (self.vg_name, source_lv_name)]
    # Thin snapshots inherit the pool's sizing; others need explicit -L.
    if lv_type != 'thin':
        size = source_lvref['size']
        cmd.extend(['-L', '%sg' % (size)])

    try:
        self._execute(*cmd, root_helper=self._root_helper,
                      run_as_root=True)
    except putils.ProcessExecutionError as err:
        LOG.exception(_LE('Error creating snapshot'))
        LOG.error(_LE('Cmd :%s'), err.cmd)
        LOG.error(_LE('StdOut :%s'), err.stdout)
        LOG.error(_LE('StdErr :%s'), err.stderr)
        raise
def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
    """Creates a snapshot of a logical volume.

    :param name: Name to assign to new snapshot
    :param source_lv_name: Name of Logical Volume to snapshot
    :param lv_type: Type of LV (default or thin)
    """
    source_lvref = self.get_volume(source_lv_name)
    if source_lvref is None:
        LOG.error(_LE("Trying to create snapshot by non-existent LV: %s"),
                  source_lv_name)
        raise exception.VolumeDeviceNotFound(device=source_lv_name)

    snapshot_target = '%s/%s' % (self.vg_name, source_lv_name)
    cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '--name', name,
                                '--snapshot', snapshot_target]
    # Only non-thin snapshots require an explicit size.
    if lv_type != 'thin':
        size = source_lvref['size']
        cmd.extend(['-L', '%sg' % (size)])

    try:
        self._execute(*cmd, root_helper=self._root_helper,
                      run_as_root=True)
    except putils.ProcessExecutionError as err:
        LOG.exception(_LE('Error creating snapshot'))
        LOG.error(_LE('Cmd :%s'), err.cmd)
        LOG.error(_LE('StdOut :%s'), err.stdout)
        LOG.error(_LE('StdErr :%s'), err.stderr)
        raise
def _create_cg_from_cgsnapshot(self, context, group, cgsnapshot):
    """Create volumes for a consistency group from a cgsnapshot.

    Creates one db volume entry per snapshot in the cgsnapshot (the
    backend volumes are created later by the rpcapi call), then asks the
    volume manager to build the group from the source snapshots.
    Destroys the group on any failure.
    """
    try:
        snapshots = storage.SnapshotList.get_all_for_cgsnapshot(
            context, cgsnapshot.id)
        if not snapshots:
            # FIX: corrected "Cgsnahost" typo in the user-facing message.
            msg = _("Cgsnapshot is empty. No consistency group "
                    "will be created.")
            raise exception.InvalidConsistencyGroup(reason=msg)
        for snapshot in snapshots:
            kwargs = {}
            kwargs['availability_zone'] = group.availability_zone
            kwargs['cgsnapshot'] = cgsnapshot
            kwargs['consistencygroup'] = group
            kwargs['snapshot'] = snapshot
            volume_type_id = snapshot.volume_type_id
            if volume_type_id:
                kwargs['volume_type'] = volume_types.get_volume_type(
                    context, volume_type_id)
            # Since cgsnapshot is passed in, the following call will
            # create a db entry for the volume, but will not call the
            # volume manager to create a real volume in the backend yet.
            # If error happens, taskflow will handle rollback of quota
            # and removal of volume entry in the db.
            try:
                self.volume_api.create(context, snapshot.volume_size,
                                       None, None, **kwargs)
            except exception.CinderException:
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE("Error occurred when creating volume "
                            "entry from snapshot in the process of "
                            "creating consistency group %(group)s "
                            "from cgsnapshot %(cgsnap)s."),
                        {'group': group.id, 'cgsnap': cgsnapshot.id})
    except Exception:
        with excutils.save_and_reraise_exception():
            try:
                group.destroy()
            finally:
                LOG.error(
                    _LE("Error occurred when creating consistency "
                        "group %(group)s from cgsnapshot "
                        "%(cgsnap)s."),
                    {'group': group.id, 'cgsnap': cgsnapshot.id})

    volumes = self.db.volume_get_all_by_group(context, group.id)
    for vol in volumes:
        # Update the host field for the volume.
        self.db.volume_update(context, vol['id'],
                              {'host': group.get('host')})

    self.jacket_rpcapi.create_consistencygroup_from_src(
        context, group, cgsnapshot)
def revert(self, context, result, flow_failures, volume_ref, **kwargs):
    """Handle create failure: either error out the volume or reschedule.

    :returns: True if a reschedule was issued, False otherwise.
    """
    # NOTE(dulek): Revert is occurring and manager need to know if
    # rescheduling happened. We're returning boolean flag that will
    # indicate that. It which will be available in flow engine store
    # through get_revert_result method.

    # If do not want to be rescheduled, just set the volume's status to
    # error and return.
    if not self.do_reschedule:
        common.error_out_volume(context, self.db, volume_ref.id)
        LOG.error(_LE("Volume %s: create failed"), volume_ref.id)
        return False

    # Check if we have a cause which can tell us not to reschedule and
    # set the volume's status to error.
    for failure in flow_failures.values():
        if failure.check(*self.no_reschedule_types):
            common.error_out_volume(context, self.db, volume_ref.id)
            LOG.error(_LE("Volume %s: create failed"), volume_ref.id)
            return False

    # FIX: ``cause`` must be bound unconditionally; previously it was only
    # assigned inside the reschedule_context branch, raising NameError in
    # the _reschedule() call whenever no reschedule context was set.
    cause = list(flow_failures.values())[0]

    # Use a different context when rescheduling.
    if self.reschedule_context:
        context = self.reschedule_context

    try:
        self._pre_reschedule(context, volume_ref)
        self._reschedule(context, cause, volume=volume_ref, **kwargs)
        self._post_reschedule(volume_ref)
        return True
    except exception.CinderException:
        LOG.exception(_LE("Volume %s: rescheduling failed"),
                      volume_ref.id)
    return False
def update_config_file(self, name, tid, path, config_auth):
    """Append the target configuration for a volume to the IET conf file.

    Creates an empty conf file first if one does not exist.
    """
    conf_file = self.iet_conf
    vol_id = name.split(':')[1]

    # If config file does not exist, create a blank conf file and
    # add configuration for the volume on the new file.
    if not os.path.exists(conf_file):
        try:
            utils.execute("truncate", conf_file, "--size=0",
                          run_as_root=True)
        except putils.ProcessExecutionError:
            LOG.exception(_LE("Failed to create %(conf)s for volume "
                              "id:%(vol_id)s"),
                          {'conf': conf_file, 'vol_id': vol_id})
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

    try:
        volume_conf = """
        Target %s
            %s
            Lun 0 Path=%s,Type=%s
""" % (name, config_auth, path, self._iotype(path))

        with utils.temporary_chown(conf_file):
            with open(conf_file, 'a+') as f:
                f.write(volume_conf)
    except Exception:
        LOG.exception(_LE("Failed to update %(conf)s for volume "
                          "id:%(vol_id)s"),
                      {'conf': conf_file, 'vol_id': vol_id})
        raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
def _create_cg_from_source_cg(self, context, group, source_cg):
    """Create volumes for a consistency group cloned from a source CG.

    Creates one db volume entry per source volume (backend volumes are
    created later by the rpcapi call), then asks the volume manager to
    build the group. Destroys the group on any failure.
    """
    try:
        source_vols = self.db.volume_get_all_by_group(context,
                                                      source_cg.id)
        if not source_vols:
            msg = _("Source CG is empty. No consistency group "
                    "will be created.")
            raise exception.InvalidConsistencyGroup(reason=msg)

        for source_vol in source_vols:
            kwargs = {}
            kwargs['availability_zone'] = group.availability_zone
            kwargs['source_cg'] = source_cg
            kwargs['consistencygroup'] = group
            kwargs['source_volume'] = source_vol
            volume_type_id = source_vol.get('volume_type_id')
            if volume_type_id:
                kwargs['volume_type'] = volume_types.get_volume_type(
                    context, volume_type_id)

            # Since source_cg is passed in, the following call will
            # create a db entry for the volume, but will not call the
            # volume manager to create a real volume in the backend yet.
            # If error happens, taskflow will handle rollback of quota
            # and removal of volume entry in the db.
            try:
                self.volume_api.create(context, source_vol['size'],
                                       None, None, **kwargs)
            except exception.CinderException:
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE("Error occurred when creating cloned "
                            "volume in the process of creating "
                            "consistency group %(group)s from "
                            "source CG %(source_cg)s."),
                        {'group': group.id, 'source_cg': source_cg.id})
    except Exception:
        with excutils.save_and_reraise_exception():
            try:
                group.destroy()
            finally:
                LOG.error(
                    _LE("Error occurred when creating consistency "
                        "group %(group)s from source CG "
                        "%(source_cg)s."),
                    {'group': group.id, 'source_cg': source_cg.id})

    volumes = self.db.volume_get_all_by_group(context, group.id)
    for vol in volumes:
        # Update the host field for the volume.
        self.db.volume_update(context, vol['id'], {'host': group.host})

    self.jacket_rpcapi.create_consistencygroup_from_src(
        context, group, None, source_cg)
def revert(self, context, result, flow_failures, **kwargs):
    """Mark the volume as errored after a failed manage operation."""
    # Restore the source volume status and set the volume to error status.
    volume_id = kwargs['volume_id']
    common.error_out_volume(context, self.db, volume_id)
    LOG.error(_LE("Volume %s: manage failed."), volume_id)

    # NOTE(review): flow_failures is indexed with -1 here; presumably the
    # taskflow failures container supports that key -- confirm.
    last_failure = flow_failures[-1]
    exc_info = False
    if all(last_failure.exc_info):
        exc_info = last_failure.exc_info
    LOG.error(_LE('Unexpected build error:'), exc_info=exc_info)
def revert(self, context, result, flow_failures, **kwargs):
    """Flag the managed volume as errored and log the build failure."""
    # Restore the source volume status and set the volume to error status.
    volume_id = kwargs['volume_id']
    common.error_out_volume(context, self.db, volume_id)
    LOG.error(_LE("Volume %s: manage failed."), volume_id)

    # NOTE(review): indexing flow_failures with -1 assumes a container
    # that accepts that key -- verify against the taskflow version used.
    failure = flow_failures[-1]
    exc_info = failure.exc_info if all(failure.exc_info) else False
    LOG.error(_LE('Unexpected build error:'), exc_info=exc_info)
def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
    """Remove an iSCSI target and scrub its entry from the IET conf file."""
    LOG.info(_LI("Removing iscsi_target for volume: %s"), vol_id)

    try:
        self._delete_logicalunit(tid, lun)
        session_info = self._find_sid_cid_for_target(tid, vol_name, vol_id)
        if session_info:
            sid, cid = session_info
            self._force_delete_target(tid, sid, cid)
        self._delete_target(tid)
    except putils.ProcessExecutionError:
        LOG.exception(_LE("Failed to remove iscsi target for volume "
                          "id:%s"), vol_id)
        raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

    vol_uuid_file = vol_name
    conf_file = self.iet_conf
    if not os.path.exists(conf_file):
        LOG.warning(_LW("Failed to update %(conf)s for volume id "
                        "%(vol_id)s after removing iscsi target. "
                        "%(conf)s does not exist."),
                    {'conf': conf_file, 'vol_id': vol_id})
        return

    try:
        with utils.temporary_chown(conf_file):
            with open(conf_file, 'r+') as iet_conf_text:
                full_txt = iet_conf_text.readlines()
                new_iet_conf_txt = []
                # Drop the Target line that mentions the volume plus the
                # two lines that follow it (auth and Lun entries).
                skip = 0
                for line in full_txt:
                    if skip > 0:
                        skip -= 1
                        continue
                    if vol_uuid_file in line:
                        skip = 2
                        continue
                    new_iet_conf_txt.append(line)

                iet_conf_text.seek(0)
                iet_conf_text.truncate(0)
                iet_conf_text.writelines(new_iet_conf_txt)
    except Exception:
        LOG.exception(_LE("Failed to update %(conf)s for volume id "
                          "%(vol_id)s after removing iscsi target"),
                      {'conf': conf_file, 'vol_id': vol_id})
        raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
def _heartbeat(self):
    """Send one heartbeat to the coordination backend.

    :returns: True on success, False on a generic tooz error.
    :raises: re-raises connection errors so the caller can reconnect.
    """
    try:
        self.coordinator.heartbeat()
    except coordination.ToozConnectionError:
        LOG.exception(_LE('Connection error while sending a heartbeat '
                          'to coordination backend.'))
        raise
    except coordination.ToozError:
        LOG.exception(_LE('Error sending a heartbeat to coordination '
                          'backend.'))
        return False
    return True
def execute(self, context, volume_ref, volume_spec):
    """Create the volume according to the creation type in volume_spec.

    Dispatches to the raw/snapshot/source-volume/replica/image creation
    helper, then persists any model update the driver returned.
    """
    volume_spec = dict(volume_spec)
    volume_id = volume_spec.pop('volume_id', None)
    if not volume_id:
        volume_id = volume_ref['id']

    # we can't do anything if the driver didn't init
    if not self.driver.initialized:
        driver_name = self.driver.__class__.__name__
        LOG.error(_LE("Unable to create volume. "
                      "Volume driver %s not initialized"), driver_name)
        raise exception.DriverNotInitialized()

    create_type = volume_spec.pop('type', None)
    LOG.info(_LI("Volume %(volume_id)s: being created as %(create_type)s "
                 "with specification: %(volume_spec)s"),
             {'volume_spec': volume_spec, 'volume_id': volume_id,
              'create_type': create_type})

    if create_type == 'raw':
        model_update = self._create_raw_volume(volume_ref=volume_ref,
                                               **volume_spec)
    elif create_type == 'snap':
        model_update = self._create_from_snapshot(context,
                                                  volume_ref=volume_ref,
                                                  **volume_spec)
    elif create_type == 'source_vol':
        model_update = self._create_from_source_volume(
            context, volume_ref=volume_ref, **volume_spec)
    elif create_type == 'source_replica':
        model_update = self._create_from_source_replica(
            context, volume_ref=volume_ref, **volume_spec)
    elif create_type == 'image':
        model_update = self._create_from_image(context,
                                               volume_ref=volume_ref,
                                               **volume_spec)
    else:
        raise exception.VolumeTypeNotFound(volume_type_id=create_type)

    # Persist any model information provided on creation.
    try:
        if model_update:
            volume_ref.update(model_update)
            volume_ref.save()
    except exception.CinderException:
        # If somehow the update failed we want to ensure that the
        # failure is logged (but not try rescheduling since the volume at
        # this point has been created).
        LOG.exception(_LE("Failed updating model of volume %(volume_id)s "
                          "with creation provided model %(model)s"),
                      {'volume_id': volume_id, 'model': model_update})
        raise
    return volume_ref
def revert(self, context, result, flow_failures, **kwargs):
    """Restore source volume status and error out the new volume."""
    # Nothing to undo if this task itself was the failure.
    if isinstance(result, ft.Failure):
        return

    # Restore the source volume status and set the volume to error status.
    volume_id = kwargs['volume_id']
    common.restore_source_status(context, self.db, kwargs)
    common.error_out_volume(context, self.db, volume_id)
    LOG.error(_LE("Volume %s: create failed"), volume_id)

    # NOTE(review): flow_failures[-1] assumes the container accepts that
    # key -- verify against the taskflow version in use.
    failure = flow_failures[-1]
    exc_info = failure.exc_info if all(failure.exc_info) else False
    LOG.error(_LE('Unexpected build error:'), exc_info=exc_info)
def rename_volume(self, lv_name, new_name):
    """Change the name of an existing volume."""
    try:
        self._execute('lvrename', self.vg_name, lv_name, new_name,
                      root_helper=self._root_helper,
                      run_as_root=True)
    except putils.ProcessExecutionError as err:
        # Log the full command context before propagating.
        LOG.exception(_LE('Error renaming logical volume'))
        LOG.error(_LE('Cmd :%s'), err.cmd)
        LOG.error(_LE('StdOut :%s'), err.stdout)
        LOG.error(_LE('StdErr :%s'), err.stderr)
        raise
def _heartbeat(self):
    """Emit a heartbeat; True on success, False on generic tooz errors.

    Connection errors are re-raised to let the caller trigger a
    reconnect.
    """
    try:
        self.coordinator.heartbeat()
        return True
    except coordination.ToozConnectionError:
        LOG.exception(_LE('Connection error while sending a heartbeat '
                          'to coordination backend.'))
        raise
    except coordination.ToozError:
        LOG.exception(_LE('Error sending a heartbeat to coordination '
                          'backend.'))
        return False
def deactivate_lv(self, name):
    """Deactivate the named logical volume via ``lvchange -a n``."""
    lv_path = self.vg_name + '/' + self._mangle_lv_name(name)
    cmd = ['lvchange', '-a', 'n', lv_path]
    try:
        self._execute(*cmd,
                      root_helper=self._root_helper,
                      run_as_root=True)
    except putils.ProcessExecutionError as err:
        LOG.exception(_LE('Error deactivating LV'))
        LOG.error(_LE('Cmd :%s'), err.cmd)
        LOG.error(_LE('StdOut :%s'), err.stdout)
        LOG.error(_LE('StdErr :%s'), err.stderr)
        raise
def deactivate_lv(self, name):
    """Turn off the given LV (``lvchange -a n <vg>/<lv>``)."""
    target = self.vg_name + '/' + self._mangle_lv_name(name)
    try:
        self._execute('lvchange', '-a', 'n', target,
                      root_helper=self._root_helper,
                      run_as_root=True)
    except putils.ProcessExecutionError as err:
        LOG.exception(_LE('Error deactivating LV'))
        LOG.error(_LE('Cmd :%s'), err.cmd)
        LOG.error(_LE('StdOut :%s'), err.stdout)
        LOG.error(_LE('StdErr :%s'), err.stderr)
        raise
def create_iscsi_target(self, name, tid, lun, path, chap_auth=None,
                        **kwargs):
    """Create an iSCSI target via storage-rtstool and persist it.

    ``tid`` and ``lun`` parameters are accepted for interface
    compatibility but not used; the tid is looked up after creation.
    """
    vol_id = name.split(':')[1]

    LOG.info(_LI('Creating iscsi_target for volume: %s'), vol_id)

    chap_auth_userid = ""
    chap_auth_password = ""
    if chap_auth is not None:
        (chap_auth_userid, chap_auth_password) = chap_auth

    optional_args = []
    if 'portals_port' in kwargs:
        optional_args.append('-p%s' % kwargs['portals_port'])
    if 'portals_ips' in kwargs:
        optional_args.append('-a' + ','.join(kwargs['portals_ips']))

    try:
        command_args = ['storage-rtstool',
                        'create',
                        path,
                        name,
                        chap_auth_userid,
                        chap_auth_password,
                        self.iscsi_protocol == 'iser'] + optional_args
        self._execute(*command_args, run_as_root=True)
    except putils.ProcessExecutionError:
        LOG.exception(_LE("Failed to create iscsi target for volume "
                          "id:%s."), vol_id)
        raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

    iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
    tid = self._get_target(iqn)
    if tid is None:
        LOG.error(_LE("Failed to create iscsi target for volume "
                      "id:%s."), vol_id)
        raise exception.NotFound()

    # We make changes persistent
    self._persist_configuration(vol_id)

    return tid
def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
    """Tear down the iSCSI target and drop its stanza from the conf file."""
    LOG.info(_LI("Removing iscsi_target for volume: %s"), vol_id)

    try:
        self._delete_logicalunit(tid, lun)
        session_info = self._find_sid_cid_for_target(tid, vol_name, vol_id)
        if session_info:
            sid, cid = session_info
            self._force_delete_target(tid, sid, cid)
        self._delete_target(tid)
    except putils.ProcessExecutionError:
        LOG.exception(_LE("Failed to remove iscsi target for volume "
                          "id:%s"), vol_id)
        raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

    vol_uuid_file = vol_name
    conf_file = self.iet_conf
    if os.path.exists(conf_file):
        try:
            with utils.temporary_chown(conf_file):
                with open(conf_file, 'r+') as iet_conf_text:
                    full_txt = iet_conf_text.readlines()
                    # Rewrite the file without the 3-line stanza
                    # (Target line + two following lines) that mentions
                    # this volume.
                    new_iet_conf_txt = []
                    count = 0
                    for line in full_txt:
                        if count > 0:
                            count -= 1
                        elif vol_uuid_file in line:
                            count = 2
                        else:
                            new_iet_conf_txt.append(line)

                    iet_conf_text.seek(0)
                    iet_conf_text.truncate(0)
                    iet_conf_text.writelines(new_iet_conf_txt)
        except Exception:
            LOG.exception(_LE("Failed to update %(conf)s for volume id "
                              "%(vol_id)s after removing iscsi target"),
                          {'conf': conf_file, 'vol_id': vol_id})
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
    else:
        LOG.warning(_LW("Failed to update %(conf)s for volume id "
                        "%(vol_id)s after removing iscsi target. "
                        "%(conf)s does not exist."),
                    {'conf': conf_file, 'vol_id': vol_id})
def rename_volume(self, lv_name, new_name):
    """Change the name of an existing volume."""
    cmd = ('lvrename', self.vg_name, lv_name, new_name)
    try:
        self._execute(*cmd,
                      root_helper=self._root_helper,
                      run_as_root=True)
    except putils.ProcessExecutionError as err:
        LOG.exception(_LE('Error renaming logical volume'))
        LOG.error(_LE('Cmd :%s'), err.cmd)
        LOG.error(_LE('StdOut :%s'), err.stdout)
        LOG.error(_LE('StdErr :%s'), err.stderr)
        raise
def _set_volume_state_and_notify(self, method, updates, context, ex,
                                 request_spec, msg=None):
    """Persist the error state for the volume and notify the scheduler.

    Logs ``msg`` (or a default built from ``method``/``ex``), updates the
    volume row when an id is present, and emits a scheduler.* error
    notification.
    """
    # TODO(harlowja): move into a task that just does this later.
    if not msg:
        msg = (_LE("Failed to schedule_%(method)s: %(ex)s") %
               {'method': method, 'ex': six.text_type(ex)})
    LOG.error(msg)

    volume_state = updates['volume_state']
    properties = request_spec.get('volume_properties', {})

    volume_id = request_spec.get('volume_id', None)
    if volume_id:
        db.volume_update(context, volume_id, volume_state)

    payload = dict(request_spec=request_spec,
                   volume_properties=properties,
                   volume_id=volume_id,
                   state=volume_state,
                   method=method,
                   reason=ex)
    rpc.get_notifier("scheduler").error(context,
                                        'scheduler.' + method,
                                        payload)
def _diff_restore_rbd(self, base_name, restore_file, restore_name,
                      restore_point, restore_length):
    """Attempt restore rbd volume from backup using diff transfer."""
    rbd_user = restore_file.rbd_user
    rbd_pool = restore_file.rbd_pool
    rbd_conf = restore_file.rbd_conf

    LOG.debug("Attempting incremental restore from base='%(base)s' "
              "snap='%(snap)s'",
              {'base': base_name, 'snap': restore_point})
    before = time.time()
    try:
        self._rbd_diff_transfer(base_name, self._ceph_backup_pool,
                                restore_name, rbd_pool,
                                src_user=self._ceph_backup_user,
                                src_conf=self._ceph_backup_conf,
                                dest_user=rbd_user, dest_conf=rbd_conf,
                                src_snap=restore_point)
    except exception.BackupRBDOperationFailed:
        LOG.exception(_LE("Differential restore failed, trying full "
                          "restore"))
        raise

    # If the volume we are restoring to is larger than the backup volume,
    # we will need to resize it after the diff import since import-diff
    # appears to shrink the target rbd volume to the size of the original
    # backup volume.
    self._check_restore_vol_size(base_name, restore_name, restore_length,
                                 rbd_pool)

    LOG.debug("Restore transfer completed in %.4fs",
              (time.time() - before))
def _open_volume_with_path(path, mode):
    """Open the volume device at *path*, chowning it temporarily.

    Returns the open file handle, or None (implicitly) when opening
    fails -- the failure is only logged.
    """
    try:
        with utils.temporary_chown(path):
            return open(path, mode)
    except Exception:
        LOG.error(_LE("Failed to open volume from %(path)s."),
                  {'path': path})
def _set_volume_state_and_notify(self, method, updates, context, ex,
                                 request_spec, msg=None):
    """Record a scheduling failure on the volume and notify listeners."""
    # TODO(harlowja): move into a task that just does this later.
    if not msg:
        msg = (_LE("Failed to schedule_%(method)s: %(ex)s") %
               {'method': method, 'ex': six.text_type(ex)})
    LOG.error(msg)

    volume_state = updates['volume_state']
    properties = request_spec.get('volume_properties', {})
    volume_id = request_spec.get('volume_id', None)

    # Only volumes that already have a db row can be updated.
    if volume_id:
        db.volume_update(context, volume_id, volume_state)

    payload = dict(request_spec=request_spec,
                   volume_properties=properties,
                   volume_id=volume_id,
                   state=volume_state,
                   method=method,
                   reason=ex)
    notifier = rpc.get_notifier("scheduler")
    notifier.error(context, 'scheduler.' + method, payload)
def _update_object(context, db, status, reason, object_type, object_id):
    """Best-effort status update for a volume or snapshot.

    Any CinderException raised by the update is logged and swallowed so
    it cannot mask the error being handled by the caller.
    """
    update = {
        'status': status,
    }
    try:
        LOG.debug('Updating %(object_type)s: %(object_id)s with %(update)s'
                  ' due to: %(reason)s',
                  {'object_type': object_type, 'object_id': object_id,
                   'reason': reason, 'update': update})
        if object_type == 'volume':
            db.volume_update(context, object_id, update)
        elif object_type == 'snapshot':
            snapshot = storage.Snapshot.get_by_id(context, object_id)
            snapshot.update(update)
            snapshot.save()
    except exception.CinderException:
        # Don't let this cause further exceptions.
        LOG.exception(_LE("Failed updating %(object_type)s %(object_id)s with"
                          " %(update)s"),
                      {'object_type': object_type, 'object_id': object_id,
                       'update': update})
def get_key(self, ctxt, key_id,
            payload_content_type='application/octet-stream'):
    """Retrieves the specified key.

    :param ctxt: contains information of the user and the environment for
                 the request (storage/context.py)
    :param key_id: the UUID of the key to retrieve
    :param payload_content_type: The format/type of the secret data
    :return: SymmetricKey representation of the key
    :throws Exception: if key retrieval fails
    """
    try:
        # FIX: ``barbican_client`` was referenced without ever being
        # assigned, raising NameError on every call; fetch the client
        # first, as the sibling create_key/copy_key methods do.
        barbican_client = self._get_barbican_client(ctxt)
        secret_ref = self._create_secret_ref(key_id, barbican_client)
        secret = self._get_secret(ctxt, secret_ref)
        secret_data = self._get_secret_data(secret, payload_content_type)
        if payload_content_type == 'application/octet-stream':
            # convert decoded string to list of unsigned ints for each byte
            key_data = array.array('B',
                                   base64.b64decode(secret_data)).tolist()
        else:
            key_data = secret_data
        key = keymgr_key.SymmetricKey(secret.algorithm, key_data)
        return key
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Error getting key."))
def create_key(self, ctxt, expiration=None, name='Cinder Volume Key',
               payload_content_type='application/octet-stream',
               mode='CBC', algorithm='AES', length=256):
    """Creates a key.

    :param ctxt: contains information of the user and the environment
                 for the request (storage/context.py)
    :param expiration: the date the key will expire
    :param name: a friendly name for the secret
    :param payload_content_type: the format/type of the secret data
    :param mode: the algorithm mode (e.g. CBC or CTR mode)
    :param algorithm: the algorithm associated with the secret
    :param length: the bit length of the secret
    :return: the UUID of the new key
    :throws Exception: if key creation fails
    """
    barbican_client = self._get_barbican_client(ctxt)
    try:
        key_order = barbican_client.orders.create_key(
            name, algorithm, length, mode, payload_content_type,
            expiration)
        order_ref = key_order.submit()
        order = barbican_client.orders.get(order_ref)
        # The secret UUID is the last path segment of the secret ref.
        return order.secret_ref.rpartition('/')[2]
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Error creating key."))
def _hybrid_cloud_image_volume(self, context, volume_ref, image_location,
                               image_id, image_service):
    """Download an image into the volume when backed by a cloud driver.

    :returns: (model_update, handled) -- (None, True) when this method
              performed the copy, (None, False) when the driver is not a
              cloud driver and the caller should fall through.
    """
    if not self.driver.CLOUD_DRIVER:
        return None, False

    model_update = {'provider_location': 'SUB-FusionSphere'}
    updates = dict(model_update, status='downloading')
    try:
        volume_ref = self.db.volume_update(context,
                                           volume_ref['id'], updates)
        volume_ref = storage.Volume.get_by_id(context, volume_ref['id'])
    except exception.CinderException:
        LOG.exception(_LE("Failed updating volume %(volume_id)s with "
                          "%(updates)s"),
                      {'volume_id': volume_ref['id'],
                       'updates': updates})

    # NOTE(laoyi) whether need to image sync
    run_api = worker_api.API()
    if jacket_utils.is_image_sync(context, image_id):
        run_api.image_sync(context, image_id)
    self.check_image_sync_complete(context, run_api, image_id)

    self._copy_image_to_volume(context, volume_ref, image_id,
                               image_location, image_service)
    return None, True
def _do_iscsi_discovery(self, volume):
    """Discover the iSCSI target for *volume* via ``iscsiadm -m discovery``.

    :returns: the matching target line, or None when discovery fails or
              no target matches the volume id.
    """
    # TODO(justinsb): Deprecate discovery and use stored info
    # NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
    LOG.warning(_LW("ISCSI provider_location not stored, using discovery"))

    volume_id = volume['id']

    try:
        # NOTE(griff) We're doing the split straight away which should be
        # safe since using '@' in hostname is considered invalid
        (out, _err) = utils.execute('iscsiadm', '-m', 'discovery',
                                    '-t', 'sendtargets', '-p',
                                    volume['host'].split('@')[0],
                                    run_as_root=True)
    except processutils.ProcessExecutionError as ex:
        # FIX: use lazy %-args instead of eager string interpolation,
        # consistent with logging usage elsewhere in this module.
        LOG.error(_LE("ISCSI discovery attempt failed for:%s"),
                  volume['host'].split('@')[0])
        LOG.debug("Error from iscsiadm -m discovery: %s", ex.stderr)
        return None

    for target in out.splitlines():
        if (self.configuration.safe_get('iscsi_ip_address') in target
                and volume_id in target):
            return target
    return None
def is_supported_firmware(self):
    """Check firmware version is v6.4 or higher.

    This API checks if the firmware version per the plug-in support level.
    This only checks major and minor version.

    :returns: True when the reported Fabric OS version is above 6.3,
              False when the CLI produced no usable output.
    :raises BrocadeZoningCliException: when the ssh command fails.
    """
    cmd = ['version']
    firmware = 0
    try:
        stdout, stderr = self._execute_shell_cmd(cmd)
        if stdout:
            for line in stdout:
                if 'Fabric OS: v' in line:
                    LOG.debug("Firmware version string: %s", line)
                    ver = line.split('Fabric OS: v')[1].split('.')
                    if ver:
                        # e.g. '7' + '4' -> 74; compared against 63 (v6.4)
                        firmware = int(ver[0] + ver[1])
            # FIX: return after scanning all lines so output lacking a
            # version line yields False rather than an implicit None.
            return firmware > 63
        else:
            LOG.error(_LE("No CLI output for firmware version check"))
            return False
    except processutils.ProcessExecutionError as e:
        msg = _("Error while getting data via ssh: (command=%(cmd)s "
                "error=%(err)s).") % {'cmd': cmd,
                                      'err': six.text_type(e)}
        LOG.error(msg)
        raise exception.BrocadeZoningCliException(reason=msg)
def revert(self, context, volume_id, result, **kwargs):
    """Error out the volume unless this task failed or errors are muted."""
    if isinstance(result, ft.Failure) or not self.set_error:
        return

    reason = _('Volume create failed while extracting volume ref.')
    common.error_out_volume(context, self.db, volume_id, reason=reason)
    LOG.error(_LE("Volume %s: create failed"), volume_id)
def restore(self, backup, volume_id, volume_file):
    """Restore volume from backup in Ceph object store.

    If volume metadata is available this will also be restored.
    """
    target_volume = self.db.volume_get(self.context, volume_id)
    LOG.debug('Starting restore from Ceph backup=%(src)s to '
              'volume=%(dest)s',
              {'src': backup['id'], 'dest': target_volume['name']})

    try:
        self._restore_volume(backup, target_volume, volume_file)

        # Be tolerant of IO implementations that do not support fileno()
        try:
            fileno = volume_file.fileno()
        except IOError:
            LOG.debug("Restore target I/O object does not support "
                      "fileno() - skipping call to fsync().")
        else:
            os.fsync(fileno)

        self._restore_metadata(backup, volume_id)

        LOG.debug('Restore to volume %s finished successfully.',
                  volume_id)
    except exception.BackupOperationError as e:
        LOG.error(_LE('Restore to volume %(volume)s finished with error - '
                      '%(error)s.'), {'error': e, 'volume': volume_id})
        raise
def _create_from_snapshot(self, context, volume_ref, snapshot_id,
                          **kwargs):
    """Create the volume from a snapshot and propagate bootable metadata."""
    volume_id = volume_ref['id']
    snapshot = storage.Snapshot.get_by_id(context, snapshot_id)
    model_update = self.driver.create_volume_from_snapshot(volume_ref,
                                                           snapshot)
    # NOTE(harlowja): Subtasks would be useful here since after this
    # point the volume has already been created and further failures
    # will not destroy the volume (although they could in the future).
    make_bootable = False
    try:
        originating_vref = storage.Volume.get_by_id(context,
                                                    snapshot.volume_id)
        make_bootable = originating_vref.bootable
    except exception.CinderException as ex:
        LOG.exception(_LE("Failed fetching snapshot %(snapshot_id)s "
                          "bootable"
                          " flag using the provided glance snapshot "
                          "%(snapshot_ref_id)s volume reference"),
                      {'snapshot_id': snapshot_id,
                       'snapshot_ref_id': snapshot.volume_id})
        raise exception.MetadataUpdateFailure(reason=ex)
    if make_bootable:
        self._handle_bootable_volume_glance_meta(
            context, volume_id, snapshot_id=snapshot_id)
    return model_update
def create(context, name, specs=None):
    """Creates qos_specs.

    :param specs: dictionary that contains specifications for QoS
          e.g. {'consumer': 'front-end',
                'total_iops_sec': 1000,
                'total_bytes_sec': 1024000}
    """
    _verify_prepare_qos_specs(specs)

    values = dict(name=name, qos_specs=specs)
    LOG.debug("Dict for qos_specs: %s", values)

    try:
        qos_specs_ref = db.qos_specs_create(context, values)
    except db_exc.DBDataError:
        msg = _('Error writing field to database')
        LOG.exception(msg)
        raise exception.Invalid(msg)
    except db_exc.DBError:
        LOG.exception(_LE('DB error:'))
        raise exception.QoSSpecsCreateFailed(name=name, qos_specs=specs)
    return qos_specs_ref
def _get_target(self, iqn): # Find existing iSCSI target session from /proc/net/iet/session # # tid:2 name:iqn.2010-10.org:volume-222 # sid:562950561399296 initiator:iqn.1994-05.com:5a6894679665 # cid:0 ip:192.168.122.1 state:active hd:none dd:none # tid:1 name:iqn.2010-10.org:volume-111 # sid:281475567911424 initiator:iqn.1994-05.com:5a6894679665 # cid:0 ip:192.168.122.1 state:active hd:none dd:none iscsi_target = 0 try: with open(self.iet_sessions, 'r') as f: sessions = f.read() except Exception: LOG.exception(_LE("Failed to open iet session list for %s"), iqn) raise session_list = re.split('^tid:(?m)', sessions)[1:] for ses in session_list: m = re.match('(\d+) name:(\S+)\s+', ses) if m and iqn in m.group(2): return m.group(1) return iscsi_target
def copy_key(self, ctxt, key_id):
    """Copies (i.e., clones) a key stored by barbican.

    :param ctxt: contains information of the user and the environment
                 for the request (storage/context.py)
    :param key_id: the UUID of the key to copy
    :return: the UUID of the key copy
    :throws Exception: if key copying fails
    """
    barbican_client = self._get_barbican_client(ctxt)

    try:
        secret_ref = self._create_secret_ref(key_id, barbican_client)
        secret = self._get_secret(ctxt, secret_ref)
        con_type = secret.content_types['default']
        secret_data = self._get_secret_data(secret,
                                            payload_content_type=con_type)
        key = keymgr_key.SymmetricKey(secret.algorithm, secret_data)
        # Store a new secret with identical attributes to the original.
        copy_uuid = self.store_key(ctxt, key, secret.expiration,
                                   secret.name, con_type, 'base64',
                                   secret.algorithm, secret.bit_length,
                                   secret.mode, True)
        return copy_uuid
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Error copying key."))
def execute(self, context, volume, volume_spec):
    """Mark the volume as launched and notify via the parent task."""
    update = {
        'status': self.status_translation.get(volume_spec.get('status'),
                                              'available'),
        'launched_at': timeutils.utcnow(),
    }
    try:
        # TODO(harlowja): is it acceptable to only log if this fails??
        # or are there other side-effects that this will cause if the
        # status isn't updated correctly (aka it will likely be stuck in
        # 'creating' if this fails)??
        volume.update(update)
        volume.save()
        # Now use the parent to notify.
        super(CreateVolumeOnFinishTask, self).execute(context, volume)
    except exception.CinderException:
        LOG.exception(_LE("Failed updating volume %(volume_id)s with "
                          "%(update)s"),
                      {'volume_id': volume.id, 'update': update})
    # Even if the update fails, the volume is ready.
    LOG.info(_LI("Volume %(volume_name)s (%(volume_id)s): "
                 "created successfully"),
             {'volume_name': volume_spec['volume_name'],
              'volume_id': volume.id})
def _open_volume_with_path(path, mode):
    """Open the device at ``path`` under a temporary chown.

    Returns the open file handle, or None (after logging) on failure.
    """
    try:
        with utils.temporary_chown(path):
            return open(path, mode)
    except Exception:
        LOG.error(_LE("Failed to open volume from %(path)s."),
                  {'path': path})
def _pre_reschedule(self, context, volume):
    """Actions that happen before the rescheduling attempt occur here."""

    try:
        # Update volume's timestamp and host.
        #
        # NOTE(harlowja): this is awkward to be done here, shouldn't
        # this happen at the scheduler itself and not before it gets
        # sent to the scheduler? (since what happens if it never gets
        # there??). It's almost like we need a status of 'on-the-way-to
        # scheduler' in the future.
        # We don't need to update the volume's status to creating, since
        # we haven't changed it to error.
        update = {'scheduled_at': timeutils.utcnow(), 'host': None}
        LOG.debug("Updating volume %(volume_id)s with %(update)s.",
                  {'update': update, 'volume_id': volume.id})
        volume.update(update)
        volume.save()
    except exception.CinderException:
        # Don't let updating the state cause the rescheduling to fail.
        LOG.exception(_LE("Volume %s: update volume state failed."),
                      volume.id)
def _load_file(self, handle):
    """Decode the JSON file. Broken out for testing."""
    try:
        options = json.load(handle)
    except ValueError:
        # Malformed JSON: fall back to an empty options mapping.
        LOG.exception(_LE("Could not decode scheduler options."))
        options = {}
    return options
def associate_qos_with_type(context, specs_id, type_id):
    """Associate qos_specs with volume type.

    Associate target qos specs with specific volume type. Would raise
    following exceptions:
        VolumeTypeNotFound  - if volume type doesn't exist;
        QoSSpecsNotFound  - if qos specs doesn't exist;
        InvalidVolumeType  - if volume type is already associated with
        qos specs other than given one.
        QoSSpecsAssociateFailed -  if there was general DB error
    :param specs_id: qos specs ID to associate with
    :param type_id: volume type ID to associate with
    """
    try:
        get_qos_specs(context, specs_id)
        res = volume_types.get_volume_type_qos_specs(type_id)
        qos_specs = res.get('qos_specs', None)
        if not qos_specs:
            # No existing association: create one.
            db.qos_specs_associate(context, specs_id, type_id)
        elif qos_specs.get('id') != specs_id:
            # Already bound to a *different* qos specs: refuse.
            msg = (_("Type %(type_id)s is already associated with another "
                     "qos specs: %(qos_specs_id)s") %
                   {'type_id': type_id,
                    'qos_specs_id': qos_specs['id']})
            raise exception.InvalidVolumeType(reason=msg)
    except db_exc.DBError:
        LOG.exception(_LE('DB error:'))
        LOG.warning(_LW('Failed to associate qos specs '
                        '%(id)s with type: %(vol_type_id)s'),
                    dict(id=specs_id, vol_type_id=type_id))
        raise exception.QoSSpecsAssociateFailed(specs_id=specs_id,
                                                type_id=type_id)
def _verify_rtstool(self):
    """Check that storage-rtstool is usable; re-raise when it is not."""
    try:
        # No locking is needed for this verification call.
        utils.execute('storage-rtstool', 'verify')
    except (putils.ProcessExecutionError, OSError):
        LOG.error(_LE('storage-rtstool is not installed correctly'))
        raise
def associate_qos_with_type(context, specs_id, type_id):
    """Associate qos_specs with volume type.

    Associate target qos specs with specific volume type. Would raise
    following exceptions:
        VolumeTypeNotFound  - if volume type doesn't exist;
        QoSSpecsNotFound  - if qos specs doesn't exist;
        InvalidVolumeType  - if volume type is already associated with
        qos specs other than given one.
        QoSSpecsAssociateFailed -  if there was general DB error
    :param specs_id: qos specs ID to associate with
    :param type_id: volume type ID to associate with
    """
    try:
        # Raises QoSSpecsNotFound when the specs do not exist.
        get_qos_specs(context, specs_id)
        res = volume_types.get_volume_type_qos_specs(type_id)
        if not res.get('qos_specs'):
            db.qos_specs_associate(context, specs_id, type_id)
        elif res['qos_specs'].get('id') != specs_id:
            raise exception.InvalidVolumeType(
                reason=_("Type %(type_id)s is already associated with "
                         "another qos specs: %(qos_specs_id)s") %
                {'type_id': type_id,
                 'qos_specs_id': res['qos_specs']['id']})
    except db_exc.DBError:
        LOG.exception(_LE('DB error:'))
        LOG.warning(_LW('Failed to associate qos specs '
                        '%(id)s with type: %(vol_type_id)s'),
                    dict(id=specs_id, vol_type_id=type_id))
        raise exception.QoSSpecsAssociateFailed(specs_id=specs_id,
                                                type_id=type_id)
def _get_device_number(self, path):
    """Return the blkdev major/minor for ``path``; None on failure."""
    try:
        dev_number = utils.get_blkdev_major_minor(path)
    except exception.Error as err:
        LOG.error(_LE('Failed to get device number for throttling: '
                      '%(error)s'), {'error': err})
        return None
    return dev_number
def create(self, context, volume_id, display_name):
    """Creates an entry in the transfers table."""
    volume_api.check_policy(context, 'create_transfer')
    LOG.info(_LI("Generating transfer record for volume %s"), volume_id)
    vol_ref = self.db.volume_get(context, volume_id)
    if vol_ref['status'] != "available":
        raise exception.InvalidVolume(reason=_("status must be available"))

    volume_utils.notify_about_volume_usage(context, vol_ref,
                                           "transfer.create.start")
    # The salt is just a short random string.
    salt = self._get_random_string(CONF.volume_transfer_salt_length)
    auth_key = self._get_random_string(CONF.volume_transfer_key_length)
    crypt_hash = self._get_crypt_hash(salt, auth_key)

    # TODO(ollie): Transfer expiry needs to be implemented.
    record = {'volume_id': volume_id,
              'display_name': display_name,
              'salt': salt,
              'crypt_hash': crypt_hash,
              'expires_at': None}

    try:
        transfer = self.db.transfer_create(context, record)
    except Exception:
        LOG.error(_LE("Failed to create transfer record "
                      "for %s"), volume_id)
        raise
    volume_utils.notify_about_volume_usage(context, vol_ref,
                                           "transfer.create.end")
    return {'id': transfer['id'],
            'volume_id': transfer['volume_id'],
            'display_name': transfer['display_name'],
            'auth_key': auth_key,
            'created_at': transfer['created_at']}
def get_key(self, ctxt, key_id,
            payload_content_type='application/octet-stream'):
    """Retrieves the specified key.

    :param ctxt: contains information of the user and the environment for
                 the request (storage/context.py)
    :param key_id: the UUID of the key to retrieve
    :param payload_content_type: The format/type of the secret data
    :return: SymmetricKey representation of the key
    :throws Exception: if key retrieval fails
    """
    # BUG FIX: barbican_client was referenced below without ever being
    # assigned, so every call raised NameError. Initialize it the same
    # way the sibling methods (create_key, copy_key, store_key) do.
    barbican_client = self._get_barbican_client(ctxt)

    try:
        secret_ref = self._create_secret_ref(key_id, barbican_client)
        secret = self._get_secret(ctxt, secret_ref)
        secret_data = self._get_secret_data(secret,
                                            payload_content_type)
        if payload_content_type == 'application/octet-stream':
            # convert decoded string to list of unsigned ints for each byte
            key_data = array.array('B',
                                   base64.b64decode(secret_data)).tolist()
        else:
            key_data = secret_data
        key = keymgr_key.SymmetricKey(secret.algorithm, key_data)
        return key
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Error getting key."))
def _verify_rtstool(self):
    """Verify the storage-rtstool installation, re-raising on failure."""
    try:
        # This call doesn't need locking.
        utils.execute('storage-rtstool', 'verify')
    except (OSError, putils.ProcessExecutionError):
        LOG.error(_LE('storage-rtstool is not installed correctly'))
        raise
def create_key(self, ctxt, expiration=None, name='Cinder Volume Key',
               payload_content_type='application/octet-stream', mode='CBC',
               algorithm='AES', length=256):
    """Creates a key.

    :param ctxt: contains information of the user and the environment
                 for the request (storage/context.py)
    :param expiration: the date the key will expire
    :param name: a friendly name for the secret
    :param payload_content_type: the format/type of the secret data
    :param mode: the algorithm mode (e.g. CBC or CTR mode)
    :param algorithm: the algorithm associated with the secret
    :param length: the bit length of the secret
    :return: the UUID of the new key
    :throws Exception: if key creation fails
    """
    barbican_client = self._get_barbican_client(ctxt)

    try:
        key_order = barbican_client.orders.create_key(
            name, algorithm, length, mode, payload_content_type,
            expiration)
        order = barbican_client.orders.get(key_order.submit())
        # The secret UUID is the final path segment of the secret ref.
        return order.secret_ref.rpartition('/')[2]
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Error creating key."))
def copy_key(self, ctxt, key_id):
    """Copies (i.e., clones) a key stored by barbican.

    :param ctxt: contains information of the user and the environment for
                 the request (storage/context.py)
    :param key_id: the UUID of the key to copy
    :return: the UUID of the key copy
    :throws Exception: if key copying fails
    """
    barbican_client = self._get_barbican_client(ctxt)

    try:
        ref = self._create_secret_ref(key_id, barbican_client)
        secret = self._get_secret(ctxt, ref)
        default_type = secret.content_types['default']
        data = self._get_secret_data(secret,
                                     payload_content_type=default_type)
        key = keymgr_key.SymmetricKey(secret.algorithm, data)
        copy_uuid = self.store_key(ctxt, key, secret.expiration,
                                   secret.name, default_type, 'base64',
                                   secret.algorithm, secret.bit_length,
                                   secret.mode, True)
        return copy_uuid
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Error copying key."))
def validate_connector(self, connector):
    """Ensure the connector carries the iSCSI initiator name."""
    # NOTE(jdg): api passes in connector which is initiator info
    if 'initiator' in connector:
        return True
    err_msg = (_LE('The volume driver requires the iSCSI initiator '
                   'name in the connector.'))
    LOG.error(err_msg)
    raise exception.InvalidConnectorException(missing='initiator')
def store_key(self, ctxt, key, expiration=None, name='Cinder Volume Key',
              payload_content_type='application/octet-stream',
              payload_content_encoding='base64', algorithm='AES',
              bit_length=256, mode='CBC', from_copy=False):
    """Stores (i.e., registers) a key with the key manager.

    :param ctxt: contains information of the user and the environment for
                 the request (storage/context.py)
    :param key: the unencrypted secret data. Known as "payload" to the
                barbicanclient api
    :param expiration: the expiration time of the secret in ISO 8601
                       format
    :param name: a friendly name for the key
    :param payload_content_type: the format/type of the secret data
    :param payload_content_encoding: the encoding of the secret data
    :param algorithm: the algorithm associated with this secret key
    :param bit_length: the bit length of this secret key
    :param mode: the algorithm mode used with this secret key
    :param from_copy: establishes whether the function is being used
                      to copy a key. In case of the latter, it does not
                      try to decode the key
    :returns: the UUID of the stored key
    :throws Exception: if key storage fails
    """
    barbican_client = self._get_barbican_client(ctxt)

    try:
        # Prefer the algorithm recorded on the key object, when present.
        if key.get_algorithm():
            algorithm = key.get_algorithm()

        if payload_content_type == 'text/plain':
            payload_content_encoding = None
            payload = key.get_encoded()
        elif (payload_content_type == 'application/octet-stream' and
                not from_copy):
            # Byte list -> hex string -> raw bytes -> base64 payload.
            hex_str = ''.join("%02x" % byte for byte in key.get_encoded())
            payload = base64.b64encode(binascii.unhexlify(hex_str))
        else:
            payload = key.get_encoded()

        secret = barbican_client.secrets.create(name, payload,
                                                payload_content_type,
                                                payload_content_encoding,
                                                algorithm, bit_length,
                                                None, mode, expiration)
        secret_ref = secret.store()
        # The secret UUID is the final path segment of the secret ref.
        return secret_ref.rpartition('/')[2]
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Error storing key."))
def get_active_zone_set(self):
    """Return the active zone configuration.

    Return active zoneset from fabric. When none of the configurations
    are active then it will return empty map.

    :returns: Map -- active zone set map in the following format
    {
        'zones':
            {'openstack50060b0000c26604201900051ee8e329':
                ['50060b0000c26604', '201900051ee8e329']
            },
        'active_zone_config': 'OpenStack_Cfg'
    }
    """
    zone_set = {}
    zone = {}
    zone_member = None
    zone_name = None
    switch_data = None
    zone_set_name = None
    try:
        switch_data = self._get_switch_info(
            [zone_constant.GET_ACTIVE_ZONE_CFG])
    except exception.BrocadeZoningCliException:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Failed getting active zone set "
                          "from fabric %s"), self.switch_ip)
    try:
        for line in switch_data:
            # CLI output is tab-separated; only rows with at least three
            # columns carry zone/member information.
            line_split = re.split('\\t', line)
            if len(line_split) > 2:
                # Strip embedded newlines and spaces from every column.
                line_split = [x.replace('\n', '') for x in line_split]
                line_split = [x.replace(' ', '') for x in line_split]
                if zone_constant.CFG_ZONESET in line_split:
                    # This row names the active zone set itself.
                    zone_set_name = line_split[1]
                    continue
                if line_split[1]:
                    # Non-empty second column starts a new zone.
                    zone_name = line_split[1]
                    zone[zone_name] = list()
                if line_split[2]:
                    # Non-empty third column is a member of the current
                    # zone (appends in place via the shared list object).
                    zone_member = line_split[2]
                    zone_member_list = zone.get(zone_name)
                    zone_member_list.append(zone_member)
        zone_set[zone_constant.CFG_ZONES] = zone
        zone_set[zone_constant.ACTIVE_ZONE_CONFIG] = zone_set_name
    except Exception:
        # In case of parsing error here, it should be malformed cli output.
        msg = _("Malformed zone configuration: (switch=%(switch)s "
                "zone_config=%(zone_config)s).") % {
            'switch': self.switch_ip,
            'zone_config': switch_data}
        LOG.exception(msg)
        raise exception.FCZoneDriverException(reason=msg)
    switch_data = None
    return zone_set
def _get_file_timestamp(self, filename):
    """Get the last modified datetime. Broken out for testing."""
    try:
        mtime = os.path.getmtime(filename)
    except os.error:
        LOG.exception(_LE("Could not stat scheduler options file "
                          "%(filename)s."), {'filename': filename})
        raise
    return mtime
def disassociate_all(context, specs_id):
    """Disassociate qos_specs from all entities."""
    try:
        # Look up the qos specs first, then drop every association.
        get_qos_specs(context, specs_id)
        db.qos_specs_disassociate_all(context, specs_id)
    except db_exc.DBError:
        LOG.exception(_LE('DB error:'))
        LOG.warning(_LW('Failed to disassociate qos specs %s.'),
                    specs_id)
        raise exception.QoSSpecsDisassociateFailed(specs_id=specs_id,
                                                   type_id=None)
def revert(self, context, result, optional_args=None, **kwargs):
    # Nothing was produced on failure, so there is nothing to destroy.
    if isinstance(result, ft.Failure):
        return

    volume_id = result['volume_id']
    try:
        self.db.volume_destroy(context.elevated(), volume_id)
    except exception.CinderException:
        LOG.exception(_LE("Failed destroying volume entry: %s."),
                      volume_id)
def _run_ssh(self, cmd_list, check_exit_code=True, attempts=1):
    """Run a command over SSH, retrying up to ``attempts`` times.

    :param cmd_list: command fragments; joined with spaces before running
    :param check_exit_code: forwarded to processutils.ssh_execute
    :param attempts: number of tries before giving up
    :raises: processutils.ProcessExecutionError when all attempts fail
    """
    # TODO(skolathur): Need to implement ssh_injection check
    # currently, the check will fail for zonecreate command
    # as zone members are separated by ';'which is a danger char
    command = ' '.join(cmd_list)

    # Lazily create the SSH connection pool on first use.
    if not self.sshpool:
        self.sshpool = ssh_utils.SSHPool(self.switch_ip,
                                         self.switch_port,
                                         None,
                                         self.switch_user,
                                         self.switch_pwd,
                                         self.switch_key,
                                         min_size=1,
                                         max_size=5)
    last_exception = None
    try:
        with self.sshpool.item() as ssh:
            while attempts > 0:
                attempts -= 1
                try:
                    return processutils.ssh_execute(
                        ssh,
                        command,
                        check_exit_code=check_exit_code)
                except Exception as e:
                    LOG.exception(_LE('Error executing SSH command.'))
                    last_exception = e
                    # Randomized 0.2-5.0s backoff before the next try
                    # (also sleeps after the final failed attempt).
                    greenthread.sleep(random.randint(20, 500) / 100.0)
            # All attempts failed: surface the details of the last error.
            try:
                raise processutils.ProcessExecutionError(
                    exit_code=last_exception.exit_code,
                    stdout=last_exception.stdout,
                    stderr=last_exception.stderr,
                    cmd=last_exception.cmd)
            except AttributeError:
                # The last exception did not carry the expected fields;
                # raise a generic execution error instead.
                raise processutils.ProcessExecutionError(
                    exit_code=-1,
                    stdout="",
                    stderr="Error running SSH command",
                    cmd=command)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error running SSH command: %s"), command)
def _cleanup_incomplete_backup_operations(self, ctxt):
    """Reset backups on this host that were interrupted mid-operation."""
    LOG.info(_LI("Cleaning up incomplete backup operations."))

    # TODO(smulcahy) implement full resume of backup and restore
    # operations on restart (rather than simply resetting)
    for backup in storage.BackupList.get_all_by_host(ctxt, self.host):
        # Each cleanup step is best-effort: log and keep going.
        try:
            self._cleanup_one_backup(ctxt, backup)
        except Exception:
            LOG.exception(_LE("Problem cleaning up backup %(bkup)s."),
                          {'bkup': backup['id']})
        try:
            self._cleanup_temp_volumes_snapshots_for_one_backup(ctxt,
                                                               backup)
        except Exception:
            LOG.exception(
                _LE("Problem cleaning temp volumes and "
                    "snapshots for backup %(bkup)s."),
                {'bkup': backup['id']})