def _get_volume_details_for_create_volume(self, reflection_target_ip,
                                          volume, metadata):
    """Assemble the keyword payload for a data-node volume-create message.

    :param reflection_target_ip: IP of the replication peer, or None when
        the volume is not replicated.
    :param volume: cinder volume (mapping with id/size/user_id/project_id).
    :param metadata: volume metadata used to look up the owning instance.
    :returns: dict of message parameters for the data plane.
    """
    instance_id = self._get_volume_metadata_value(metadata, 'InstanceId')
    guid_of = util.get_guid_with_curly_brackets
    details = {
        'pool_name': POOL_NAME,
        'volume_guid': guid_of(volume['id']),
        'display_name': guid_of(volume['id']),
        'volume_raw_size': volume['size'],
        'vm_id': guid_of(six.text_type(instance_id)),
        'user_id': guid_of(volume['user_id']),
        'project_id': guid_of(volume['project_id']),
        'volume_qos': 1,
        'dn_reflection_factor': 0,
    }
    # A reflection target implies this node acts as the replication source.
    if reflection_target_ip is not None:
        details['is_reflection_source'] = 1
        details['dn_reflection_factor'] = 1
        details['reflection_target_ip'] = reflection_target_ip
    return details
def _get_volume_details_for_create_volume(self, reflection_target_ip,
                                          volume, metadata):
    """Build the volume-details dict sent when creating a volume.

    Returns the parameters for a data-node create message; replication
    fields are only present when *reflection_target_ip* is given.
    """
    instance_id = self._get_volume_metadata_value(metadata, 'InstanceId')
    vol_guid = util.get_guid_with_curly_brackets(volume['id'])
    volume_details = dict(
        pool_name=POOL_NAME,
        volume_guid=vol_guid,
        display_name=vol_guid,
        volume_raw_size=volume['size'],
        vm_id=util.get_guid_with_curly_brackets(
            six.text_type(instance_id)),
        user_id=util.get_guid_with_curly_brackets(volume['user_id']),
        project_id=util.get_guid_with_curly_brackets(
            volume['project_id']),
        volume_qos=1,
        dn_reflection_factor=0,
    )
    if reflection_target_ip is not None:
        # Mark this copy as the replication source and record the peer.
        volume_details.update(
            is_reflection_source=1,
            dn_reflection_factor=1,
            reflection_target_ip=reflection_target_ip,
        )
    return volume_details
def _create_replica(self, volume, metadata):
    """Create vdisk on peer data node.

    :returns: tuple (success, reflection_target_ip, metadata_update)
    """
    # Initialize these before the try block: the except path populates
    # metadata_update, which would otherwise raise NameError when
    # _select_rt() itself is what failed.
    reflection_target_ip = None
    metadata_update = {}
    try:
        rt_routing_key, reflection_target_ip = (
            self._select_rt(volume, metadata))
        LOG.debug("_create_replica %(rt_key)s %(rt_ip)s",
                  {"rt_key": rt_routing_key,
                   "rt_ip": reflection_target_ip})
        metadata_update['Secondary_datanode_key'] = rt_routing_key
        metadata_update['Secondary_datanode_ip'] = reflection_target_ip
        if rt_routing_key is None or rt_routing_key == 'NA':
            # No usable reflection target; caller records the metadata.
            return False, None, metadata_update
        instance_id = self._get_volume_metadata_value(metadata,
                                                      'InstanceId')
        util.message_data_plane(
            rt_routing_key,
            'hyperscale.storage.dm.volume.create',
            pool_name=POOL_NAME,
            volume_guid=util.get_guid_with_curly_brackets(
                volume['id']),
            display_name=util.get_guid_with_curly_brackets(
                volume['id']),
            volume_raw_size=volume['size'],
            vm_id=util.get_guid_with_curly_brackets(
                six.text_type(instance_id)),
            is_reflection_source=0,
            dn_reflection_factor=1,
            reflection_src_ip=self.datanode_ip,
            user_id=util.get_guid_with_curly_brackets(
                volume['user_id']),
            project_id=util.get_guid_with_curly_brackets(
                volume['project_id']),
            volume_qos=1)
        # Failure handling TBD.
        ret = True
        LOG.debug("Create volume sent to reflection target data node")
    except (exception.VolumeNotFound,
            v_exception.UnableToProcessHyperScaleCmdOutput,
            v_exception.ErrorInSendingMsg):
        # HyperScale-specific exceptions live in the driver-local
        # v_exception module, as in the other methods of this driver.
        LOG.error("Exception in creating replica", exc_info=True)
        metadata_update['Secondary_datanode_key'] = 'NA'
        metadata_update['Secondary_datanode_ip'] = 'NA'
        metadata_update['DN_Resiliency'] = 'degraded'
        ret = False
    return ret, reflection_target_ip, metadata_update
def _create_replica(self, volume, metadata):
    """Create vdisk on peer data node.

    :returns: tuple (success, reflection_target_ip, metadata_update)
    """
    # metadata_update must exist before the try: the except handler
    # writes into it, and _select_rt() may be the call that raised —
    # previously that path hit a NameError.
    reflection_target_ip = None
    metadata_update = {}
    try:
        rt_routing_key, reflection_target_ip = (
            self._select_rt(volume, metadata))
        LOG.debug("_create_replica %(rt_key)s %(rt_ip)s",
                  {"rt_key": rt_routing_key,
                   "rt_ip": reflection_target_ip})
        metadata_update['Secondary_datanode_key'] = rt_routing_key
        metadata_update['Secondary_datanode_ip'] = reflection_target_ip
        if rt_routing_key is None or rt_routing_key == 'NA':
            # No usable reflection target; caller records the metadata.
            return False, None, metadata_update
        instance_id = self._get_volume_metadata_value(metadata,
                                                      'InstanceId')
        util.message_data_plane(
            rt_routing_key,
            'hyperscale.storage.dm.volume.create',
            pool_name=POOL_NAME,
            volume_guid=util.get_guid_with_curly_brackets(
                volume['id']),
            display_name=util.get_guid_with_curly_brackets(
                volume['id']),
            volume_raw_size=volume['size'],
            vm_id=util.get_guid_with_curly_brackets(
                six.text_type(instance_id)),
            is_reflection_source=0,
            dn_reflection_factor=1,
            reflection_src_ip=self.datanode_ip,
            user_id=util.get_guid_with_curly_brackets(
                volume['user_id']),
            project_id=util.get_guid_with_curly_brackets(
                volume['project_id']),
            volume_qos=1)
        # Failure handling TBD.
        ret = True
        LOG.debug("Create volume sent to reflection target data node")
    except (exception.VolumeNotFound,
            v_exception.UnableToProcessHyperScaleCmdOutput,
            v_exception.ErrorInSendingMsg):
        LOG.error("Exception in creating replica", exc_info=True)
        metadata_update['Secondary_datanode_key'] = 'NA'
        metadata_update['Secondary_datanode_ip'] = 'NA'
        metadata_update['DN_Resiliency'] = 'degraded'
        ret = False
    return ret, reflection_target_ip, metadata_update
def delete_snapshot(self, snapshot):
    """Deletes a snapshot.

    Routes the delete to the data node that currently owns the parent
    volume. Raises SnapshotIsBusy when the snapshot is in use.
    """
    meta = snapshot.get('metadata')
    if 'force' in meta.keys():
        # Force flag short-circuits the data-plane call entirely.
        LOG.debug("Found force flag for snapshot metadata."
                  " Not sending call to datanode ")
        LOG.debug('snapshot metadata %s', meta)
        return
    if 'is_busy' in meta.keys():
        LOG.warning("Snapshot %s is being used, skipping delete",
                    snapshot['id'])
        raise exception.SnapshotIsBusy(snapshot_name=snapshot['id'])
    else:
        LOG.warning("Snapshot %s is being deleted,"
                    " is_busy key not present", snapshot['id'])
    message_body = {}
    message_body['volume_guid'] = (
        util.get_guid_with_curly_brackets(snapshot['volume_id']))
    message_body['snapshot_id'] = (
        util.get_guid_with_curly_brackets(snapshot['id']))
    # HyperScale snapshots whether Episodic or User initiated, all resides
    # in the data plane.
    # Hence delete snapshot operation will go to datanode
    rt_key = None
    # Get metadata for volume
    snapshot_volume = snapshot.get('volume')
    metadata = snapshot_volume['metadata']
    rt_key = self._get_volume_metadata_value(metadata,
                                             'current_dn_owner')
    if rt_key is None:
        rt_key = self.dn_routing_key
    try:
        # send message to data node
        util.message_data_plane(
            rt_key,
            'hyperscale.storage.dm.version.delete',
            **message_body)
    except (v_exception.UnableToExecuteHyperScaleCmd,
            v_exception.UnableToProcessHyperScaleCmdOutput):
        # HyperScale command exceptions are defined in the driver-local
        # v_exception module (matches the rest of this driver).
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception in delete snapshot')
def delete_snapshot(self, snapshot):
    """Delete a snapshot by messaging the owning data node."""
    meta = snapshot.get('metadata')
    if 'force' in meta:
        # Forced deletes never reach the data plane.
        LOG.debug("Found force flag for snapshot metadata."
                  " Not sending call to datanode ")
        LOG.debug('snapshot metadata %s', meta)
        return
    if 'is_busy' in meta:
        LOG.warning("Snapshot %s is being used, skipping delete",
                    snapshot['id'])
        raise exception.SnapshotIsBusy(snapshot_name=snapshot['id'])
    LOG.warning("Snapshot %s is being deleted,"
                " is_busy key not present", snapshot['id'])
    message_body = {
        'volume_guid': util.get_guid_with_curly_brackets(
            snapshot['volume_id']),
        'snapshot_id': util.get_guid_with_curly_brackets(
            snapshot['id']),
    }
    # All HyperScale snapshots (episodic or user initiated) reside in the
    # data plane, so the delete is always routed to a data node.
    volume_meta = snapshot.get('volume')['metadata']
    rt_key = self._get_volume_metadata_value(volume_meta,
                                             'current_dn_owner')
    if rt_key is None:
        rt_key = self.dn_routing_key
    try:
        util.message_data_plane(
            rt_key,
            'hyperscale.storage.dm.version.delete',
            **message_body)
    except (v_exception.UnableToExecuteHyperScaleCmd,
            v_exception.UnableToProcessHyperScaleCmdOutput):
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception in delete snapshot')
def extend_volume(self, volume, size_gb):
    """Extend volume.

    :param volume: cinder volume to extend.
    :param size_gb: new size in GiB.
    :raises VolumeDriverException: when the data node reports a failure.
    """
    LOG.debug("Extend volume")
    try:
        message_body = {}
        message_body['volume_guid'] = (
            util.get_guid_with_curly_brackets(volume['id']))
        message_body['new_size'] = size_gb
        # Send Extend Volume message to Data Node
        util.message_data_plane(
            self.dn_routing_key,
            'hyperscale.storage.dm.volume.extend',
            **message_body)
    except (v_exception.UnableToProcessHyperScaleCmdOutput,
            v_exception.ErrorInSendingMsg):
        # HyperScale messaging exceptions come from the driver-local
        # v_exception module (matches the rest of this driver).
        msg = _('Exception in extend volume %s') % volume['name']
        LOG.exception(msg)
        raise exception.VolumeDriverException(message=msg)
def extend_volume(self, volume, size_gb):
    """Grow an existing volume to *size_gb* via the data node."""
    LOG.debug("Extend volume")
    payload = {
        'volume_guid': util.get_guid_with_curly_brackets(volume['id']),
        'new_size': size_gb,
    }
    try:
        # Ask the data node to resize the backing vdisk.
        util.message_data_plane(
            self.dn_routing_key,
            'hyperscale.storage.dm.volume.extend',
            **payload)
    except (v_exception.UnableToProcessHyperScaleCmdOutput,
            v_exception.ErrorInSendingMsg):
        msg = _('Exception in extend volume %s') % volume['name']
        LOG.exception(msg)
        raise exception.VolumeDriverException(message=msg)
def create_volume_from_snapshot(self, volume, snapshot):
    """Create volume from snapshot.

    Clones the snapshot on the data node that owns the parent volume,
    then records lineage/ownership metadata and selects a potential
    replica target.

    :returns: model update with provider_location and metadata.
    """
    LOG.debug("Create volume from snapshot")
    model_update = {}
    try:
        LOG.debug("Clone new volume %(t_id)s from snapshot with id"
                  " %(s_id)s", {"t_id": volume['id'],
                                "s_id": volume['snapshot_id']})
        # 1. Make a call to DN
        # Check if current_dn_owner is set.
        # Route the snapshot creation request to current_dn_owner
        rt_key = None
        # Get metadata for volume
        snap_vol = snapshot['volume']
        metadata = snap_vol['metadata']
        rt_key = self._get_volume_metadata_value(metadata,
                                                 'current_dn_owner')
        if rt_key is None:
            rt_key = self.dn_routing_key
        util.message_data_plane(
            rt_key,
            'hyperscale.storage.dm.volume.clone.create',
            pool_name=POOL_NAME,
            display_name=util.get_guid_with_curly_brackets(
                volume['id']),
            version_name=util.get_guid_with_curly_brackets(
                volume['snapshot_id']),
            volume_raw_size=volume['size'],
            volume_qos=1,
            parent_volume_guid=util.get_guid_with_curly_brackets(
                snapshot['volume_id']),
            user_id=util.get_guid_with_curly_brackets(
                volume['user_id']),
            project_id=util.get_guid_with_curly_brackets(
                volume['project_id']),
            volume_guid=util.get_guid_with_curly_brackets(
                volume['id']))
        LOG.debug("Volume created successfully on data node")
        # Get metadata for volume
        volume_metadata = self._get_volume_metadata(volume)
        parent_cur_dn = self._get_volume_metadata_value(metadata,
                                                        'current_dn_ip')
        metadata_update = {}
        metadata_update['snapshot_id'] = snapshot['id']
        metadata_update['parent_volume_guid'] = (
            util.get_guid_with_curly_brackets(
                snapshot['volume_id']))
        metadata_update['Primary_datanode_ip'] = parent_cur_dn
        metadata_update['current_dn_owner'] = rt_key
        metadata_update['current_dn_ip'] = parent_cur_dn
        # 2. Choose a potential replica here.
        # The actual decision to have potential replica is made in NOVA.
        rt_key, rt_dn_ip = self._select_rt(volume,
                                           volume_metadata,
                                           only_select=True)
        if rt_key and rt_dn_ip:
            metadata_update['Potential_secondary_key'] = rt_key
            metadata_update['Potential_secondary_ip'] = rt_dn_ip
    except (v_exception.UnableToExecuteHyperScaleCmd,
            v_exception.UnableToProcessHyperScaleCmdOutput):
        # HyperScale command exceptions belong to the driver-local
        # v_exception module (matches the rest of this driver).
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception in creating volume from snapshot')
    except exception.InvalidMetadataType:
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception updating metadata in create'
                          ' volume from snapshot')
    volume_metadata.update(metadata_update)
    volume['provider_location'] = PROVIDER_LOCATION
    model_update = {'provider_location': volume['provider_location'],
                    'metadata': volume_metadata}
    return model_update
def create_snapshot(self, snapshot):
    """Create a snapshot.

    Three paths: episodic snapshots (SNAPSHOT-COOKIE present) go to the
    data node; metadata-only episodic updates return early; user or
    workflow snapshots go to the compute node, optionally resuming a
    controller workflow.

    :returns: model update carrying the adjusted snapshot metadata.
    """
    LOG.debug("Create Snapshot %s", snapshot['volume_id'])
    workflow_id = None
    last_in_eds_seq = None
    model_update = {}
    rt_key = None
    # Get metadata for volume
    snapshot_volume = snapshot.get('volume')
    metadata = snapshot_volume['metadata']
    rt_key = self._get_volume_metadata_value(metadata,
                                             'current_dn_owner')
    if rt_key is None:
        rt_key = self.dn_routing_key
    # Check for episodic based on metadata key
    workflow_snap = 0
    meta = snapshot.get('metadata')
    LOG.debug('Snapshot metatadata %s', meta)
    if 'SNAPSHOT-COOKIE' in meta.keys():
        snapsize = meta['SIZE']
        # Call DataNode for episodic snapshots
        LOG.debug('Calling Data Node for episodic snapshots')
        message_body = {}
        message_body['snapshot_id'] = (
            util.get_guid_with_curly_brackets(snapshot['id']))
        message_body['volume_guid'] = (
            util.get_guid_with_curly_brackets(
                snapshot['volume_id']))
        message_body['snapshot_cookie'] = meta['SNAPSHOT-COOKIE']
        try:
            # send message to data node
            util.message_data_plane(
                rt_key,
                'hyperscale.storage.dm.volume.snapshot.update',
                **message_body)
            # Update size via cinder api
            if snapsize is not None:
                model_update['volume_size'] = snapsize.value
            # Set the episodic type metatdata for filtering purpose
            meta['TYPE'] = TYPE_EPISODIC_SNAP
            meta['status'] = 'available'
            meta['datanode_ip'] = self.datanode_ip
        except (exception.VolumeNotFound,
                v_exception.UnableToExecuteHyperScaleCmd,
                v_exception.UnableToProcessHyperScaleCmdOutput):
            # HyperScale command exceptions belong to the driver-local
            # v_exception module (matches the rest of this driver).
            with excutils.save_and_reraise_exception():
                LOG.exception('Exception in create snapshot')
        model_update['metadata'] = meta
        return model_update
    else:
        out_meta = util.episodic_snap(meta)
        if out_meta.get('update'):
            meta['TYPE'] = out_meta.get('TYPE')
            meta['status'] = out_meta.get('status')
            meta['datanode_ip'] = self.datanode_ip
            model_update['metadata'] = meta
            return model_update
    if 'workflow_id' in meta.keys():
        workflow_snap = 1
        workflow_id = meta['workflow_id']
    if 'monitor_snap' in meta.keys():
        if int(meta['monitor_snap']) == constants.SNAP_RESTORE_RF:
            last_in_eds_seq = 0
        else:
            last_in_eds_seq = 1
    # If code falls through here then it mean its user initiated snapshots
    try:
        # Get metadata for volume
        vsa_routing_key = None
        snapshot_volume = snapshot.get('volume')
        metadata = snapshot_volume['metadata']
        LOG.debug('Calling Compute Node for user initiated snapshots')
        vsa_ip = self._get_volume_metadata_value(metadata,
                                                 'acting_vdisk_owner')
        if vsa_ip is None:
            vsa_ip = self._get_volume_metadata_value(metadata, 'vsa_ip')
        LOG.debug("Create snap on compute vsa %s", vsa_ip)
        if vsa_ip:
            vsa_routing_key = vsa_ip.replace('.', '')
        message_body = {}
        # Set the parent volume id
        message_body['vdisk_id_str'] = (
            util.get_guid_with_curly_brackets(
                snapshot['volume_id']))
        # Set the snapshot details
        message_body['snapshot_id_str'] = (
            util.get_guid_with_curly_brackets(snapshot['id']))
        message_body['snapshot_name'] = snapshot['name']
        if workflow_snap == 1:
            message_body['workflow_snapshot'] = 1
        else:
            message_body['user_initiated'] = 1
        if last_in_eds_seq is not None:
            message_body['last_in_eds_seq'] = last_in_eds_seq
        # send message to compute node
        util.message_compute_plane(
            vsa_routing_key,
            'hyperscale.storage.nfs.volume.snapshot.create',
            **message_body)
        # Set the snapshot type to either workflow or user initiated
        # snapshot in metatdata for filtering purpose
        if workflow_snap:
            LOG.debug('__help request for WORKFLOW snapshot')
            meta['TYPE'] = TYPE_WORKFLOW_SNAP
            meta['status'] = 'creating'
            meta['datanode_ip'] = self.datanode_ip
        else:
            LOG.debug('__help request for MANUAL snapshot')
            meta['TYPE'] = TYPE_USER_SNAP
            meta['status'] = 'creating'
            meta['datanode_ip'] = self.datanode_ip
        if workflow_id is not None:
            message_body = {}
            message_body['workflow_id'] = workflow_id
            message_body['skip_upto_sentinel'] = (
                'hyperscale.vdisk.failback.snapmark_sentinel')
            # send message to controller node
            util.message_controller(
                constants.HS_CONTROLLER_EXCH,
                'hyperscale.controller.execute.workflow',
                **message_body)
    except (exception.VolumeNotFound,
            v_exception.UnableToExecuteHyperScaleCmd,
            v_exception.UnableToProcessHyperScaleCmdOutput):
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception in create snapshot')
    model_update['metadata'] = meta
    return model_update
def create_volume_from_snapshot(self, volume, snapshot):
    """Create volume from snapshot.

    Clones the snapshot on the data node that currently owns the parent
    volume, then builds lineage/ownership metadata for the new volume
    and picks a potential replica target.
    """
    LOG.debug("Create volume from snapshot")
    model_update = {}
    try:
        LOG.debug("Clone new volume %(t_id)s from snapshot with id"
                  " %(s_id)s", {"t_id": volume['id'],
                                "s_id": volume['snapshot_id']})
        # 1. Make a call to DN
        # Check if current_dn_owner is set.
        # Route the snapshot creation request to current_dn_owner
        rt_key = None
        # Get metadata for volume
        snap_vol = snapshot['volume']
        metadata = snap_vol['metadata']
        rt_key = self._get_volume_metadata_value(metadata,
                                                 'current_dn_owner')
        if rt_key is None:
            # No recorded owner: fall back to this node's routing key.
            rt_key = self.dn_routing_key
        util.message_data_plane(
            rt_key,
            'hyperscale.storage.dm.volume.clone.create',
            pool_name=POOL_NAME,
            display_name=util.get_guid_with_curly_brackets(
                volume['id']),
            version_name=util.get_guid_with_curly_brackets(
                volume['snapshot_id']),
            volume_raw_size=volume['size'],
            volume_qos=1,
            parent_volume_guid=util.get_guid_with_curly_brackets(
                snapshot['volume_id']),
            user_id=util.get_guid_with_curly_brackets(
                volume['user_id']),
            project_id=util.get_guid_with_curly_brackets(
                volume['project_id']),
            volume_guid=util.get_guid_with_curly_brackets(
                volume['id']))
        LOG.debug("Volume created successfully on data node")
        # Get metadata for volume
        volume_metadata = self._get_volume_metadata(volume)
        # The clone initially lives on the same data node as its parent.
        parent_cur_dn = self._get_volume_metadata_value(metadata,
                                                        'current_dn_ip')
        metadata_update = {}
        metadata_update['snapshot_id'] = snapshot['id']
        metadata_update['parent_volume_guid'] = (
            util.get_guid_with_curly_brackets(
                snapshot['volume_id']))
        metadata_update['Primary_datanode_ip'] = parent_cur_dn
        metadata_update['current_dn_owner'] = rt_key
        metadata_update['current_dn_ip'] = parent_cur_dn
        # 2. Choose a potential replica here.
        # The actual decision to have potential replica is made in NOVA.
        rt_key, rt_dn_ip = self._select_rt(volume,
                                           volume_metadata,
                                           only_select=True)
        if rt_key and rt_dn_ip:
            metadata_update['Potential_secondary_key'] = rt_key
            metadata_update['Potential_secondary_ip'] = rt_dn_ip
    except (v_exception.UnableToExecuteHyperScaleCmd,
            v_exception.UnableToProcessHyperScaleCmdOutput):
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception in creating volume from snapshot')
    except exception.InvalidMetadataType:
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception updating metadata in create'
                          ' volume from snapshot')
    volume_metadata.update(metadata_update)
    volume['provider_location'] = PROVIDER_LOCATION
    model_update = {'provider_location': volume['provider_location'],
                    'metadata': volume_metadata}
    return model_update
def create_snapshot(self, snapshot):
    """Create a snapshot.

    Handles three cases: episodic snapshots (identified by a
    SNAPSHOT-COOKIE metadata key) are sent to the data node;
    metadata-only episodic updates return early; user/workflow
    snapshots are sent to the compute node, optionally resuming a
    controller workflow afterwards.
    """
    LOG.debug("Create Snapshot %s", snapshot['volume_id'])
    workflow_id = None
    last_in_eds_seq = None
    model_update = {}
    rt_key = None
    # Get metadata for volume
    snapshot_volume = snapshot.get('volume')
    metadata = snapshot_volume['metadata']
    rt_key = self._get_volume_metadata_value(metadata,
                                             'current_dn_owner')
    if rt_key is None:
        # No recorded owner: fall back to this node's routing key.
        rt_key = self.dn_routing_key
    # Check for episodic based on metadata key
    workflow_snap = 0
    meta = snapshot.get('metadata')
    LOG.debug('Snapshot metatadata %s', meta)
    if 'SNAPSHOT-COOKIE' in meta.keys():
        snapsize = meta['SIZE']
        # Call DataNode for episodic snapshots
        LOG.debug('Calling Data Node for episodic snapshots')
        message_body = {}
        message_body['snapshot_id'] = (
            util.get_guid_with_curly_brackets(snapshot['id']))
        message_body['volume_guid'] = (
            util.get_guid_with_curly_brackets(
                snapshot['volume_id']))
        message_body['snapshot_cookie'] = meta['SNAPSHOT-COOKIE']
        try:
            # send message to data node
            util.message_data_plane(
                rt_key,
                'hyperscale.storage.dm.volume.snapshot.update',
                **message_body)
            # Update size via cinder api
            if snapsize is not None:
                model_update['volume_size'] = snapsize.value
            # Set the episodic type metatdata for filtering purpose
            meta['TYPE'] = TYPE_EPISODIC_SNAP
            meta['status'] = 'available'
            meta['datanode_ip'] = self.datanode_ip
        except (exception.VolumeNotFound,
                v_exception.UnableToExecuteHyperScaleCmd,
                v_exception.UnableToProcessHyperScaleCmdOutput):
            with excutils.save_and_reraise_exception():
                LOG.exception('Exception in create snapshot')
        model_update['metadata'] = meta
        return model_update
    else:
        # Not cookie-driven: ask the helper whether this is an episodic
        # metadata refresh; if so, update metadata and return early.
        out_meta = util.episodic_snap(meta)
        if out_meta.get('update'):
            meta['TYPE'] = out_meta.get('TYPE')
            meta['status'] = out_meta.get('status')
            meta['datanode_ip'] = self.datanode_ip
            model_update['metadata'] = meta
            return model_update
    if 'workflow_id' in meta.keys():
        workflow_snap = 1
        workflow_id = meta['workflow_id']
    if 'monitor_snap' in meta.keys():
        if int(meta['monitor_snap']) == constants.SNAP_RESTORE_RF:
            last_in_eds_seq = 0
        else:
            last_in_eds_seq = 1
    # If code falls through here then it mean its user initiated snapshots
    try:
        # Get metadata for volume
        vsa_routing_key = None
        snapshot_volume = snapshot.get('volume')
        metadata = snapshot_volume['metadata']
        LOG.debug('Calling Compute Node for user initiated snapshots')
        # Prefer the acting vdisk owner; fall back to the vsa_ip key.
        vsa_ip = self._get_volume_metadata_value(metadata,
                                                 'acting_vdisk_owner')
        if vsa_ip is None:
            vsa_ip = self._get_volume_metadata_value(metadata, 'vsa_ip')
        LOG.debug("Create snap on compute vsa %s", vsa_ip)
        if vsa_ip:
            # Routing key is the dotless form of the VSA IP.
            vsa_routing_key = vsa_ip.replace('.', '')
        message_body = {}
        # Set the parent volume id
        message_body['vdisk_id_str'] = (
            util.get_guid_with_curly_brackets(
                snapshot['volume_id']))
        # Set the snapshot details
        message_body['snapshot_id_str'] = (
            util.get_guid_with_curly_brackets(snapshot['id']))
        message_body['snapshot_name'] = snapshot['name']
        if workflow_snap == 1:
            message_body['workflow_snapshot'] = 1
        else:
            message_body['user_initiated'] = 1
        if last_in_eds_seq is not None:
            message_body['last_in_eds_seq'] = last_in_eds_seq
        # send message to compute node
        util.message_compute_plane(
            vsa_routing_key,
            'hyperscale.storage.nfs.volume.snapshot.create',
            **message_body)
        # Set the snapshot type to either workflow or user initiated
        # snapshot in metatdata for filtering purpose
        if workflow_snap:
            LOG.debug('__help request for WORKFLOW snapshot')
            meta['TYPE'] = TYPE_WORKFLOW_SNAP
            meta['status'] = 'creating'
            meta['datanode_ip'] = self.datanode_ip
        else:
            LOG.debug('__help request for MANUAL snapshot')
            meta['TYPE'] = TYPE_USER_SNAP
            meta['status'] = 'creating'
            meta['datanode_ip'] = self.datanode_ip
        if workflow_id is not None:
            # Resume the failback workflow past its snapshot sentinel.
            message_body = {}
            message_body['workflow_id'] = workflow_id
            message_body['skip_upto_sentinel'] = (
                'hyperscale.vdisk.failback.snapmark_sentinel')
            # send message to controller node
            util.message_controller(
                constants.HS_CONTROLLER_EXCH,
                'hyperscale.controller.execute.workflow',
                **message_body)
    except (exception.VolumeNotFound,
            v_exception.UnableToExecuteHyperScaleCmd,
            v_exception.UnableToProcessHyperScaleCmdOutput):
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception in create snapshot')
    model_update['metadata'] = meta
    return model_update