def _create_replica(self, volume, metadata):
    """Create vdisk on peer data node.

    :param volume: volume to replicate
    :param metadata: volume metadata used to select the reflection
        target (peer data node)
    :returns: tuple (success, reflection_target_ip, metadata_update)
        where metadata_update carries the secondary-datanode keys to be
        merged into the volume metadata by the caller
    """
    # Initialize these before the try block: the except handler below
    # populates metadata_update, so it must exist even when
    # _select_rt() itself raises (previously this was assigned inside
    # the try and the handler could hit UnboundLocalError).
    reflection_target_ip = None
    metadata_update = {}
    try:
        rt_routing_key, reflection_target_ip = (
            self._select_rt(volume, metadata))
        LOG.debug("_create_replica %(rt_key)s %(rt_ip)s",
                  {"rt_key": rt_routing_key,
                   "rt_ip": reflection_target_ip})
        metadata_update['Secondary_datanode_key'] = rt_routing_key
        metadata_update['Secondary_datanode_ip'] = reflection_target_ip
        # No usable peer data node: report failure without raising so
        # the caller can fall back to a single replica.
        if rt_routing_key is None or rt_routing_key == 'NA':
            return False, None, metadata_update
        instance_id = self._get_volume_metadata_value(metadata,
                                                      'InstanceId')
        util.message_data_plane(
            rt_routing_key,
            'hyperscale.storage.dm.volume.create',
            pool_name=POOL_NAME,
            volume_guid=util.get_guid_with_curly_brackets(
                volume['id']),
            display_name=util.get_guid_with_curly_brackets(
                volume['id']),
            volume_raw_size=volume['size'],
            vm_id=util.get_guid_with_curly_brackets(
                six.text_type(instance_id)),
            is_reflection_source=0,
            dn_reflection_factor=1,
            reflection_src_ip=self.datanode_ip,
            user_id=util.get_guid_with_curly_brackets(
                volume['user_id']),
            project_id=util.get_guid_with_curly_brackets(
                volume['project_id']),
            volume_qos=1)
        # Failure handling TBD.
        ret = True
        LOG.debug("Create volume sent to reflection target data node")
    except (exception.VolumeNotFound,
            v_exception.UnableToProcessHyperScaleCmdOutput,
            v_exception.ErrorInSendingMsg):
        # Replica creation is best-effort: mark the volume degraded
        # instead of propagating the error.
        LOG.error("Exception in creating replica", exc_info=True)
        metadata_update['Secondary_datanode_key'] = 'NA'
        metadata_update['Secondary_datanode_ip'] = 'NA'
        metadata_update['DN_Resiliency'] = 'degraded'
        ret = False
    return ret, reflection_target_ip, metadata_update
def _create_replica(self, volume, metadata):
    """Create vdisk on peer data node.

    :param volume: volume to replicate
    :param metadata: volume metadata used to select the reflection
        target (peer data node)
    :returns: tuple (success, reflection_target_ip, metadata_update)
        where metadata_update carries the secondary-datanode keys to be
        merged into the volume metadata by the caller
    """
    # Initialize these before the try block: the except handler below
    # populates metadata_update, so it must exist even when
    # _select_rt() itself raises (previously this was assigned inside
    # the try and the handler could hit UnboundLocalError).
    reflection_target_ip = None
    metadata_update = {}
    try:
        rt_routing_key, reflection_target_ip = (
            self._select_rt(volume, metadata))
        LOG.debug("_create_replica %(rt_key)s %(rt_ip)s",
                  {"rt_key": rt_routing_key,
                   "rt_ip": reflection_target_ip})
        metadata_update['Secondary_datanode_key'] = rt_routing_key
        metadata_update['Secondary_datanode_ip'] = reflection_target_ip
        # No usable peer data node: report failure without raising so
        # the caller can fall back to a single replica.
        if rt_routing_key is None or rt_routing_key == 'NA':
            return False, None, metadata_update
        instance_id = self._get_volume_metadata_value(metadata,
                                                      'InstanceId')
        util.message_data_plane(
            rt_routing_key,
            'hyperscale.storage.dm.volume.create',
            pool_name=POOL_NAME,
            volume_guid=util.get_guid_with_curly_brackets(
                volume['id']),
            display_name=util.get_guid_with_curly_brackets(
                volume['id']),
            volume_raw_size=volume['size'],
            vm_id=util.get_guid_with_curly_brackets(
                six.text_type(instance_id)),
            is_reflection_source=0,
            dn_reflection_factor=1,
            reflection_src_ip=self.datanode_ip,
            user_id=util.get_guid_with_curly_brackets(
                volume['user_id']),
            project_id=util.get_guid_with_curly_brackets(
                volume['project_id']),
            volume_qos=1)
        # Failure handling TBD.
        ret = True
        LOG.debug("Create volume sent to reflection target data node")
    except (exception.VolumeNotFound,
            exception.UnableToProcessHyperScaleCmdOutput,
            exception.ErrorInSendingMsg):
        # Replica creation is best-effort: mark the volume degraded
        # instead of propagating the error.
        LOG.error("Exception in creating replica", exc_info=True)
        metadata_update['Secondary_datanode_key'] = 'NA'
        metadata_update['Secondary_datanode_ip'] = 'NA'
        metadata_update['DN_Resiliency'] = 'degraded'
        ret = False
    return ret, reflection_target_ip, metadata_update
def create_volume(self, volume):
    """Creates a hyperscale volume."""
    model_update = {}
    metadata_update = {}
    reflection_target_ip = None
    LOG.debug("Create volume")
    try:
        volume_metadata = self._get_volume_metadata(volume)

        # Step 1: how many replicas should this volume have?
        replicas = self._get_replicas(volume, volume_metadata)
        if replicas > 1:
            # Step 2: create the replica on a peer datanode first.
            LOG.debug("Create volume message sent to peer data node")
            ok, reflection_target_ip, metadata_update = (
                self._create_replica(volume, volume_metadata))
            if ok is False:
                # Replica creation failed: continue with a single
                # (degraded) copy instead of failing volume creation.
                metadata_update['DN_Resiliency'] = 'degraded'
                reflection_target_ip = None

        # Step 3: build volume details according to the reflection
        # factor actually achieved.
        volume_details = self._get_volume_details_for_create_volume(
            reflection_target_ip, volume, volume_metadata)

        # Step 4: ask the primary data node to create the volume.
        util.message_data_plane(
            self.dn_routing_key,
            'hyperscale.storage.dm.volume.create',
            **volume_details)
        LOG.debug("Create volume message sent to data node")

        # Record ownership details in the volume metadata.
        volume_metadata['Primary_datanode_ip'] = self.datanode_ip
        volume_metadata['current_dn_owner'] = self.dn_routing_key
        volume_metadata['current_dn_ip'] = self.datanode_ip
        volume_metadata['hs_image_id'] = util.get_hyperscale_image_id()
        volume_metadata.update(metadata_update)
        volume['provider_location'] = PROVIDER_LOCATION
        model_update = {
            'provider_location': volume['provider_location'],
            'metadata': volume_metadata,
        }
    except (exception.UnableToProcessHyperScaleCmdOutput,
            exception.ErrorInSendingMsg):
        with excutils.save_and_reraise_exception():
            LOG.exception('Unable to create hyperscale volume')
    return model_update
def create_volume(self, volume):
    """Creates a hyperscale volume."""
    model_update = {}
    metadata_update = {}
    reflection_target_ip = None
    LOG.debug("Create volume")
    try:
        volume_metadata = self._get_volume_metadata(volume)

        # Step 1: how many replicas should this volume have?
        replicas = self._get_replicas(volume, volume_metadata)
        if replicas > 1:
            # Step 2: create the replica on a peer datanode first.
            LOG.debug("Create volume message sent to peer data node")
            ok, reflection_target_ip, metadata_update = (
                self._create_replica(volume, volume_metadata))
            if ok is False:
                # Replica creation failed: continue with a single
                # (degraded) copy instead of failing volume creation.
                metadata_update['DN_Resiliency'] = 'degraded'
                reflection_target_ip = None

        # Step 3: build volume details according to the reflection
        # factor actually achieved.
        volume_details = self._get_volume_details_for_create_volume(
            reflection_target_ip, volume, volume_metadata)

        # Step 4: ask the primary data node to create the volume.
        util.message_data_plane(
            self.dn_routing_key,
            'hyperscale.storage.dm.volume.create',
            **volume_details)
        LOG.debug("Create volume message sent to data node")

        # Record ownership details in the volume metadata.
        volume_metadata['Primary_datanode_ip'] = self.datanode_ip
        volume_metadata['current_dn_owner'] = self.dn_routing_key
        volume_metadata['current_dn_ip'] = self.datanode_ip
        volume_metadata['hs_image_id'] = util.get_hyperscale_image_id()
        volume_metadata.update(metadata_update)
        volume['provider_location'] = PROVIDER_LOCATION
        model_update = {
            'provider_location': volume['provider_location'],
            'metadata': volume_metadata,
        }
    except (v_exception.UnableToProcessHyperScaleCmdOutput,
            v_exception.ErrorInSendingMsg):
        with excutils.save_and_reraise_exception():
            LOG.exception('Unable to create hyperscale volume')
    return model_update
def delete_snapshot(self, snapshot):
    """Deletes a snapshot."""
    meta = snapshot.get('metadata')

    # A 'force' marker means the datanode must not be contacted.
    if 'force' in meta:
        LOG.debug("Found force flag for snapshot metadata."
                  " Not sending call to datanode ")
        LOG.debug('snapshot metadata %s', meta)
        return

    if 'is_busy' in meta:
        LOG.warning("Snapshot %s is being used, skipping delete",
                    snapshot['id'])
        raise exception.SnapshotIsBusy(snapshot_name=snapshot['id'])
    LOG.warning("Snapshot %s is being deleted,"
                " is_busy key not present", snapshot['id'])

    payload = {
        'volume_guid': util.get_guid_with_curly_brackets(
            snapshot['volume_id']),
        'snapshot_id': util.get_guid_with_curly_brackets(
            snapshot['id']),
    }

    # HyperScale snapshots, whether episodic or user initiated, all
    # reside in the data plane, so the delete is routed to the data
    # node that currently owns the parent volume.
    snapshot_volume = snapshot.get('volume')
    metadata = snapshot_volume['metadata']
    rt_key = self._get_volume_metadata_value(metadata,
                                             'current_dn_owner')
    if rt_key is None:
        rt_key = self.dn_routing_key

    try:
        # send message to data node
        util.message_data_plane(
            rt_key,
            'hyperscale.storage.dm.version.delete',
            **payload)
    except (exception.UnableToExecuteHyperScaleCmd,
            exception.UnableToProcessHyperScaleCmdOutput):
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception in delete snapshot')
def delete_volume(self, volume):
    """Deletes a volume."""
    LOG.debug("Delete volume with id %s", volume['id'])

    # A volume without a provider_location was never materialized on
    # the data plane; refuse to delete it.
    if not volume['provider_location']:
        LOG.warning('Volume %s does not have provider_location specified',
                    volume['name'])
        raise exception.VolumeMetadataNotFound(
            volume_id=volume['id'],
            metadata_key='provider_location')

    payload = {'display_name': volume['name']}

    # If a replica exists on a secondary datanode, it must be deleted
    # too — unless the secondary is actually the current owner, in
    # which case the single delete below suffices.
    metadata = self._get_volume_metadata(volume)
    secondary_key = self._get_volume_metadata_value(
        metadata, 'Secondary_datanode_key')
    secondary_ip = self._get_volume_metadata_value(
        metadata, 'Secondary_datanode_ip')
    owner_ip = self._get_volume_metadata_value(metadata,
                                               'current_dn_ip')
    if owner_ip is not None and secondary_ip == owner_ip:
        secondary_key = None

    try:
        if secondary_key is not None:
            util.message_data_plane(
                secondary_key,
                'hyperscale.storage.dm.volume.delete',
                **payload)
        util.message_data_plane(
            self.dn_routing_key,
            'hyperscale.storage.dm.volume.delete',
            **payload)
    except (exception.UnableToProcessHyperScaleCmdOutput,
            exception.ErrorInSendingMsg):
        LOG.error('Exception while deleting volume', exc_info=True)
        raise exception.VolumeIsBusy(volume_name=volume['name'])
def delete_snapshot(self, snapshot):
    """Deletes a snapshot."""
    meta = snapshot.get('metadata')

    # A 'force' marker means the datanode must not be contacted.
    if 'force' in meta:
        LOG.debug("Found force flag for snapshot metadata."
                  " Not sending call to datanode ")
        LOG.debug('snapshot metadata %s', meta)
        return

    if 'is_busy' in meta:
        LOG.warning("Snapshot %s is being used, skipping delete",
                    snapshot['id'])
        raise exception.SnapshotIsBusy(snapshot_name=snapshot['id'])
    LOG.warning("Snapshot %s is being deleted,"
                " is_busy key not present", snapshot['id'])

    payload = {
        'volume_guid': util.get_guid_with_curly_brackets(
            snapshot['volume_id']),
        'snapshot_id': util.get_guid_with_curly_brackets(
            snapshot['id']),
    }

    # HyperScale snapshots, whether episodic or user initiated, all
    # reside in the data plane, so the delete is routed to the data
    # node that currently owns the parent volume.
    snapshot_volume = snapshot.get('volume')
    metadata = snapshot_volume['metadata']
    rt_key = self._get_volume_metadata_value(metadata,
                                             'current_dn_owner')
    if rt_key is None:
        rt_key = self.dn_routing_key

    try:
        # send message to data node
        util.message_data_plane(
            rt_key,
            'hyperscale.storage.dm.version.delete',
            **payload)
    except (v_exception.UnableToExecuteHyperScaleCmd,
            v_exception.UnableToProcessHyperScaleCmdOutput):
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception in delete snapshot')
def delete_volume(self, volume):
    """Deletes a volume."""
    LOG.debug("Delete volume with id %s", volume['id'])

    # A volume without a provider_location was never materialized on
    # the data plane; refuse to delete it.
    if not volume['provider_location']:
        LOG.warning('Volume %s does not have provider_location specified',
                    volume['name'])
        raise exception.VolumeMetadataNotFound(
            volume_id=volume['id'],
            metadata_key='provider_location')

    payload = {'display_name': volume['name']}

    # If a replica exists on a secondary datanode, it must be deleted
    # too — unless the secondary is actually the current owner, in
    # which case the single delete below suffices.
    metadata = self._get_volume_metadata(volume)
    secondary_key = self._get_volume_metadata_value(
        metadata, 'Secondary_datanode_key')
    secondary_ip = self._get_volume_metadata_value(
        metadata, 'Secondary_datanode_ip')
    owner_ip = self._get_volume_metadata_value(metadata,
                                               'current_dn_ip')
    if owner_ip is not None and secondary_ip == owner_ip:
        secondary_key = None

    try:
        if secondary_key is not None:
            util.message_data_plane(
                secondary_key,
                'hyperscale.storage.dm.volume.delete',
                **payload)
        util.message_data_plane(
            self.dn_routing_key,
            'hyperscale.storage.dm.volume.delete',
            **payload)
    except (v_exception.UnableToProcessHyperScaleCmdOutput,
            v_exception.ErrorInSendingMsg):
        LOG.error('Exception while deleting volume', exc_info=True)
        raise exception.VolumeIsBusy(volume_name=volume['name'])
def extend_volume(self, volume, size_gb):
    """Extend volume."""
    LOG.debug("Extend volume")
    try:
        payload = {
            'volume_guid': util.get_guid_with_curly_brackets(
                volume['id']),
            'new_size': size_gb,
        }
        # Ask the owning data node to grow the vdisk.
        util.message_data_plane(
            self.dn_routing_key,
            'hyperscale.storage.dm.volume.extend',
            **payload)
    except (exception.UnableToProcessHyperScaleCmdOutput,
            exception.ErrorInSendingMsg):
        msg = _('Exception in extend volume %s') % volume['name']
        LOG.exception(msg)
        raise exception.VolumeDriverException(message=msg)
def extend_volume(self, volume, size_gb):
    """Extend volume."""
    LOG.debug("Extend volume")
    try:
        payload = {
            'volume_guid': util.get_guid_with_curly_brackets(
                volume['id']),
            'new_size': size_gb,
        }
        # Ask the owning data node to grow the vdisk.
        util.message_data_plane(
            self.dn_routing_key,
            'hyperscale.storage.dm.volume.extend',
            **payload)
    except (v_exception.UnableToProcessHyperScaleCmdOutput,
            v_exception.ErrorInSendingMsg):
        msg = _('Exception in extend volume %s') % volume['name']
        LOG.exception(msg)
        raise exception.VolumeDriverException(message=msg)
def _fetch_volume_status(self):
    """Retrieve Volume Stats from Datanode.

    :returns: dict of backend capability and capacity fields; the
        capacity values stay 0.0 when the datanode does not report
        both total and free capacity
    """
    LOG.debug("Request Volume Stats from Datanode")
    data = {}
    data["volume_backend_name"] = 'Veritas_HyperScale'
    data["vendor_name"] = 'Veritas Technologies LLC'
    data["driver_version"] = self.VERSION
    data["storage_protocol"] = 'nfs'
    data['total_capacity_gb'] = 0.0
    data['free_capacity_gb'] = 0.0
    data['reserved_percentage'] = self.configuration.reserved_percentage
    data['QoS_support'] = False
    # Initialize before parsing: previously these were only assigned
    # inside conditional branches, so a payload missing either key
    # caused an UnboundLocalError (or float(None)) below.
    total_capacity = None
    free_capacity = None
    try:
        message_body = {}
        # send message to data node
        cmd_out, cmd_error = util.message_data_plane(
            self.dn_routing_key,
            'hyperscale.storage.dm.discover.stats',
            **message_body)
        LOG.debug("Response Message from Datanode: %s", cmd_out)
        payload = cmd_out.get('payload')
        if 'stats' in payload.keys():
            stats = payload.get('stats')[0]
            if 'total_capacity' in stats.keys():
                total_capacity = stats['total_capacity']
            if 'free_capacity' in stats.keys():
                free_capacity = stats['free_capacity']
            # Only report capacities when both values were supplied.
            if total_capacity is not None and free_capacity is not None:
                data['total_capacity_gb'] = float(total_capacity)
                data['free_capacity_gb'] = float(free_capacity)
    except (exception.UnableToExecuteHyperScaleCmd,
            exception.UnableToProcessHyperScaleCmdOutput):
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception during fetch stats')
    return data
def _fetch_volume_status(self):
    """Retrieve Volume Stats from Datanode.

    :returns: dict of backend capability and capacity fields; the
        capacity values stay 0.0 when the datanode does not report
        both total and free capacity
    """
    LOG.debug("Request Volume Stats from Datanode")
    data = {}
    data["volume_backend_name"] = 'Veritas_HyperScale'
    data["vendor_name"] = 'Veritas Technologies LLC'
    data["driver_version"] = self.VERSION
    data["storage_protocol"] = 'nfs'
    data['total_capacity_gb'] = 0.0
    data['free_capacity_gb'] = 0.0
    data['reserved_percentage'] = self.configuration.reserved_percentage
    data['QoS_support'] = False
    # Initialize before parsing: previously these were only assigned
    # inside conditional branches, so a payload missing either key
    # caused an UnboundLocalError (or float(None)) below.
    total_capacity = None
    free_capacity = None
    try:
        message_body = {}
        # send message to data node
        cmd_out, cmd_error = util.message_data_plane(
            self.dn_routing_key,
            'hyperscale.storage.dm.discover.stats',
            **message_body)
        LOG.debug("Response Message from Datanode: %s", cmd_out)
        payload = cmd_out.get('payload')
        if 'stats' in payload.keys():
            stats = payload.get('stats')[0]
            if 'total_capacity' in stats.keys():
                total_capacity = stats['total_capacity']
            if 'free_capacity' in stats.keys():
                free_capacity = stats['free_capacity']
            # Only report capacities when both values were supplied.
            if total_capacity is not None and free_capacity is not None:
                data['total_capacity_gb'] = float(total_capacity)
                data['free_capacity_gb'] = float(free_capacity)
    except (v_exception.UnableToExecuteHyperScaleCmd,
            v_exception.UnableToProcessHyperScaleCmdOutput):
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception during fetch stats')
    return data
def create_volume_from_snapshot(self, volume, snapshot):
    """Create volume from snapshot.

    Routes the clone request to the data node that currently owns the
    parent volume, then records lineage and ownership details in the
    new volume's metadata. A potential replica location is also chosen
    here, but the actual replica decision is made in Nova.
    """
    LOG.debug("Create volume from snapshot")
    model_update = {}
    try:
        LOG.debug("Clone new volume %(t_id)s from snapshot with id"
                  " %(s_id)s", {"t_id": volume['id'],
                                "s_id": volume['snapshot_id']})
        # 1. Make a call to DN
        # Check if current_dn_owner is set.
        # Route the snapshot creation request to current_dn_owner
        rt_key = None
        # Get metadata for volume
        snap_vol = snapshot['volume']
        metadata = snap_vol['metadata']
        rt_key = self._get_volume_metadata_value(metadata,
                                                 'current_dn_owner')
        if rt_key is None:
            # Fall back to this driver's own data node routing key.
            rt_key = self.dn_routing_key
        util.message_data_plane(
            rt_key,
            'hyperscale.storage.dm.volume.clone.create',
            pool_name=POOL_NAME,
            display_name=util.get_guid_with_curly_brackets(
                volume['id']),
            version_name=util.get_guid_with_curly_brackets(
                volume['snapshot_id']),
            volume_raw_size=volume['size'],
            volume_qos=1,
            parent_volume_guid=util.get_guid_with_curly_brackets(
                snapshot['volume_id']),
            user_id=util.get_guid_with_curly_brackets(
                volume['user_id']),
            project_id=util.get_guid_with_curly_brackets(
                volume['project_id']),
            volume_guid=util.get_guid_with_curly_brackets(
                volume['id']))
        LOG.debug("Volume created successfully on data node")
        # Get metadata for volume
        volume_metadata = self._get_volume_metadata(volume)
        # The clone inherits the parent's current data node as its
        # primary/current owner.
        parent_cur_dn = self._get_volume_metadata_value(metadata,
                                                        'current_dn_ip')
        metadata_update = {}
        metadata_update['snapshot_id'] = snapshot['id']
        metadata_update['parent_volume_guid'] = (
            util.get_guid_with_curly_brackets(
                snapshot['volume_id']))
        metadata_update['Primary_datanode_ip'] = parent_cur_dn
        metadata_update['current_dn_owner'] = rt_key
        metadata_update['current_dn_ip'] = parent_cur_dn
        # 2. Choose a potential replica here.
        # The actual decision to have potential replica is made in NOVA.
        rt_key, rt_dn_ip = self._select_rt(volume,
                                           volume_metadata,
                                           only_select=True)
        if rt_key and rt_dn_ip:
            metadata_update['Potential_secondary_key'] = rt_key
            metadata_update['Potential_secondary_ip'] = rt_dn_ip
    # Both handlers re-raise, so the metadata update below only runs
    # on success.
    except (exception.UnableToExecuteHyperScaleCmd,
            exception.UnableToProcessHyperScaleCmdOutput):
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception in creating volume from snapshot')
    except exception.InvalidMetadataType:
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception updating metadata in create'
                          ' volume from snapshot')
    volume_metadata.update(metadata_update)
    volume['provider_location'] = PROVIDER_LOCATION
    model_update = {'provider_location': volume['provider_location'],
                    'metadata': volume_metadata}
    return model_update
def create_snapshot(self, snapshot):
    """Create a snapshot.

    Handles three cases, distinguished by the snapshot's metadata:
    episodic snapshots (marked with a 'SNAPSHOT-COOKIE' key) are
    updated via the data node; workflow snapshots (marked with a
    'workflow_id' key) and plain user-initiated snapshots are created
    via the compute node (VSA).
    """
    LOG.debug("Create Snapshot %s", snapshot['volume_id'])
    workflow_id = None
    last_in_eds_seq = None
    model_update = {}
    rt_key = None
    # Get metadata for volume
    snapshot_volume = snapshot.get('volume')
    metadata = snapshot_volume['metadata']
    rt_key = self._get_volume_metadata_value(metadata,
                                             'current_dn_owner')
    if rt_key is None:
        rt_key = self.dn_routing_key
    # Check for episodic based on metadata key
    workflow_snap = 0
    meta = snapshot.get('metadata')
    LOG.debug('Snapshot metatadata %s', meta)
    if 'SNAPSHOT-COOKIE' in meta.keys():
        snapsize = meta['SIZE']
        # Call DataNode for episodic snapshots
        LOG.debug('Calling Data Node for episodic snapshots')
        message_body = {}
        message_body['snapshot_id'] = (
            util.get_guid_with_curly_brackets(snapshot['id']))
        message_body['volume_guid'] = (
            util.get_guid_with_curly_brackets(
                snapshot['volume_id']))
        message_body['snapshot_cookie'] = meta['SNAPSHOT-COOKIE']
        try:
            # send message to data node
            util.message_data_plane(
                rt_key,
                'hyperscale.storage.dm.volume.snapshot.update',
                **message_body)
            # Update size via cinder api
            # NOTE(review): snapsize comes from meta['SIZE'] and is
            # accessed via .value — confirm it is an object with a
            # 'value' attribute rather than a plain string.
            if snapsize is not None:
                model_update['volume_size'] = snapsize.value
            # Set the episodic type metatdata for filtering purpose
            meta['TYPE'] = TYPE_EPISODIC_SNAP
            meta['status'] = 'available'
            meta['datanode_ip'] = self.datanode_ip
        except (exception.VolumeNotFound,
                exception.UnableToExecuteHyperScaleCmd,
                exception.UnableToProcessHyperScaleCmdOutput):
            with excutils.save_and_reraise_exception():
                LOG.exception('Exception in create snapshot')
        model_update['metadata'] = meta
        return model_update
    else:
        # Not marked episodic by the caller; let the helper decide
        # whether episodic bookkeeping still applies.
        out_meta = util.episodic_snap(meta)
        if out_meta.get('update'):
            meta['TYPE'] = out_meta.get('TYPE')
            meta['status'] = out_meta.get('status')
            meta['datanode_ip'] = self.datanode_ip
            model_update['metadata'] = meta
            return model_update
    if 'workflow_id' in meta.keys():
        workflow_snap = 1
        workflow_id = meta['workflow_id']
    if 'monitor_snap' in meta.keys():
        # Mark whether this is the last snapshot in the EDS sequence.
        if int(meta['monitor_snap']) == constants.SNAP_RESTORE_RF:
            last_in_eds_seq = 0
        else:
            last_in_eds_seq = 1
    # If code falls through here then it mean its user initiated snapshots
    try:
        # Get metadata for volume
        vsa_routing_key = None
        snapshot_volume = snapshot.get('volume')
        metadata = snapshot_volume['metadata']
        LOG.debug('Calling Compute Node for user initiated snapshots')
        # Prefer the acting vdisk owner; fall back to the VSA IP.
        vsa_ip = self._get_volume_metadata_value(metadata,
                                                 'acting_vdisk_owner')
        if vsa_ip is None:
            vsa_ip = self._get_volume_metadata_value(metadata,
                                                     'vsa_ip')
        LOG.debug("Create snap on compute vsa %s", vsa_ip)
        if vsa_ip:
            # Routing key is the VSA IP with the dots removed.
            vsa_routing_key = vsa_ip.replace('.', '')
        message_body = {}
        # Set the parent volume id
        message_body['vdisk_id_str'] = (
            util.get_guid_with_curly_brackets(
                snapshot['volume_id']))
        # Set the snapshot details
        message_body['snapshot_id_str'] = (
            util.get_guid_with_curly_brackets(snapshot['id']))
        message_body['snapshot_name'] = snapshot['name']
        if workflow_snap == 1:
            message_body['workflow_snapshot'] = 1
        else:
            message_body['user_initiated'] = 1
        if last_in_eds_seq is not None:
            message_body['last_in_eds_seq'] = last_in_eds_seq
        # send message to compute node
        util.message_compute_plane(
            vsa_routing_key,
            'hyperscale.storage.nfs.volume.snapshot.create',
            **message_body)
        # Set the snapshot type to either workflow or user initiated
        # snapshot in metatdata for filtering purpose
        if workflow_snap:
            LOG.debug('__help request for WORKFLOW snapshot')
            meta['TYPE'] = TYPE_WORKFLOW_SNAP
            meta['status'] = 'creating'
            meta['datanode_ip'] = self.datanode_ip
        else:
            LOG.debug('__help request for MANUAL snapshot')
            meta['TYPE'] = TYPE_USER_SNAP
            meta['status'] = 'creating'
            meta['datanode_ip'] = self.datanode_ip
        if workflow_id is not None:
            # Resume the controller workflow past the snapshot
            # sentinel once the snapshot request has been sent.
            message_body = {}
            message_body['workflow_id'] = workflow_id
            message_body['skip_upto_sentinel'] = (
                'hyperscale.vdisk.failback.snapmark_sentinel')
            # send message to controller node
            util.message_controller(
                constants.HS_CONTROLLER_EXCH,
                'hyperscale.controller.execute.workflow',
                **message_body)
    except (exception.VolumeNotFound,
            exception.UnableToExecuteHyperScaleCmd,
            exception.UnableToProcessHyperScaleCmdOutput):
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception in create snapshot')
    model_update['metadata'] = meta
    return model_update
def create_volume_from_snapshot(self, volume, snapshot):
    """Create volume from snapshot.

    Routes the clone request to the data node that currently owns the
    parent volume, then records lineage and ownership details in the
    new volume's metadata. A potential replica location is also chosen
    here, but the actual replica decision is made in Nova.
    """
    LOG.debug("Create volume from snapshot")
    model_update = {}
    try:
        LOG.debug("Clone new volume %(t_id)s from snapshot with id"
                  " %(s_id)s", {"t_id": volume['id'],
                                "s_id": volume['snapshot_id']})
        # 1. Make a call to DN
        # Check if current_dn_owner is set.
        # Route the snapshot creation request to current_dn_owner
        rt_key = None
        # Get metadata for volume
        snap_vol = snapshot['volume']
        metadata = snap_vol['metadata']
        rt_key = self._get_volume_metadata_value(metadata,
                                                 'current_dn_owner')
        if rt_key is None:
            # Fall back to this driver's own data node routing key.
            rt_key = self.dn_routing_key
        util.message_data_plane(
            rt_key,
            'hyperscale.storage.dm.volume.clone.create',
            pool_name=POOL_NAME,
            display_name=util.get_guid_with_curly_brackets(
                volume['id']),
            version_name=util.get_guid_with_curly_brackets(
                volume['snapshot_id']),
            volume_raw_size=volume['size'],
            volume_qos=1,
            parent_volume_guid=util.get_guid_with_curly_brackets(
                snapshot['volume_id']),
            user_id=util.get_guid_with_curly_brackets(
                volume['user_id']),
            project_id=util.get_guid_with_curly_brackets(
                volume['project_id']),
            volume_guid=util.get_guid_with_curly_brackets(
                volume['id']))
        LOG.debug("Volume created successfully on data node")
        # Get metadata for volume
        volume_metadata = self._get_volume_metadata(volume)
        # The clone inherits the parent's current data node as its
        # primary/current owner.
        parent_cur_dn = self._get_volume_metadata_value(metadata,
                                                        'current_dn_ip')
        metadata_update = {}
        metadata_update['snapshot_id'] = snapshot['id']
        metadata_update['parent_volume_guid'] = (
            util.get_guid_with_curly_brackets(
                snapshot['volume_id']))
        metadata_update['Primary_datanode_ip'] = parent_cur_dn
        metadata_update['current_dn_owner'] = rt_key
        metadata_update['current_dn_ip'] = parent_cur_dn
        # 2. Choose a potential replica here.
        # The actual decision to have potential replica is made in NOVA.
        rt_key, rt_dn_ip = self._select_rt(volume,
                                           volume_metadata,
                                           only_select=True)
        if rt_key and rt_dn_ip:
            metadata_update['Potential_secondary_key'] = rt_key
            metadata_update['Potential_secondary_ip'] = rt_dn_ip
    # Both handlers re-raise, so the metadata update below only runs
    # on success.
    except (v_exception.UnableToExecuteHyperScaleCmd,
            v_exception.UnableToProcessHyperScaleCmdOutput):
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception in creating volume from snapshot')
    except exception.InvalidMetadataType:
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception updating metadata in create'
                          ' volume from snapshot')
    volume_metadata.update(metadata_update)
    volume['provider_location'] = PROVIDER_LOCATION
    model_update = {'provider_location': volume['provider_location'],
                    'metadata': volume_metadata}
    return model_update
def create_snapshot(self, snapshot):
    """Create a snapshot.

    Handles three cases, distinguished by the snapshot's metadata:
    episodic snapshots (marked with a 'SNAPSHOT-COOKIE' key) are
    updated via the data node; workflow snapshots (marked with a
    'workflow_id' key) and plain user-initiated snapshots are created
    via the compute node (VSA).
    """
    LOG.debug("Create Snapshot %s", snapshot['volume_id'])
    workflow_id = None
    last_in_eds_seq = None
    model_update = {}
    rt_key = None
    # Get metadata for volume
    snapshot_volume = snapshot.get('volume')
    metadata = snapshot_volume['metadata']
    rt_key = self._get_volume_metadata_value(metadata,
                                             'current_dn_owner')
    if rt_key is None:
        rt_key = self.dn_routing_key
    # Check for episodic based on metadata key
    workflow_snap = 0
    meta = snapshot.get('metadata')
    LOG.debug('Snapshot metatadata %s', meta)
    if 'SNAPSHOT-COOKIE' in meta.keys():
        snapsize = meta['SIZE']
        # Call DataNode for episodic snapshots
        LOG.debug('Calling Data Node for episodic snapshots')
        message_body = {}
        message_body['snapshot_id'] = (
            util.get_guid_with_curly_brackets(snapshot['id']))
        message_body['volume_guid'] = (
            util.get_guid_with_curly_brackets(
                snapshot['volume_id']))
        message_body['snapshot_cookie'] = meta['SNAPSHOT-COOKIE']
        try:
            # send message to data node
            util.message_data_plane(
                rt_key,
                'hyperscale.storage.dm.volume.snapshot.update',
                **message_body)
            # Update size via cinder api
            # NOTE(review): snapsize comes from meta['SIZE'] and is
            # accessed via .value — confirm it is an object with a
            # 'value' attribute rather than a plain string.
            if snapsize is not None:
                model_update['volume_size'] = snapsize.value
            # Set the episodic type metatdata for filtering purpose
            meta['TYPE'] = TYPE_EPISODIC_SNAP
            meta['status'] = 'available'
            meta['datanode_ip'] = self.datanode_ip
        except (exception.VolumeNotFound,
                v_exception.UnableToExecuteHyperScaleCmd,
                v_exception.UnableToProcessHyperScaleCmdOutput):
            with excutils.save_and_reraise_exception():
                LOG.exception('Exception in create snapshot')
        model_update['metadata'] = meta
        return model_update
    else:
        # Not marked episodic by the caller; let the helper decide
        # whether episodic bookkeeping still applies.
        out_meta = util.episodic_snap(meta)
        if out_meta.get('update'):
            meta['TYPE'] = out_meta.get('TYPE')
            meta['status'] = out_meta.get('status')
            meta['datanode_ip'] = self.datanode_ip
            model_update['metadata'] = meta
            return model_update
    if 'workflow_id' in meta.keys():
        workflow_snap = 1
        workflow_id = meta['workflow_id']
    if 'monitor_snap' in meta.keys():
        # Mark whether this is the last snapshot in the EDS sequence.
        if int(meta['monitor_snap']) == constants.SNAP_RESTORE_RF:
            last_in_eds_seq = 0
        else:
            last_in_eds_seq = 1
    # If code falls through here then it mean its user initiated snapshots
    try:
        # Get metadata for volume
        vsa_routing_key = None
        snapshot_volume = snapshot.get('volume')
        metadata = snapshot_volume['metadata']
        LOG.debug('Calling Compute Node for user initiated snapshots')
        # Prefer the acting vdisk owner; fall back to the VSA IP.
        vsa_ip = self._get_volume_metadata_value(metadata,
                                                 'acting_vdisk_owner')
        if vsa_ip is None:
            vsa_ip = self._get_volume_metadata_value(metadata,
                                                     'vsa_ip')
        LOG.debug("Create snap on compute vsa %s", vsa_ip)
        if vsa_ip:
            # Routing key is the VSA IP with the dots removed.
            vsa_routing_key = vsa_ip.replace('.', '')
        message_body = {}
        # Set the parent volume id
        message_body['vdisk_id_str'] = (
            util.get_guid_with_curly_brackets(
                snapshot['volume_id']))
        # Set the snapshot details
        message_body['snapshot_id_str'] = (
            util.get_guid_with_curly_brackets(snapshot['id']))
        message_body['snapshot_name'] = snapshot['name']
        if workflow_snap == 1:
            message_body['workflow_snapshot'] = 1
        else:
            message_body['user_initiated'] = 1
        if last_in_eds_seq is not None:
            message_body['last_in_eds_seq'] = last_in_eds_seq
        # send message to compute node
        util.message_compute_plane(
            vsa_routing_key,
            'hyperscale.storage.nfs.volume.snapshot.create',
            **message_body)
        # Set the snapshot type to either workflow or user initiated
        # snapshot in metatdata for filtering purpose
        if workflow_snap:
            LOG.debug('__help request for WORKFLOW snapshot')
            meta['TYPE'] = TYPE_WORKFLOW_SNAP
            meta['status'] = 'creating'
            meta['datanode_ip'] = self.datanode_ip
        else:
            LOG.debug('__help request for MANUAL snapshot')
            meta['TYPE'] = TYPE_USER_SNAP
            meta['status'] = 'creating'
            meta['datanode_ip'] = self.datanode_ip
        if workflow_id is not None:
            # Resume the controller workflow past the snapshot
            # sentinel once the snapshot request has been sent.
            message_body = {}
            message_body['workflow_id'] = workflow_id
            message_body['skip_upto_sentinel'] = (
                'hyperscale.vdisk.failback.snapmark_sentinel')
            # send message to controller node
            util.message_controller(
                constants.HS_CONTROLLER_EXCH,
                'hyperscale.controller.execute.workflow',
                **message_body)
    except (exception.VolumeNotFound,
            v_exception.UnableToExecuteHyperScaleCmd,
            v_exception.UnableToProcessHyperScaleCmdOutput):
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception in create snapshot')
    model_update['metadata'] = meta
    return model_update