def create_snapshot(self, snapshot):
    """Create a snapshot."""
    LOG.debug("Create Snapshot %s", snapshot['volume_id'])
    workflow_id = None
    last_in_eds_seq = None
    model_update = {}
    rt_key = None

    # Get metadata for volume
    snapshot_volume = snapshot.get('volume')
    metadata = snapshot_volume['metadata']
    rt_key = self._get_volume_metadata_value(metadata,
                                             'current_dn_owner')
    if rt_key is None:
        rt_key = self.dn_routing_key

    # Check for episodic based on metadata key
    workflow_snap = 0

    meta = snapshot.get('metadata')
    LOG.debug('Snapshot metadata %s', meta)
    if 'SNAPSHOT-COOKIE' in meta.keys():
        snapsize = meta['SIZE']

        # Call DataNode for episodic snapshots
        LOG.debug('Calling Data Node for episodic snapshots')
        message_body = {}
        message_body['snapshot_id'] = (
            util.get_guid_with_curly_brackets(snapshot['id']))
        message_body['volume_guid'] = (
            util.get_guid_with_curly_brackets(
                snapshot['volume_id']))
        message_body['snapshot_cookie'] = meta['SNAPSHOT-COOKIE']

        try:
            # send message to data node
            util.message_data_plane(
                rt_key,
                'hyperscale.storage.dm.volume.snapshot.update',
                **message_body)

            # Update size via cinder api
            if snapsize is not None:
                model_update['volume_size'] = snapsize.value

            # Set the episodic type metadata for filtering purposes
            meta['TYPE'] = TYPE_EPISODIC_SNAP
            meta['status'] = 'available'
            meta['datanode_ip'] = self.datanode_ip

        except (exception.VolumeNotFound,
                exception.UnableToExecuteHyperScaleCmd,
                exception.UnableToProcessHyperScaleCmdOutput):
            with excutils.save_and_reraise_exception():
                LOG.exception('Exception in create snapshot')

        model_update['metadata'] = meta
        return model_update
    else:
        out_meta = util.episodic_snap(meta)
        if out_meta.get('update'):
            meta['TYPE'] = out_meta.get('TYPE')
            meta['status'] = out_meta.get('status')
            meta['datanode_ip'] = self.datanode_ip

            model_update['metadata'] = meta
            return model_update

    if 'workflow_id' in meta.keys():
        workflow_snap = 1
        workflow_id = meta['workflow_id']

    if 'monitor_snap' in meta.keys():
        if int(meta['monitor_snap']) == constants.SNAP_RESTORE_RF:
            last_in_eds_seq = 0
        else:
            last_in_eds_seq = 1

    # If control reaches here, this is a user-initiated snapshot
    try:
        # Get metadata for volume
        vsa_routing_key = None
        snapshot_volume = snapshot.get('volume')
        metadata = snapshot_volume['metadata']
        LOG.debug('Calling Compute Node for user initiated snapshots')
        vsa_ip = self._get_volume_metadata_value(metadata,
                                                 'acting_vdisk_owner')
        if vsa_ip is None:
            vsa_ip = self._get_volume_metadata_value(metadata, 'vsa_ip')

        LOG.debug("Create snap on compute vsa %s", vsa_ip)
        if vsa_ip:
            vsa_routing_key = vsa_ip.replace('.', '')

        message_body = {}
        # Set the parent volume id
        message_body['vdisk_id_str'] = (
            util.get_guid_with_curly_brackets(
                snapshot['volume_id']))
        # Set the snapshot details
        message_body['snapshot_id_str'] = (
            util.get_guid_with_curly_brackets(snapshot['id']))
        message_body['snapshot_name'] = snapshot['name']

        if workflow_snap == 1:
            message_body['workflow_snapshot'] = 1
        else:
            message_body['user_initiated'] = 1

        if last_in_eds_seq is not None:
            message_body['last_in_eds_seq'] = last_in_eds_seq

        # send message to compute node
        util.message_compute_plane(
            vsa_routing_key,
            'hyperscale.storage.nfs.volume.snapshot.create',
            **message_body)

        # Set the snapshot type to either workflow or user initiated
        # snapshot in metadata for filtering purposes
        if workflow_snap:
            LOG.debug('__help request for WORKFLOW snapshot')
            meta['TYPE'] = TYPE_WORKFLOW_SNAP
            meta['status'] = 'creating'
            meta['datanode_ip'] = self.datanode_ip
        else:
            LOG.debug('__help request for MANUAL snapshot')
            meta['TYPE'] = TYPE_USER_SNAP
            meta['status'] = 'creating'
            meta['datanode_ip'] = self.datanode_ip

        if workflow_id is not None:
            message_body = {}
            message_body['workflow_id'] = workflow_id
            message_body['skip_upto_sentinel'] = (
                'hyperscale.vdisk.failback.snapmark_sentinel')

            # send message to controller node
            util.message_controller(
                constants.HS_CONTROLLER_EXCH,
                'hyperscale.controller.execute.workflow',
                **message_body)

    except (exception.VolumeNotFound,
            exception.UnableToExecuteHyperScaleCmd,
            exception.UnableToProcessHyperScaleCmdOutput):
        with excutils.save_and_reraise_exception():
            LOG.exception('Exception in create snapshot')

    model_update['metadata'] = meta
    return model_update
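
# For reference, a minimal sketch of the snapshot metadata keys that
# create_snapshot() branches on; the keys are taken from the code above,
# and the groupings below are an inferred summary, not driver output:
#   - Episodic snapshot (driven by the data plane): 'SNAPSHOT-COOKIE'
#     together with 'SIZE'
#   - Workflow snapshot: 'workflow_id', optionally with 'monitor_snap'
#   - User-initiated snapshot: none of the keys above are present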