Example #1
0
    def get_volume_stats(self, refresh=False):
        """Return volume statistics, reporting changes to the controller.

        The stats are always re-fetched from the backend.  Whenever the
        total or free capacity differs from the last reported values,
        the new figures are pushed to the HyperScale controller.
        """
        # NOTE(review): 'refresh' is accepted for driver-interface
        # compatibility but the stats are re-fetched unconditionally.

        LOG.debug("Get volume status")

        self._stats = self._fetch_volume_status()
        total = self._stats['total_capacity_gb']
        free = self._stats['free_capacity_gb']

        capacity_changed = (self.old_total != total or
                            self.old_free != free)
        if capacity_changed:
            # Cache the figures so unchanged stats are not re-reported.
            self.old_total, self.old_free = total, free

            message_body = {
                'hostname': self.datanode_hostname,
                'is_admin': 1,
                'total': total,
                'free': free,
            }
            try:
                cmd_out, cmd_error = util.message_controller(
                    constants.HS_CONTROLLER_EXCH,
                    'hyperscale.controller.set.datanode.storage.stats',
                    **message_body)
                LOG.debug("Response Message from Controller: %s",
                          cmd_out)

            except (exception.UnableToExecuteHyperScaleCmd,
                    exception.UnableToProcessHyperScaleCmdOutput):
                # Log, then re-raise so the caller sees the failure.
                with excutils.save_and_reraise_exception():
                    LOG.exception('Exception during fetch stats')

        return self._stats
Example #2
0
    def get_volume_stats(self, refresh=False):
        """Return volume statistics, reporting changes to the controller.

        Stats are always re-fetched; new total/free capacity values are
        forwarded to the HyperScale controller only when they differ
        from the previously reported ones.
        """
        LOG.debug("Get volume status")

        self._stats = self._fetch_volume_status()
        new_total = self._stats['total_capacity_gb']
        new_free = self._stats['free_capacity_gb']

        if self.old_total == new_total and self.old_free == new_free:
            # Capacity unchanged since last report; skip the controller call.
            return self._stats

        self.old_total = new_total
        self.old_free = new_free

        message_body = {'hostname': self.datanode_hostname,
                        'is_admin': 1,
                        'total': new_total,
                        'free': new_free}
        try:
            cmd_out, cmd_error = util.message_controller(
                constants.HS_CONTROLLER_EXCH,
                'hyperscale.controller.set.datanode.storage.stats',
                **message_body)
            LOG.debug("Response Message from Controller: %s",
                      cmd_out)

        except (v_exception.UnableToExecuteHyperScaleCmd,
                v_exception.UnableToProcessHyperScaleCmdOutput):
            # Log, then re-raise so the caller sees the failure.
            with excutils.save_and_reraise_exception():
                LOG.exception('Exception during fetch stats')

        return self._stats
Example #3
0
    def _get_datanodes_info(self):
        """Fetch hyperscale datanode membership info from the controller."""

        message_body = {}
        membership = None

        try:
            cmd_out, cmd_error = util.message_controller(
                constants.HS_CONTROLLER_EXCH,
                'hyperscale.controller.get.membership', **message_body)
            LOG.debug("Response Message from Controller: %s", cmd_out)
            # Unwrap the membership data from the response payload.
            membership = cmd_out.get('payload').get('of_membership')

        except (exception.UnableToExecuteHyperScaleCmd,
                exception.UnableToProcessHyperScaleCmdOutput):
            with excutils.save_and_reraise_exception():
                LOG.exception("Failed to get datanode config "
                              "information from controller")

        return membership
Example #4
0
    def _get_datanodes_info(self):
        """Fetch hyperscale datanode membership info from the controller."""

        membership = None

        try:
            cmd_out, cmd_error = util.message_controller(
                constants.HS_CONTROLLER_EXCH,
                'hyperscale.controller.get.membership',
                **{})
            LOG.debug("Response Message from Controller: %s",
                      cmd_out)
            # Unwrap the membership data from the response payload.
            response_payload = cmd_out.get('payload')
            membership = response_payload.get('of_membership')

        except (v_exception.UnableToExecuteHyperScaleCmd,
                v_exception.UnableToProcessHyperScaleCmdOutput):
            with excutils.save_and_reraise_exception():
                LOG.exception("Failed to get datanode config "
                              "information from controller")

        return membership
Example #5
0
    def create_snapshot(self, snapshot):
        """Create a snapshot.

        Three distinct paths are taken depending on snapshot metadata:

        1. Metadata contains 'SNAPSHOT-COOKIE': an episodic snapshot —
           the owning data node is asked to update the snapshot and the
           model update is returned immediately.
        2. Otherwise, if util.episodic_snap() flags the metadata for
           update, only the snapshot metadata is refreshed and returned.
        3. Otherwise a user- (or workflow-) initiated snapshot: a create
           request is sent to the compute node owning the volume, and,
           when a workflow id is present, a workflow-execution message
           is additionally sent to the controller.

        :param snapshot: snapshot object; the code reads 'volume',
            'metadata', 'id', 'volume_id' and 'name' from it.
        :returns: model update dict containing 'metadata' (and possibly
            'volume_size' for episodic snapshots).
        :raises: re-raises VolumeNotFound, UnableToExecuteHyperScaleCmd
            and UnableToProcessHyperScaleCmdOutput from the messaging
            helpers after logging.
        """

        LOG.debug("Create Snapshot %s", snapshot['volume_id'])
        workflow_id = None
        last_in_eds_seq = None
        model_update = {}
        rt_key = None

        # Get metadata for volume
        snapshot_volume = snapshot.get('volume')
        metadata = snapshot_volume['metadata']
        # Route data-node messages to the volume's current owner,
        # falling back to the default data-node routing key.
        rt_key = self._get_volume_metadata_value(metadata,
                                                 'current_dn_owner')
        if rt_key is None:
            rt_key = self.dn_routing_key

        # Check for episodic based on metadata key
        workflow_snap = 0

        meta = snapshot.get('metadata')
        LOG.debug('Snapshot metatadata %s', meta)
        if 'SNAPSHOT-COOKIE' in meta.keys():
            snapsize = meta['SIZE']

            # Call DataNode for episodic snapshots
            LOG.debug('Calling Data Node for episodic snapshots')
            message_body = {}
            message_body['snapshot_id'] = (
                util.get_guid_with_curly_brackets(snapshot['id']))
            message_body['volume_guid'] = (
                util.get_guid_with_curly_brackets(
                    snapshot['volume_id']))
            message_body['snapshot_cookie'] = meta['SNAPSHOT-COOKIE']

            try:
                # send message to data node
                util.message_data_plane(
                    rt_key,
                    'hyperscale.storage.dm.volume.snapshot.update',
                    **message_body)

                # Update size via cinder api
                # NOTE(review): snapsize comes from meta['SIZE'] and is
                # dereferenced via '.value' — assumes a non-str object;
                # confirm against the producer of this metadata.
                if snapsize is not None:
                    model_update['volume_size'] = snapsize.value

                # Set the episodic type metatdata for filtering purpose
                meta['TYPE'] = TYPE_EPISODIC_SNAP
                meta['status'] = 'available'
                meta['datanode_ip'] = self.datanode_ip

            except (exception.VolumeNotFound,
                    exception.UnableToExecuteHyperScaleCmd,
                    exception.UnableToProcessHyperScaleCmdOutput):
                with excutils.save_and_reraise_exception():
                    LOG.exception('Exception in create snapshot')

            model_update['metadata'] = meta
            return model_update

        else:
            # Not cookie-based; let the helper decide whether this is a
            # metadata-only episodic update.
            out_meta = util.episodic_snap(meta)
            if out_meta.get('update'):
                meta['TYPE'] = out_meta.get('TYPE')
                meta['status'] = out_meta.get('status')
                meta['datanode_ip'] = self.datanode_ip
                model_update['metadata'] = meta
                return model_update

        if 'workflow_id' in meta.keys():
            workflow_snap = 1
            workflow_id = meta['workflow_id']

        if 'monitor_snap' in meta.keys():
            # SNAP_RESTORE_RF marks the restore point; any other value
            # flags this as the last snapshot in the EDS sequence.
            if int(meta['monitor_snap']) == constants.SNAP_RESTORE_RF:
                last_in_eds_seq = 0
            else:
                last_in_eds_seq = 1

        # If code falls through here then it mean its user initiated snapshots
        try:
            # Get metadata for volume
            vsa_routing_key = None
            snapshot_volume = snapshot.get('volume')
            metadata = snapshot_volume['metadata']
            LOG.debug('Calling Compute Node for user initiated snapshots')
            # Prefer the acting vdisk owner; fall back to the VSA IP.
            vsa_ip = self._get_volume_metadata_value(metadata,
                                                     'acting_vdisk_owner')
            if vsa_ip is None:
                vsa_ip = self._get_volume_metadata_value(metadata, 'vsa_ip')

            LOG.debug("Create snap on compute vsa %s", vsa_ip)
            # Routing key is the VSA IP with the dots stripped.
            if vsa_ip:
                vsa_routing_key = vsa_ip.replace('.', '')

            message_body = {}
            # Set the parent volume id
            message_body['vdisk_id_str'] = (
                util.get_guid_with_curly_brackets(
                    snapshot['volume_id']))
            # Set the snapshot details
            message_body['snapshot_id_str'] = (
                util.get_guid_with_curly_brackets(snapshot['id']))
            message_body['snapshot_name'] = snapshot['name']

            if workflow_snap == 1:
                message_body['workflow_snapshot'] = 1
            else:
                message_body['user_initiated'] = 1

            if last_in_eds_seq is not None:
                message_body['last_in_eds_seq'] = last_in_eds_seq

            # send message to compute node
            util.message_compute_plane(
                vsa_routing_key,
                'hyperscale.storage.nfs.volume.snapshot.create',
                **message_body)

            # Set the snapshot type to either workflow or user initiated
            # snapshot in metatdata for filtering purpose
            if workflow_snap:
                LOG.debug('__help request for WORKFLOW snapshot')
                meta['TYPE'] = TYPE_WORKFLOW_SNAP
                meta['status'] = 'creating'
                meta['datanode_ip'] = self.datanode_ip
            else:
                LOG.debug('__help request for MANUAL snapshot')
                meta['TYPE'] = TYPE_USER_SNAP
                meta['status'] = 'creating'
                meta['datanode_ip'] = self.datanode_ip

            if workflow_id is not None:
                message_body = {}
                message_body['workflow_id'] = workflow_id
                message_body['skip_upto_sentinel'] = (
                    'hyperscale.vdisk.failback.snapmark_sentinel')

                # send message to controller node
                util.message_controller(
                    constants.HS_CONTROLLER_EXCH,
                    'hyperscale.controller.execute.workflow',
                    **message_body)

        except (exception.VolumeNotFound,
                exception.UnableToExecuteHyperScaleCmd,
                exception.UnableToProcessHyperScaleCmdOutput):
            with excutils.save_and_reraise_exception():
                LOG.exception('Exception in create snapshot')

        model_update['metadata'] = meta
        return model_update
Example #6
0
    def create_snapshot(self, snapshot):
        """Create a snapshot.

        Three distinct paths are taken depending on snapshot metadata:

        1. Metadata contains 'SNAPSHOT-COOKIE': an episodic snapshot —
           the owning data node is asked to update the snapshot and the
           model update is returned immediately.
        2. Otherwise, if util.episodic_snap() flags the metadata for
           update, only the snapshot metadata is refreshed and returned.
        3. Otherwise a user- (or workflow-) initiated snapshot: a create
           request is sent to the compute node owning the volume, and,
           when a workflow id is present, a workflow-execution message
           is additionally sent to the controller.

        :param snapshot: snapshot object; the code reads 'volume',
            'metadata', 'id', 'volume_id' and 'name' from it.
        :returns: model update dict containing 'metadata' (and possibly
            'volume_size' for episodic snapshots).
        :raises: re-raises VolumeNotFound, UnableToExecuteHyperScaleCmd
            and UnableToProcessHyperScaleCmdOutput from the messaging
            helpers after logging.
        """

        LOG.debug("Create Snapshot %s", snapshot['volume_id'])
        workflow_id = None
        last_in_eds_seq = None
        model_update = {}
        rt_key = None

        # Get metadata for volume
        snapshot_volume = snapshot.get('volume')
        metadata = snapshot_volume['metadata']
        # Route data-node messages to the volume's current owner,
        # falling back to the default data-node routing key.
        rt_key = self._get_volume_metadata_value(metadata,
                                                 'current_dn_owner')
        if rt_key is None:
            rt_key = self.dn_routing_key

        # Check for episodic based on metadata key
        workflow_snap = 0

        meta = snapshot.get('metadata')
        LOG.debug('Snapshot metatadata %s', meta)
        if 'SNAPSHOT-COOKIE' in meta.keys():
            snapsize = meta['SIZE']

            # Call DataNode for episodic snapshots
            LOG.debug('Calling Data Node for episodic snapshots')
            message_body = {}
            message_body['snapshot_id'] = (
                util.get_guid_with_curly_brackets(snapshot['id']))
            message_body['volume_guid'] = (
                util.get_guid_with_curly_brackets(
                    snapshot['volume_id']))
            message_body['snapshot_cookie'] = meta['SNAPSHOT-COOKIE']

            try:
                # send message to data node
                util.message_data_plane(
                    rt_key,
                    'hyperscale.storage.dm.volume.snapshot.update',
                    **message_body)

                # Update size via cinder api
                # NOTE(review): snapsize comes from meta['SIZE'] and is
                # dereferenced via '.value' — assumes a non-str object;
                # confirm against the producer of this metadata.
                if snapsize is not None:
                    model_update['volume_size'] = snapsize.value

                # Set the episodic type metatdata for filtering purpose
                meta['TYPE'] = TYPE_EPISODIC_SNAP
                meta['status'] = 'available'
                meta['datanode_ip'] = self.datanode_ip

            except (exception.VolumeNotFound,
                    v_exception.UnableToExecuteHyperScaleCmd,
                    v_exception.UnableToProcessHyperScaleCmdOutput):
                with excutils.save_and_reraise_exception():
                    LOG.exception('Exception in create snapshot')

            model_update['metadata'] = meta
            return model_update

        else:
            # Not cookie-based; let the helper decide whether this is a
            # metadata-only episodic update.
            out_meta = util.episodic_snap(meta)
            if out_meta.get('update'):
                meta['TYPE'] = out_meta.get('TYPE')
                meta['status'] = out_meta.get('status')
                meta['datanode_ip'] = self.datanode_ip
                model_update['metadata'] = meta
                return model_update

        if 'workflow_id' in meta.keys():
            workflow_snap = 1
            workflow_id = meta['workflow_id']

        if 'monitor_snap' in meta.keys():
            # SNAP_RESTORE_RF marks the restore point; any other value
            # flags this as the last snapshot in the EDS sequence.
            if int(meta['monitor_snap']) == constants.SNAP_RESTORE_RF:
                last_in_eds_seq = 0
            else:
                last_in_eds_seq = 1

        # If code falls through here then it mean its user initiated snapshots
        try:
            # Get metadata for volume
            vsa_routing_key = None
            snapshot_volume = snapshot.get('volume')
            metadata = snapshot_volume['metadata']
            LOG.debug('Calling Compute Node for user initiated snapshots')
            # Prefer the acting vdisk owner; fall back to the VSA IP.
            vsa_ip = self._get_volume_metadata_value(metadata,
                                                     'acting_vdisk_owner')
            if vsa_ip is None:
                vsa_ip = self._get_volume_metadata_value(metadata, 'vsa_ip')

            LOG.debug("Create snap on compute vsa %s", vsa_ip)
            # Routing key is the VSA IP with the dots stripped.
            if vsa_ip:
                vsa_routing_key = vsa_ip.replace('.', '')

            message_body = {}
            # Set the parent volume id
            message_body['vdisk_id_str'] = (
                util.get_guid_with_curly_brackets(
                    snapshot['volume_id']))
            # Set the snapshot details
            message_body['snapshot_id_str'] = (
                util.get_guid_with_curly_brackets(snapshot['id']))
            message_body['snapshot_name'] = snapshot['name']

            if workflow_snap == 1:
                message_body['workflow_snapshot'] = 1
            else:
                message_body['user_initiated'] = 1

            if last_in_eds_seq is not None:
                message_body['last_in_eds_seq'] = last_in_eds_seq

            # send message to compute node
            util.message_compute_plane(
                vsa_routing_key,
                'hyperscale.storage.nfs.volume.snapshot.create',
                **message_body)

            # Set the snapshot type to either workflow or user initiated
            # snapshot in metatdata for filtering purpose
            if workflow_snap:
                LOG.debug('__help request for WORKFLOW snapshot')
                meta['TYPE'] = TYPE_WORKFLOW_SNAP
                meta['status'] = 'creating'
                meta['datanode_ip'] = self.datanode_ip
            else:
                LOG.debug('__help request for MANUAL snapshot')
                meta['TYPE'] = TYPE_USER_SNAP
                meta['status'] = 'creating'
                meta['datanode_ip'] = self.datanode_ip

            if workflow_id is not None:
                message_body = {}
                message_body['workflow_id'] = workflow_id
                message_body['skip_upto_sentinel'] = (
                    'hyperscale.vdisk.failback.snapmark_sentinel')

                # send message to controller node
                util.message_controller(
                    constants.HS_CONTROLLER_EXCH,
                    'hyperscale.controller.execute.workflow',
                    **message_body)

        except (exception.VolumeNotFound,
                v_exception.UnableToExecuteHyperScaleCmd,
                v_exception.UnableToProcessHyperScaleCmdOutput):
            with excutils.save_and_reraise_exception():
                LOG.exception('Exception in create snapshot')

        model_update['metadata'] = meta
        return model_update