Example 1
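These snippets are REST endpoint handlers taken from the cumulus Girder plugin; they are shown without their class definition or imports. A minimal sketch of the imports they appear to rely on, inferred from the names used in the code (module paths are assumptions, not confirmed against the project):

    # Assumed imports for the snippets below; module paths are best guesses
    # inferred from the identifiers used in the code, not project-verified.
    from jsonpath_rw import parse
    from girder.api.rest import (RestException, getApiUrl, getBodyJson,
                                 getCurrentUser)
    from girder.constants import AccessType
    from girder.utility.model_importer import ModelImporter
    import cumulus
    import cumulus.ansible.tasks.volume
    # CloudProvider, VolumeState, InstanceState, get_task_token and
    # _get_profile are cumulus-specific helpers whose locations are not
    # shown in these snippets.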
    def detach(self, volume, params):

        # Resolve the AWS profile referenced by the volume document.
        profile_id = parse('profileId').find(volume)[0].value
        profile, secret_key = _get_profile(profile_id)

        # Details the asynchronous task needs to call back into Girder.
        girder_callback_info = {
            'girder_api_url': cumulus.config.girder.baseUrl,
            'girder_token': get_task_token()['_id']
        }

        log_write_url = '%s/volumes/%s/log' % (cumulus.config.girder.baseUrl,
                                               volume['_id'])

        # Instantiate the cloud provider with the profile's credentials.
        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        # Only a volume EC2 reports as in use can be detached.
        aws_volume = p.get_volume(volume)
        if aws_volume is None or aws_volume['state'] != VolumeState.INUSE:
            raise RestException('This volume is not attached '
                                'to a cluster', 400)

        if 'clusterId' not in volume:
            raise RestException('clusterId is not set on this volume!', 400)

        if 'path' not in volume:
            raise RestException('path is not set on this volume!', 400)

        cluster = ModelImporter.model('cluster',
                                      'cumulus').load(volume['clusterId'],
                                                      user=getCurrentUser(),
                                                      level=AccessType.ADMIN)
        master = p.get_master_instance(cluster['_id'])
        if master['state'] != InstanceState.RUNNING:
            raise RestException('Master instance is not running!', 400)
        user = getCurrentUser()
        cluster = ModelImporter.model('cluster',
                                      'cumulus').filter(cluster,
                                                        user,
                                                        passphrase=False)
        # Queue the detach playbook asynchronously via Celery.
        cumulus.ansible.tasks.volume.detach_volume\
            .delay(profile, cluster, master,
                   self._model.filter(volume, user),
                   secret_key, log_write_url, girder_callback_info)

        volume['status'] = VolumeState.DETACHING
        volume = self._model.update_volume(user, volume)

        return self._model.filter(volume, user)
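The detach_volume call above uses Celery's .delay idiom: the HTTP request returns as soon as the volume is marked detaching, and the playbook runs later in a worker. A minimal sketch of that pattern, with placeholder broker and task body rather than cumulus code:

    # Minimal Celery sketch of the dispatch pattern used above; broker URL,
    # task name and body are placeholders, not the cumulus implementation.
    from celery import Celery

    app = Celery('tasks', broker='redis://localhost:6379/0')

    @app.task
    def detach_volume(profile, cluster, master, volume,
                      secret_key, log_write_url, girder_callback_info):
        # In cumulus this step runs an Ansible playbook, then uses
        # girder_callback_info to report the final volume state to Girder.
        pass

    # Callers queue the work without blocking on the result:
    #   detach_volume.delay(profile, cluster, master, volume, secret_key,
    #                       log_write_url, girder_callback_info)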
Example 2
    def attach(self, volume, cluster, params):
        # The request body must supply the mount path for the volume.
        body = getBodyJson()

        self.requireParams(['path'], body)
        path = body['path']

        profile_id = parse('profileId').find(volume)[0].value
        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': cumulus.config.girder.baseUrl,
            'girder_token': get_task_token()['_id']
        }
        log_write_url = '%s/volumes/%s/log' % (cumulus.config.girder.baseUrl,
                                               volume['_id'])

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)

        # If the volume exists it must be available to be attached. If it
        # doesn't exist it will be created as part of the attach playbook.
        if aws_volume is not None and \
           aws_volume['state'] != VolumeState.AVAILABLE:
            raise RestException(
                'This volume is not available to attach '
                'to a cluster', 400)

        master = p.get_master_instance(cluster['_id'])
        if master['state'] != InstanceState.RUNNING:
            raise RestException('Master instance is not running!', 400)

        cluster = ModelImporter.model('cluster',
                                      'cumulus').filter(cluster,
                                                        getCurrentUser(),
                                                        passphrase=False)
        cumulus.ansible.tasks.volume.attach_volume\
            .delay(profile, cluster, master,
                   self._model.filter(volume, getCurrentUser()), path,
                   secret_key, log_write_url, girder_callback_info)

        volume['status'] = VolumeState.ATTACHING
        volume = self._model.update_volume(getCurrentUser(), volume)

        return self._model.filter(volume, getCurrentUser())
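From the client side, attach expects a JSON body carrying the mount path. A hypothetical call using girder-client; the route is inferred from the handler's volume and cluster parameters and may not match the plugin's actual URL layout:

    # Hypothetical client-side call; the endpoint route and ids are
    # illustrative, not taken from the cumulus plugin's route table.
    from girder_client import GirderClient

    gc = GirderClient(apiUrl='https://girder.example.com/api/v1')
    gc.authenticate('user', 'password')

    volume_id = '<volume id>'
    cluster_id = '<cluster id>'
    gc.put('volumes/%s/clusters/%s/attach' % (volume_id, cluster_id),
           json={'path': '/mnt/data'})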
Example 3
    def delete(self, volume, params):
        if 'clusterId' in volume:
            raise RestException('Unable to delete attached volume')

        # If the volume is in the created or error state and has no EC2
        # volume id associated with it, we can simply delete it.
        if volume['status'] in (VolumeState.CREATED, VolumeState.ERROR):
            if volume['ec2'].get('id') is not None:
                raise RestException('Unable to delete volume, it is '
                                    'associated with an ec2 volume %s' %
                                    volume['ec2']['id'])

            self._model.remove(volume)
            return None

        log_write_url = '%s/volumes/%s/log' % (cumulus.config.girder.baseUrl,
                                               volume['_id'])

        # Otherwise the EC2 volume must be deleted; resolve the profile first.
        profile_id = parse('profileId').find(volume)[0].value

        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': cumulus.config.girder.baseUrl,
            'girder_token': get_task_token()['_id']
        }

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)
        if aws_volume is None or aws_volume['state'] != VolumeState.AVAILABLE:
            raise RestException(
                'Volume must be in an "%s" status to be deleted' %
                VolumeState.AVAILABLE, 400)

        user = getCurrentUser()
        cumulus.ansible.tasks.volume.delete_volume\
            .delay(profile, self._model.filter(volume, user),
                   secret_key, log_write_url, girder_callback_info)

        volume['status'] = VolumeState.DELETING
        volume = self._model.update_volume(user, volume)

        return self._model.filter(volume, user)
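The handlers branch on VolumeState and InstanceState constants. Minimal stand-ins consistent with how the snippets use them; the real definitions live in cumulus, and the exact string values here are assumptions modeled on EC2's state names:

    # Stand-in constants covering the states the snippets check; values are
    # modeled on EC2 volume/instance states, not copied from cumulus.
    class VolumeState(object):
        CREATED = 'created'
        AVAILABLE = 'available'
        INUSE = 'in-use'
        ATTACHING = 'attaching'
        DETACHING = 'detaching'
        DELETING = 'deleting'
        ERROR = 'error'

    class InstanceState(object):
        RUNNING = 'running'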
Example 4
    def detach(self, volume, params):

        profile_id = parse('profileId').find(volume)[0].value
        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': getApiUrl(),
            'girder_token': get_task_token()['_id']}

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)
        if aws_volume is None or aws_volume['state'] != VolumeState.INUSE:
            raise RestException('This volume is not attached '
                                'to a cluster',
                                400)

        if 'clusterId' not in volume:
            raise RestException('clusterId is not set on this volume!', 400)

        if 'path' not in volume:
            raise RestException('path is not set on this volume!', 400)

        cluster = self.model('cluster', 'cumulus').load(volume['clusterId'],
                                                        user=getCurrentUser(),
                                                        level=AccessType.ADMIN)
        master = p.get_master_instance(cluster['_id'])
        if master['state'] != InstanceState.RUNNING:
            raise RestException('Master instance is not running!',
                                400)
        user = getCurrentUser()
        cluster = self.model('cluster', 'cumulus').filter(
            cluster, user, passphrase=False)
        cumulus.ansible.tasks.volume.detach_volume\
            .delay(profile, cluster, master,
                   self._model.filter(volume, user),
                   secret_key, girder_callback_info)

        volume['status'] = VolumeState.DETACHING
        volume = self._model.update_volume(user, volume)

        return self._model.filter(volume, user)
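Every handler resolves an AWS profile through _get_profile, which is not shown in these snippets. A hypothetical shape for it, assuming profiles are stored as a Girder model holding the AWS secret; the model identifier and field layout are guesses:

    # Hypothetical reconstruction of the _get_profile helper; the model
    # identifier and field names are assumptions, not cumulus code.
    def _get_profile(profile_id):
        user = getCurrentUser()
        profile = ModelImporter.model('aws', 'cumulus').load(
            profile_id, user=user, level=AccessType.ADMIN)
        # The secret is split out so CloudProvider can receive it alongside
        # the rest of the profile (see the CloudProvider(...) calls above).
        secret_key = profile.pop('secretAccessKey', None)
        return profile, secret_key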
Example 5
    def attach(self, volume, cluster, params):
        body = getBodyJson()

        self.requireParams(['path'], body)
        path = body['path']

        profile_id = parse('profileId').find(volume)[0].value
        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': getApiUrl(),
            'girder_token': get_task_token()['_id']}

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)

        # If the volume exists it must be available to be attached. If it
        # doesn't exist it will be created as part of the attach playbook.
        if aws_volume is not None and \
           aws_volume['state'] != VolumeState.AVAILABLE:
            raise RestException('This volume is not available to attach '
                                'to a cluster',
                                400)

        master = p.get_master_instance(cluster['_id'])
        if master['state'] != InstanceState.RUNNING:
            raise RestException('Master instance is not running!',
                                400)

        cluster = self.model('cluster', 'cumulus').filter(
            cluster, getCurrentUser(), passphrase=False)
        cumulus.ansible.tasks.volume.attach_volume\
            .delay(profile, cluster, master,
                   self._model.filter(volume, getCurrentUser()), path,
                   secret_key, girder_callback_info)

        volume['status'] = VolumeState.ATTACHING
        volume = self._model.update_volume(getCurrentUser(), volume)

        return self._model.filter(volume, getCurrentUser())
Example 6
    def delete(self, volume, params):
        if 'clusterId' in volume:
            raise RestException('Unable to delete attached volume')

        # If the volume is in the created state and has no EC2 volume id
        # associated with it, we can simply delete it.
        if volume['status'] == VolumeState.CREATED:
            if volume['ec2'].get('id') is not None:
                raise RestException(
                    'Unable to delete volume, it is '
                    'associated with an ec2 volume %s' % volume['ec2']['id'])

            self._model.remove(volume)
            return None

        # Otherwise the EC2 volume must be deleted; resolve the profile first.
        profile_id = parse('profileId').find(volume)[0].value

        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': getApiUrl(),
            'girder_token': get_task_token()['_id']}

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)
        if aws_volume is None or aws_volume['state'] != VolumeState.AVAILABLE:
            raise RestException(
                'Volume must be in an "%s" status to be deleted'
                % VolumeState.AVAILABLE, 400)

        user = getCurrentUser()
        cumulus.ansible.tasks.volume.delete_volume\
            .delay(profile, self._model.filter(volume, user),
                   secret_key, girder_callback_info)

        volume['status'] = VolumeState.DELETING
        volume = self._model.update_volume(user, volume)

        return self._model.filter(volume, user)