Example #1
    def provision(self):
        self.status = ClusterStatus.PROVISIONING

        base_url = getApiUrl()
        log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
        girder_token = get_task_token()['_id']

        profile, secret_key = _get_profile(self.cluster['profileId'])

        playbook = get_property(
            'config.provision.spec', self.cluster,
            default=self.DEFAULT_PLAYBOOK)
        playbook_params = get_property(
            'config.provision.params', self.cluster, default={})
        provision_ssh_user = get_property(
            'config.provision.ssh.user', self.cluster, default='ubuntu')
        playbook_params['cluster_state'] = ClusterStatus.RUNNING
        playbook_params['ansible_ssh_user'] = provision_ssh_user

        cumulus.ansible.tasks.cluster.provision_cluster \
            .delay(playbook,
                   self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
                   profile, secret_key, playbook_params,
                   girder_token, log_write_url, ClusterStatus.RUNNING)

        return self.cluster
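The `get_property` calls above read nested keys out of the cluster document by dotted path, with a fallback default. A minimal sketch of how such a helper could be written (an illustrative reimplementation, not the actual cumulus helper):

def get_property(path, obj, default=None):
    """Walk a dotted path (e.g. 'config.provision.spec') through
    nested dicts, returning `default` if any key is missing."""
    current = obj
    for key in path.split('.'):
        if not isinstance(current, dict) or key not in current:
            return default
        current = current[key]
    return current

# get_property('config.provision.ssh.user',
#              {'config': {'provision': {}}}, default='ubuntu')
# -> 'ubuntu'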
Example #2
File: volume.py Project: Kitware/cumulus
    def create(self, params):
        body = getBodyJson()

        self.requireParams(['name', 'type', 'size', 'profileId'], body)

        if not VolumeType.is_valid_type(body['type']):
            raise RestException('Invalid volume type.', code=400)

        profile_id = parse('profileId').find(body)
        if not profile_id:
            raise RestException('A profile id must be provided', 400)

        profile_id = profile_id[0].value

        profile, secret_key = _get_profile(profile_id)

        if not profile:
            raise RestException('Invalid profile', 400)

        if 'zone' in body:
            zone = body['zone']
        else:
            zone = profile['availabilityZone']

        volume = self._create_ebs(body, zone)

        cherrypy.response.status = 201
        cherrypy.response.headers['Location'] = '/volumes/%s' % volume['_id']

        return self._model.filter(volume, getCurrentUser())
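The `parse('profileId').find(body)` lookup above comes from a jsonpath library (the import is assumed to be jsonpath_rw here): `find()` returns a list of match objects, and `.value` on the first match yields the data. A small self-contained illustration with made-up values:

from jsonpath_rw import parse  # assumed import; pip install jsonpath-rw

body = {'name': 'vol1', 'type': 'ebs', 'size': 10,
        'profileId': 'abc123'}           # made-up request body

matches = parse('profileId').find(body)  # a list of matches
if not matches:
    raise ValueError('A profile id must be provided')
profile_id = matches[0].value            # -> 'abc123'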
Example #3
    def provision(self):
        self.status = ClusterStatus.PROVISIONING

        base_url = cumulus.config.girder.baseUrl
        log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
        girder_token = get_task_token()['_id']

        profile, secret_key = _get_profile(self.cluster['profileId'])

        playbook = get_property('config.provision.spec',
                                self.cluster,
                                default=self.DEFAULT_PLAYBOOK)
        playbook_params = get_property('config.provision.params',
                                       self.cluster,
                                       default={})
        provision_ssh_user = get_property('config.provision.ssh.user',
                                          self.cluster,
                                          default='ubuntu')
        playbook_params['cluster_state'] = ClusterStatus.RUNNING
        playbook_params['ansible_ssh_user'] = provision_ssh_user

        cumulus.ansible.tasks.cluster.provision_cluster \
            .delay(playbook,
                   self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
                   profile, secret_key, playbook_params,
                   girder_token, log_write_url, ClusterStatus.RUNNING)

        return self.cluster
Example #4
File: volume.py Project: psavery/cumulus
    def create(self, params):
        body = getBodyJson()

        self.requireParams(['name', 'type', 'size', 'profileId'], body)

        if not VolumeType.is_valid_type(body['type']):
            raise RestException('Invalid volume type.', code=400)

        profile_id = parse('profileId').find(body)
        if not profile_id:
            raise RestException('A profile id must be provided', 400)

        profile_id = profile_id[0].value

        profile, secret_key = _get_profile(profile_id)

        if not profile:
            raise RestException('Invalid profile', 400)

        if 'zone' in body:
            zone = body['zone']
        else:
            zone = profile['availabilityZone']

        volume = self._create_ebs(body, zone)

        cherrypy.response.status = 201
        cherrypy.response.headers['Location'] = '/volumes/%s' % volume['_id']

        return self._model.filter(volume, getCurrentUser())
Example #5
    def detach(self, volume, params):

        profile_id = parse('profileId').find(volume)[0].value
        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': cumulus.config.girder.baseUrl,
            'girder_token': get_task_token()['_id']
        }

        log_write_url = '%s/volumes/%s/log' % (cumulus.config.girder.baseUrl,
                                               volume['_id'])

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)
        if aws_volume is None or aws_volume['state'] != VolumeState.INUSE:
            raise RestException('This volume is not attached '
                                'to a cluster', 400)

        if 'clusterId' not in volume:
            raise RestException('clusterId is not set on this volume!', 400)

        if 'path' not in volume:
            raise RestException('path is not set on this volume!', 400)

        cluster = ModelImporter.model('cluster',
                                      'cumulus').load(volume['clusterId'],
                                                      user=getCurrentUser(),
                                                      level=AccessType.ADMIN)
        master = p.get_master_instance(cluster['_id'])
        if master['state'] != InstanceState.RUNNING:
            raise RestException('Master instance is not running!', 400)
        user = getCurrentUser()
        cluster = ModelImporter.model('cluster',
                                      'cumulus').filter(cluster,
                                                        user,
                                                        passphrase=False)
        cumulus.ansible.tasks.volume.detach_volume\
            .delay(profile, cluster, master,
                   self._model.filter(volume, user),
                   secret_key, log_write_url, girder_callback_info)

        volume['status'] = VolumeState.DETACHING
        volume = self._model.update_volume(user, volume)

        return self._model.filter(volume, user)
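A note on the `CloudProvider(dict(secretAccessKey=secret_key, **profile))` construction used in detach() and the other volume methods: it builds a new dict that merges the secret key into a copy of the profile document. A standalone illustration with made-up values:

profile = {'accessKeyId': 'AKIAEXAMPLE',   # made-up profile document
           'regionName': 'us-east-1'}
secret_key = 'not-a-real-secret'

config = dict(secretAccessKey=secret_key, **profile)
# config == {'secretAccessKey': 'not-a-real-secret',
#            'accessKeyId': 'AKIAEXAMPLE',
#            'regionName': 'us-east-1'}
# Caveat: dict() raises TypeError on duplicate keywords, so a profile
# that already contains 'secretAccessKey' would make this call fail.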
Example #6
    def start(self, request_body):
        """
        Adapters may implement this if they support a start operation.
        """

        self.status = ClusterStatus.LAUNCHING

        self.cluster['config'].setdefault('provision', {})\
            .setdefault('params', {}).update(request_body)
        self.cluster = self.model('cluster', 'cumulus').save(self.cluster)

        base_url = cumulus.config.girder.baseUrl
        log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
        girder_token = get_task_token()['_id']
        profile, secret_key = _get_profile(self.cluster['profileId'])

        # Launch
        launch_playbook = get_property('config.launch.spec',
                                       self.cluster,
                                       default=self.DEFAULT_PLAYBOOK)
        launch_playbook_params = get_property('config.launch.params',
                                              self.cluster,
                                              default={})
        launch_playbook_params['cluster_state'] = ClusterStatus.RUNNING

        # Provision
        provision_playbook = get_property('config.provision.spec',
                                          self.cluster,
                                          default='gridengine/site')
        provision_playbook_params = get_property('config.provision.params',
                                                 self.cluster,
                                                 default={})
        provision_ssh_user = get_property('config.provision.ssh.user',
                                          self.cluster,
                                          default='ubuntu')
        provision_playbook_params['ansible_ssh_user'] = provision_ssh_user
        provision_playbook_params['cluster_state'] = ClusterStatus.RUNNING

        cumulus.ansible.tasks.cluster.start_cluster \
            .delay(launch_playbook,
                   # provision playbook
                   provision_playbook,
                   self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
                   profile, secret_key,
                   launch_playbook_params, provision_playbook_params,
                   girder_token, log_write_url)
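The chained `setdefault` at the top of start() is worth unpacking: each call returns the existing nested dict, inserting an empty one first if the key is missing, so the final `update()` merges the request body into `config.provision.params` without clobbering sibling keys. A standalone illustration:

cluster = {'config': {'launch': {'spec': 'ec2'}}}  # made-up document
request_body = {'node_count': 4}

cluster['config'].setdefault('provision', {}) \
    .setdefault('params', {}).update(request_body)

# cluster == {'config': {'launch': {'spec': 'ec2'},
#                        'provision': {'params': {'node_count': 4}}}}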
Example #7
    def attach(self, volume, cluster, params):
        body = getBodyJson()

        self.requireParams(['path'], body)
        path = body['path']

        profile_id = parse('profileId').find(volume)[0].value
        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': cumulus.config.girder.baseUrl,
            'girder_token': get_task_token()['_id']
        }
        log_write_url = '%s/volumes/%s/log' % (cumulus.config.girder.baseUrl,
                                               volume['_id'])

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)

        # If volume exists it needs to be available to be attached. If
        # it doesn't exist it will be created as part of the attach
        # playbook.
        if aws_volume is not None and \
           aws_volume['state'] != VolumeState.AVAILABLE:
            raise RestException(
                'This volume is not available to attach '
                'to a cluster', 400)

        master = p.get_master_instance(cluster['_id'])
        if master['state'] != InstanceState.RUNNING:
            raise RestException('Master instance is not running!', 400)

        cluster = ModelImporter.model('cluster',
                                      'cumulus').filter(cluster,
                                                        getCurrentUser(),
                                                        passphrase=False)
        cumulus.ansible.tasks.volume.attach_volume\
            .delay(profile, cluster, master,
                   self._model.filter(volume, getCurrentUser()), path,
                   secret_key, log_write_url, girder_callback_info)

        volume['status'] = VolumeState.ATTACHING
        volume = self._model.update_volume(getCurrentUser(), volume)

        return self._model.filter(volume, getCurrentUser())
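All of the `.delay(...)` calls in these examples hand the slow Ansible work to a Celery worker so the REST handler can return immediately with the updated status. A minimal sketch of the pattern (hypothetical task body and broker URL, not the real cumulus.ansible.tasks.volume.attach_volume signature):

from celery import Celery

app = Celery('tasks', broker='amqp://localhost')  # broker is assumed

@app.task
def attach_volume(profile, cluster, master, volume, path):
    # The long-running work (running the Ansible playbook) happens
    # here, inside a worker process.
    ...

# In the request handler, queue the task and return right away:
# attach_volume.delay(profile, cluster, master, volume, '/mnt/data')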
Example #8
File: volume.py Project: psavery/cumulus
    def delete(self, volume, params):
        if 'clusterId' in volume:
            raise RestException('Unable to delete attached volume')

        # If the volume is in the created or error state and has no
        # ec2 volume id associated with it, we can just delete it
        if volume['status'] in (VolumeState.CREATED, VolumeState.ERROR):
            if 'id' in volume['ec2'] and volume['ec2']['id'] is not None:
                raise RestException('Unable to delete volume, it is '
                                    'associated with an ec2 volume %s' %
                                    volume['ec2']['id'])

            self._model.remove(volume)
            return None

        log_write_url = '%s/volumes/%s/log' % (cumulus.config.girder.baseUrl,
                                               volume['_id'])

        # Call EC2 to delete volume
        profile_id = parse('profileId').find(volume)[0].value

        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': cumulus.config.girder.baseUrl,
            'girder_token': get_task_token()['_id']
        }

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)
        if aws_volume['state'] != VolumeState.AVAILABLE:
            raise RestException(
                'Volume must be in an "%s" status to be deleted' %
                VolumeState.AVAILABLE, 400)

        user = getCurrentUser()
        cumulus.ansible.tasks.volume.delete_volume\
            .delay(profile, self._model.filter(volume, user),
                   secret_key, log_write_url, girder_callback_info)

        volume['status'] = VolumeState.DELETING
        volume = self._model.update_volume(user, volume)

        return self._model.filter(volume, user)
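The `VolumeState` values compared against throughout these examples behave like plain string constants tracking EC2-style volume states. An illustrative sketch of such a class (the concrete strings are assumptions; the actual cumulus definition may differ):

class VolumeState(object):
    # Assumed values mirroring EC2 volume states.
    CREATED = 'created'
    AVAILABLE = 'available'
    ATTACHING = 'attaching'
    INUSE = 'in-use'
    DETACHING = 'detaching'
    DELETING = 'deleting'
    ERROR = 'error'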
Example #9
File: volume.py Project: Kitware/cumulus
    def detach(self, volume, params):

        profile_id = parse('profileId').find(volume)[0].value
        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': getApiUrl(),
            'girder_token': get_task_token()['_id']}

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)
        if aws_volume is None or aws_volume['state'] != VolumeState.INUSE:
            raise RestException('This volume is not attached '
                                'to a cluster',
                                400)

        if 'clusterId' not in volume:
            raise RestException('clusterId is not set on this volume!', 400)

        if 'path' not in volume:
            raise RestException('path is not set on this volume!', 400)

        cluster = self.model('cluster', 'cumulus').load(volume['clusterId'],
                                                        user=getCurrentUser(),
                                                        level=AccessType.ADMIN)
        master = p.get_master_instance(cluster['_id'])
        if master['state'] != InstanceState.RUNNING:
            raise RestException('Master instance is not running!',
                                400)
        user = getCurrentUser()
        cluster = self.model('cluster', 'cumulus').filter(
            cluster, user, passphrase=False)
        cumulus.ansible.tasks.volume.detach_volume\
            .delay(profile, cluster, master,
                   self._model.filter(volume, user),
                   secret_key, girder_callback_info)

        volume['status'] = VolumeState.DETACHING
        volume = self._model.update_volume(user, volume)

        return self._model.filter(volume, user)
Example #10
File: volume.py Project: Kitware/cumulus
    def attach(self, volume, cluster, params):
        body = getBodyJson()

        self.requireParams(['path'], body)
        path = body['path']

        profile_id = parse('profileId').find(volume)[0].value
        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': getApiUrl(),
            'girder_token': get_task_token()['_id']}

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)

        # If volume exists it needs to be available to be attached. If
        # it doesn't exist it will be created as part of the attach
        # playbook.
        if aws_volume is not None and \
           aws_volume['state'] != VolumeState.AVAILABLE:
            raise RestException('This volume is not available to attach '
                                'to a cluster',
                                400)

        master = p.get_master_instance(cluster['_id'])
        if master['state'] != InstanceState.RUNNING:
            raise RestException('Master instance is not running!',
                                400)

        cluster = self.model('cluster', 'cumulus').filter(
            cluster, getCurrentUser(), passphrase=False)
        cumulus.ansible.tasks.volume.attach_volume\
            .delay(profile, cluster, master,
                   self._model.filter(volume, getCurrentUser()), path,
                   secret_key, girder_callback_info)

        volume['status'] = VolumeState.ATTACHING
        volume = self._model.update_volume(getCurrentUser(), volume)

        return self._model.filter(volume, getCurrentUser())
Example #11
    def terminate(self):
        self.status = ClusterStatus.TERMINATING

        base_url = getApiUrl()
        log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
        girder_token = get_task_token()['_id']

        profile, secret_key = _get_profile(self.cluster['profileId'])

        playbook = get_property(
            'config.launch.spec', self.cluster, default=self.DEFAULT_PLAYBOOK)
        playbook_params = get_property(
            'config.launch.params', self.cluster, default={})
        playbook_params['cluster_state'] = 'absent'

        cumulus.ansible.tasks.cluster.terminate_cluster \
            .delay(playbook,
                   self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
                   profile, secret_key, playbook_params, girder_token,
                   log_write_url, ClusterStatus.TERMINATED)
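Similarly, `ClusterStatus` supplies the lifecycle states the adapter methods move a cluster through; note that terminate() separately passes the literal 'absent' as the playbook's cluster_state. An illustrative sketch (the concrete strings are assumptions):

class ClusterStatus(object):
    # Assumed lifecycle values; the real cumulus class may also
    # enforce valid state transitions.
    CREATED = 'created'
    LAUNCHING = 'launching'
    RUNNING = 'running'
    PROVISIONING = 'provisioning'
    TERMINATING = 'terminating'
    TERMINATED = 'terminated'
    ERROR = 'error'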
Example #12
File: volume.py Project: Kitware/cumulus
    def delete(self, volume, params):
        if 'clusterId' in volume:
            raise RestException('Unable to delete attached volume')

        # If the volume is in the created state and has no ec2 volume
        # id associated with it, we can just delete it
        if volume['status'] == VolumeState.CREATED:
            if 'id' in volume['ec2'] and volume['ec2']['id'] is not None:
                raise RestException(
                    'Unable to delete volume, it is '
                    'associated with an ec2 volume %s' % volume['ec2']['id'])

            self._model.remove(volume)
            return None

        # Call EC2 to delete volume
        profile_id = parse('profileId').find(volume)[0].value

        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': getApiUrl(),
            'girder_token': get_task_token()['_id']}

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)
        if aws_volume['state'] != VolumeState.AVAILABLE:
            raise RestException(
                'Volume must be in an "%s" status to be deleted'
                % VolumeState.AVAILABLE, 400)

        user = getCurrentUser()
        cumulus.ansible.tasks.volume.delete_volume\
            .delay(profile, self._model.filter(volume, user),
                   secret_key, girder_callback_info)

        volume['status'] = VolumeState.DELETING
        volume = self._model.update_volume(user, volume)

        return self._model.filter(volume, user)
Example #13
    def start(self, request_body):
        """
        Adapters may implement this if they support a start operation.
        """

        self.status = ClusterStatus.LAUNCHING

        self.cluster['config'].setdefault('provision', {})\
            .setdefault('params', {}).update(request_body)
        self.cluster = self.model('cluster', 'cumulus').save(self.cluster)

        base_url = getApiUrl()
        log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
        girder_token = get_task_token()['_id']
        profile, secret_key = _get_profile(self.cluster['profileId'])

        # Launch
        launch_playbook = get_property(
            'config.launch.spec', self.cluster, default=self.DEFAULT_PLAYBOOK)
        launch_playbook_params = get_property(
            'config.launch.params', self.cluster, default={})
        launch_playbook_params['cluster_state'] = ClusterStatus.RUNNING

        # Provision
        provision_playbook = get_property(
            'config.provision.spec', self.cluster, default='gridengine/site')
        provision_playbook_params = get_property(
            'config.provision.params', self.cluster, default={})
        provision_ssh_user = get_property(
            'config.provision.ssh.user', self.cluster, default='ubuntu')
        provision_playbook_params['ansible_ssh_user'] = provision_ssh_user
        provision_playbook_params['cluster_state'] = ClusterStatus.RUNNING

        cumulus.ansible.tasks.cluster.start_cluster \
            .delay(launch_playbook,
                   # provision playbook
                   provision_playbook,
                   self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
                   profile, secret_key,
                   launch_playbook_params, provision_playbook_params,
                   girder_token, log_write_url)
Example #14
    def terminate(self):
        self.status = ClusterStatus.TERMINATING

        base_url = cumulus.config.girder.baseUrl
        log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
        girder_token = get_task_token()['_id']

        profile, secret_key = _get_profile(self.cluster['profileId'])

        playbook = get_property('config.launch.spec',
                                self.cluster,
                                default=self.DEFAULT_PLAYBOOK)
        playbook_params = get_property('config.launch.params',
                                       self.cluster,
                                       default={})
        playbook_params['cluster_state'] = 'absent'

        cumulus.ansible.tasks.cluster.terminate_cluster \
            .delay(playbook,
                   self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
                   profile, secret_key, playbook_params, girder_token,
                   log_write_url, ClusterStatus.TERMINATED)