    def provision(self):
        self.status = ClusterStatus.PROVISIONING

        base_url = cumulus.config.girder.baseUrl
        log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
        girder_token = get_task_token()['_id']

        profile, secret_key = _get_profile(self.cluster['profileId'])

        playbook = get_property('config.provision.spec',
                                self.cluster,
                                default=self.DEFAULT_PLAYBOOK)
        playbook_params = get_property('config.provision.params',
                                       self.cluster,
                                       default={})
        provision_ssh_user = get_property('config.provision.ssh.user',
                                          self.cluster,
                                          default='ubuntu')
        playbook_params['cluster_state'] = ClusterStatus.RUNNING
        playbook_params['ansible_ssh_user'] = provision_ssh_user

        cumulus.ansible.tasks.cluster.provision_cluster \
            .delay(playbook,
                   self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
                   profile, secret_key, playbook_params,
                   girder_token, log_write_url, ClusterStatus.RUNNING)

        return self.cluster
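
The get_property calls above resolve dotted paths such as 'config.provision.spec' against the cluster document and fall back to a default when any segment is missing. A minimal sketch of such a lookup, assuming a plain nested-dict cluster document (the real cumulus helper may differ):

def get_property(path, doc, default=None):
    """Walk a nested dict by a dotted path, returning default when absent."""
    node = doc
    for key in path.split('.'):
        if not isinstance(node, dict) or key not in node:
            return default
        node = node[key]
    return node

# Hypothetical cluster document, for illustration only.
cluster = {'config': {'provision': {'spec': 'gridengine/site'}}}
print(get_property('config.provision.spec', cluster))                # gridengine/site
print(get_property('config.provision.ssh.user', cluster, 'ubuntu'))  # ubuntu
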
Example 2
    def provision(self):
        self.status = ClusterStatus.PROVISIONING

        base_url = getApiUrl()
        log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
        girder_token = get_task_token()['_id']

        profile, secret_key = _get_profile(self.cluster['profileId'])

        playbook = get_property(
            'config.provision.spec', self.cluster,
            default=self.DEFAULT_PLAYBOOK)
        playbook_params = get_property(
            'config.provision.params', self.cluster, default={})
        provision_ssh_user = get_property(
            'config.provision.ssh.user', self.cluster, default='ubuntu')
        playbook_params['cluster_state'] = ClusterStatus.RUNNING
        playbook_params['ansible_ssh_user'] = provision_ssh_user

        cumulus.ansible.tasks.cluster.provision_cluster \
            .delay(playbook,
                   self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
                   profile, secret_key, playbook_params,
                   girder_token, log_write_url, ClusterStatus.RUNNING)

        return self.cluster
Example 3
def create_profile(user, params):
    body = getBodyJson()
    requireParams([
        'name', 'accessKeyId', 'secretAccessKey', 'regionName',
        'availabilityZone'
    ], body)

    profile_type = body.get('cloudProvider', 'ec2')

    model = ModelImporter.model('aws', 'cumulus')
    profile = model.create_profile(user['_id'], body['name'], profile_type,
                                   body['accessKeyId'],
                                   body['secretAccessKey'], body['regionName'],
                                   body['availabilityZone'],
                                   body.get('publicIPs', False))

    # Now fire off a task to create a key pair for this profile
    try:
        cumulus.aws.ec2.tasks.key.generate_key_pair.delay(
            _filter(profile),
            get_task_token()['_id'])

        cherrypy.response.status = 201
        cherrypy.response.headers['Location'] \
            = '/user/%s/aws/profile/%s' % (str(user['_id']),
                                           str(profile['_id']))

        return model.filter(profile, getCurrentUser())
    except Exception:
        # Remove the profile if an error occurs while firing off the task
        model.remove(profile)
        raise
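
Because requireParams rejects the request unless every listed key is present in the JSON body, a minimal body for this endpoint might look like the sketch below; the values are placeholders, and cloudProvider and publicIPs are optional, defaulting to 'ec2' and False respectively.

# Illustrative request body; all field values are placeholders.
body = {
    'name': 'my-aws-profile',
    'accessKeyId': 'AKIAXXXXXXXXXXXXXXXX',
    'secretAccessKey': '<secret access key>',
    'regionName': 'us-east-1',
    'availabilityZone': 'us-east-1a',
    'cloudProvider': 'ec2',   # optional, defaults to 'ec2'
    'publicIPs': True         # optional, defaults to False
}
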
Example 4
    def submit_job(self, job):
        log_url = '%s/jobs/%s/log' % (getApiUrl(), job['_id'])

        girder_token = get_task_token(self.cluster)['_id']
        cumulus.tasks.job.submit(
            girder_token,
            self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
            job, log_url)
Example 5
    def delete(self):
        super(TraditionClusterAdapter, self).delete()
        # Clean up the key associated with the cluster
        cumulus.ssh.tasks.key.delete_key_pair.delay(
            self._model.filter(self.cluster,
                               getCurrentUser(),
                               passphrase=False),
            get_task_token()['_id'])
Example 6
    def start(self, request_body):
        log_write_url = '%s/clusters/%s/log' % (getApiUrl(),
                                                self.cluster['_id'])

        girder_token = get_task_token(self.cluster)['_id']
        cumulus.tasks.cluster.test_connection \
            .delay(
                self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
                log_write_url=log_write_url, girder_token=girder_token)
Example 7
    def start(self, request_body):
        log_write_url = '%s/clusters/%s/log' % (cumulus.config.girder.baseUrl,
                                                self.cluster['_id'])

        girder_token = get_task_token(self.cluster)['_id']
        cumulus.tasks.cluster.test_connection \
            .delay(
                self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
                log_write_url=log_write_url, girder_token=girder_token)
Example 8
    def submit_job(self, job):
        log_url = '%s/jobs/%s/log' % (cumulus.config.girder.baseUrl,
                                      job['_id'])

        girder_token = get_task_token(self.cluster)['_id']
        cumulus.tasks.job.submit(
            girder_token,
            self._model.filter(self.cluster,
                               getCurrentUser(),
                               passphrase=False), job, log_url)
Example 9
    def start(self, request_body):
        if self.cluster['status'] == ClusterStatus.CREATING:
            raise RestException('Cluster is not ready to start.', code=400)

        log_write_url = '%s/clusters/%s/log' % (getApiUrl(),
                                                self.cluster['_id'])
        girder_token = get_task_token()['_id']
        cumulus.tasks.cluster.test_connection \
            .delay(self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
                   log_write_url=log_write_url,
                   girder_token=girder_token)
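
The .delay(...) calls in these adapters are Celery's shorthand for apply_async: they enqueue the task for a worker and return immediately instead of running it in the request thread. A minimal sketch of the pattern; the task body and broker URL are assumptions, not the cumulus implementation.

from celery import Celery

app = Celery('cumulus', broker='amqp://localhost')  # broker URL is an assumption

@app.task
def test_connection(cluster, log_write_url=None, girder_token=None):
    # A worker would open the SSH connection here and report progress
    # back to log_write_url using girder_token; omitted in this sketch.
    pass

# With a broker running, .delay(...) enqueues the task and returns at once;
# it is equivalent to .apply_async(args=(...), kwargs={...}).
test_connection.delay({'_id': 'abc123'},
                      log_write_url='http://girder/api/v1/clusters/abc123/log',
                      girder_token='<token>')
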
Example 10
    def start(self, request_body):
        if self.cluster['status'] == ClusterStatus.CREATING:
            raise RestException('Cluster is not ready to start.', code=400)

        log_write_url = '%s/clusters/%s/log' % (cumulus.config.girder.baseUrl,
                                                self.cluster['_id'])
        girder_token = get_task_token()['_id']
        cumulus.tasks.cluster.test_connection \
            .delay(self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
                   log_write_url=log_write_url,
                   girder_token=girder_token)
Example 11
    def detach(self, volume, params):

        profile_id = parse('profileId').find(volume)[0].value
        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': cumulus.config.girder.baseUrl,
            'girder_token': get_task_token()['_id']
        }

        log_write_url = '%s/volumes/%s/log' % (cumulus.config.girder.baseUrl,
                                               volume['_id'])

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)
        if aws_volume is None or aws_volume['state'] != VolumeState.INUSE:
            raise RestException('This volume is not attached '
                                'to a cluster', 400)

        if 'clusterId' not in volume:
            raise RestException('clusterId is not set on this volume!', 400)

        if 'path' not in volume:
            raise RestException('path is not set on this volume!', 400)

        cluster = ModelImporter.model('cluster',
                                      'cumulus').load(volume['clusterId'],
                                                      user=getCurrentUser(),
                                                      level=AccessType.ADMIN)
        master = p.get_master_instance(cluster['_id'])
        if master['state'] != InstanceState.RUNNING:
            raise RestException('Master instance is not running!', 400)
        user = getCurrentUser()
        cluster = ModelImporter.model('cluster',
                                      'cumulus').filter(cluster,
                                                        user,
                                                        passphrase=False)
        cumulus.ansible.tasks.volume.detach_volume\
            .delay(profile, cluster, master,
                   self._model.filter(volume, user),
                   secret_key, log_write_url, girder_callback_info)

        volume['status'] = VolumeState.DETACHING
        volume = self._model.update_volume(user, volume)

        return self._model.filter(volume, user)
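
parse('profileId').find(volume)[0].value is a JSONPath lookup; for a top-level key that is present it behaves like volume['profileId'], and the same call form works for nested paths. A small illustration, assuming the jsonpath_rw package provides the parse used here:

from jsonpath_rw import parse

# Illustrative volume document.
volume = {'profileId': 'profile123', 'ec2': {'id': 'vol-0abc'}}

print(parse('profileId').find(volume)[0].value)  # profile123
print(parse('ec2.id').find(volume)[0].value)     # vol-0abc
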
Example 12
    def start(self, request_body):
        """
        Adapters may implement this if they support a start operation.
        """

        self.status = ClusterStatus.LAUNCHING

        self.cluster['config'].setdefault('provision', {})\
            .setdefault('params', {}).update(request_body)
        self.cluster = self.model('cluster', 'cumulus').save(self.cluster)

        base_url = cumulus.config.girder.baseUrl
        log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
        girder_token = get_task_token()['_id']
        profile, secret_key = _get_profile(self.cluster['profileId'])

        # Launch
        launch_playbook = get_property('config.launch.spec',
                                       self.cluster,
                                       default=self.DEFAULT_PLAYBOOK)
        launch_playbook_params = get_property('config.launch.params',
                                              self.cluster,
                                              default={})
        launch_playbook_params['cluster_state'] = ClusterStatus.RUNNING

        # Provision
        provision_playbook = get_property('config.provision.spec',
                                          self.cluster,
                                          default='gridengine/site')
        provision_playbook_params = get_property('config.provision.params',
                                                 self.cluster,
                                                 default={})
        provision_ssh_user = get_property('config.provision.ssh.user',
                                          self.cluster,
                                          default='ubuntu')
        provision_playbook_params['ansible_ssh_user'] = provision_ssh_user
        provision_playbook_params['cluster_state'] = ClusterStatus.RUNNING

        cumulus.ansible.tasks.cluster.start_cluster \
            .delay(launch_playbook,
                   # provision playbook
                   provision_playbook,
                   self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
                   profile, secret_key,
                   launch_playbook_params, provision_playbook_params,
                   girder_token, log_write_url)
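
The config.launch.* and config.provision.* properties read above imply a nested layout on the cluster document roughly like the sketch below; the exact schema and the example values are assumptions inferred from the get_property defaults.

# Hypothetical cluster['config'] shape inferred from the property paths above.
config = {
    'launch': {
        'spec': 'ec2',                  # falls back to self.DEFAULT_PLAYBOOK
        'params': {'master_instance_count': 1}
    },
    'provision': {
        'spec': 'gridengine/site',      # default provisioning playbook
        'params': {},
        'ssh': {'user': 'ubuntu'}       # default ssh user
    }
}
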
Example 13
    def attach(self, volume, cluster, params):
        body = getBodyJson()

        self.requireParams(['path'], body)
        path = body['path']

        profile_id = parse('profileId').find(volume)[0].value
        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': cumulus.config.girder.baseUrl,
            'girder_token': get_task_token()['_id']
        }
        log_write_url = '%s/volumes/%s/log' % (cumulus.config.girder.baseUrl,
                                               volume['_id'])

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)

        # If the volume exists it needs to be available to be attached. If it
        # doesn't exist it will be created as part of the attach playbook.
        if aws_volume is not None and \
           aws_volume['state'] != VolumeState.AVAILABLE:
            raise RestException(
                'This volume is not available to attach '
                'to a cluster', 400)

        master = p.get_master_instance(cluster['_id'])
        if master['state'] != InstanceState.RUNNING:
            raise RestException('Master instance is not running!', 400)

        cluster = ModelImporter.model('cluster',
                                      'cumulus').filter(cluster,
                                                        getCurrentUser(),
                                                        passphrase=False)
        cumulus.ansible.tasks.volume.attach_volume\
            .delay(profile, cluster, master,
                   self._model.filter(volume, getCurrentUser()), path,
                   secret_key, log_write_url, girder_callback_info)

        volume['status'] = VolumeState.ATTACHING
        volume = self._model.update_volume(getCurrentUser(), volume)

        return self._model.filter(volume, getCurrentUser())
Example 14
    def delete(self, volume, params):
        if 'clusterId' in volume:
            raise RestException('Unable to delete attached volume')

        # If the volume is in the created (or error) state and has no ec2
        # volume id associated with it, we should be able to just delete it
        if volume['status'] in (VolumeState.CREATED, VolumeState.ERROR):
            if 'id' in volume['ec2'] and volume['ec2']['id'] is not None:
                raise RestException('Unable to delete volume, it is '
                                    'associated with an ec2 volume %s' %
                                    volume['ec2']['id'])

            self._model.remove(volume)
            return None

        log_write_url = '%s/volumes/%s/log' % (cumulus.config.girder.baseUrl,
                                               volume['_id'])

        # Call EC2 to delete volume
        profile_id = parse('profileId').find(volume)[0].value

        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': cumulus.config.girder.baseUrl,
            'girder_token': get_task_token()['_id']
        }

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)
        if aws_volume['state'] != VolumeState.AVAILABLE:
            raise RestException(
                'Volume must be in an "%s" status to be deleted' %
                VolumeState.AVAILABLE, 400)

        user = getCurrentUser()
        cumulus.ansible.tasks.volume.delete_volume\
            .delay(profile, self._model.filter(volume, user),
                   secret_key, log_write_url, girder_callback_info)

        volume['status'] = VolumeState.DELETING
        volume = self._model.update_volume(user, volume)

        return self._model.filter(volume, user)
Example 15
    def detach(self, volume, params):

        profile_id = parse('profileId').find(volume)[0].value
        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': getApiUrl(),
            'girder_token': get_task_token()['_id']}

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)
        if aws_volume is None or aws_volume['state'] != VolumeState.INUSE:
            raise RestException('This volume is not attached '
                                'to a cluster',
                                400)

        if 'clusterId' not in volume:
            raise RestException('clusterId is not set on this volume!', 400)

        if 'path' not in volume:
            raise RestException('path is not set on this volume!', 400)

        cluster = self.model('cluster', 'cumulus').load(volume['clusterId'],
                                                        user=getCurrentUser(),
                                                        level=AccessType.ADMIN)
        master = p.get_master_instance(cluster['_id'])
        if master['state'] != InstanceState.RUNNING:
            raise RestException('Master instance is not running!',
                                400)
        user = getCurrentUser()
        cluster = self.model('cluster', 'cumulus').filter(
            cluster, user, passphrase=False)
        cumulus.ansible.tasks.volume.detach_volume\
            .delay(profile, cluster, master,
                   self._model.filter(volume, user),
                   secret_key, girder_callback_info)

        volume['status'] = VolumeState.DETACHING
        volume = self._model.update_volume(user, volume)

        return self._model.filter(volume, user)
Example 16
    def attach(self, volume, cluster, params):
        body = getBodyJson()

        self.requireParams(['path'], body)
        path = body['path']

        profile_id = parse('profileId').find(volume)[0].value
        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': getApiUrl(),
            'girder_token': get_task_token()['_id']}

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)

        # If the volume exists it needs to be available to be attached. If it
        # doesn't exist it will be created as part of the attach playbook.
        if aws_volume is not None and \
           aws_volume['state'] != VolumeState.AVAILABLE:
            raise RestException('This volume is not available to attach '
                                'to a cluster',
                                400)

        master = p.get_master_instance(cluster['_id'])
        if master['state'] != InstanceState.RUNNING:
            raise RestException('Master instance is not running!',
                                400)

        cluster = self.model('cluster', 'cumulus').filter(
            cluster, getCurrentUser(), passphrase=False)
        cumulus.ansible.tasks.volume.attach_volume\
            .delay(profile, cluster, master,
                   self._model.filter(volume, getCurrentUser()), path,
                   secret_key, girder_callback_info)

        volume['status'] = VolumeState.ATTACHING
        volume = self._model.update_volume(getCurrentUser(), volume)

        return self._model.filter(volume, getCurrentUser())
Example 17
    def terminate(self):
        self.status = ClusterStatus.TERMINATING

        base_url = getApiUrl()
        log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
        girder_token = get_task_token()['_id']

        profile, secret_key = _get_profile(self.cluster['profileId'])

        playbook = get_property(
            'config.launch.spec', self.cluster, default=self.DEFAULT_PLAYBOOK)
        playbook_params = get_property(
            'config.launch.params', self.cluster, default={})
        playbook_params['cluster_state'] = 'absent'

        cumulus.ansible.tasks.cluster.terminate_cluster \
            .delay(playbook,
                   self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
                   profile, secret_key, playbook_params, girder_token,
                   log_write_url, ClusterStatus.TERMINATED)
Example 18
    def delete(self, volume, params):
        if 'clusterId' in volume:
            raise RestException('Unable to delete attached volume')

        # If the volume is in the created state and has no ec2 volume id
        # associated with it, we should be able to just delete it
        if volume['status'] == VolumeState.CREATED:
            if 'id' in volume['ec2'] and volume['ec2']['id'] is not None:
                raise RestException(
                    'Unable to delete volume, it is '
                    'associated with an ec2 volume %s' % volume['ec2']['id'])

            self._model.remove(volume)
            return None

        # Call EC2 to delete volume
        profile_id = parse('profileId').find(volume)[0].value

        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': getApiUrl(),
            'girder_token': get_task_token()['_id']}

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)
        if aws_volume['state'] != VolumeState.AVAILABLE:
            raise RestException(
                'Volume must be in an "%s" status to be deleted'
                % VolumeState.AVAILABLE, 400)

        user = getCurrentUser()
        cumulus.ansible.tasks.volume.delete_volume\
            .delay(profile, self._model.filter(volume, user),
                   secret_key, girder_callback_info)

        volume['status'] = VolumeState.DELETING
        volume = self._model.update_volume(user, volume)

        return self._model.filter(volume, user)
Example 19
    def start(self, request_body):
        """
        Adapters may implement this if they support a start operation.
        """

        self.status = ClusterStatus.LAUNCHING

        self.cluster['config'].setdefault('provision', {})\
            .setdefault('params', {}).update(request_body)
        self.cluster = self.model('cluster', 'cumulus').save(self.cluster)

        base_url = getApiUrl()
        log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
        girder_token = get_task_token()['_id']
        profile, secret_key = _get_profile(self.cluster['profileId'])

        # Launch
        launch_playbook = get_property(
            'config.launch.spec', self.cluster, default=self.DEFAULT_PLAYBOOK)
        launch_playbook_params = get_property(
            'config.launch.params', self.cluster, default={})
        launch_playbook_params['cluster_state'] = ClusterStatus.RUNNING

        # Provision
        provision_playbook = get_property(
            'config.provision.spec', self.cluster, default='gridengine/site')
        provision_playbook_params = get_property(
            'config.provision.params', self.cluster, default={})
        provision_ssh_user = get_property(
            'config.provision.ssh.user', self.cluster, default='ubuntu')
        provision_playbook_params['ansible_ssh_user'] = provision_ssh_user
        provision_playbook_params['cluster_state'] = ClusterStatus.RUNNING

        cumulus.ansible.tasks.cluster.start_cluster \
            .delay(launch_playbook,
                   # provision playbook
                   provision_playbook,
                   self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
                   profile, secret_key,
                   launch_playbook_params, provision_playbook_params,
                   girder_token, log_write_url)
Example 20
    def terminate(self):
        self.status = ClusterStatus.TERMINATING

        base_url = cumulus.config.girder.baseUrl
        log_write_url = '%s/clusters/%s/log' % (base_url, self.cluster['_id'])
        girder_token = get_task_token()['_id']

        profile, secret_key = _get_profile(self.cluster['profileId'])

        playbook = get_property('config.launch.spec',
                                self.cluster,
                                default=self.DEFAULT_PLAYBOOK)
        playbook_params = get_property('config.launch.params',
                                       self.cluster,
                                       default={})
        playbook_params['cluster_state'] = 'absent'

        cumulus.ansible.tasks.cluster.terminate_cluster \
            .delay(playbook,
                   self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
                   profile, secret_key, playbook_params, girder_token,
                   log_write_url, ClusterStatus.TERMINATED)
Example 21
def delete_profile(user, profile, params):

    query = {'profileId': profile['_id']}

    if ModelImporter.model('volume', 'cumulus').findOne(query):
        raise RestException(
            'Unable to delete profile as it is associated with'
            ' a volume', 400)

    if ModelImporter.model('cluster', 'cumulus').findOne(query):
        raise RestException(
            'Unable to delete profile as it is associated with'
            ' a cluster', 400)

    # Clean up the key associated with the profile
    cumulus.aws.ec2.tasks.key.delete_key_pair.delay(_filter(profile),
                                                    get_task_token()['_id'])

    client = get_ec2_client(profile)
    client.delete_key_pair(KeyName=str(profile['_id']))

    ModelImporter.model('aws', 'cumulus').remove(profile)
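
get_ec2_client(profile) is presumably a thin wrapper that builds a boto3 EC2 client from the stored profile; boto3.client('ec2', ...) and delete_key_pair(KeyName=...) are real boto3 calls, while the wrapper below is only a hypothetical sketch, not the cumulus implementation.

import boto3

def get_ec2_client(profile, secret_key=None):
    # Hypothetical wrapper: build a boto3 EC2 client from a profile document.
    return boto3.client(
        'ec2',
        region_name=profile['regionName'],
        aws_access_key_id=profile['accessKeyId'],
        aws_secret_access_key=secret_key)
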
Example 22
    def get_task_token(self, cluster=None):
        return get_task_token(cluster)
Example 23
    def delete(self):
        super(TraditionClusterAdapter, self).delete()
        # Clean up the key associated with the cluster
        cumulus.ssh.tasks.key.delete_key_pair.delay(
            self._model.filter(self.cluster, getCurrentUser(), passphrase=False),
            get_task_token()['_id'])