Example #1
def launch_cluster(playbook, cluster, profile, secret_key, extra_vars,
                   girder_token, log_write_url, post_status):
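    """Run the Ansible launch playbook for ``cluster`` against localhost,
    then look up the new master instance and PATCH its public IP into the
    cluster's Girder document as ``config.host``. AWS credentials and the
    Girder callback details are passed to the playbook through environment
    variables rather than playbook variables.
    """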
    playbook = get_playbook_path(playbook)
    playbook_variables = get_playbook_variables(cluster, profile, extra_vars)

    env = os.environ.copy()
    env.update({'AWS_ACCESS_KEY_ID': profile['accessKeyId'],
                'AWS_SECRET_ACCESS_KEY': secret_key,
                'GIRDER_TOKEN': girder_token,
                'LOG_WRITE_URL': log_write_url,
                'CLUSTER_ID': cluster['_id']})

    inventory = simple_inventory('localhost')

    with inventory.to_tempfile() as inventory_path:
        ansible = run_playbook(playbook, inventory_path, playbook_variables,
                               env=env, verbose=3)

    p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

    master = p.get_master_instance(cluster['_id'])

    status_url = '%s/clusters/%s' % (cumulus.config.girder.baseUrl,
                                     cluster['_id'])
    updates = {
        'config': {
            'host': master['public_ip']
        }
    }
    headers = {'Girder-Token': girder_token}
    r = requests.patch(status_url, headers=headers, json=updates)
    check_status(r)

    check_ansible_return_code(ansible, cluster, girder_token)
    check_girder_cluster_status(cluster, girder_token, post_status)
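
For context: the endpoint examples further down queue attach_volume and detach_volume through Celery's .delay(), and launch_cluster is presumably dispatched the same way. A minimal sketch of such a call follows, assuming launch_cluster is registered as a Celery task; the playbook name, log URL pattern, and post_status value are illustrative assumptions, not values taken from cumulus.

# Hypothetical dispatch sketch -- cluster, profile and secret_key are the
# same objects the task receives above; the concrete values are assumed.
girder_token = get_task_token()['_id']
log_write_url = '%s/clusters/%s/log' % (cumulus.config.girder.baseUrl,
                                        cluster['_id'])
launch_cluster.delay('ec2/launch',        # playbook name (assumed)
                     cluster, profile, secret_key,
                     extra_vars={},
                     girder_token=girder_token,
                     log_write_url=log_write_url,
                     post_status='running')  # status posted on success (assumed)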
Example #2
def _attach_volume(task, profile, volume, cluster):
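    """Taskflow task: attach ``volume`` to the cluster's master instance at
    /data (running the attach synchronously rather than via ``.delay``),
    then re-fetch the volume document from Girder so the caller sees its
    updated state."""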
    girder_callback_info = {
        'girder_api_url': task.taskflow.girder_api_url,
        'girder_token': task.taskflow.girder_token
    }
    p = CloudProvider(dict(**profile))
    master = p.get_master_instance(cluster['_id'])
    if master['state'] != InstanceState.RUNNING:
        # Abort: a volume can only be attached to a running master instance.
        task.logger.error('Master instance is not running!')
        raise Exception('Master instance is not running!')
    log_write_url = '%s/volumes/%s/log' % (task.taskflow.girder_api_url,
                                           volume['_id'])
    cumulus.ansible.tasks.volume.attach_volume(profile, cluster, master,
                                               volume, '/data',
                                               profile['secretAccessKey'],
                                               log_write_url,
                                               girder_callback_info)
    task.logger.info('Volume attached.')

    # Get the up to date volume
    client = create_girder_client(task.taskflow.girder_api_url,
                                  task.taskflow.girder_token)
    volume = client.get('volumes/%s' % volume['_id'])

    return volume
Example #3
    def detach(self, volume, params):
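        """REST endpoint: verify the volume is in use and has ``clusterId``
        and ``path`` set, check that the cluster's master instance is
        running, then queue the asynchronous ``detach_volume`` task and
        mark the volume as DETACHING."""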

        profile_id = parse('profileId').find(volume)[0].value
        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': cumulus.config.girder.baseUrl,
            'girder_token': get_task_token()['_id']
        }

        log_write_url = '%s/volumes/%s/log' % (cumulus.config.girder.baseUrl,
                                               volume['_id'])

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)
        if aws_volume is None or aws_volume['state'] != VolumeState.INUSE:
            raise RestException('This volume is not attached '
                                'to a cluster', 400)

        if 'clusterId' not in volume:
            raise RestException('clusterId is not set on this volume!', 400)

        if 'path' not in volume:
            raise RestException('path is not set on this volume!', 400)

        cluster = ModelImporter.model('cluster',
                                      'cumulus').load(volume['clusterId'],
                                                      user=getCurrentUser(),
                                                      level=AccessType.ADMIN)
        master = p.get_master_instance(cluster['_id'])
        if master['state'] != InstanceState.RUNNING:
            raise RestException('Master instance is not running!', 400)
        user = getCurrentUser()
        cluster = ModelImporter.model('cluster',
                                      'cumulus').filter(cluster,
                                                        user,
                                                        passphrase=False)
        cumulus.ansible.tasks.volume.detach_volume\
            .delay(profile, cluster, master,
                   self._model.filter(volume, user),
                   secret_key, log_write_url, girder_callback_info)

        volume['status'] = VolumeState.DETACHING
        volume = self._model.update_volume(user, volume)

        return self._model.filter(volume, user)
Example #4
def terminate_cluster(playbook, cluster, profile, secret_key, extra_vars,
                      girder_token, log_write_url, post_status):
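    """Detach any volumes still attached to the cluster, then run the
    Ansible terminate playbook and report the final status back to
    Girder."""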

    playbook = get_playbook_path(playbook)
    playbook_variables = get_playbook_variables(cluster, profile, extra_vars)

    env = os.environ.copy()
    env.update({
        'AWS_ACCESS_KEY_ID': profile['accessKeyId'],
        'AWS_SECRET_ACCESS_KEY': secret_key,
        'GIRDER_TOKEN': girder_token,
        'LOG_WRITE_URL': log_write_url,
        'CLUSTER_ID': cluster['_id']
    })

    # If there are any volumes, make sure to detach them first.
    if 'volumes' in cluster and len(cluster['volumes']):
        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))
        master = p.get_master_instance(cluster['_id'])

        for volume_id in cluster['volumes']:
            r = requests.get('%s/volumes/%s' %
                             (cumulus.config.girder.baseUrl, volume_id),
                             headers={'Girder-Token': girder_token})
            check_status(r)
            volume = r.json()

            girder_callback_info = {
                'girder_api_url': cumulus.config.girder.baseUrl,
                'girder_token': girder_token
            }

            vol_log_url = '%s/volumes/%s/log' % (cumulus.config.girder.baseUrl,
                                                 volume_id)
            detach_volume(profile, cluster, master, volume, secret_key,
                          vol_log_url, girder_callback_info)

    inventory = simple_inventory('localhost')

    with inventory.to_tempfile() as inventory_path:
        ansible = run_playbook(playbook,
                               inventory_path,
                               playbook_variables,
                               env=env,
                               verbose=3)

    check_ansible_return_code(ansible, cluster, girder_token)
    check_girder_cluster_status(cluster, girder_token, post_status)
Example #5
    def attach(self, volume, cluster, params):
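        """REST endpoint: read the mount ``path`` from the request body,
        check that the volume is available (or does not yet exist) and
        that the master instance is running, then queue the asynchronous
        ``attach_volume`` task and mark the volume as ATTACHING."""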
        body = getBodyJson()

        self.requireParams(['path'], body)
        path = body['path']

        profile_id = parse('profileId').find(volume)[0].value
        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': cumulus.config.girder.baseUrl,
            'girder_token': get_task_token()['_id']
        }
        log_write_url = '%s/volumes/%s/log' % (cumulus.config.girder.baseUrl,
                                               volume['_id'])

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)

        # If volume exists it needs to be available to be attached. If
        # it doesn't exist it will be created as part of the attach
        # playbook.
        if aws_volume is not None and \
           aws_volume['state'] != VolumeState.AVAILABLE:
            raise RestException(
                'This volume is not available to attach '
                'to a cluster', 400)

        master = p.get_master_instance(cluster['_id'])
        if master['state'] != InstanceState.RUNNING:
            raise RestException('Master instance is not running!', 400)

        cluster = ModelImporter.model('cluster',
                                      'cumulus').filter(cluster,
                                                        getCurrentUser(),
                                                        passphrase=False)
        cumulus.ansible.tasks.volume.attach_volume\
            .delay(profile, cluster, master,
                   self._model.filter(volume, getCurrentUser()), path,
                   secret_key, log_write_url, girder_callback_info)

        volume['status'] = VolumeState.ATTACHING
        volume = self._model.update_volume(getCurrentUser(), volume)

        return self._model.filter(volume, getCurrentUser())
Example #6
    def detach(self, volume, params):
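        """Variant of the detach endpoint above that builds the callback
        URL with ``getApiUrl()`` and queues ``detach_volume`` without a
        log URL."""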

        profile_id = parse('profileId').find(volume)[0].value
        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': getApiUrl(),
            'girder_token': get_task_token()['_id']}

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)
        if aws_volume is None or aws_volume['state'] != VolumeState.INUSE:
            raise RestException('This volume is not attached '
                                'to a cluster',
                                400)

        if 'clusterId' not in volume:
            raise RestException('clusterId is not set on this volume!', 400)

        if 'path' not in volume:
            raise RestException('path is not set on this volume!', 400)

        cluster = self.model('cluster', 'cumulus').load(volume['clusterId'],
                                                        user=getCurrentUser(),
                                                        level=AccessType.ADMIN)
        master = p.get_master_instance(cluster['_id'])
        if master['state'] != InstanceState.RUNNING:
            raise RestException('Master instance is not running!',
                                400)
        user = getCurrentUser()
        cluster = self.model('cluster', 'cumulus').filter(
            cluster, user, passphrase=False)
        cumulus.ansible.tasks.volume.detach_volume\
            .delay(profile, cluster, master,
                   self._model.filter(volume, user),
                   secret_key, girder_callback_info)

        volume['status'] = VolumeState.DETACHING
        volume = self._model.update_volume(user, volume)

        return self._model.filter(volume, user)
Example #7
    def attach(self, volume, cluster, params):
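        """Variant of the attach endpoint above that builds the callback
        URL with ``getApiUrl()`` and queues ``attach_volume`` without a
        log URL."""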
        body = getBodyJson()

        self.requireParams(['path'], body)
        path = body['path']

        profile_id = parse('profileId').find(volume)[0].value
        profile, secret_key = _get_profile(profile_id)

        girder_callback_info = {
            'girder_api_url': getApiUrl(),
            'girder_token': get_task_token()['_id']}

        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

        aws_volume = p.get_volume(volume)

        # If volume exists it needs to be available to be attached. If
        # it doesn't exist it will be created as part of the attach
        # playbook.
        if aws_volume is not None and \
           aws_volume['state'] != VolumeState.AVAILABLE:
            raise RestException('This volume is not available to attach '
                                'to a cluster',
                                400)

        master = p.get_master_instance(cluster['_id'])
        if master['state'] != InstanceState.RUNNING:
            raise RestException('Master instance is not running!',
                                400)

        cluster = self.model('cluster', 'cumulus').filter(
            cluster, getCurrentUser(), passphrase=False)
        cumulus.ansible.tasks.volume.attach_volume\
            .delay(profile, cluster, master,
                   self._model.filter(volume, getCurrentUser()), path,
                   secret_key, girder_callback_info)

        volume['status'] = VolumeState.ATTACHING
        volume = self._model.update_volume(getCurrentUser(), volume)

        return self._model.filter(volume, getCurrentUser())
Example #8
def terminate_cluster(playbook, cluster, profile, secret_key, extra_vars,
                      girder_token, log_write_url, post_status):
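    """Variant of the terminate task above; here ``detach_volume`` is
    called without a per-volume log URL."""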

    playbook = get_playbook_path(playbook)
    playbook_variables = get_playbook_variables(cluster, profile, extra_vars)

    env = os.environ.copy()
    env.update({'AWS_ACCESS_KEY_ID': profile['accessKeyId'],
                'AWS_SECRET_ACCESS_KEY': secret_key,
                'GIRDER_TOKEN': girder_token,
                'LOG_WRITE_URL': log_write_url,
                'CLUSTER_ID': cluster['_id']})

    # If there are any volumes, make sure to detach them first.
    if 'volumes' in cluster and len(cluster['volumes']):
        p = CloudProvider(dict(secretAccessKey=secret_key, **profile))
        master = p.get_master_instance(cluster['_id'])

        for volume_id in cluster['volumes']:
            r = requests.get('%s/volumes/%s' %
                             (cumulus.config.girder.baseUrl, volume_id),
                             headers={'Girder-Token': girder_token})
            check_status(r)
            volume = r.json()

            girder_callback_info = {
                'girder_api_url': cumulus.config.girder.baseUrl,
                'girder_token': girder_token}

            detach_volume(profile, cluster, master, volume,
                          secret_key, girder_callback_info)

    inventory = simple_inventory('localhost')

    with inventory.to_tempfile() as inventory_path:
        ansible = run_playbook(playbook, inventory_path, playbook_variables,
                               env=env, verbose=3)

    check_ansible_return_code(ansible, cluster, girder_token)
    check_girder_cluster_status(cluster, girder_token, post_status)
Example #9
def launch_cluster(playbook, cluster, profile, secret_key, extra_vars,
                   girder_token, log_write_url, post_status):
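    """Same launch task as in Example #1, shown with the environment and
    update dictionaries formatted differently."""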
    playbook = get_playbook_path(playbook)
    playbook_variables = get_playbook_variables(cluster, profile, extra_vars)

    env = os.environ.copy()
    env.update({
        'AWS_ACCESS_KEY_ID': profile['accessKeyId'],
        'AWS_SECRET_ACCESS_KEY': secret_key,
        'GIRDER_TOKEN': girder_token,
        'LOG_WRITE_URL': log_write_url,
        'CLUSTER_ID': cluster['_id']
    })

    inventory = simple_inventory('localhost')

    with inventory.to_tempfile() as inventory_path:
        ansible = run_playbook(playbook,
                               inventory_path,
                               playbook_variables,
                               env=env,
                               verbose=3)

    p = CloudProvider(dict(secretAccessKey=secret_key, **profile))

    master = p.get_master_instance(cluster['_id'])

    status_url = '%s/clusters/%s' % (cumulus.config.girder.baseUrl,
                                     cluster['_id'])
    updates = {'config': {'host': master['public_ip']}}
    headers = {'Girder-Token': girder_token}
    r = requests.patch(status_url, headers=headers, json=updates)
    check_status(r)

    check_ansible_return_code(ansible, cluster, girder_token)
    check_girder_cluster_status(cluster, girder_token, post_status)