Example #1
0
def provision_cluster(playbook, cluster, profile, secret_key, extra_vars,
                      girder_token, log_write_url, post_status):
    """Run the provisioning playbook against a cluster's EC2 instances.

    Resolves the playbook path, assembles the Ansible variables and
    environment, executes the playbook with the dynamic EC2 inventory,
    then reports status back to Girder.
    """
    playbook_path = get_playbook_path(playbook)
    play_vars = get_playbook_variables(cluster, profile, extra_vars)

    # Credentials, reporting endpoints and Ansible settings are handed
    # to the playbook run via the process environment.
    run_env = dict(os.environ)
    run_env['AWS_ACCESS_KEY_ID'] = profile['accessKeyId']
    run_env['AWS_SECRET_ACCESS_KEY'] = secret_key
    run_env['GIRDER_TOKEN'] = girder_token
    run_env['LOG_WRITE_URL'] = log_write_url
    run_env['CLUSTER_ID'] = cluster['_id']
    run_env['REGION_NAME'] = profile['regionName']
    run_env['ANSIBLE_HOST_KEY_CHECKING'] = 'false'
    run_env['ANSIBLE_CALLBACK_PLUGINS'] = get_callback_plugins_path()

    # Dynamic inventory script shipped alongside this module.
    ec2_inventory = os.path.join(os.path.dirname(__file__),
                                 'providers', 'ec2.py')

    result = run_playbook(playbook_path, ec2_inventory, play_vars,
                          env=run_env, verbose=3)

    check_girder_cluster_status(cluster, girder_token, post_status)
    check_ansible_return_code(result, cluster, girder_token)
Example #2
0
def launch_cluster(playbook, cluster, profile, secret_key, extra_vars,
                   girder_token, log_write_url, post_status):
    """Launch a cluster via Ansible, store the master's public address on
    the cluster model in Girder, and report the final status."""
    playbook_path = get_playbook_path(playbook)
    play_vars = get_playbook_variables(cluster, profile, extra_vars)

    # Credentials and reporting endpoints are passed to Ansible through
    # the process environment.
    run_env = dict(os.environ)
    run_env.update(AWS_ACCESS_KEY_ID=profile['accessKeyId'],
                   AWS_SECRET_ACCESS_KEY=secret_key,
                   GIRDER_TOKEN=girder_token,
                   LOG_WRITE_URL=log_write_url,
                   CLUSTER_ID=cluster['_id'])

    # The launch playbook runs against localhost only.
    with simple_inventory('localhost').to_tempfile() as inventory_path:
        result = run_playbook(playbook_path, inventory_path, play_vars,
                              env=run_env, verbose=3)

    # Look up the master instance so its public IP can be recorded on the
    # cluster's config in Girder.
    provider = CloudProvider(dict(secretAccessKey=secret_key, **profile))
    master = provider.get_master_instance(cluster['_id'])

    status_url = '%s/clusters/%s' % (cumulus.config.girder.baseUrl,
                                     cluster['_id'])
    response = requests.patch(status_url,
                              headers={'Girder-Token': girder_token},
                              json={'config': {'host': master['public_ip']}})
    check_status(response)

    check_ansible_return_code(result, cluster, girder_token)
    check_girder_cluster_status(cluster, girder_token, post_status)
Example #3
0
def provision_ec2_cluster(task, cluster, profile, provision_params=None):
    """Provision an EC2 cluster via Ansible and return the updated model.

    :param task: the taskflow task (provides the Girder token/URL).
    :param cluster: the cluster model dict (must contain '_id').
    :param profile: AWS profile dict, including 'secretAccessKey'.
    :param provision_params: optional extra Ansible variables; defaults
        are filled in for 'ansible_ssh_user' and 'cluster_state'.
    :returns: the cluster model re-fetched from Girder after provisioning.
    """
    # Copy rather than mutate: the original used a mutable default
    # argument ({}) and wrote into it, leaking keys across calls and
    # into the caller's dict.
    provision_params = dict(provision_params or {})
    provision_params.setdefault('ansible_ssh_user', '******')
    provision_params.setdefault('cluster_state', 'running')

    girder_token = task.taskflow.girder_token
    client = create_girder_client(task.taskflow.girder_api_url, girder_token)

    # Strip the secret out of the profile before it is passed around.
    profile = profile.copy()
    secret_key = profile.pop('secretAccessKey')

    log_write_url = '%s/clusters/%s/log' % (task.taskflow.girder_api_url,
                                            cluster['_id'])

    check_girder_cluster_status(cluster, girder_token, 'provisioning')

    cumulus.ansible.tasks.cluster.provision_cluster(PROVISION_SPEC, cluster,
                                                    profile, secret_key,
                                                    provision_params,
                                                    girder_token,
                                                    log_write_url, 'running')

    # Now update the cluster state to 'running' Is this needed?
    check_girder_cluster_status(cluster, girder_token, 'running')

    # Get the update to data cluster
    cluster = client.get('clusters/%s' % cluster['_id'])

    return cluster
Example #4
0
def start_cluster(launch_playbook, provision_playbook, cluster, profile,
                  secret_key, launch_extra_vars, provision_extra_vars,
                  girder_token, log_write_url):
    """Launch a cluster and then provision it, keeping Girder's view of the
    cluster status in sync at each phase."""
    # Launch phase; post-launch status is 'running'.
    launch_cluster(launch_playbook, cluster, profile, secret_key,
                   launch_extra_vars, girder_token, log_write_url, 'running')

    # Flag the cluster as provisioning before the provision phase begins.
    check_girder_cluster_status(cluster, girder_token, 'provisioning')

    # Provision phase; post-provision status is 'running'.
    provision_cluster(provision_playbook, cluster, profile, secret_key,
                      provision_extra_vars, girder_token, log_write_url,
                      'running')

    # Now update the cluster state to 'running'
    check_girder_cluster_status(cluster, girder_token, 'running')
Example #5
0
def start_cluster(launch_playbook, provision_playbook, cluster, profile,
                  secret_key, launch_extra_vars, provision_extra_vars,
                  girder_token, log_write_url):
    """Run the full cluster startup sequence: launch, then provision.

    The cluster's status in Girder is moved through 'running' →
    'provisioning' → 'running' as the two phases complete.
    """
    launch_cluster(launch_playbook, cluster, profile, secret_key,
                   launch_extra_vars, girder_token, log_write_url,
                   'running')

    check_girder_cluster_status(cluster, girder_token, 'provisioning')

    provision_cluster(provision_playbook, cluster, profile, secret_key,
                      provision_extra_vars, girder_token, log_write_url,
                      'running')

    # Mark the cluster as running once provisioning finishes.
    check_girder_cluster_status(cluster, girder_token, 'running')
Example #6
0
def terminate_cluster(playbook, cluster, profile, secret_key, extra_vars,
                      girder_token, log_write_url, post_status):
    """Tear down a cluster, detaching any attached volumes first, then
    report the result back to Girder."""
    playbook_path = get_playbook_path(playbook)
    play_vars = get_playbook_variables(cluster, profile, extra_vars)

    run_env = dict(os.environ)
    run_env.update(AWS_ACCESS_KEY_ID=profile['accessKeyId'],
                   AWS_SECRET_ACCESS_KEY=secret_key,
                   GIRDER_TOKEN=girder_token,
                   LOG_WRITE_URL=log_write_url,
                   CLUSTER_ID=cluster['_id'])

    # If there are any volumes, make sure to detach them first.
    if 'volumes' in cluster and len(cluster['volumes']):
        provider = CloudProvider(dict(secretAccessKey=secret_key, **profile))
        master = provider.get_master_instance(cluster['_id'])

        # Loop-invariant: same callback info for every volume.
        callback_info = {
            'girder_api_url': cumulus.config.girder.baseUrl,
            'girder_token': girder_token
        }

        for volume_id in cluster['volumes']:
            volume_url = '%s/volumes/%s' % (cumulus.config.girder.baseUrl,
                                            volume_id)
            response = requests.get(volume_url,
                                    headers={'Girder-Token': girder_token})
            check_status(response)
            volume = response.json()

            vol_log_url = '%s/log' % volume_url
            detach_volume(profile, cluster, master, volume, secret_key,
                          vol_log_url, callback_info)

    # The terminate playbook runs against localhost only.
    with simple_inventory('localhost').to_tempfile() as inventory_path:
        result = run_playbook(playbook_path, inventory_path, play_vars,
                              env=run_env, verbose=3)

    check_ansible_return_code(result, cluster, girder_token)
    check_girder_cluster_status(cluster, girder_token, post_status)
Example #7
0
def terminate_cluster(playbook, cluster, profile, secret_key, extra_vars,
                      girder_token, log_write_url, post_status):
    """Run the terminate playbook for a cluster, detaching volumes first
    and pushing the final status to Girder."""
    playbook_path = get_playbook_path(playbook)
    play_vars = get_playbook_variables(cluster, profile, extra_vars)

    run_env = os.environ.copy()
    run_env['AWS_ACCESS_KEY_ID'] = profile['accessKeyId']
    run_env['AWS_SECRET_ACCESS_KEY'] = secret_key
    run_env['GIRDER_TOKEN'] = girder_token
    run_env['LOG_WRITE_URL'] = log_write_url
    run_env['CLUSTER_ID'] = cluster['_id']

    # If there are any volumes, make sure to detach them first.
    if 'volumes' in cluster and len(cluster['volumes']):
        provider = CloudProvider(dict(secretAccessKey=secret_key, **profile))
        master = provider.get_master_instance(cluster['_id'])

        # The callback info is the same for every volume; build it once.
        callback_info = {'girder_api_url': cumulus.config.girder.baseUrl,
                         'girder_token': girder_token}

        for volume_id in cluster['volumes']:
            response = requests.get(
                '%s/volumes/%s' % (cumulus.config.girder.baseUrl, volume_id),
                headers={'Girder-Token': girder_token})
            check_status(response)

            detach_volume(profile, cluster, master, response.json(),
                          secret_key, callback_info)

    with simple_inventory('localhost').to_tempfile() as inventory_path:
        result = run_playbook(playbook_path, inventory_path, play_vars,
                              env=run_env, verbose=3)

    check_ansible_return_code(result, cluster, girder_token)
    check_girder_cluster_status(cluster, girder_token, post_status)
Example #8
0
def provision_cluster(playbook, cluster, profile, secret_key, extra_vars,
                      girder_token, log_write_url, post_status):
    """Run the provision playbook for a cluster using the dynamic EC2
    inventory, then push the status and playbook result to Girder."""
    playbook_path = get_playbook_path(playbook)
    play_vars = get_playbook_variables(cluster, profile, extra_vars)

    # Extra environment handed to Ansible: AWS credentials, Girder
    # reporting endpoints, and Ansible settings.
    extra_env = {
        'AWS_ACCESS_KEY_ID': profile['accessKeyId'],
        'AWS_SECRET_ACCESS_KEY': secret_key,
        'GIRDER_TOKEN': girder_token,
        'LOG_WRITE_URL': log_write_url,
        'CLUSTER_ID': cluster['_id'],
        'REGION_NAME': profile['regionName'],
        'ANSIBLE_HOST_KEY_CHECKING': 'false',
        'ANSIBLE_CALLBACK_PLUGINS': get_callback_plugins_path()
    }
    run_env = os.environ.copy()
    run_env.update(extra_env)

    # Use the dynamic EC2 inventory script that lives next to this module.
    ec2_inventory = os.path.join(os.path.dirname(__file__),
                                 'providers', 'ec2.py')

    result = run_playbook(playbook_path, ec2_inventory, play_vars,
                          env=run_env, verbose=3)

    check_girder_cluster_status(cluster, girder_token, post_status)
    check_ansible_return_code(result, cluster, girder_token)
Example #9
0
def launch_cluster(playbook, cluster, profile, secret_key, extra_vars,
                   girder_token, log_write_url, post_status):
    """Launch a cluster with Ansible, then record the master node's public
    IP on the cluster model in Girder and report the post-launch status."""
    playbook_path = get_playbook_path(playbook)
    play_vars = get_playbook_variables(cluster, profile, extra_vars)

    # Hand credentials and reporting endpoints to Ansible via the
    # environment of the playbook process.
    extra_env = {
        'AWS_ACCESS_KEY_ID': profile['accessKeyId'],
        'AWS_SECRET_ACCESS_KEY': secret_key,
        'GIRDER_TOKEN': girder_token,
        'LOG_WRITE_URL': log_write_url,
        'CLUSTER_ID': cluster['_id']
    }
    run_env = os.environ.copy()
    run_env.update(extra_env)

    # Launch runs against localhost only.
    local_inventory = simple_inventory('localhost')
    with local_inventory.to_tempfile() as inventory_path:
        result = run_playbook(playbook_path, inventory_path, play_vars,
                              env=run_env, verbose=3)

    provider = CloudProvider(dict(secretAccessKey=secret_key, **profile))
    master = provider.get_master_instance(cluster['_id'])

    # PATCH the cluster config in Girder with the master's public address.
    status_url = '%s/clusters/%s' % (cumulus.config.girder.baseUrl,
                                     cluster['_id'])
    updates = {'config': {'host': master['public_ip']}}
    headers = {'Girder-Token': girder_token}
    response = requests.patch(status_url, headers=headers, json=updates)
    check_status(response)

    check_ansible_return_code(result, cluster, girder_token)
    check_girder_cluster_status(cluster, girder_token, post_status)
Example #10
0
def setup_cluster(task, *args, **kwargs):
    """Create, launch and provision an EC2 cluster (and optional volume),
    then chain into any follow-on task.

    Expected kwargs: 'cluster' (required); optionally 'profile', 'volume',
    'image_spec' (required when creating a new cluster) and 'next' (a
    serialized celery Signature to run afterwards).
    """
    cluster = kwargs['cluster']
    profile = kwargs.get('profile')
    volume = kwargs.get('volume')
    new = False

    if '_id' in cluster:
        # An existing cluster was supplied; skip creation and launch.
        task.taskflow.logger.info('We are using an existing cluster: %s' %
                                  cluster['name'])
    else:
        new = True
        task.taskflow.logger.info('We are creating an EC2 cluster.')
        task.logger.info('Cluster name %s' % cluster['name'])
        kwargs['machine'] = cluster.get('machine')

        if volume:
            # Job output is written to the volume's mount point.
            config = cluster.setdefault('config', {})
            config['jobOutputDir'] = '/data'

        # Create the model in Girder
        cluster = create_ec2_cluster(task, cluster, profile,
                                     kwargs['image_spec'])

        # Now launch the cluster
        cluster = launch_ec2_cluster(task, cluster, profile)

        task.logger.info('Cluster started.')

    if volume and '_id' in volume:
        task.taskflow.logger.info('We are using an existing volume: %s' %
                                  volume['name'])
    elif volume:
        task.taskflow.logger.info('We are creating a new volume: "%s"' %
                                  volume['name'])
        volume = create_volume(task, volume, profile)

    # Only newly created clusters are provisioned here.
    if new:
        provision_params = {}

        girder_token = task.taskflow.girder_token
        check_girder_cluster_status(cluster, girder_token, 'provisioning')

        # attach volume
        if volume:
            volume = _attach_volume(task, profile, volume, cluster)
            path = volume.get('path')
            if path:
                # Export the volume's path over NFS from the master node.
                provision_params['master_nfs_exports_extra'] = [path]

        cluster = provision_ec2_cluster(task, cluster, profile,
                                        provision_params)

    # Call any follow on task
    if 'next' in kwargs:
        kwargs['cluster'] = cluster
        # 'next_task' rather than 'next' to avoid shadowing the builtin.
        next_task = Signature.from_dict(kwargs['next'])

        if next_task.task == 'celery.chain':
            # If we are dealing with a chain we want to update the arg and
            # kwargs passed into the chain.
            first_task = next_task.kwargs['tasks'][0]
            if first_task:
                if args:
                    first_task.args = tuple(args) + tuple(first_task.args)

                if kwargs:
                    first_task.kwargs = dict(first_task.kwargs, **kwargs)

        next_task.delay(*args, **kwargs)