Example #1
0
def refresh_code_on_machines():
    """Push the current version of the code out to every running machine.

    Runs the 'refresh_code' task in parallel over all instances, using the
    developer's EC2 keypair for SSH access.
    """
    all_hosts = [inst.public_dns_name for inst in get_all_instances()]

    # Fan the refresh out in parallel; each host is reached via the
    # hard-coded keypair below.
    parallel_ssh = settings(parallel=True, user='******',
                            key_filename='/Users/aismail/.ssh/ami-keypair.pem')
    with parallel_ssh:
        execute('refresh_code', hosts=all_hosts)
Example #2
0
def run_experiment(url='https://raw.github.com/ami-lab/AmI-Platform/master/dumps/diana.txt',
                   name='cloud_experiment',
                   file_name='/tmp/experiment.txt',
                   experiment_profile='default_experiment.json'):
    """Run a cloud experiment end-to-end.

    Loads the machine layout from ``experiment_profile``, opens EC2 machines
    if none are running, tags them, then bootstraps, provisions, copies and
    plays the experiment dump.

    :param url: URL of the experiment dump to replay.
    :param name: logical name of the experiment.
    :param file_name: local path where the dump is copied on the machines.
    :param experiment_profile: JSON file describing the machines needed.
    """
    # Load up which machines are needed from the experiment profile.
    # Use a context manager so the profile file is always closed.
    with open(experiment_profile, 'rt') as profile_file:
        machines = json.load(profile_file)

    # Only open new machines if it's necessary
    opened_instances = get_all_instances()
    if not opened_instances:
        # Open exactly the desired number of machines. Sometimes EC2 fails
        # weirdly to open the requested number of machines (don't know why)
        # so I'm putting in a retry mechanism.
        machines_to_open = len(machines)
        machines_opened = 0
        hostnames = []
        while machines_opened < machines_to_open:
            # BUGFIX: accumulate every batch into the same list that is
            # zipped with the machine meta-data below. Previously the
            # batches were collected in a separate list while only the
            # last batch was tagged.
            batch = execute('open_machines',
                            count=machines_to_open - machines_opened)['<local-only>']
            hostnames.extend(batch)
            machines_opened += len(batch)
    else:
        hostnames = [instance.public_dns_name for instance in opened_instances]

    # Attach tags to machines. This meta-data is used for provisioning and
    # for the lifecycle management of machines as well.
    for hostname, machine_meta_data in zip(hostnames, machines):
        tag_instance(hostname, machine_meta_data)

    execute('bootstrap_machines')
    execute('provision_machines')
    execute('copy_experiment', url=url, file_name=file_name, name=name)
    execute('play_experiment', name=name)
Example #3
0
def bootstrap_machines():
    """Bootstrap every running instance in parallel.

    The manifest for each machine will be taken from
    env.hostname_to_manifest, because it's the only sane way I found in fab
    to do the provisioning in parallel.
    """
    targets = [inst.public_dns_name for inst in get_all_instances()]

    with settings(parallel=True, user='******'):
        execute('bootstrap_machine', hosts=targets)
Example #4
0
def configure_hiera_for_machines():
    """Configure hiera on all machines launched by our experiment.

    First regenerates the hiera datasources locally, then pushes the
    configuration to every host in parallel.
    """
    targets = [inst.public_dns_name for inst in get_all_instances()]
    execute('generate_hiera_datasources')

    # NOTE: we would be better off executing configure_hiera_for_machine
    # as root but I don't think it's OK to complicate ourselves by
    # provisioning the keypair for the root user as well. So when we need,
    # for example, to copy the files in a folder only accessible by root,
    # we copy them in a folder accessible by the ami user, and then use
    # sudo to move it around locally.
    with settings(parallel=True):
        execute('configure_hiera_for_machine', hosts=targets)
Example #5
0
def provision_machines():
    """Provision all machines, then deploy AmI services on crunch nodes.

    The manifest for each machine will be taken from
    env.hostname_to_manifest, because it's the only sane way I found in fab
    to do the provisioning in parallel.
    """
    all_hosts = [inst.public_dns_name for inst in get_all_instances()]

    with settings(parallel=True):
        execute('provision_machine', hosts=all_hosts)

    # Determine the crunching hostnames - these are generally modules of
    # type 'crunch' but there might be exceptions when we want to keep the
    # code close to a machine, especially since we have all the code on all
    # the machines. A crunch host is any instance with a non-empty
    # 'modules' tag.
    crunch_hosts = []
    for inst in get_all_instances():
        if len(inst.tags.get('modules', '').strip()) > 0:
            crunch_hosts.append(inst.public_dns_name)

    # For crunch nodes, generate settings_local.py files and services.txt
    # files. Afterwards, run deploy task on each of them.
    with settings(parallel=True):
        execute('generate_services_file', hosts=crunch_hosts)
        execute('deploy_ami_services_on_crunch_node', hosts=crunch_hosts)
Example #6
0
def provision_machines():
    """Provision all machines, then set up and deploy the crunch nodes.

    The manifest for each machine will be taken from
    env.hostname_to_manifest, because it's the only sane way I found in fab
    to do the provisioning in parallel.
    """
    ssh_opts = dict(parallel=True, user='******',
                    key_filename='/Users/aismail/.ssh/ami-keypair.pem')

    all_hosts = [inst.public_dns_name for inst in get_all_instances()]
    with settings(**ssh_opts):
        execute('provision_machine', hosts=all_hosts)

    # Crunch nodes are identified by their 'type' tag.
    crunch_hosts = [inst.public_dns_name
                    for inst in get_instances_by_tags({'type': 'crunch'})]

    # For crunch nodes, generate settings_local.py files and services.txt
    # files. Afterwards, run deploy task on each of them.
    with settings(**ssh_opts):
        execute('generate_settings_local_file', hosts=crunch_hosts)
        execute('generate_services_file', hosts=crunch_hosts)
        execute('deploy_ami_services_on_crunch_node', hosts=crunch_hosts)
Example #7
0
def generate_hiera_datasources():
    """Hiera datasources are hierarchical configurations to be applied by
    puppet automatically in its template files. We need them for
    provisioning purposes.

    Writes a common datasource plus one per-node datasource under
    /tmp/hiera.
    """
    instances = get_all_instances()

    # Start from a clean slate.
    local('rm -rf /tmp/hiera')
    local('mkdir -p /tmp/hiera/node')

    # Shared settings: the backing-service hostnames, looked up by the
    # 'Name' tag of the instance running each service.
    shared = {
        'mongo_hostname': get_instance_by_tags({'Name': 'measurements'}).public_dns_name,
        'redis_hostname': get_instance_by_tags({'Name': 'sessions'}).public_dns_name,
        'kestrel_hostname': get_instance_by_tags({'Name': 'queues'}).public_dns_name
    }
    render_template('admin/templates/common.json',
                    shared,
                    '/tmp/hiera/common.json')

    # One node file per instance, keyed by its private DNS name.
    for inst in instances:
        render_template('admin/templates/node.json',
                        {'hostname': inst.public_dns_name},
                        '/tmp/hiera/node/%s.json' % inst.private_dns_name)
Example #8
0
def refresh_code_on_machines():
    """Push the current version of the code out to every running machine.

    Runs the 'refresh_code' task in parallel across all instances.
    """
    targets = [inst.public_dns_name for inst in get_all_instances()]

    with settings(parallel=True):
        execute('refresh_code', hosts=targets)