Example #1
def provision(
    region,
    availability_zone,
    stack_name,
    cf_template_path,
    launcher_ami,
    launcher_instance_type,
    director_conf_path,
    cluster_ami,
    num_workers,
):
    start_time = datetime.now()

    # create cloudformation stack (VPC etc)
    cf_conn = create_cf_connection(region)
    create_cf_stack(cf_conn, stack_name, cf_template_path, availability_zone)

    # create launcher instance
    ec2_conn = create_ec2_connection(region)
    launcher_instance = create_launcher_instance(
        ec2_conn, cf_conn, stack_name, launcher_ami, launcher_instance_type
    )

    # run bootstrap on launcher
    execute(
        run_director_bootstrap,
        director_conf_path=director_conf_path,
        region=region,
        cluster_ami=cluster_ami,
        num_workers=num_workers,
        stack_name=stack_name,
        hosts=[launcher_instance.ip_address],
    )

    end_time = datetime.now()
    print "Cluster has started. Took {t} minutes.".format(t=(end_time - start_time).seconds / 60)
Example #2
def teardown(region, stack_name):
    # terminate Hadoop cluster (prompts for confirmation)
    ec2_conn = create_ec2_connection(region)
    execute(
        run_director_terminate,
        hosts=[get_launcher_instance(ec2_conn, stack_name).ip_address],
    )

    # terminate launcher instance
    terminate_launcher_instance(ec2_conn, stack_name)

    # delete stack
    cf_conn = create_cf_connection(region)
    delete_stack(cf_conn, stack_name)
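The get_launcher_instance helper used above is not shown. A minimal sketch of how it might be implemented, assuming the connection is a boto 2 EC2 connection (consistent with the .ip_address attribute used above) and that the launcher carries a Name tag derived from the stack name; both the tag scheme and the "-launcher" suffix are assumptions, not taken from the original code:

def get_launcher_instance(ec2_conn, stack_name):
    # Hypothetical lookup: find the single running instance whose Name tag
    # marks it as the launcher for this stack (tag naming scheme is assumed).
    instances = ec2_conn.get_only_instances(filters={
        "tag:Name": "{0}-launcher".format(stack_name),
        "instance-state-name": "running",
    })
    return instances[0]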
Example #3
def run_director_bootstrap(director_conf_path, region, cluster_ami, num_workers, stack_name):
    # replace variables in conf template and copy to launcher
    cf_conn = create_cf_connection(region)
    params = {
        "accessKeyId": get_aws_access_key_id(),
        "secretAccessKey": get_aws_secret_access_key(),
        "region": region,
        "stack_name": stack_name,
        "owner": getuser(),
        "keyName": get_ec2_key_pair(),
        "subnetId": get_subnet_id(cf_conn, stack_name),
        "securityGroupsIds": get_security_group_id(cf_conn, stack_name),
        "image": cluster_ami,
        "num_workers": num_workers,
    }
    with open(director_conf_path, "r") as template_file:
        interpolated_body = template_file.read() % params
        director_conf = StringIO(interpolated_body)
    put(director_conf, "director.conf")
    # bootstrap the Hadoop cluster
    run("cloudera-director bootstrap director.conf")