Example #1
def run(repo_uri, ami_id):
    """
    Run the code
    :param repo_uri: string, the URI of an existing AWS ECR repository.
    :param ami_id: string, the id of an existing AWS AMI.
    """

    # Load a bunch of JSON blobs containing policies and other things that boto3 clients
    # require as input.
    configs_folder = get_configs_folder()

    with open(os.path.join(configs_folder, 'assume-batch-role.json')) as fn:
        assume_batch_role_policy_json = json.dumps(json.load(fn))
    with open(os.path.join(configs_folder, 'batch-service-role.json')) as fn:
        batch_service_role_policy_json = json.dumps(json.load(fn))
    with open(os.path.join(configs_folder, 'assume-ec2-role.json')) as fn:
        assume_ec2_role_policy_json = json.dumps(json.load(fn))
    with open(os.path.join(configs_folder, 'batch-instance-role.json')) as fn:
        batch_instance_role_policy_json = json.dumps(json.load(fn))
    with open(os.path.join(configs_folder, 'compute-environment.json')) as fn:
        compute_environment_dict = json.load(fn)
    with open(os.path.join(configs_folder, 'container-props.json')) as fn:
        container_props_dict = json.load(fn)
    aws_object_names = get_aws_object_names()
    print('JSON loaded')

    # Grab the names from aws_object_names
    comp_env_role = aws_object_names['comp_env_role']
    comp_env_name = aws_object_names['comp_env_name']
    instance_profile = aws_object_names['instance_profile']
    security_group = aws_object_names['security_group']

    if "subnets" not in compute_environment_dict:
        # "subnets": ["subnet-af1f02e6"]
        ec2_client = boto3.client('ec2')
        subnets = ec2_client.describe_subnets()['Subnets']
        if len(set([y['VpcId'] for y in subnets])) != 1:
            print "\n"
            print "It looks like you have multiple VPCs in this region, which means this script"
            print "cannot automatically determine the correct subnets on which to place"
            print "the data pipeline compute servers."
            print "You can resolve this by adding a line with the key 'subnets' like the following"
            print "to the compute-environment.json file in the configs folder."
            print """  "subnets": ["subnet-abc123"]"""
            exit(1)
        else:
            # add a 1 item list containing a valid subnet
            compute_environment_dict['subnets'] = [subnets[0]['SubnetId']]

    # Create a new IAM role for the compute environment
    set_default_region()
    iam_client = boto3.client('iam')

    comp_env_role_arn = iam_client.create_role(
        RoleName=comp_env_role,
        AssumeRolePolicyDocument=assume_batch_role_policy_json,
    )['Role']['Arn']

    try:
        iam_client.put_role_policy(
            RoleName=comp_env_role,
            PolicyName='aws-batch-service-policy',  # This name isn't used anywhere else
            PolicyDocument=batch_service_role_policy_json,
        )
        print('Batch role created')
    except Exception:
        print(
            'WARNING: Batch service role creation failed, assuming that this means it already exists.'
        )

    # Create an EC2 instance profile for the compute environment
    try:
        iam_client.create_role(
            RoleName=instance_profile,
            AssumeRolePolicyDocument=assume_ec2_role_policy_json,
        )
    except Exception:
        print(
            'WARNING: EC2 instance role creation failed, assuming that this means it already exists.'
        )

    try:
        iam_client.put_role_policy(
            RoleName=instance_profile,
            PolicyName='aws-batch-instance-policy',  # This name isn't used anywhere else
            PolicyDocument=batch_instance_role_policy_json,
        )
    except Exception:
        print(
            'WARNING: attaching the instance role policy failed, assuming that this means it already exists.'
        )

    resp = iam_client.create_instance_profile(
        InstanceProfileName=instance_profile,
    )

    instance_profile_arn = resp['InstanceProfile']['Arn']
    compute_environment_dict['instanceRole'] = instance_profile_arn
    iam_client.add_role_to_instance_profile(
        InstanceProfileName=instance_profile,
        RoleName=instance_profile,
    )
    print('Instance profile created')

    # Create a security group for the compute environment
    ec2_client = boto3.client('ec2')

    group_id = ec2_client.create_security_group(
        Description='Security group for AWS Batch',
        GroupName=security_group,
    )['GroupId']

    compute_environment_dict['securityGroupIds'] = [group_id]

    # Create the batch compute environment
    batch_client = boto3.client('batch')
    compute_environment_dict['imageId'] = ami_id

    batch_client.create_compute_environment(
        computeEnvironmentName=comp_env_name,
        type='MANAGED',
        computeResources=compute_environment_dict,
        serviceRole=comp_env_role_arn,
    )

    # The compute environment takes somewhere between 10 and 45 seconds to create. Until it
    # is created, we cannot create a job queue. So first, we wait until the compute environment
    # has finished being created.
    print('Waiting for compute environment...')
    while True:
        # Ping the AWS server for a description of the compute environment
        resp = batch_client.describe_compute_environments(
            computeEnvironments=[comp_env_name],
        )
        status = resp['computeEnvironments'][0]['status']

        if status == 'VALID':
            # If the compute environment is valid, we can proceed to creating the job queue
            break
        elif status == 'CREATING' or status == 'UPDATING':
            # If the compute environment is still being created (or has been created and is
            # now being modified), we wait one second and then ping the server again.
            sleep(1)
        else:
            # If the compute environment is invalid (or deleting or deleted), we cannot
            # continue with job queue creation. Raise an error and quit the script.
            raise RuntimeError('Compute Environment is Invalid')
    print('Compute environment created')

    # Create a batch job queue
    batch_client.create_job_queue(
        jobQueueName=aws_object_names['queue_name'],
        priority=1,
        computeEnvironmentOrder=[{
            'order': 0,
            'computeEnvironment': comp_env_name
        }],
    )
    print('Job queue created')

    # Create a batch job definition
    container_props_dict['image'] = repo_uri
    container_props_dict['environment'] = [
        {
            'name': 'access_key_ssm_name',
            'value': aws_object_names['access_key_ssm_name'],
        },
        {
            'name': 'secret_key_ssm_name',
            'value': aws_object_names['secret_key_ssm_name'],
        },
        {
            'name': 'region_name',
            'value': get_current_region(),
        },
        {
            'name': 'server_url',
            'value': aws_object_names['server_url'],
        },
    ]
    batch_client.register_job_definition(
        jobDefinitionName=aws_object_names['job_defn_name'],
        type='container',
        containerProperties=container_props_dict,
    )
    print('Job definition created')
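
Taken together, these examples form a small pipeline: Example #3 produces the repo_uri and Example #2 produces the ami_id that this function consumes. A minimal driver sketch, assuming each example lives in its own module (the module names ecr_setup, ami_setup, and batch_setup are hypothetical):

# Hypothetical driver; module names are assumptions for illustration.
import ecr_setup    # Example #3: builds and pushes the docker image, returns the repo URI
import ami_setup    # Example #2: bakes the encrypted AMI, returns its id
import batch_setup  # this example: creates the IAM roles, compute environment, queue, and job definition

repo_uri = ecr_setup.run()
ami_id = ami_setup.run()
batch_setup.run(repo_uri, ami_id)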
Example #2
def run():
    """
    Run the code
    :return: The AMI's id, to be used for attaching it to the batch jobs
    """

    # Load a bunch of JSON blobs containing policies and other things that boto3 clients
    # require as input.
    configs_folder = get_configs_folder()

    with open(os.path.join(configs_folder,
                           'ami-ec2-instance-props.json')) as fn:
        ami_ec2_instance_props_dict = json.load(fn)
    aws_object_names = get_aws_object_names()
    print('JSON loaded')

    # Get the AMI ID for the local region
    set_default_region()
    ec2_client = boto3.client('ec2')
    image_name = ami_ec2_instance_props_dict.pop('ImageName')
    resp = ec2_client.describe_images(Filters=[{
        'Name': 'name',
        'Values': [image_name]
    }])
    ami_ec2_instance_props_dict['ImageId'] = resp['Images'][0]['ImageId']

    # Create an EC2 instance to model the AMI off of
    resp = ec2_client.run_instances(**ami_ec2_instance_props_dict)
    ec2_instance_id = resp['Instances'][0]['InstanceId']
    print('EC2 instance created')

    # Create an AMI based off of the EC2 instance. It takes some time for the EC2 instance to
    # be ready, so we delay up to thirty seconds.
    print('Waiting for unencrypted AMI...')
    tries = 0
    while True:
        try:
            resp = ec2_client.create_image(
                InstanceId=ec2_instance_id,
                Name=aws_object_names['ami_name'] + '-unencrypted',
            )
        except ClientError:
            # In case the EC2 instance isn't ready yet
            tries += 1
            if tries > 30:
                raise
            sleep(1)
        else:
            break
    unencrypted_ami_id = resp['ImageId']
    print('Unencrypted AMI created')

    # Create an encrypted AMI based off of the previous AMI. This is the quickest way to
    # create an encrypted AMI, because you can't create an EC2 instance with an encrypted root
    # drive, and you can't create an encrypted AMI directly from an unencrypted EC2 instance.
    region_name = boto3.session.Session().region_name
    print('Waiting to encrypt AMI...')
    tries = 0
    while True:
        try:
            resp = ec2_client.copy_image(
                SourceImageId=unencrypted_ami_id,
                SourceRegion=region_name,
                Encrypted=True,
                Name=aws_object_names['ami_name'],
            )
        except ClientError:
            # In case the unencrypted AMI isn't ready yet
            tries += 1
            if tries > 300:
                raise
            else:
                print "waiting on unencrypted ami..."
            sleep(1)
        else:
            break
    ami_id = resp['ImageId']
    print('Encrypted AMI created')

    # Delete the EC2 instance; only the encrypted AMI is useful going forward. (The
    # unencrypted AMI could also be deregistered, but that call is left commented out.)
    ec2_client.terminate_instances(InstanceIds=[ec2_instance_id])
    # ec2_client.deregister_image(ImageId=unencrypted_ami_id)

    return ami_id
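
The manual retry loops above can also be replaced with boto3's built-in waiters, which poll describe_images until an AMI is ready. A sketch, assuming the same ec2_client and unencrypted_ami_id as in the function above:

# Sketch: wait for the unencrypted AMI with a boto3 waiter instead of a manual loop.
waiter = ec2_client.get_waiter('image_available')
waiter.wait(
    ImageIds=[unencrypted_ami_id],
    WaiterConfig={'Delay': 5, 'MaxAttempts': 120},  # poll every 5s, up to 10 minutes
)
# copy_image can now be called without the ClientError retry logic.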
Example #3
def run():
    """
    Run the code
    :return: The repository's URI, to be used in creating AWS Batch jobs elsewhere
    """

    # Load basic info about the Beiwe instance setup
    configs_folder = get_configs_folder()
    with open(os.path.join(configs_folder, 'git-repository-info.json')) as fn:
        git_repository_info_dict = json.load(fn)

    # Install docker, git and AWS command line interface
    # -y means "don't ask for confirmation"
    # check_call will raise an error if the command fails (i.e. returns nonzero)
    subprocess.check_call(['sudo', 'yum', 'update', '-y'])
    subprocess.check_call(['sudo', 'yum', 'install', '-y', 'docker'])
    subprocess.check_call(['sudo', 'yum', 'install', '-y', 'git'])
    subprocess.check_call(['pip', 'install', 'awscli', '--upgrade', '--user'])
    print('Installations complete')

    # Get git repo to put in the docker
    pipeline_folder = get_pipeline_folder()
    git_destination = os.path.join(pipeline_folder, 'Beiwe-Analysis')
    git_repo = git_repository_info_dict['repository_url']
    git_branch = git_repository_info_dict['branch']
    try:
        subprocess.check_call([
            'git', 'clone', git_repo, git_destination, '--branch', git_branch
        ])
        print('Git repository cloned')
    except subprocess.CalledProcessError:
        # The repository already exists in git_destination
        subprocess.check_call(
            ['git', '-C', git_destination, 'checkout', git_branch])
        subprocess.check_call(['git', '-C', git_destination, 'pull'])
        print('Git repository updated')

    # Create the docker image
    subprocess.check_call(['sudo', 'service', 'docker', 'start'])
    subprocess.check_call(
        ['sudo', 'docker', 'build', '-t', 'beiwe-analysis', pipeline_folder])
    print('Docker image created')

    # Create an AWS ECR repository to put the docker image into, and get the repository's URI
    # If such a repository already exists, get the repository's URI
    aws_object_names = get_aws_object_names()
    ecr_repo_name = aws_object_names['ecr_repo_name']
    set_default_region()
    client = boto3.client('ecr')
    try:
        resp = client.create_repository(repositoryName=ecr_repo_name)
        repo_uri = resp['repository']['repositoryUri']
        print('ECR repository created')
    except ClientError:
        resp = client.describe_repositories(repositoryNames=[ecr_repo_name])
        repo_uri = resp['repositories'][0]['repositoryUri']
        print('Existing ECR repository found')

    # Tag the local docker image with the remote repository's URI. This is similar to
    # having a local git branch track a remote one.
    subprocess.check_call(
        ['sudo', 'docker', 'tag', 'beiwe-analysis', repo_uri])

    # Push the docker file to our new repository
    # FIXME: using get-login is not ideal because it puts the password in process lists
    ecr_login = subprocess.check_output(
        ['aws', 'ecr', 'get-login', '--no-include-email'])
    ecr_login_as_list = ['sudo'] + ecr_login.decode('utf-8').strip('\n').split(' ')
    subprocess.check_call(ecr_login_as_list)
    subprocess.check_call(['sudo', 'docker', 'push', repo_uri])
    print('Docker pushed')

    return repo_uri
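
On the FIXME above: the password can be kept out of process lists by fetching the ECR token with boto3 and piping it to docker login via stdin. A sketch, assuming the same boto3 ECR client (client) and repo_uri from the function above:

# Sketch: authenticate docker to ECR without exposing the password in a process list.
import base64
import subprocess

auth = client.get_authorization_token()['authorizationData'][0]
username, password = base64.b64decode(auth['authorizationToken']).decode('utf-8').split(':')
subprocess.run(
    ['sudo', 'docker', 'login', '--username', username, '--password-stdin', auth['proxyEndpoint']],
    input=password.encode('utf-8'),
    check=True,
)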
Example #4
def run(repo_uri, ami_id):
    """
    Run the code
    :param repo_uri: string, the URI of an existing AWS ECR repository.
    :param ami_id: string, the id of an existing AWS AMI.
    """

    # Load a bunch of JSON blobs containing policies and other things that boto3 clients
    # require as input.
    configs_folder = get_configs_folder()

    with open(os.path.join(configs_folder, 'assume-batch-role.json')) as fn:
        assume_batch_role_policy_json = json.dumps(json.load(fn))
    with open(os.path.join(configs_folder, 'batch-service-role.json')) as fn:
        batch_service_role_policy_json = json.dumps(json.load(fn))
    with open(os.path.join(configs_folder, 'assume-ec2-role.json')) as fn:
        assume_ec2_role_policy_json = json.dumps(json.load(fn))
    with open(os.path.join(configs_folder, 'batch-instance-role.json')) as fn:
        batch_instance_role_policy_json = json.dumps(json.load(fn))
    with open(os.path.join(configs_folder, 'compute-environment.json')) as fn:
        compute_environment_dict = json.load(fn)
    with open(os.path.join(configs_folder, 'container-props.json')) as fn:
        container_props_dict = json.load(fn)
    aws_object_names = get_aws_object_names()
    print('JSON loaded')

    # Grab the names from aws_object_names
    comp_env_role = aws_object_names['comp_env_role']
    instance_profile = aws_object_names['instance_profile']
    security_group = aws_object_names['security_group']

    # default names for entities that we may change the name of.
    default_comp_env_name = aws_object_names['comp_env_name']
    default_queue_name = aws_object_names['queue_name']
    default_job_definition_name = aws_object_names['job_defn_name']

    if "subnets" not in compute_environment_dict:
        # "subnets": ["subnet-af1f02e6"]
        ec2_client = boto3.client('ec2')
        subnets = ec2_client.describe_subnets()['Subnets']
        if len(set([y['VpcId'] for y in subnets])) != 1:
            print("\n")
            print(
                "It looks like you have multiple VPCs in this region, which means this script"
            )
            print(
                "cannot automatically determine the correct subnets on which to place"
            )
            print("the data pipeline compute servers.")
            print(
                "You can resolve this by adding a line with the key 'subnets' like the following"
            )
            print(
                "to the compute-environment.json file in the configs folder.")
            print("""  "subnets": ["subnet-abc123"]""")
            exit(1)
        else:
            # add a 1 item list containing a valid subnet
            compute_environment_dict['subnets'] = [subnets[0]['SubnetId']]

    # Create a new IAM role for the compute environment
    set_default_region()
    iam_client = boto3.client('iam')

    try:
        comp_env_role_arn = iam_client.create_role(
            RoleName=comp_env_role,
            AssumeRolePolicyDocument=assume_batch_role_policy_json,
        )['Role']['Arn']
    except Exception as e:
        if "Role with name AWSBatchServiceRole already exists." in str(e):
            comp_env_role_arn = iam_client.get_role(
                RoleName=comp_env_role)['Role']['Arn']
        else:
            raise

    try:
        iam_client.put_role_policy(
            RoleName=comp_env_role,
            PolicyName='aws-batch-service-policy',  # This name isn't used anywhere else
            PolicyDocument=batch_service_role_policy_json,
        )
        print('Batch role created')
    except Exception:
        print(
            'WARNING: Batch service role creation failed, assuming that this means it already exists.'
        )

    # Create an EC2 instance profile for the compute environment
    try:
        iam_client.create_role(
            RoleName=instance_profile,
            AssumeRolePolicyDocument=assume_ec2_role_policy_json,
        )
    except Exception:
        print(
            'WARNING: EC2 instance role creation failed, assuming that this means it already exists.'
        )

    try:
        iam_client.put_role_policy(
            RoleName=instance_profile,
            PolicyName='aws-batch-instance-policy',  # This name isn't used anywhere else
            PolicyDocument=batch_instance_role_policy_json,
        )
    except Exception:
        print(
            'WARNING: attaching the instance role policy failed, assuming that this means it already exists.'
        )

    try:
        resp = iam_client.create_instance_profile(
            InstanceProfileName=instance_profile)
    except Exception as e:
        if "Instance Profile ecsInstanceRole already exists." in str(e):
            resp = iam_client.get_instance_profile(
                InstanceProfileName=instance_profile)
        else:
            # An unexpected error; re-raise rather than leaving resp unbound
            raise

    compute_environment_dict['instanceRole'] = resp['InstanceProfile']['Arn']
    try:
        iam_client.add_role_to_instance_profile(
            InstanceProfileName=instance_profile,
            RoleName=instance_profile,
        )
        print('Instance profile created')
    except Exception as e:
        if not "Cannot exceed quota for InstanceSessionsPerInstanceProfile" in str(
                e):
            raise

    # Create a security group for the compute environment
    ec2_client = boto3.client('ec2')

    try:
        group_id = ec2_client.describe_security_groups(
            GroupNames=[security_group])['SecurityGroups'][0]['GroupId']
    except Exception:
        try:
            group_id = ec2_client.create_security_group(
                Description='Security group for AWS Batch',
                GroupName=security_group,
            )['GroupId']
        except Exception as e:
            if "InvalidGroup.Duplicate" in str(e):
                # The group appeared between the describe and create calls; look it up
                group_id = ec2_client.describe_security_groups(
                    GroupNames=[security_group])['SecurityGroups'][0]['GroupId']
            else:
                # unknown case
                raise

    # setup for batch compute environment creation
    # (every branch above either binds group_id or raises)
    batch_client = boto3.client('batch')
    compute_environment_dict['imageId'] = ami_id
    compute_environment_dict['securityGroupIds'] = [group_id]

    final_comp_env_name = create_compute_environment(batch_client,
                                                     compute_environment_dict,
                                                     default_comp_env_name,
                                                     comp_env_role_arn)

    # Then create the job queue
    final_jobq_name = create_batch_job_queue(batch_client, default_queue_name,
                                             final_comp_env_name)

    # Create a batch job definition
    container_props_dict['image'] = repo_uri
    container_props_dict['environment'] = [
        {
            'name': 'access_key_ssm_name',
            'value': aws_object_names['access_key_ssm_name'],
        },
        {
            'name': 'secret_key_ssm_name',
            'value': aws_object_names['secret_key_ssm_name'],
        },
        {
            'name': 'region_name',
            'value': get_current_region(),
        },
    ]

    final_job_definition_name = create_job_definition(
        batch_client, default_job_definition_name, container_props_dict)

    print("\n\nFINAL NOTES for settings you will need to set on your Beiwe server:")
    print("You will need to set 'comp_env_name' to '%s'" % final_comp_env_name)
    print("You will need to set 'queue_name' to '%s'" % final_jobq_name)
    print("You will need to set 'job_defn_name' to '%s'" % final_job_definition_name)
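
The create_compute_environment, create_batch_job_queue, and create_job_definition helpers are not shown in this example. Judging from the "default names" comment and the returned "final" names, they appear to retry creation under an alternate name when the default is rejected. A minimal sketch of what such a helper could look like (the suffixing scheme is an assumption):

# Hypothetical sketch of one unshown helper; the retry-with-suffix behavior is inferred.
def create_compute_environment(batch_client, compute_environment_dict,
                               default_comp_env_name, comp_env_role_arn):
    name = default_comp_env_name
    for attempt in range(1, 10):
        try:
            batch_client.create_compute_environment(
                computeEnvironmentName=name,
                type='MANAGED',
                computeResources=compute_environment_dict,
                serviceRole=comp_env_role_arn,
            )
            return name  # the "final" name the caller must record
        except Exception:
            # Name taken (or creation rejected); try a suffixed variant
            name = '%s-%s' % (default_comp_env_name, attempt)
    raise RuntimeError('could not create a compute environment')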
Example #5
def run():
    """
    Run the code
    :return: The AMI's id, to be used for attaching it to the batch jobs
    """
    # Load a bunch of JSON blobs containing policies and other things that boto3 clients
    # require as input.
    configs_folder = get_configs_folder()
    
    with open(os.path.join(configs_folder, 'ami-ec2-instance-props.json')) as fn:
        ami_ec2_instance_props_dict = json.load(fn)

    aws_object_names = get_aws_object_names()

    with open(os.path.join(configs_folder, 'ami-key-name.json')) as fn:
        ami_key = json.load(fn)

    ami_ec2_instance_props_dict["KeyName"] = ami_key["AWS_KEY_NAME"]
    print('JSON loaded')
    
    # Get the AMI ID for the local region
    set_default_region()
    ec2_client = boto3.client('ec2')
    image_name = ami_ec2_instance_props_dict.pop('ImageName')
    resp = ec2_client.describe_images(Filters=[{'Name': 'name', 'Values': [image_name]}])
    ami_ec2_instance_props_dict['ImageId'] = resp['Images'][0]['ImageId']
    
    # Create an EC2 instance to model the AMI off of
    resp = ec2_client.run_instances(**ami_ec2_instance_props_dict)
    ec2_instance_id = resp['Instances'][0]['InstanceId']
    print('EC2 instance created')

    ec2_resource = boto3.resource('ec2')

    # Grab the Instance resource for the new EC2 instance
    instance = next(iter(ec2_resource.instances.filter(InstanceIds=[ec2_instance_id])))
    # Attach the instance to a hardcoded security group (presumably one that allows SSH)
    instance.modify_attribute(Groups=["sg-052fc91e1bf5852b5"])
    print(instance.public_dns_name)
  
    # Fabric configuration
    fabric_api.env.host_string = instance.public_dns_name
    fabric_api.env.user = '******'
    fabric_api.env.key_filename = ami_key["AWS_KEY_PATH"]
    retry(fabric_api.run, "# waiting for ssh to be connectable...")

    fabric_api.sudo("yum -y update")
    fabric_api.sudo("mkfs -t ext4 /dev/xvdb")
    fabric_api.sudo("mkdir /docker_scratch")
    fabric_api.sudo("echo -e '/dev/xvdb\t/docker_scratch\text4\tdefaults\t0\t0' | sudo tee -a /etc/fstab")
    fabric_api.sudo("mount -a")
    try:
        fabric_api.sudo("stop ecs")
    except:
        print('ignoring stop ecs error')
        
    fabric_api.sudo("rm -rf /var/lib/ecs/data/ecs_agent_data.json")

 
    # Create an AMI based off of the EC2 instance. It takes some time for the EC2 instance to
    # be ready, so we delay up to thirty seconds.
    print('Waiting for unencrypted AMI...')
    tries = 0
    while True:
        try:
            resp = ec2_client.create_image(
                InstanceId=ec2_instance_id,
                Name=aws_object_names['ami_name'] + '-unencrypted',
            )
        except ClientError:
            # In case the EC2 instance isn't ready yet
            tries += 1
            if tries > 30:
                raise
            sleep(1)
        else:
            break
    unencrypted_ami_id = resp['ImageId']
    print('Unencrypted AMI created')
    
    # Create an encrypted AMI based off of the previous AMI. This is the quickest way to
    # create an encrypted AMI, because you can't create an EC2 instance with an encrypted root
    # drive, and you can't create an encrypted AMI directly from an unencrypted EC2 instance.
    region_name = boto3.session.Session().region_name
    print('Waiting to encrypt AMI...')
    tries = 0
    while True:
        try:
            resp = ec2_client.copy_image(
                SourceImageId=unencrypted_ami_id,
                SourceRegion=region_name,
                Encrypted=True,
                Name=aws_object_names['ami_name'],
            )
        except ClientError:
            # In case the unencrypted AMI isn't ready yet
            tries += 1
            if tries > 300:
                raise
            else:
                print "waiting on unencrypted ami..."
            sleep(1)
        else:
            break
    ami_id = resp['ImageId']
    print('Encrypted AMI created')
    
    # Delete the EC2 instance; only the encrypted AMI is useful going forward. (The
    # unencrypted AMI could also be deregistered, but that call is left commented out.)
    ec2_client.terminate_instances(InstanceIds=[ec2_instance_id])
    # ec2_client.deregister_image(ImageId=unencrypted_ami_id)
    
    return ami_id
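
The retry helper used above to wait for SSH is not shown in this example. A minimal sketch of what it might look like, assuming it simply re-invokes the given callable until it stops raising:

# Hypothetical sketch of the unshown retry() helper; interval and attempt count are assumptions.
from time import sleep

def retry(func, *args, **kwargs):
    for _ in range(60):
        try:
            return func(*args, **kwargs)
        except Exception:
            sleep(1)
    return func(*args, **kwargs)  # final attempt; let the exception propagate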