# Example #1
def get_or_create_eb_application():
    """ Return BEIWE_APPLICATION_NAME, first ensuring an Elastic Beanstalk application with a
    matching name exists (an existing application is reused, otherwise one is created).
    https://docs.aws.amazon.com/elasticbeanstalk/latest/api/API_CreateApplication.html
    """
    eb_client = create_eb_client()

    # default to an empty list: a missing 'Applications' key previously produced None and the
    # loop below raised a TypeError.
    applications = eb_client.describe_applications().get('Applications', [])
    for app in applications:
        app_name = app.get('ApplicationName', None)
        if app_name and BEIWE_APPLICATION_NAME.lower() in app_name.lower():
            log.info('Using Elastic Beanstalk application named "%s."' %
                     app_name)
            return BEIWE_APPLICATION_NAME

    # no matching application was found, create one.
    eb_client.create_application(
        ApplicationName=BEIWE_APPLICATION_NAME,
        Description='Your Beiwe Application',
        ResourceLifecycleConfig={
            # The ARN of an IAM service role that Elastic Beanstalk has permission to assume
            'ServiceRole': get_or_create_automation_policy()['Arn'],
            'VersionLifecycleConfig': {
                'MaxCountRule': {
                    'Enabled': False,
                    'MaxCount': 1000,  # should be ignored (rule disabled)
                    'DeleteSourceFromS3': True
                },
                'MaxAgeRule': {
                    'Enabled': False,
                    'MaxAgeInDays': 1000,  # should be ignored (rule disabled)
                    'DeleteSourceFromS3': True
                }
            }
        })
    return BEIWE_APPLICATION_NAME
# Example #2
def do_create_manager():
    """ Interactive workflow: provision and configure the manager (processing control) server
    for an existing Elastic Beanstalk environment.  Exits the process on any failure. """
    name = prompt_for_extant_eb_environment_name()
    do_fail_if_environment_does_not_exist(name)
    create_processing_server_configuration_file(name)

    try:
        settings = get_server_configuration_file(name)
    except Exception as e:
        log.error("could not read settings file")
        log.error(e)
        EXIT(1)

    log.info("creating manager server for %s..." % name)
    try:
        instance = create_processing_control_server(
            name, settings["MANAGER_SERVER_INSTANCE_TYPE"])
    except Exception as e:
        log.error(e)
        EXIT(1)
    # drill into the ec2 instance-description structure for the server's public IP
    public_ip = instance['NetworkInterfaces'][0]['PrivateIpAddresses'][0][
        'Association']['PublicIp']

    log.info("Finished creating manager server for %s..." % name)

    # TODO: fabric up the rabbitmq and cron task, ensure other servers can connect, watch data process
    configure_fabric(name, public_ip)
    push_files()
    apt_installs(manager=True)
    load_git_repo()
    setup_python()
    push_beiwe_configuration(name)
    push_manager_private_ip(name)
    # CC add script to create rabbitmq user
    setup_celery_manager()
    setup_manager_cron()
def get_most_recent_ubuntu():
    """ Return the newest available Ubuntu Xenial (16.04) AMI.  Unfortunately the different
    fundamental ec2 server types require a specific image type; the hvm-ssd prefix matched
    below is known to function for T2, M4, and C4 server classes.  Other server types may
    require testing the different classes. """
    client = create_ec2_client()
    state_filter = {"Name": 'state', "Values": ['available']}
    name_filter = {
        "Name": 'name',
        # "Values": ["ubuntu/images/ebs-ssd/ubuntu-xenial-16.04-amd64-server*"]}
        # "Values": ["ubuntu/images/ubuntu-xenial-16.04-amd64-server*"]}
        # "Values": ["ubuntu/images/hvm-instance/ubuntu-xenial-16.04-amd64-server*"]}
        "Values":
        ["ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server*"]
    }
    available = client.describe_images(Filters=[state_filter, name_filter])['Images']
    # AMI names embed a timestamp and are time-sortable, so the most recent image is the one
    # with the greatest name.
    newest = max(available, key=lambda image: image['Name'])
    log.info("Using AMI '%s'" % newest['Name'])
    return newest
def get_most_recent_ubuntu():
    """ Return the newest available, non-marketplace Ubuntu Bionic (18.04) AMI.
    Unfortunately the different fundamental ec2 server types require a specific image type;
    the hvm-ssd prefix matched below is known to function for T2, M4, and C4 server classes
    (seems to work with t3, c5, m5).  Other server types may require testing.
    Raises an Exception when no usable image can be found.
    """
    ec2_client = create_ec2_client()
    images = ec2_client.describe_images(Filters=[
        {
            "Name": 'state',
            "Values": ['available']
        },
        {
            "Name": 'name',
            "Values":
            ["ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server*"]
        },
    ])['Images']
    # exclude marketplace images (they require a subscription).
    images = [
        image for image in images
        if "aws-marketplace" not in image["ImageLocation"]
    ]
    # fail loudly instead of the opaque IndexError images[-1] used to raise on an empty list.
    if not images:
        raise Exception("Could not find a usable Ubuntu 18.04 AMI in this region.")
    # The names are time-sortable, we want the most recent one, it is at the bottom of a sorted list
    images.sort(key=lambda x: x['Name'])
    log.info("Using AMI '%s'" % images[-1]['Name'])
    return images[-1]
# Example #5
def do_create_manager():
    """ Interactive workflow: provision and configure the manager (processing control) server
    for an existing Elastic Beanstalk environment, including rabbitmq setup.  Exits the
    process on any failure. """
    name = prompt_for_extant_eb_environment_name()
    do_fail_if_environment_does_not_exist(name)
    create_processing_server_configuration_file(name)

    try:
        settings = get_server_configuration_file(name)
    except Exception as e:
        log.error("could not read settings file")
        log.error(e)
        EXIT(1)

    log.info("creating manager server for %s..." % name)
    try:
        instance = create_processing_control_server(
            name, settings["MANAGER_SERVER_INSTANCE_TYPE"])
    except Exception as e:
        log.error(e)
        EXIT(1)
    # drill into the ec2 instance-description structure for the server's public IP
    public_ip = instance['NetworkInterfaces'][0]['PrivateIpAddresses'][0][
        'Association']['PublicIp']

    # remote provisioning over fabric: push files, install packages, then start services.
    configure_fabric(name, public_ip)
    push_files()
    apt_installs(manager=True)
    setup_rabbitmq()
    load_git_repo()
    setup_python()
    push_beiwe_configuration(name)
    push_manager_private_ip_and_password(name)
    setup_manager_cron()
# Example #6
def create_data_bucket(eb_environment_name):
    """ Create an S3 data bucket for the environment under a randomized, globally-unique name.
    Up to ten candidate names are tried before giving up; returns the name actually used. """
    for _ in range(10):
        # bucket names are capped at 63 characters and must be lowercase.
        candidate = "beiwe-data-{}-{}".format(
            eb_environment_name, random_alphanumeric_string(63))[:63].lower()
        log.info("checking availability of bucket name '%s'" % candidate)
        if not check_bucket_name_available(candidate):
            continue
        s3_create_bucket(candidate)
        return candidate
    raise Exception(
        "Was not able to construct a bucket name that is not in use.")
# Example #7
def do_create_environment():
    """ Top-level workflow for standing up a new Beiwe cluster: validate the requested name
    and configuration, provision the RDS database, then create the Elastic Beanstalk
    environment. """
    print(DO_CREATE_ENVIRONMENT)
    env_name = prompt_for_new_eb_environment_name(with_prompt=False)
    do_fail_if_bad_environment_name(env_name)
    do_fail_if_environment_exists(env_name)
    # Exits if any non-autogenerated credentials are bad.
    validate_beiwe_environment_config(env_name)
    create_new_rds_instance(env_name)
    create_finalized_configuration(env_name)
    create_eb_environment(env_name)
    log.info("Created Beiwe cluster environment successfully")
# Example #8
def write_rds_credentials(eb_environment_name, credentials, test_for_existing_files):
    """ Writes to the database credentials file for the environment.

    eb_environment_name: name of the cluster environment.
    credentials: a JSON-serializable dict of database credentials.
    test_for_existing_files: when True, refuse to overwrite an existing credentials file.
    Raises an Exception if the file already exists and test_for_existing_files is set.
    """
    db_credentials_path = get_db_credentials_file_path(eb_environment_name)
    if test_for_existing_files and os.path.exists(db_credentials_path):
        # bug fix: the log call was missing its format argument (it logged a literal "%s"),
        # and "aborting" was misspelled.
        msg = "Encountered a file at %s, aborting." % db_credentials_path
        log.error(msg)
        raise Exception(msg)

    with open(db_credentials_path, 'w') as f:
        json.dump(credentials, f, indent=1)
        log.info("database credentials have been written to %s" % db_credentials_path)
# Example #9
def get_or_create_eb_instance_profile():
    """ This function creates the appropriate instance profile that applies to the instances in
    an elastic beanstalk environment, based off of the profile created when using the online
    AWS console.  Returns the (possibly freshly created) instance profile. """
    iam_client = create_iam_client()
    try:
        return iam_find_instance_profile(iam_client, EB_INSTANCE_PROFILE_NAME)
    except IamEntityMissingError:
        log.info("eb instance _profile_ not found, creating...")
        iam_client.create_instance_profile(
            InstanceProfileName=EB_INSTANCE_PROFILE_NAME)
        # attach the instance role (created on demand) to the new profile.
        iam_client.add_role_to_instance_profile(
            InstanceProfileName=EB_INSTANCE_PROFILE_NAME,
            RoleName=get_or_create_eb_instance_profile_role()['RoleName'])
    return iam_find_instance_profile(iam_client, EB_INSTANCE_PROFILE_NAME)
# Example #10
def get_or_create_eb_instance_profile_role():
    """ Ensure the IAM role used by instances in an elastic beanstalk environment exists and
    has the standard EB instance policies attached, mirroring the role the online AWS console
    creates.  Returns the role. """
    client = create_iam_client()
    try:
        iam_find_role(client, EB_INSTANCE_PROFILE_ROLE)
    except IamEntityMissingError:
        log.info("eb instance profile _role_ not found, creating...")
        iam_create_role(client, EB_INSTANCE_PROFILE_ROLE, get_instance_assume_role_policy_document())
    # The multicontainer docker policy was present on the original console-created role, but it
    # is almost definitely not required.  Attachments run unconditionally on every call.
    for policy in (AWS_EB_MULTICONTAINER_DOCKER, AWS_EB_WEB_TIER, AWS_EB_WORKER_TIER):
        iam_attach_role_policy(client, EB_INSTANCE_PROFILE_ROLE, policy)
    return iam_find_role(client, EB_INSTANCE_PROFILE_ROLE)
# Example #11
def get_or_create_eb_service_role():
    """ Ensure the IAM service role for the elastic beanstalk environment exists and carries
    the standard EB service policies, matching the role the online AWS console creates.
    Returns the role. """
    client = create_iam_client()
    try:
        iam_find_role(client, EB_SERVICE_ROLE)
    except IamEntityMissingError:
        log.info("eb service role not found, creating...")
        iam_create_role(client, EB_SERVICE_ROLE, get_elasticbeanstalk_assume_role_policy_document())
    # policy attachments run unconditionally on every call.
    for policy in (AWS_EB_SERVICE, AWS_EB_ENHANCED_HEALTH):
        iam_attach_role_policy(client, EB_SERVICE_ROLE, policy)
    return iam_find_role(client, EB_SERVICE_ROLE)
# Example #12
def encrypt_eb_s3_bucket():
    """ Obtain the account ID and region, construct the elasticbeanstalk S3 bucket name from
    them, and apply an encrypt-by-default policy to that bucket. """
    config = get_global_config()
    account_id = create_sts_client().get_caller_identity().get('Account')
    # There ought to be an easier way to get this name, but this works.
    bucket_name = 'elasticbeanstalk-{}-{}'.format(config['AWS_REGION'], account_id)
    log.info('Enabling encryption on S3 bucket: %s' % bucket_name)
    s3_encrypt_bucket(bucket_name)
# Example #13
def do_create_worker():
    """ Interactive workflow: provision and configure a celery worker server for an existing
    Elastic Beanstalk environment that already has a running manager server.  Exits the
    process on any failure. """
    name = prompt_for_extant_eb_environment_name()
    do_fail_if_environment_does_not_exist(name)
    manager_instance = get_manager_instance_by_eb_environment_name(name)
    if manager_instance is None:
        log.error(
            "There is no manager server for the %s cluster, cannot deploy a worker until there is."
            % name)
        EXIT(1)

    if manager_instance['State']['Name'] != 'running':
        log.error(
            "There is a manager server for the %s cluster, but it is not in the running state (%s)."
            % (name, manager_instance['State']['Name']))
        EXIT(1)

    manager_public_ip = get_manager_public_ip(name)
    manager_private_ip = get_manager_private_ip(name)

    try:
        settings = get_server_configuration_file(name)
    except Exception as e:
        log.error("could not read settings file")
        log.error(e)
        EXIT(1)

    log.info("creating worker server for %s..." % name)
    try:
        # NOTE(review): this reads the MANAGER server instance type for a worker server —
        # presumably intentional (same size), but confirm against the settings file schema.
        instance = create_processing_server(
            name, settings["MANAGER_SERVER_INSTANCE_TYPE"])
    except Exception as e:
        log.error(e)
        EXIT(1)
    # drill into the ec2 instance-description structure for the server's public IP
    instance_ip = instance['NetworkInterfaces'][0]['PrivateIpAddresses'][0][
        'Association']['PublicIp']
    # TODO: fabric up the worker with the celery/supervisord and ensure it can connect to manager.

    configure_fabric(name, instance_ip)
    push_files()
    apt_installs()
    load_git_repo()
    setup_python()
    push_beiwe_configuration(name)
    push_manager_private_ip(name)
    setup_celery_worker()
    setup_worker_cron()
def terminate_all_processing_servers(eb_environment_name):
    """ Terminate every worker instance belonging to the environment, then the manager
    instance, if one exists. """
    ec2_client = create_ec2_client()
    workers = get_worker_instances(eb_environment_name)

    # don't optimize, we want the log statements
    for worker in workers:
        worker_id = worker['InstanceId']
        ec2_client.terminate_instances(InstanceIds=[worker_id])
        log.info(f"Terminating worker instance {worker_id}")

    manager_info = get_manager_instance_by_eb_environment_name(eb_environment_name)
    if manager_info:
        manager_id = manager_info['InstanceId']
        log.info(f"Terminating manager instance {manager_id}")
        ec2_client.terminate_instances(InstanceIds=[manager_id])
def get_or_create_rabbit_mq_security_group(eb_environment_name):
    """ Return the security group that admits rabbitmq traffic from the environment's data
    processing instances, creating it (with an additional ssh ingress rule) if it does not
    exist yet. """
    rabbit_mq_sec_grp_name = construct_rabbit_mq_security_group_name(eb_environment_name)
    # we assume that the group was created correctly, don't attempt to add rules if we find it
    try:
        return get_security_group_by_name(rabbit_mq_sec_grp_name)
    except InvalidSecurityGroupNameException:
        log.info("Did not find a security group named '%s,' creating it." % rabbit_mq_sec_grp_name)
        instance_sec_grp_id = get_rds_security_groups_by_eb_name(eb_environment_name)["instance_sec_grp"]['GroupId']
        # allow members of the instances' security group to reach the rabbitmq port
        ingress_params = create_sec_grp_rule_parameters_allowing_traffic_from_another_security_group(
                tcp_port=RABBIT_MQ_PORT, sec_grp_id=instance_sec_grp_id
        )
        sec_grp = create_security_group(
                rabbit_mq_sec_grp_name,
                RABBIT_MQ_SEC_GRP_DESCRIPTION % instance_sec_grp_id,
                list_of_dicts_of_ingress_kwargs=[ingress_params]
        )
        # ssh access for deployment and debugging
        open_tcp_port(sec_grp['GroupId'], 22)
        return get_security_group_by_id(sec_grp['GroupId'])
# Example #16
def do_create_worker():
    """ Interactive workflow: provision and configure a celery worker server for an existing
    Elastic Beanstalk environment that already has a manager server, finishing by starting
    supervisord on the new worker.  Exits the process on any failure. """
    name = prompt_for_extant_eb_environment_name()
    do_fail_if_environment_does_not_exist(name)
    manager_instance = get_manager_instance_by_eb_environment_name(name)
    if manager_instance is None:
        log.error(
            "There is no manager server for the %s cluster, cannot deploy a worker until there is."
            % name)
        EXIT(1)

    try:
        settings = get_server_configuration_file(name)
    except Exception as e:
        log.error("could not read settings file")
        log.error(e)
        settings = None  # ide warnings...
        EXIT(1)

    log.info("creating worker server for %s..." % name)
    try:
        # NOTE(review): WORKER_SERVER_INSTANCE_TYPE is a bare name here while sibling code
        # indexes settings with a string literal — presumably it is a module-level constant
        # holding the settings key; confirm it is defined at module scope.
        instance = create_processing_server(
            name, settings[WORKER_SERVER_INSTANCE_TYPE])
    except Exception as e:
        log.error(e)
        instance = None  # ide warnings...
        EXIT(1)
    # drill into the ec2 instance-description structure for the server's public IP
    instance_ip = instance['NetworkInterfaces'][0]['PrivateIpAddresses'][0][
        'Association']['PublicIp']

    # remote provisioning over fabric: push files, install packages, then start services.
    configure_fabric(name, instance_ip)
    create_swap()
    push_home_directory_files()
    apt_installs()
    load_git_repo()
    setup_python()
    push_beiwe_configuration(name)
    push_manager_private_ip_and_password(name)
    setup_worker_cron()
    setup_celery_worker()  # run setup worker last.
    log.warning(
        "Server is almost up.  Waiting 20 seconds to avoid a race condition..."
    )
    sleep(20)
    run("supervisord")
def create_processing_control_server(eb_environment_name, aws_server_type):
    """ The differences between a data processing worker server and a processing controller
    server is that the controller needs to allow connections from the processors.

    eb_environment_name: the elastic beanstalk environment the manager belongs to.
    aws_server_type: the ec2 instance type to launch.
    Returns the instance-info dict of the newly created (or pre-existing) manager server.
    """
    # (removed a stray statement that performed a security group lookup and discarded the
    # result - the lookup is done for real further down.)

    # TODO: functions that terminate all worker and all manager servers for an environment
    log.info("creating processing control server for %s..." % eb_environment_name)

    manager_info = get_manager_instance_by_eb_environment_name(eb_environment_name)

    # if a non-terminated manager already exists, report it and reuse it.
    if manager_info is not None and manager_info['State']['Name'] != 'terminated':
        if manager_info['InstanceType'] == aws_server_type:
            msg = "A manager server, %s, already exists for this environment, and it is of the provided type (%s)." % (manager_info['InstanceId'], aws_server_type)
        else:
            msg = "A manager server, %s, already exists for this environment." % manager_info['InstanceId']
        log.error(msg)
        msg = "Continuing with existing manager."
        log.error(msg)
        # sometimes log has problems if you don't give it a second, the error messages above are critical
        sleep(0.1)
        return manager_info

    rabbit_mq_sec_grp_id = get_or_create_rabbit_mq_security_group(eb_environment_name)['GroupId']
    instance_sec_grp_id = get_rds_security_groups_by_eb_name(eb_environment_name)["instance_sec_grp"]['GroupId']

    try:
        open_tcp_port(instance_sec_grp_id, 22)
    except ClientError:
        # we need to open the ssh port for future worker servers, but it blows up with duplicate
        # if a user ever creates two managers during the life of the environment.
        pass

    instance_info = create_server(eb_environment_name, aws_server_type,
                                  security_groups=[rabbit_mq_sec_grp_id, instance_sec_grp_id])
    # tag the instance so it can be identified as this environment's processing manager.
    instance_resource = create_ec2_resource().Instance(instance_info["InstanceId"])
    instance_resource.create_tags(Tags=[
        {"Key": "Name", "Value": PROCESSING_MANAGER_NAME % eb_environment_name},
        {"Key": "is_processing_manager", "Value": "1"}
    ])
    return instance_info
# Example #18
def do_setup_eb_update():
    """ Interactive workflow: let the user pick a staged zip of the codebase, run the zip
    reduction over it, and emit a timestamped, deployment-ready zip for Elastic Beanstalk.
    Exits the process when no zips are staged or the selection is invalid. """
    print("\n", DO_SETUP_EB_UPDATE_OPEN)

    files = sorted(
        [f for f in os.listdir(STAGED_FILES) if f.lower().endswith(".zip")])

    if not files:
        print("Could not find any zip files in " + STAGED_FILES)
        EXIT(1)

    # present a 1-indexed menu of staged zip files.
    print("Enter the version of the codebase do you want to use:")
    for i, file_name in enumerate(files):
        print("[%s]: %s" % (i + 1, file_name))
    print("(press CTL-C to cancel)\n")
    try:
        index = int(input("$ "))
    except Exception:
        log.error("Could not parse input.")
        index = None  # ide warnings
        EXIT(1)

    if index < 1 or index > len(files):
        log.error("%s was not a valid option." % index)
        EXIT(1)

    # handle 1-indexing
    file_name = files[index - 1]
    # log.info("Processing %s..." % file_name)
    # timestamp suffix keeps successive runs from colliding on the output name.
    time_ext = current_time_string().replace(" ", "_").replace(":", "_")
    output_file_name = file_name[:-4] + "_processed_" + time_ext + ".zip"
    do_zip_reduction(file_name, STAGED_FILES, output_file_name)
    log.info("Done processing %s." % file_name)
    log.info("The new file %s has been placed in %s" %
             (output_file_name, STAGED_FILES))
    print(
        "You can now provide Elastic Beanstalk with %s to run an automated deployment of the new code."
        % output_file_name)
    EXIT(0)
# Example #19
def do_get_worker_ip_addresses():
    """ Log the public IP addresses of all worker servers for a prompted environment. """
    name = prompt_for_extant_eb_environment_name()
    do_fail_if_environment_does_not_exist(name)
    ips = ', '.join(get_worker_public_ips(name))
    if ips:
        log.info(f"The IP address of the worker servers for {name} are {ips}")
    else:
        # previously this exited silently when there were no workers, which was confusing.
        log.info(f"There are no worker servers for {name}.")
# Example #20
def do_get_manager_ip_address():
    """ Look up and log the public IP address of the manager server for a prompted
    environment. """
    env_name = prompt_for_extant_eb_environment_name()
    do_fail_if_environment_does_not_exist(env_name)
    manager_ip = get_manager_public_ip(env_name)
    log.info(f"The IP address of the manager server for {env_name} is {manager_ip}")
def create_server(eb_environment_name, aws_server_type, security_groups=None):
    """ Launch a single ec2 instance of the given type for the environment, attach the given
    security groups, block until ec2 reports it running, and return its instance info.

    eb_environment_name: name of the cluster environment (passed through to helpers).
    aws_server_type: the ec2 instance type to launch.
    security_groups: optional list of security group ids to attach.
    Raises an Exception if security_groups is provided but is not a list.
    """
    ec2_client = create_ec2_client()
    if security_groups is None:
        security_groups = []
    if not isinstance(security_groups, list):
        raise Exception("security_groups must be a list, received '%s'" % type(security_groups))
    
    ebs_parameters = {
        'DeviceName': '/dev/sda1',  # boot drive...
        'Ebs': {
            'DeleteOnTermination': True,
            'VolumeSize': 8,
        # gigabytes, No storage is required on any ubuntu machines, 8 is default
            'VolumeType': 'gp2'}  # SSD...
    }
    
    # the commented-out parameters below document available run_instances options that were
    # considered but are not currently used.
    instance = ec2_client.run_instances(
            ImageId=get_most_recent_ubuntu()['ImageId'],
            MinCount=1,
            MaxCount=1,
            KeyName=GLOBAL_CONFIGURATION['DEPLOYMENT_KEY_NAME'],
            InstanceType=aws_server_type,
            SecurityGroupIds=security_groups,
            # NetworkInterfaces=[{"DeviceIndex": 0,
            #                     "AssociatePublicIpAddress": True,
            #                     "SubnetId": config.public_subnet_id,
            #                     "Groups": security_groups_list}],
            # IamInstanceProfile={"Arn": MANAGER_IAM_ROLE},
            BlockDeviceMappings=[ebs_parameters],
            Monitoring={'Enabled': False},
            InstanceInitiatedShutdownBehavior='stop',
            # Placement={'AvailabilityZone': 'string',
            #            'Affinity': 'string',
            #            'GroupName': 'string',
            #            'HostId': 'string',
            #            'Tenancy': 'default'|'dedicated'|'host',
            #            'SpreadDomain': 'string'
            #           },
            # IamInstanceProfile={'Arn': 'string',
            #                    'Name': 'string'},
            
            # NetworkInterfaces=[ {
            #         'AssociatePublicIpAddress': True|False,
            #         'DeleteOnTermination': True|False,
            #         'Description': 'string',
            #         'DeviceIndex': 123,
            #         'Groups': ['string',],
            #         'Ipv6AddressCount': 123,
            #         'Ipv6Addresses': [ { 'Ipv6Address': 'string' }, ],
            #         'NetworkInterfaceId': 'string',
            #         'PrivateIpAddress': 'string',
            #         'PrivateIpAddresses': [ {'Primary': True|False,
            #                                  'PrivateIpAddress': 'string'},],
            #         'SecondaryPrivateIpAddressCount': 123,
            #         'SubnetId': 'string'
            #     }, ],
            #
            # TagSpecifications=[ {
            #         'ResourceType': 'customer-gateway'|'dhcp-options'|'image'|'instance'|'internet-gateway'|'network-acl'|'network-interface'|'reserved-instances'|'route-table'|'snapshot'|'spot-instances-request'|'subnet'|'security-group'|'volume'|'vpc'|'vpn-connection'|'vpn-gateway',
            #         'Tags': [ { 'Key': 'string',
            #                     'Value': 'string'},]
            #         },
            # ]
    )["Instances"][0]
    instance_id = instance["InstanceId"]
    instance_resource = create_ec2_resource().Instance(instance_id)
    # block until the instance both exists and reaches the running state.
    log.info("Waiting for server %s to show up..." % instance_id)
    instance_resource.wait_until_exists()
    log.info("Waiting until server %s is up and running (this may take a minute) ..." % instance_id)
    instance_resource.wait_until_running()
    return get_instance_by_id(instance_id)
# Example #22
def create_new_rds_instance(eb_environment_name):
    """ Provision the RDS Postgres database for a new cluster environment: generate and persist
    credentials, set up security groups, create the db instance, and block until it reaches
    the "available" state.  Exits if a database for the environment already exists.
    Returns the db info dict of the new database. """
    db_instance_identifier = construct_db_name(eb_environment_name)
    # identify whether there is already a database with this name, we don't want to
    try:
        _ = get_db_info(eb_environment_name)
        log.error("There is already a database named %s" % eb_environment_name)
        EXIT()
    except DBInstanceNotFound:
        pass

    database_server_type = get_server_configuration_file(
        eb_environment_name)['DB_SERVER_TYPE']
    engine = get_most_recent_postgres_engine()

    credentials = generate_valid_postgres_credentials()
    log.info(
        "writing database credentials to disk, database address will be added later."
    )

    write_rds_credentials(eb_environment_name, credentials, True)

    # There is some weirdness involving security groups.  It looks like there is this concept of
    # non-vpc security groups, I am fairly certain that this interacts with cross-vpc, IAM based
    # database access.
    create_rds_security_groups(db_instance_identifier)
    db_sec_grp_id = get_rds_security_groups(
        db_instance_identifier)['database_sec_grp']['GroupId']

    log.info("Creating RDS Postgres database named %s" %
             db_instance_identifier)

    rds_client = create_rds_client()
    rds_instance = rds_client.create_db_instance(
        # server details
        DBInstanceIdentifier=db_instance_identifier,
        DBInstanceClass="db." + database_server_type,
        MultiAZ=False,
        PubliclyAccessible=False,
        Port=POSTGRES_PORT,

        # attach the security group that will allow access
        VpcSecurityGroupIds=[db_sec_grp_id],
        #TODO: is this even relevant?
        # providing the subnet is critical, not providing this value causes the db to be non-vpc
        # DBSubnetGroupName='string',

        # db storage
        StorageType='gp2',  # valid options are standard, gp2, io1
        # Iops=1000,  # multiple between 3 and 10 times the storage; only for use with io1.

        # AllocatedStorage has weird constraints:
        # General Purpose (SSD) storage (gp2): Must be an integer from 5 to 6144.
        # Provisioned IOPS storage (io1): Must be an integer from 100 to 6144.
        # Magnetic storage (standard): Must be an integer from 5 to 3072.
        AllocatedStorage=50,  # in gigabytes

        # StorageEncrypted=True | False,  # buh? drive encryption I think.
        # KmsKeyId='string',
        # TdeCredentialArn='string',  # probably not something we will implement
        # TdeCredentialPassword='******',  # probably not something we will implement

        # Security
        MasterUsername=credentials['RDS_USERNAME'],
        MasterUserPassword=credentials['RDS_PASSWORD'],
        DBName=credentials['RDS_DB_NAME'],
        EnableIAMDatabaseAuthentication=False,
        Engine=engine['Engine'],  # will be "postgres"
        EngineVersion=engine[
            'EngineVersion'],  # most recent postgres version in this region.
        PreferredMaintenanceWindow=MAINTAINANCE_WINDOW,
        PreferredBackupWindow=BACKUP_WINDOW,
        AutoMinorVersionUpgrade=True,  # auto-upgrades are fantastic
        BackupRetentionPeriod=BACKUP_RETENTION_PERIOD_DAYS,
        Tags=[
            {
                'Key': 'BEIWE-NAME',
                'Value': 'Beiwe postgres database for %s' % eb_environment_name
            },
        ],

        # Enhanced monitoring, leave disabled
        # MonitoringInterval=5,  # in seconds, Valid Values: 0, 1, 5, 10, 15, 30, 60
        # MonitoringRoleArn='string',  # required for monitoring interval other than 0

        # near as I can tell this is the "insert postgres parameters here" section.
        # DBParameterGroupName='string',

        # AvailabilityZone='string',  # leave as default (random)
        # DBSecurityGroups=['strings'], # non-vpc rds instance settings
        # LicenseModel='string',
        # CharacterSetName='string',
        # OptionGroupName='string',  # don't think this is required.
        # Domain='string',  # has the phrase "active directory" in the description
        # DomainIAMRoleName='string',
        # CopyTagsToSnapshot=True | False,
        # Timezone='string',  # only used by MSSQL
        # DBClusterIdentifier='string',  #
        # EnablePerformanceInsights=True,  # Aurora specific
        # PerformanceInsightsKMSKeyId='string'  # Aurora specific
        # PromotionTier = 123,  # Aurora specific
    )

    # poll until the database is available.
    while True:
        try:
            db = get_db_info(eb_environment_name)
        except DBInstanceNotFound:
            # bug fix: the log call was missing its format argument (it logged a literal "%s").
            log.error(
                "couldn't find database %s, hopefully this is a momentary glitch. Retrying."
                % eb_environment_name)
            sleep(5)
            continue
        log.info(
            '%s: RDS instance status is %s, waiting until status is "Ready"' %
            (current_time_string(), db['DBInstanceStatus']))
        # RDS spinup goes creating > backing up > available.
        if db['DBInstanceStatus'] in ["creating", 'backing-up']:
            sleep(5)
        elif db['DBInstanceStatus'] == "available":
            log.info("Database status is no longer 'creating', it is '%s'" %
                     db['DBInstanceStatus'])
            break
        else:
            raise Exception('encountered unknown database state "%s"' %
                            db['DBInstanceStatus'])

    return db
# Example #23
def create_eb_environment(eb_environment_name, without_db=False):
    """ Create the Elastic Beanstalk environment for the cluster, block until it is Ready,
    then apply post-creation hardening (bucket encryption, database access, https traffic).

    eb_environment_name: name of the environment to create.
    without_db: debugging flag only; don't actually use it in production.
    Raises EnvironmentDeploymentFailure if the environment enters a terminating/terminated
    state during creation.  Returns the environment description dict. """
    # Don't actually use the without_db flag in production, it is for debugging
    app = get_or_create_eb_application()

    # if not without_db:
    #     try:
    #         _ = get_db_info(eb_environment_name)
    #     except DBInstanceNotFound:
    #         log.error("could not find a database named '%s,' you must create a database first."
    #                   % construct_db_name(eb_environment_name))
    #         EXIT()

    option_settings = construct_eb_environment_variables(eb_environment_name)

    log.info(
        "creating a new Elastic Beanstalk environment named %s... this will take a while."
        % eb_environment_name)
    eb_client = create_eb_client()

    env = eb_client.create_environment(
        ApplicationName=BEIWE_APPLICATION_NAME,
        EnvironmentName=eb_environment_name,
        Description='elastic beanstalk beiwe cluster',
        PlatformArn=get_python36_platform_arn(),
        OptionSettings=option_settings,
        # VersionLabel='string',  # TODO: this will probably be required later?

        # a different form of configuration management
        # OptionsToRemove=[
        #     {'ResourceName': 'string',
        #      'Namespace': 'string',
        #      'OptionName': 'string'}]

        # Tags=[{'Key': 'string',
        #        'Value': 'string'}],

        # CNAMEPrefix='string',  # not required
        # Tier={'Name': 'string',
        #       'Type': 'string',
        #       'Version': 'string'},

        # GroupName='string',  # for use in other methods of eb configuration
        # TemplateName='string',  # nope
        # SolutionStackName='string', # more about templates
    )

    env_id = env['EnvironmentId']
    good_eb_environment_states = ["Launching", "Updating"]
    bad_eb_environment_states = ["Terminating", "Terminated"]

    # poll until the environment leaves its launching states.
    # NOTE(review): a status outside all three handled cases spins this loop without any
    # sleep — looks like a hot busy-wait in that case; confirm whether other statuses occur.
    while True:
        envs = retry(eb_client.describe_environments,
                     EnvironmentIds=[env_id])['Environments']
        # first iteration logs the status from the create_environment response; later
        # iterations log the refreshed status assigned below.
        log.info(
            '%s: Elastic Beanstalk status is "%s", waiting until status is "Ready"'
            % (current_time_string(), env['Status']))
        if len(envs) != 1:
            raise Exception(
                "describe_environments is broken, %s environments returned" %
                len(envs))
        env = envs[0]
        if env['Status'] in bad_eb_environment_states:
            msg = "environment deployment failed:\n%s" % format(env)
            log.error(
                msg
            )  # python logging is weird and this fails to print if python exits too quickly.
            raise EnvironmentDeploymentFailure(msg)
        if env['Status'] in good_eb_environment_states:
            sleep(5)
            continue
        if env['Status'] == "Ready":
            log.info("environment %s, is ready to have Beiwe deployed to it." %
                     eb_environment_name)
            break

    # post-creation hardening and access wiring.
    encrypt_eb_s3_bucket()
    allow_eb_environment_database_access(eb_environment_name)
    allow_443_traffic_to_load_balancer(eb_environment_name)
    return env
####################################################################################################
##################################### Argument Parsing #############################################
####################################################################################################

if __name__ == "__main__":
    # validate the global configuration file
    if not all(
        (are_aws_credentials_present(), is_global_configuration_valid())):
        EXIT(1)

    # get CLI arguments, see function for details
    arguments = cli_args_validation()

    if arguments.prod:
        log.info("RUNNING IN PROD MODE")
        PROD_MODE = True

    if arguments.dev:
        if PROD_MODE:
            log.error("You cannot provide -prod and -dev at the same time.")
            EXIT(1)
        DEV_MODE = True
        log.info("RUNNING IN DEV MODE")

    if arguments.help_setup_new_environment:
        do_help_setup_new_environment()
        EXIT(0)

    if arguments.create_environment:
        do_create_environment()