示例#1
0
def get_python27_platform_arn():
    """ Returns the most recent platform ARN for a Python 2.7 Elastic Beanstalk cluster.
    Warning: the underlying query returns different values in different AWS regions.
    Verified in us-east-1, us-east-2, us-west-1, and us-west-2 on November 1, 2017;
    other regions are untested and this may break if Amazon changes return values. """
    client = create_eb_client()
    query_filters = [{'Operator': 'contains', 'Type': 'PlatformName', 'Values': ['Python']}]
    # Note: boto3 appears to cap the response at 100 records regardless of MaxRecords.
    summaries = client.list_platform_versions(
        MaxRecords=1000, Filters=query_filters)['PlatformSummaryList']

    matching_arns = sorted(
        summary['PlatformArn'] for summary in summaries
        if summary.get('PlatformCategory', None) == 'Python'
        and "Python 2.7" in summary.get('PlatformArn', [])
        and "64bit Amazon Linux" in summary.get('PlatformArn', [])
    )

    if not matching_arns:
        raise PythonPlatformDiscoveryError("could not find python 2.7 platform")
    if len(matching_arns) > 1:
        log.error("\n***********************************************************\n"
                  "Warning: encountered multiple Python 2.7 Elastic Beanstalk environment platforms.\n"
                  "Beiwe did its best to automatically determine which environment to use.\n"
                  "After deployment finishes, determine whether there is a platform upgrade you can\n"
                  "apply for this cluster.\n"
                  "***********************************************************")
        return matching_arns[-1]
    return matching_arns[0]
示例#2
0
def get_python36_platform_arn():
    """ Returns the most recent platform ARN for a Python 3.6 Elastic Beanstalk
    cluster; the result is region specific. """
    client = create_eb_client()
    query_filters = [{'Operator': 'contains', 'Type': 'PlatformName', 'Values': ['Python']}]
    # Note: boto3 appears to cap the response at 100 records regardless of MaxRecords.
    summaries = client.list_platform_versions(
        MaxRecords=1000, Filters=query_filters)['PlatformSummaryList']

    # ARNs look like:
    # 'arn:aws:elasticbeanstalk:us-east-1::platform/Python 3.6 running on 64bit Amazon Linux/2.9.3'
    # They are not necessarily human-alphanumerically sortable, but a lexicographic
    # best effort here is fine.
    matching_arns = sorted(
        summary['PlatformArn'] for summary in summaries
        if summary.get('PlatformCategory', None) == 'Python'
        and "Python 3.6" in summary.get('PlatformArn', [])
        and "64bit Amazon Linux" in summary.get('PlatformArn', [])
    )

    if not matching_arns:
        raise PythonPlatformDiscoveryError("could not find python 3.6 platform")
    if len(matching_arns) > 1:
        log.error("\n***********************************************************\n"
                  "Warning: encountered multiple Python 3.6 Elastic Beanstalk environment platforms.\n"
                  "Beiwe did its best to automatically determine which environment to use.\n"
                  "After deployment finishes, determine whether there is a platform upgrade you can\n"
                  "apply for this cluster.\n"
                  "***********************************************************")
        return matching_arns[-1]
    return matching_arns[0]
示例#3
0
def get_instances_by_name(instance_name):
    """ Returns the list of *running* EC2 instance dicts whose Name tag equals
    instance_name; logs an error (and returns []) when none match.
    thank you to https://rob.salmond.ca/filtering-instances-by-name-with-boto3/ for having
    sufficient SEO to be a googleable answer on how to even do this.
    And then this stack overflow for how to query by instances that are running:
    https://stackoverflow.com/questions/37293366/what-is-the-correct-ways-to-write-boto3-filters-to-use-customise-tag-name
    """
    boto_filters = [
        {'Name': 'tag:Name', 'Values': [instance_name]},
        {'Name': 'instance-state-name', 'Values': ['running']},
    ]
    reservations = create_ec2_client().describe_instances(Filters=boto_filters)['Reservations']

    # flatten instances from every "reservation", whatever that is.
    instances = [
        instance for reservation in reservations for instance in reservation['Instances']
    ]

    if not instances:
        log.error("Could not find any instances matching the name '%s'" %
                  instance_name)

    return instances
示例#4
0
def create_finalized_configuration(eb_environment_name):
    """ Builds, writes to disk, and returns the finalized configuration dict for an
    environment, merging the validated beiwe environment config with the database
    credentials and the autogenerated FLASK_SECRET_KEY / S3_BUCKET values.

    Requires that an RDS server has already been created for the environment (its
    credentials are merged in via get_full_db_credentials).  Exits if a finalized
    configuration file already exists, because the autogenerated parameters must
    remain identical between the data processing servers and the Elastic Beanstalk
    frontend servers. """
    finalized_cred_path = get_finalized_credentials_file_path(
        eb_environment_name)
    if os.path.exists(finalized_cred_path):
        log.error("Encountered a finalized configuration file at %s." %
                  finalized_cred_path)
        log.error(
            "This file contains autogenerated parameters which must be identical between "
            "data processing servers and the Elastic Beanstalk frontend servers.  This file "
            "should not exist at this time, so the deployment process has been aborted."
        )
        EXIT(1)

    config = validate_beiwe_environment_config(eb_environment_name)
    config.update(get_full_db_credentials(eb_environment_name))
    config['FLASK_SECRET_KEY'] = random_alphanumeric_string(80)  # autogenerated secret
    config["S3_BUCKET"] = create_data_bucket(eb_environment_name)
    config.update(create_server_access_credentials(config["S3_BUCKET"]))

    with open(finalized_cred_path, 'w') as f:
        json.dump(config, f, indent=1)
    return config
示例#5
0
def are_aws_credentials_present():
    """ Returns True when the AWS credentials file loads and contains the required
    keys; otherwise logs a help message and returns False. """
    credentials_valid = _simple_validate_required(
        get_aws_credentials,
        AWS_CREDENTIALS_FILE,
        AWS_CREDENTIALS_FILE_KEYS,
        relpath(AWS_CREDENTIALS_FILE),
    )
    if not credentials_valid:
        log.error(VALIDATE_AWS_CREDENTIALS_MESSAGE)
    return credentials_valid
示例#6
0
def do_help_setup_new_environment():
    """ Interactively creates the per-environment configuration files for a new
    Elastic Beanstalk environment.

    Prompts for an environment name, validates it, and writes two JSON skeleton
    files (python environment variables and data processing server settings) into
    the deployment-specific config folder.  Exits if either file already exists. """
    print(HELP_SETUP_NEW_ENVIRONMENT)
    name = prompt_for_new_eb_environment_name()
    do_fail_if_bad_environment_name(name)
    do_fail_if_environment_exists(name)

    beiwe_environment_fp = get_beiwe_python_environment_variables_file_path(
        name)
    processing_server_settings_fp = get_server_configuration_file_path(name)
    extant_files = os.listdir(DEPLOYMENT_SPECIFIC_CONFIG_FOLDER)

    for fp in (beiwe_environment_fp, processing_server_settings_fp):
        if os.path.basename(fp) in extant_files:
            # Fix: report the path that actually collides.  Previously this always
            # logged beiwe_environment_fp, even when the settings file was the conflict.
            log.error("there is already a file at %s" % relpath(fp))
            EXIT(1)

    with open(beiwe_environment_fp, 'w') as f:
        json.dump(reference_environment_configuration_file(), f, indent=1)
    with open(processing_server_settings_fp, 'w') as f:
        json.dump(reference_data_processing_server_configuration(),
                  f,
                  indent=1)

    print("Environment specific files have been created at %s and %s." % (
        relpath(beiwe_environment_fp),
        relpath(processing_server_settings_fp),
    ))

    # Note: we actually cannot generate RDS credentials until we have a server, this is because
    # the hostname cannot exist until the server exists.
    print(
        """After filling in the required contents of these newly created files you will be able
    to run the -create-environment command.  Note that several more credentials files will be
    generated as part of that process. """)
示例#7
0
def is_global_configuration_valid():
    """ Returns True when the global configuration file loads and contains the
    required keys; otherwise logs a help message and returns False. """
    config_valid = _simple_validate_required(
        get_global_config,
        GLOBAL_CONFIGURATION_FILE,
        GLOBAL_CONFIGURATION_FILE_KEYS,
        relpath(GLOBAL_CONFIGURATION_FILE),
    )
    if not config_valid:
        log.error(VALIDATE_GLOBAL_CONFIGURATION_MESSAGE)
    return config_valid
示例#8
0
def do_fail_if_bad_environment_name(name):
    """ Exits with an error unless name is a valid Elastic Beanstalk environment
    name: 4 to 39 characters of letters, digits, and hyphens, not ending in a hyphen. """
    length_is_valid = 4 <= len(name) < 40
    if not length_is_valid:
        log.error("That name is either too long or too short.")
        EXIT(1)

    if name.endswith("-") or not re.match("^[a-zA-Z0-9-]+$", name):
        log.error("that is not a valid Elastic Beanstalk environment name.")
        EXIT(1)
def get_manager_instance_by_eb_environment_name(eb_environment_name):
    """ Returns the instance dict of the environment's manager server, or None when
    no manager is running.  Logs an error (but still returns the first match) when
    more than one manager is discovered. """
    candidates = get_instances_by_name(PROCESSING_MANAGER_NAME % eb_environment_name)

    if len(candidates) > 1:
        log.error("discovered multiple manager servers.  This configuration is not supported and should be corrected.")

    return candidates[0] if candidates else None
示例#10
0
def prompt_for_extant_eb_environment_name():
    """ Prompts the user for the name of an existing Elastic Beanstalk environment,
    validates that environment's configuration, and returns the name.  Exits when
    no environment with that name exists. """
    print(EXTANT_ENVIRONMENT_PROMPT)
    name = input()
    if not check_if_eb_environment_exists(name):
        log.error("There is no environment with the name %s" % name)
        EXIT(1)
    validate_beiwe_environment_config(name)
    return name
示例#11
0
def do_fix_health_checks():
    """ Prompts for an existing environment and reconfigures it to ignore health
    checks; logs and re-raises any failure. """
    name = prompt_for_extant_eb_environment_name()
    do_fail_if_environment_does_not_exist(name)
    try:
        print("Setting environment to ignore health checks")
        fix_deploy(name)
    except Exception as err:
        log.error("unable to run command due to the following error:\n %s" % err)
        raise
    else:
        print("Success.")
示例#12
0
def write_rds_credentials(eb_environment_name, credentials, test_for_existing_files):
    """ Writes to the database credentials file for the environment.

    :param eb_environment_name: name of the Elastic Beanstalk environment.
    :param credentials: dict of database credentials, serialized as JSON.
    :param test_for_existing_files: when truthy, refuse to overwrite an existing
        credentials file.
    :raises Exception: if test_for_existing_files is set and the file already exists.
    """
    db_credentials_path = get_db_credentials_file_path(eb_environment_name)
    if test_for_existing_files and os.path.exists(db_credentials_path):
        # Fix: the log call previously received the raw format string (no % argument),
        # so the path never appeared in the log; also fixes the "abortiing" typo.
        msg = "Encountered a file at %s, aborting." % db_credentials_path
        log.error(msg)
        raise Exception(msg)

    with open(db_credentials_path, 'w') as f:
        json.dump(credentials, f, indent=1)
    log.info("database credentials have been written to %s" % db_credentials_path)
示例#13
0
def create_processing_control_server(eb_environment_name, aws_server_type):
    """ Creates the processing controller ("manager") EC2 server for an environment.

    The difference between a data processing worker server and a processing controller
    server is that the controller needs to allow connections from the processors.

    :param eb_environment_name: name of the Elastic Beanstalk environment.
    :param aws_server_type: the EC2 instance type to launch.
    :raises Exception: if a non-terminated manager server already exists.
    """
    # Fix: removed a leading call to get_rds_security_groups_by_eb_name whose result
    # was discarded; the identical call is made below where the value is actually used.

    # TODO: functions that terminate all worker and all manager servers for an environment

    manager_info = get_manager_instance_by_eb_environment_name(
        eb_environment_name)
    if manager_info is not None and manager_info['State'][
            'Name'] != 'terminated':
        if manager_info['InstanceType'] == aws_server_type:
            msg = "A manager server, %s, already exists for this environment, and it is of the provided type (%s)." % (
                manager_info['InstanceId'], aws_server_type)
        else:
            msg = "A manager server, %s, already exists for this environment." % manager_info[
                'InstanceId']
        log.error(msg)
        msg = "You must terminate all worker and manager servers before you can create a new manager."
        log.error(msg)
        # sometimes log has problems if you don't give it a second, the error messages
        # above are critical
        sleep(0.1)
        raise Exception(msg)

    rabbit_mq_sec_grp_id = get_or_create_rabbit_mq_security_group(
        eb_environment_name)['GroupId']
    instance_sec_grp_id = get_rds_security_groups_by_eb_name(
        eb_environment_name)["instance_sec_grp"]['GroupId']

    try:
        open_tcp_port(instance_sec_grp_id, 22)
    except ClientError:
        # we need to open the ssh port for future worker servers, but it blows up with duplicate
        # if a user ever creates two managers during the life of the environment.
        pass

    instance_info = create_server(
        eb_environment_name,
        aws_server_type,
        security_groups=[rabbit_mq_sec_grp_id, instance_sec_grp_id])
    instance_resource = create_ec2_resource().Instance(
        instance_info["InstanceId"])
    # tag the instance so it can be discovered by name later
    instance_resource.create_tags(
        Tags=[{
            "Key": "Name",
            "Value": PROCESSING_MANAGER_NAME % eb_environment_name
        }, {
            "Key": "is_processing_manager",
            "Value": "1"
        }])
    return instance_info
def get_manager_instance_by_eb_environment_name(eb_environment_name):
    """ Returns the instance dict of the currently running manager server, or None
    (with a warning) when there is no manager.  Raises when more than one manager
    exists, which is an unsupported configuration. """
    manager_instances = get_instances_by_name(PROCESSING_MANAGER_NAME % eb_environment_name)

    if len(manager_instances) > 1:
        msg = "Discovered multiple manager servers. This configuration is not supported and should be corrected."
        log.error(msg)
        raise Exception(msg)

    if not manager_instances:
        log.warning("No manager found.")
        return None
    return manager_instances[0]
示例#15
0
def _simple_validate_required(getter_func, file_path, appropriate_keys,
                              display_name):
    """ returns False if invalid, True if valid.  For use with fully required keys, prints useful messages."""
    # try and load, fail usefully.
    try:
        json_config = getter_func()
    except Exception:
        log.error("could not load the %s file '%s'." %
                  (display_name, file_path))
        sleep(0.1)
        return False  # could not load, did not pass

    # check for invalid values and keyserrors
    error_free = True
    for k, v in json_config.iteritems():
        if k not in appropriate_keys:
            log.error("a key '%s' is present in %s, but was not expected." %
                      (k, display_name))
            error_free = False
        if not v:
            error_free = False
            log.error("'%s' must be present in %s and have a value." %
                      (k, display_name))

    for key in appropriate_keys:
        if key not in json_config:
            log.error("the key '%s' was expected in %s but not present." %
                      (key, display_name))
            error_free = False

    sleep(0.1)  # python logging is dumb, wait so logs actually appear
    return error_free
示例#16
0
def do_create_worker():
    """ Interactive command: creates and provisions a celery worker server for an
    existing Elastic Beanstalk environment.  Requires that the environment's manager
    server exists and is in the 'running' state; exits otherwise. """
    name = prompt_for_extant_eb_environment_name()
    do_fail_if_environment_does_not_exist(name)
    manager_instance = get_manager_instance_by_eb_environment_name(name)
    if manager_instance is None:
        log.error(
            "There is no manager server for the %s cluster, cannot deploy a worker until there is."
            % name)
        EXIT(1)

    if manager_instance['State']['Name'] != 'running':
        log.error(
            "There is a manager server for the %s cluster, but it is not in the running state (%s)."
            % (name, manager_instance['State']['Name']))
        EXIT(1)

    # NOTE(review): these two values are computed but never used in this function.
    manager_public_ip = get_manager_public_ip(name)
    manager_private_ip = get_manager_private_ip(name)

    try:
        settings = get_server_configuration_file(name)
    except Exception as e:
        log.error("could not read settings file")
        log.error(e)
        EXIT(1)

    log.info("creating worker server for %s..." % name)
    try:
        # NOTE(review): this passes MANAGER_SERVER_INSTANCE_TYPE when creating a
        # *worker* server — confirm whether WORKER_SERVER_INSTANCE_TYPE was intended.
        instance = create_processing_server(
            name, settings["MANAGER_SERVER_INSTANCE_TYPE"])
    except Exception as e:
        log.error(e)
        EXIT(1)
    instance_ip = instance['NetworkInterfaces'][0]['PrivateIpAddresses'][0][
        'Association']['PublicIp']
    # TODO: fabric up the worker with the celery/supervisord and ensure it can connect to manager.

    # provision the new server over ssh via fabric
    configure_fabric(name, instance_ip)
    push_files()
    apt_installs()
    load_git_repo()
    setup_python()
    push_beiwe_configuration(name)
    push_manager_private_ip(name)
    setup_celery_worker()
    setup_worker_cron()
示例#17
0
def do_create_manager():
    """ Interactive command: creates and provisions the manager (processing control)
    server for an existing Elastic Beanstalk environment, then configures it over
    ssh via fabric. """
    name = prompt_for_extant_eb_environment_name()
    do_fail_if_environment_does_not_exist(name)
    create_processing_server_configuration_file(name)

    try:
        settings = get_server_configuration_file(name)
    except Exception as e:
        log.error("could not read settings file")
        log.error(e)
        EXIT(1)

    log.info("creating manager server for %s..." % name)
    try:
        instance = create_processing_control_server(
            name, settings["MANAGER_SERVER_INSTANCE_TYPE"])
    except Exception as e:
        log.error(e)
        EXIT(1)
    # NOTE(review): despite the name, this digs the *public* IP out of the private
    # address's association record.
    public_ip = instance['NetworkInterfaces'][0]['PrivateIpAddresses'][0][
        'Association']['PublicIp']

    log.info("Finished creating manager server for %s..." % name)

    # TODO: fabric up the rabbitmq and cron task, ensure other servers can connect, watch data process
    configure_fabric(name, public_ip)
    push_files()
    apt_installs(manager=True)
    load_git_repo()
    setup_python()
    push_beiwe_configuration(name)
    push_manager_private_ip(name)
    # CC add script to create rabbitmq user
    setup_celery_manager()
    setup_manager_cron()
def do_create_manager():
    """ Interactive command: creates and provisions the manager (processing control)
    server for an existing Elastic Beanstalk environment, including rabbitmq setup,
    then configures it over ssh via fabric. """
    name = prompt_for_extant_eb_environment_name()
    do_fail_if_environment_does_not_exist(name)
    create_processing_server_configuration_file(name)

    try:
        settings = get_server_configuration_file(name)
    except Exception as e:
        log.error("could not read settings file")
        log.error(e)
        EXIT(1)

    log.info("creating manager server for %s..." % name)
    try:
        instance = create_processing_control_server(
            name, settings["MANAGER_SERVER_INSTANCE_TYPE"])
    except Exception as e:
        log.error(e)
        EXIT(1)
    # NOTE(review): despite the name, this digs the *public* IP out of the private
    # address's association record.
    public_ip = instance['NetworkInterfaces'][0]['PrivateIpAddresses'][0][
        'Association']['PublicIp']

    # provision the new server over ssh via fabric
    configure_fabric(name, public_ip)
    push_files()
    apt_installs(manager=True)
    setup_rabbitmq()
    load_git_repo()
    setup_python()
    push_beiwe_configuration(name)
    push_manager_private_ip_and_password(name)
    setup_manager_cron()
示例#19
0
def do_setup_eb_update():
    """ Interactive command: lets the user pick a staged .zip of the codebase, runs
    the zip-reduction step on it, and writes a new timestamped zip that can be
    handed to Elastic Beanstalk for an automated deployment. """
    print("\n", DO_SETUP_EB_UPDATE_OPEN)

    files = sorted(
        [f for f in os.listdir(STAGED_FILES) if f.lower().endswith(".zip")])

    if not files:
        print("Could not find any zip files in " + STAGED_FILES)
        EXIT(1)

    print("Enter the version of the codebase do you want to use:")
    for i, file_name in enumerate(files):
        print("[%s]: %s" % (i + 1, file_name))  # the menu is 1-indexed for the user
    print("(press CTL-C to cancel)\n")
    try:
        index = int(input("$ "))
    except Exception:
        log.error("Could not parse input.")
        index = None  # ide warnings
        EXIT(1)

    if index < 1 or index > len(files):
        log.error("%s was not a valid option." % index)
        EXIT(1)

    # handle 1-indexing
    file_name = files[index - 1]
    # log.info("Processing %s..." % file_name)
    # build an output name like "<name>_processed_<timestamp>.zip"
    time_ext = current_time_string().replace(" ", "_").replace(":", "_")
    output_file_name = file_name[:-4] + "_processed_" + time_ext + ".zip"
    do_zip_reduction(file_name, STAGED_FILES, output_file_name)
    log.info("Done processing %s." % file_name)
    log.info("The new file %s has been placed in %s" %
             (output_file_name, STAGED_FILES))
    print(
        "You can now provide Elastic Beanstalk with %s to run an automated deployment of the new code."
        % output_file_name)
    EXIT(0)
示例#20
0
def do_create_worker():
    """ Interactive command: creates and provisions a celery worker server for an
    existing Elastic Beanstalk environment, then starts supervisord on it.
    Requires that a manager server exists; exits otherwise. """
    name = prompt_for_extant_eb_environment_name()
    do_fail_if_environment_does_not_exist(name)
    manager_instance = get_manager_instance_by_eb_environment_name(name)
    if manager_instance is None:
        log.error(
            "There is no manager server for the %s cluster, cannot deploy a worker until there is."
            % name)
        EXIT(1)

    try:
        settings = get_server_configuration_file(name)
    except Exception as e:
        log.error("could not read settings file")
        log.error(e)
        settings = None  # ide warnings...
        EXIT(1)

    log.info("creating worker server for %s..." % name)
    try:
        # NOTE(review): WORKER_SERVER_INSTANCE_TYPE is a bare name used as a dict
        # key — confirm it is a module-level constant holding the key string.
        instance = create_processing_server(
            name, settings[WORKER_SERVER_INSTANCE_TYPE])
    except Exception as e:
        log.error(e)
        instance = None  # ide warnings...
        EXIT(1)
    # NOTE(review): despite the path, this digs the *public* IP out of the private
    # address's association record.
    instance_ip = instance['NetworkInterfaces'][0]['PrivateIpAddresses'][0][
        'Association']['PublicIp']

    # provision the new server over ssh via fabric
    configure_fabric(name, instance_ip)
    create_swap()
    push_home_directory_files()
    apt_installs()
    load_git_repo()
    setup_python()
    push_beiwe_configuration(name)
    push_manager_private_ip_and_password(name)
    setup_worker_cron()
    setup_celery_worker()  # run setup worker last.
    log.warning(
        "Server is almost up.  Waiting 20 seconds to avoid a race condition..."
    )
    sleep(20)
    run("supervisord")
示例#21
0
def construct_eb_environment_variables(eb_environment_name):
    """ Assembles the Elastic Beanstalk option-settings configuration for an
    environment, filling each DynamicParameter placeholder in the base configuration
    with its generated value.

    :raises Exception: if any generated value goes unused, which indicates the base
        configuration and the generated values are out of sync.
    """
    global_config = get_global_config()
    try:
        environment_variables = get_finalized_environment_variables(
            eb_environment_name)
    except Exception as e:
        log.error("could not get your environment settings.")
        log.error(e)
        raise

    try:
        server_settings = get_server_configuration_file(eb_environment_name)
    except Exception as e:
        log.error("could not get your server settings.")
        log.error(e)
        raise

    # This needs to be a comma separated list of environment variables declared as "var=value".
    # Fix: was dict.iteritems(), a Python 2 idiom that raises AttributeError on Python 3.
    env_var_string = ",".join(
        ["%s=%s" % (k, v) for k, v in environment_variables.items()])

    generated_configuration_details = {
        "ServiceRole": get_or_create_eb_service_role()['Arn'],
        "IamInstanceProfile": get_or_create_eb_instance_profile()['Arn'],
        "EnvironmentVariables": env_var_string,
        "EC2KeyName": global_config["DEPLOYMENT_KEY_NAME"],
        "InstanceType": server_settings['ELASTIC_BEANSTALK_INSTANCE_TYPE'],
        "Notification Endpoint": global_config['SYSTEM_ADMINISTRATOR_EMAIL']
    }

    configuration = get_base_eb_configuration()
    for option in configuration:
        # swap each placeholder for its generated value, consuming it from the dict
        if isinstance(option['Value'], DynamicParameter):
            option['Value'] = generated_configuration_details.pop(
                option['OptionName'])

    # anything left over means the base configuration no longer consumes it
    if generated_configuration_details:
        pprint(generated_configuration_details)
        raise Exception(
            "encountered unused autogenerated configs, see print statement above to debug."
        )

    return configuration
示例#22
0
if __name__ == "__main__":
    # validate the AWS credentials and global configuration files before doing anything
    if not all(
        (are_aws_credentials_present(), is_global_configuration_valid())):
        EXIT(1)

    # get CLI arguments, see function for details
    arguments = cli_args_validation()

    if arguments.prod:
        log.warning("RUNNING IN PROD MODE")
        PROD_MODE.set(True)

    if arguments.dev:
        # -prod and -dev are mutually exclusive
        if PROD_MODE:
            log.error("You cannot provide -prod and -dev at the same time.")
            EXIT(1)
        DEV_MODE.set(True)
        log.warning("RUNNING IN DEV MODE")

    # each command below runs and then exits the script
    if arguments.help_setup_new_environment:
        do_help_setup_new_environment()
        EXIT(0)

    if arguments.create_environment:
        do_create_environment()
        EXIT(0)

    if arguments.create_manager:
        do_create_manager()
        EXIT(0)
示例#23
0
def do_fail_if_environment_exists(name):
    """ Exits with an error when an Elastic Beanstalk environment with this name
    already exists. """
    if check_if_eb_environment_exists(name):
        log.error("There is already an environment named '%s'" % name.lower())
        EXIT(1)
示例#24
0
def validate_beiwe_environment_config(eb_environment_name):
    """ Validates the global configuration, AWS credentials, and beiwe environment
    variables files for an environment, logging every problem and exiting if any
    are found.

    :return: dict with DOMAIN_NAME, SYSADMIN_EMAILS, and the four Sentry DSNs.
    """
    errors = []
    try:
        # aws_credentials is loaded only to confirm the file is readable; its
        # contents are not used in this function.
        aws_credentials = get_aws_credentials()
        global_config = get_global_config()
        beiwe_variables = get_beiwe_environment_variables(eb_environment_name)
    except Exception as e:
        log.error(
            "encountered an error while trying to read configuration files.")
        log.error(e)
        EXIT(1)

    beiwe_variables_name = os.path.basename(
        get_beiwe_python_environment_variables_file_path(eb_environment_name))
    reference_environment_configuration_keys = reference_environment_configuration_file(
    ).keys()
    # Validate the data

    sysadmin_email = global_config.get('SYSTEM_ADMINISTRATOR_EMAIL', "")
    if not sysadmin_email:
        errors.append(
            '(Global Configuration) System administrator email cannot be empty.'
        )
    else:
        # raw string literal for the regex (pattern itself is unchanged)
        if not re.match(r'^[\S]+@[\S]+\.[\S]+$', sysadmin_email):
            errors.append(
                '(Global Configuration) Invalid email address: {}'.format(
                    sysadmin_email))

    # check sentry urls
    sentry_dsns = {
        "SENTRY_ELASTIC_BEANSTALK_DSN":
        beiwe_variables.get('SENTRY_ELASTIC_BEANSTALK_DSN', ''),
        "SENTRY_DATA_PROCESSING_DSN":
        beiwe_variables.get('SENTRY_DATA_PROCESSING_DSN', ''),
        "SENTRY_ANDROID_DSN":
        beiwe_variables.get('SENTRY_ANDROID_DSN', ''),
        "SENTRY_JAVASCRIPT_DSN":
        beiwe_variables.get('SENTRY_JAVASCRIPT_DSN', ''),
    }

    # Fix: was dict.iteritems(), a Python 2 idiom that raises AttributeError on Python 3.
    for name, dsn in sentry_dsns.items():
        if ensure_nonempty_string(dsn, name, errors, beiwe_variables_name):
            if not DSN_REGEX.match(dsn):
                errors.append('({}) Invalid DSN: {}'.format(
                    beiwe_variables_name, dsn))
            # if name == "SENTRY_JAVASCRIPT_DSN":
            #     if not PUBLIC_DSN_REGEX.match(dsn):
            #         errors.append('({}) Invalid DSN: {}'.format(beiwe_variables_name, dsn))
            # elif not PRIVATE_DSN_REGEX.match(dsn):
            #     errors.append('({}) Invalid DSN: {}'.format(beiwe_variables_name, dsn))

    domain_name = beiwe_variables.get('DOMAIN', None)
    ensure_nonempty_string(domain_name, 'Domain name', errors,
                           beiwe_variables_name)

    # the beiwe variables file must contain exactly the reference keys
    for key in reference_environment_configuration_keys:
        if key not in beiwe_variables:
            errors.append("{} is missing.".format(key))

    for key in beiwe_variables:
        if key not in reference_environment_configuration_keys:
            errors.append("{} is present but was not expected.".format(key))

    # Raise any errors
    if errors:
        for e in errors:
            log.error(e)
        # python logging has some issues if you exit too fast... isn't it supposed
        # to be synchronous?
        sleep(0.1)
        EXIT(1)  # forcibly exit, do not continue to run any code.

    # Check for presence of the server settings file:
    if not file_exists(
            get_server_configuration_file_path(eb_environment_name)):
        log.error("No server settings file exists at %s." %
                  get_server_configuration_file_path(eb_environment_name))
        EXIT(1)

    # Put the data into one dict to be returned
    return {
        'DOMAIN_NAME': domain_name,
        'SYSADMIN_EMAILS': sysadmin_email,
        'SENTRY_ELASTIC_BEANSTALK_DSN':
        sentry_dsns['SENTRY_ELASTIC_BEANSTALK_DSN'],
        'SENTRY_DATA_PROCESSING_DSN':
        sentry_dsns['SENTRY_DATA_PROCESSING_DSN'],
        'SENTRY_ANDROID_DSN': sentry_dsns['SENTRY_ANDROID_DSN'],
        'SENTRY_JAVASCRIPT_DSN': sentry_dsns['SENTRY_JAVASCRIPT_DSN']
    }
示例#25
0
def create_eb_environment(eb_environment_name, without_db=False):
    """ Creates a new Elastic Beanstalk environment for the Beiwe application, polls
    until it reaches the "Ready" state, then applies S3 encryption, database access,
    and load-balancer HTTPS settings.

    :param eb_environment_name: name for the new environment.
    :param without_db: debugging flag only; don't actually use it in production.
    :return: the final environment description dict from describe_environments.
    :raises EnvironmentDeploymentFailure: if the environment enters a terminating
        or terminated state during deployment.
    """
    app = get_or_create_eb_application()

    # if not without_db:
    #     try:
    #         _ = get_db_info(eb_environment_name)
    #     except DBInstanceNotFound:
    #         log.error("could not find a database named '%s,' you must create a database first."
    #                   % construct_db_name(eb_environment_name))
    #         EXIT()

    option_settings = construct_eb_environment_variables(eb_environment_name)

    log.info(
        "creating a new Elastic Beanstalk environment named %s... this will take a while."
        % eb_environment_name)
    eb_client = create_eb_client()

    env = eb_client.create_environment(
        ApplicationName=BEIWE_APPLICATION_NAME,
        EnvironmentName=eb_environment_name,
        Description='elastic beanstalk beiwe cluster',
        PlatformArn=get_python36_platform_arn(),
        OptionSettings=option_settings,
        # VersionLabel='string',  # TODO: this will probably be required later?

        # a different form of configuration management
        # OptionsToRemove=[
        #     {'ResourceName': 'string',
        #      'Namespace': 'string',
        #      'OptionName': 'string'}]

        # Tags=[{'Key': 'string',
        #        'Value': 'string'}],

        # CNAMEPrefix='string',  # not required
        # Tier={'Name': 'string',
        #       'Type': 'string',
        #       'Version': 'string'},

        # GroupName='string',  # for use in other methods of eb configuration
        # TemplateName='string',  # nope
        # SolutionStackName='string', # more about templates
    )

    env_id = env['EnvironmentId']
    good_eb_environment_states = ["Launching", "Updating"]
    bad_eb_environment_states = ["Terminating", "Terminated"]

    # poll describe_environments until the environment settles
    while True:
        envs = retry(eb_client.describe_environments,
                     EnvironmentIds=[env_id])['Environments']
        log.info(
            '%s: Elastic Beanstalk status is "%s", waiting until status is "Ready"'
            % (current_time_string(), env['Status']))
        if len(envs) != 1:
            raise Exception(
                "describe_environments is broken, %s environments returned" %
                len(envs))
        env = envs[0]
        if env['Status'] in bad_eb_environment_states:
            msg = "environment deployment failed:\n%s" % format(env)
            # python logging is weird and this fails to print if python exits too
            # quickly.
            log.error(
                msg
            )
            raise EnvironmentDeploymentFailure(msg)
        if env['Status'] in good_eb_environment_states:
            sleep(5)
            continue
        if env['Status'] == "Ready":
            log.info("environment %s, is ready to have Beiwe deployed to it." %
                     eb_environment_name)
            break

    # post-creation hardening / wiring
    encrypt_eb_s3_bucket()
    allow_eb_environment_database_access(eb_environment_name)
    allow_443_traffic_to_load_balancer(eb_environment_name)
    return env
示例#26
0
def create_new_rds_instance(eb_environment_name):
    """ Creates a new Postgres RDS instance for the given Elastic Beanstalk environment,
    generates and persists credentials for it, attaches the appropriate security groups,
    and blocks until the instance reaches the "available" state.

    Returns the database info dict (as returned by get_db_info) once the instance is ready.
    Raises EnvironmentDeploymentFailure-adjacent errors indirectly; raises a generic
    Exception if the instance enters an unrecognized state during spinup. """
    db_instance_identifier = construct_db_name(eb_environment_name)
    # Identify whether there is already a database with this name; we don't want to
    # clobber an existing database for this environment.
    try:
        _ = get_db_info(eb_environment_name)
        log.error("There is already a database named %s" % eb_environment_name)
        EXIT()
    except DBInstanceNotFound:
        pass

    database_server_type = get_server_configuration_file(
        eb_environment_name)['DB_SERVER_TYPE']
    engine = get_most_recent_postgres_engine()

    credentials = generate_valid_postgres_credentials()
    log.info(
        "writing database credentials to disk, database address will be added later."
    )

    write_rds_credentials(eb_environment_name, credentials, True)

    # There is some weirdness involving security groups.  It looks like there is this concept of
    # non-vpc security groups, I am fairly certain that this interacts with cross-vpc, IAM based
    # database access.
    create_rds_security_groups(db_instance_identifier)
    db_sec_grp_id = get_rds_security_groups(
        db_instance_identifier)['database_sec_grp']['GroupId']

    log.info("Creating RDS Postgres database named %s" %
             db_instance_identifier)

    rds_client = create_rds_client()
    rds_instance = rds_client.create_db_instance(
        # server details
        DBInstanceIdentifier=db_instance_identifier,
        DBInstanceClass="db." + database_server_type,
        MultiAZ=False,
        PubliclyAccessible=False,
        Port=POSTGRES_PORT,

        # attach the security group that will allow access
        VpcSecurityGroupIds=[db_sec_grp_id],
        #TODO: is this even relevant?
        # providing the subnet is critical, not providing this value causes the db to be non-vpc
        # DBSubnetGroupName='string',

        # db storage
        StorageType='gp2',  # valid options are standard, gp2, io1
        # Iops=1000,  # multiple between 3 and 10 times the storage; only for use with io1.

        # AllocatedStorage has weird constraints:
        # General Purpose (SSD) storage (gp2): Must be an integer from 5 to 6144.
        # Provisioned IOPS storage (io1): Must be an integer from 100 to 6144.
        # Magnetic storage (standard): Must be an integer from 5 to 3072.
        AllocatedStorage=50,  # in gigabytes

        # StorageEncrypted=True | False,  # buh? drive encryption I think.
        # KmsKeyId='string',
        # TdeCredentialArn='string',  # probably not something we will implement
        # TdeCredentialPassword='******',  # probably not something we will implement

        # Security
        MasterUsername=credentials['RDS_USERNAME'],
        MasterUserPassword=credentials['RDS_PASSWORD'],
        DBName=credentials['RDS_DB_NAME'],
        EnableIAMDatabaseAuthentication=False,
        Engine=engine['Engine'],  # will be "postgres"
        EngineVersion=engine[
            'EngineVersion'],  # most recent postgres version in this region.
        PreferredMaintenanceWindow=MAINTAINANCE_WINDOW,
        PreferredBackupWindow=BACKUP_WINDOW,
        AutoMinorVersionUpgrade=True,  # auto-upgrades are fantastic
        BackupRetentionPeriod=BACKUP_RETENTION_PERIOD_DAYS,
        Tags=[
            {
                'Key': 'BEIWE-NAME',
                'Value': 'Beiwe postgres database for %s' % eb_environment_name
            },
        ],

        # Enhanced monitoring, leave disabled
        # MonitoringInterval=5,  # in seconds, Valid Values: 0, 1, 5, 10, 15, 30, 60
        # MonitoringRoleArn='string',  # required for monitoring interval other than 0

        # near as I can tell this is the "insert postgres parameters here" section.
        # DBParameterGroupName='string',

        # AvailabilityZone='string',  # leave as default (random)
        # DBSecurityGroups=['strings'], # non-vpc rds instance settings
        # LicenseModel='string',
        # CharacterSetName='string',
        # OptionGroupName='string',  # don't think this is required.
        # Domain='string',  # has the phrase "active directory" in the description
        # DomainIAMRoleName='string',
        # CopyTagsToSnapshot=True | False,
        # Timezone='string',  # only used by MSSQL
        # DBClusterIdentifier='string',  #
        # EnablePerformanceInsights=True,  # Aurora specific
        # PerformanceInsightsKMSKeyId='string'  # Aurora specific
        # PromotionTier = 123,  # Aurora specific
    )

    # Poll until the database finishes provisioning; RDS spinup goes
    # creating > backing-up > available.
    while True:
        try:
            db = get_db_info(eb_environment_name)
        except DBInstanceNotFound:
            # The instance can briefly be invisible to describe calls right after creation.
            # (Bug fix: the %s placeholder previously had no argument supplied.)
            log.error(
                "couldn't find database %s, hopefully this is a momentary glitch. Retrying."
                % db_instance_identifier
            )
            sleep(5)
            continue
        log.info(
            '%s: RDS instance status is %s, waiting until status is "Ready"' %
            (current_time_string(), db['DBInstanceStatus']))
        if db['DBInstanceStatus'] in ["creating", 'backing-up']:
            sleep(5)
        elif db['DBInstanceStatus'] == "available":
            log.info("Database status is no longer 'creating', it is '%s'" %
                     db['DBInstanceStatus'])
            break
        else:
            # Fail loudly on any state we don't recognize (e.g. "failed", "deleting").
            raise Exception('encountered unknown database state "%s"' %
                            db['DBInstanceStatus'])

    return db