Code Example #1
def configure_fabric(eb_environment_name, ip_address, key_filename=None):
    if eb_environment_name is not None:
        get_finalized_environment_variables(eb_environment_name)
    if key_filename is None:
        key_filename = get_global_config()['DEPLOYMENT_KEY_FILE_PATH']
    fabric_env.host_string = ip_address
    fabric_env.user = REMOTE_USERNAME
    fabric_env.key_filename = key_filename
    retry(run, "# waiting for ssh to be connectable...")
    run("echo >> {log}".format(log=LOG_FILE))
    sudo("chmod 666 {log}".format(log=LOG_FILE))
Code Example #2
def apt_installs(manager=False, single_server_ami=False):
    if manager:
        apt_install_list = APT_MANAGER_INSTALLS
    elif single_server_ami:
        apt_install_list = APT_SINGLE_SERVER_AMI_INSTALLS
    else:
        apt_install_list = APT_WORKER_INSTALLS
    installs_string = " ".join(apt_install_list)

    sudo('apt-get -y update >> {log}'.format(log=LOG_FILE))

    retry(
        sudo, 'apt-get -y install {installs} >> {log}'.format(
            installs=installs_string, log=LOG_FILE))
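The APT_*_INSTALLS names referenced above are package lists defined in the project's constants; the values below are placeholders that only illustrate the expected shape (lists of apt package names), not the real package sets.

# Placeholder package lists showing the shape of the constants used above; the
# actual contents live in the project's constants module and will differ.
APT_MANAGER_INSTALLS = ["rabbitmq-server", "supervisor"]      # placeholder values
APT_WORKER_INSTALLS = ["supervisor"]                          # placeholder values
APT_SINGLE_SERVER_AMI_INSTALLS = ["supervisor", "nginx"]      # placeholder values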
Code Example #3
def manager_fix():
    # It is unclear what causes this.  The notifications task creates zombie processes that on at
    # least one occasion did not respond to kill -9 commands even when run as the superuser. This
    # occurs on both workers and managers; a 20 second sleep operation fixes it, 10 seconds does not.
    # (Tested on the slowest server, a t3a.nano, with the swap that is required to run the celery tasks.)

    # Update: it turns out there is an alternate failure mode if you try to do the 20 second
    # wait (which works for workers), which is that all calls to the celery Inspect object
    # block for exceptionally long periods, even when a timeout value is provided. (This behavior
    # has other triggers too, this is just a reliable way to trigger it.)
    try_sudo("shutdown -r now")
    log.warning("rebooting server to fix rabbitmq bugs...")
    sleep(5)
    retry(run, "# waiting for server to reboot, this might take a while.")

    # we need to re-enable the swap after the reboot, then we can finally start supervisor without
    # creating zombie celery threads.
    sudo("swapon /swapfile")
    sudo("swapon -s")
Code Example #4
def create_eb_environment(eb_environment_name, without_db=False):
    # Don't actually use the without_db flag in production; it is for debugging.
    app = get_or_create_eb_application()

    # if not without_db:
    #     try:
    #         _ = get_db_info(eb_environment_name)
    #     except DBInstanceNotFound:
    #         log.error("could not find a database named '%s,' you must create a database first."
    #                   % construct_db_name(eb_environment_name))
    #         EXIT()

    option_settings = construct_eb_environment_variables(eb_environment_name)

    log.info(
        "creating a new Elastic Beanstalk environment named %s... this will take a while."
        % eb_environment_name)
    eb_client = create_eb_client()

    env = eb_client.create_environment(
        ApplicationName=BEIWE_APPLICATION_NAME,
        EnvironmentName=eb_environment_name,
        Description='elastic beanstalk beiwe cluster',
        PlatformArn=get_python36_platform_arn(),
        OptionSettings=option_settings,
        # VersionLabel='string',  # TODO: this will probably be required later?

        # a different form of configuration management
        # OptionsToRemove=[
        #     {'ResourceName': 'string',
        #      'Namespace': 'string',
        #      'OptionName': 'string'}]

        # Tags=[{'Key': 'string',
        #        'Value': 'string'}],

        # CNAMEPrefix='string',  # not required
        # Tier={'Name': 'string',
        #       'Type': 'string',
        #       'Version': 'string'},

        # GroupName='string',  # for use in other methods of eb configuration
        # TemplateName='string',  # nope
        # SolutionStackName='string', # more about templates
    )

    env_id = env['EnvironmentId']
    good_eb_environment_states = ["Launching", "Updating"]
    bad_eb_environment_states = ["Terminating", "Terminated"]

    while True:
        envs = retry(eb_client.describe_environments,
                     EnvironmentIds=[env_id])['Environments']
        log.info(
            '%s: Elastic Beanstalk status is "%s", waiting until status is "Ready"'
            % (current_time_string(), env['Status']))
        if len(envs) != 1:
            raise Exception(
                "describe_environments is broken, %s environments returned" %
                len(envs))
        env = envs[0]
        if env['Status'] in bad_eb_environment_states:
            msg = "environment deployment failed:\n%s" % format(env)
            # python logging is weird and this fails to print if python exits too quickly.
            log.error(msg)
            raise EnvironmentDeploymentFailure(msg)
        if env['Status'] in good_eb_environment_states:
            sleep(5)
            continue
        if env['Status'] == "Ready":
            log.info("environment %s is ready to have Beiwe deployed to it." % eb_environment_name)
            break

    encrypt_eb_s3_bucket()
    allow_eb_environment_database_access(eb_environment_name)
    allow_443_traffic_to_load_balancer(eb_environment_name)
    return env
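Hypothetically, the functions above fit together roughly as follows when bringing up a new cluster; the environment name and IP address are made up, and the real deployment entry point may sequence these calls differently.

# Hypothetical wiring of the functions shown above; all literal values are made up.
environment_name = "example-beiwe-env"
environment = create_eb_environment(environment_name)

configure_fabric(environment_name, "203.0.113.10")  # hypothetical manager IP
apt_installs(manager=True)
manager_fix()  # reboot, then re-enable swap before starting supervisor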