def do_create_manager():
    """Interactively create and provision the manager (processing control)
    server for an existing Elastic Beanstalk environment.

    Prompts for the environment name, creates the processing-server
    configuration file, launches the EC2 instance, then provisions it over
    fabric (packages, rabbitmq, code checkout, python env, beiwe config,
    cron)."""
    # NOTE(review): a later definition of do_create_manager in this same file
    # shadows this one, so this version is dead code — it appears to be an
    # older revision (it calls setup_rabbitmq() and
    # push_manager_private_ip_and_password() where the later version calls
    # setup_celery_manager() and push_manager_private_ip()). Confirm and remove.
    name = prompt_for_extant_eb_environment_name()
    do_fail_if_environment_does_not_exist(name)
    create_processing_server_configuration_file(name)

    # Read the per-environment server settings; abort if unreadable.
    try:
        settings = get_server_configuration_file(name)
    except Exception as e:
        log.error("could not read settings file")
        log.error(e)
        EXIT(1)

    log.info("creating manager server for %s..." % name)
    # Launch the EC2 instance for the manager; abort on any AWS failure.
    try:
        instance = create_processing_control_server(name, settings["MANAGER_SERVER_INSTANCE_TYPE"])
    except Exception as e:
        log.error(e)
        EXIT(1)

    # Dig the public IP out of the EC2 instance description structure.
    public_ip = instance['NetworkInterfaces'][0]['PrivateIpAddresses'][0]['Association']['PublicIp']

    # Provision the new server over SSH (fabric); these steps are
    # order-dependent.
    configure_fabric(name, public_ip)
    push_files()
    apt_installs(manager=True)
    setup_rabbitmq()
    load_git_repo()
    setup_python()
    push_beiwe_configuration(name)
    push_manager_private_ip_and_password(name)
    setup_manager_cron()
def do_create_manager():
    """Interactively create and provision the manager (processing control)
    server for an existing Elastic Beanstalk environment.

    Prompts for the environment name, creates the processing-server
    configuration file, launches the EC2 instance, then provisions it over
    fabric (packages, code checkout, python env, beiwe config, celery, cron).
    Exits the process (EXIT) on any unrecoverable failure."""
    name = prompt_for_extant_eb_environment_name()
    do_fail_if_environment_does_not_exist(name)
    create_processing_server_configuration_file(name)

    # Read the per-environment server settings; abort if unreadable.
    try:
        settings = get_server_configuration_file(name)
    except Exception as e:
        log.error("could not read settings file")
        log.error(e)
        EXIT(1)

    log.info("creating manager server for %s..." % name)
    # Launch the EC2 instance for the manager; abort on any AWS failure.
    try:
        instance = create_processing_control_server(name, settings["MANAGER_SERVER_INSTANCE_TYPE"])
    except Exception as e:
        log.error(e)
        EXIT(1)

    # Dig the public IP out of the EC2 instance description structure.
    public_ip = instance['NetworkInterfaces'][0]['PrivateIpAddresses'][0]['Association']['PublicIp']

    # TODO: fabric up the rabbitmq and cron task, ensure other servers can connect, watch data process
    # Provision the new server over SSH (fabric); these steps are
    # order-dependent.
    configure_fabric(name, public_ip)
    push_files()
    apt_installs(manager=True)
    load_git_repo()
    setup_python()
    push_beiwe_configuration(name)
    push_manager_private_ip(name)
    # CC add script to create rabbitmq user
    setup_celery_manager()
    setup_manager_cron()

    # BUGFIX: this completion message was previously logged immediately after
    # the EC2 instance was created, before any of the provisioning steps above
    # had run; log it only once the manager is actually set up.
    log.info("Finished creating manager server for %s..." % name)
def do_create_worker():
    """Interactively create and provision a worker (data processing) server
    for an existing Elastic Beanstalk environment.

    Requires a manager server for the cluster to already exist and be in the
    'running' state. Exits the process (EXIT) on any unrecoverable failure."""
    # NOTE(review): a later definition of do_create_worker in this same file
    # shadows this one, so this version is dead code; confirm and remove.
    name = prompt_for_extant_eb_environment_name()
    do_fail_if_environment_does_not_exist(name)

    # A worker is useless (and cannot be configured) without a running manager.
    manager_instance = get_manager_instance_by_eb_environment_name(name)
    if manager_instance is None:
        log.error("There is no manager server for the %s cluster, cannot deploy a worker until there is." % name)
        EXIT(1)
    if manager_instance['State']['Name'] != 'running':
        log.error("There is a manager server for the %s cluster, but it is not in the running state (%s)."
                  % (name, manager_instance['State']['Name']))
        EXIT(1)

    # NOTE(review): these two lookups are never used below — presumably kept
    # for their validation side effects; confirm before removing.
    manager_public_ip = get_manager_public_ip(name)
    manager_private_ip = get_manager_private_ip(name)

    # Read the per-environment server settings; abort if unreadable.
    try:
        settings = get_server_configuration_file(name)
    except Exception as e:
        log.error("could not read settings file")
        log.error(e)
        EXIT(1)

    log.info("creating worker server for %s..." % name)
    # Launch the EC2 instance for the worker; abort on any AWS failure.
    try:
        # BUGFIX: this previously read settings["MANAGER_SERVER_INSTANCE_TYPE"],
        # sizing worker servers with the *manager's* instance type. The live
        # version of this function reads the worker key.
        instance = create_processing_server(name, settings["WORKER_SERVER_INSTANCE_TYPE"])
    except Exception as e:
        log.error(e)
        EXIT(1)

    # Dig the public IP out of the EC2 instance description structure.
    instance_ip = instance['NetworkInterfaces'][0]['PrivateIpAddresses'][0]['Association']['PublicIp']

    # TODO: fabric up the worker with the celery/supervisord and ensure it can connect to manager.
    # Provision the new server over SSH (fabric); these steps are
    # order-dependent.
    configure_fabric(name, instance_ip)
    push_files()
    apt_installs()
    load_git_repo()
    setup_python()
    push_beiwe_configuration(name)
    push_manager_private_ip(name)
    setup_celery_worker()
    setup_worker_cron()
def do_create_worker():
    """Interactively create and provision a worker (data processing) server
    for an existing Elastic Beanstalk environment.

    Requires a manager server for the cluster to already exist. Exits the
    process (EXIT) on any unrecoverable failure."""
    name = prompt_for_extant_eb_environment_name()
    do_fail_if_environment_does_not_exist(name)

    # A worker is useless (and cannot be configured) without a manager.
    manager_instance = get_manager_instance_by_eb_environment_name(name)
    if manager_instance is None:
        log.error("There is no manager server for the %s cluster, cannot deploy a worker until there is." % name)
        EXIT(1)

    # Read the per-environment server settings; abort if unreadable.
    try:
        settings = get_server_configuration_file(name)
    except Exception as e:
        log.error("could not read settings file")
        log.error(e)
        settings = None  # ide warnings...
        EXIT(1)

    log.info("creating worker server for %s..." % name)
    # Launch the EC2 instance for the worker; abort on any AWS failure.
    try:
        # BUGFIX: was settings[WORKER_SERVER_INSTANCE_TYPE] — a bare name, not
        # a string. Every other settings access in this file uses a quoted
        # string key, and no such constant is visible; index with the string.
        instance = create_processing_server(name, settings["WORKER_SERVER_INSTANCE_TYPE"])
    except Exception as e:
        log.error(e)
        instance = None  # ide warnings...
        EXIT(1)

    # Dig the public IP out of the EC2 instance description structure.
    instance_ip = instance['NetworkInterfaces'][0]['PrivateIpAddresses'][0]['Association']['PublicIp']

    # Provision the new server over SSH (fabric); these steps are
    # order-dependent.
    configure_fabric(name, instance_ip)
    create_swap()
    push_home_directory_files()
    apt_installs()
    load_git_repo()
    setup_python()
    push_beiwe_configuration(name)
    push_manager_private_ip_and_password(name)
    setup_worker_cron()
    setup_celery_worker()  # run setup worker last.
    log.warning("Server is almost up. Waiting 20 seconds to avoid a race condition...")
    sleep(20)
    run("supervisord")
def construct_eb_environment_variables(eb_environment_name):
    """Build the finalized Elastic Beanstalk option-settings configuration for
    the named environment.

    Merges the autogenerated details (service role, instance profile, env-var
    string, key name, instance type, notification email) into the base EB
    configuration wherever the base declares a DynamicParameter value.

    Returns the completed configuration list.
    Raises if environment/server settings cannot be loaded, or if any
    autogenerated detail goes unused (which indicates a config mismatch)."""
    global_config = get_global_config()
    try:
        environment_variables = get_finalized_environment_variables(eb_environment_name)
    except Exception as e:
        log.error("could not get your environment settings.")
        log.error(e)
        raise

    try:
        server_settings = get_server_configuration_file(eb_environment_name)
    except Exception as e:
        log.error("could not get your server settings.")
        log.error(e)
        raise

    # This needs to be a comma separated list of environment variables declared as "var=value"
    # BUGFIX: .iteritems() is Python-2-only; .items() behaves identically here
    # and works on both Python 2 and 3.
    env_var_string = ",".join(["%s=%s" % (k, v) for k, v in environment_variables.items()])

    generated_configuration_details = {
        "ServiceRole": get_or_create_eb_service_role()['Arn'],
        "IamInstanceProfile": get_or_create_eb_instance_profile()['Arn'],
        "EnvironmentVariables": env_var_string,
        "EC2KeyName": global_config["DEPLOYMENT_KEY_NAME"],
        "InstanceType": server_settings['ELASTIC_BEANSTALK_INSTANCE_TYPE'],
        "Notification Endpoint": global_config['SYSTEM_ADMINISTRATOR_EMAIL'],
    }

    # Splice each autogenerated value into the slot the base configuration
    # marked as dynamic; pop so we can detect leftovers afterwards.
    configuration = get_base_eb_configuration()
    for option in configuration:
        if isinstance(option['Value'], DynamicParameter):
            option['Value'] = generated_configuration_details.pop(option['OptionName'])

    # Anything left over means the base configuration and this function have
    # drifted out of sync — fail loudly rather than deploy a broken config.
    if generated_configuration_details:
        pprint(generated_configuration_details)
        raise Exception("encountered unused autogenerated configs, see print statement above to debug.")

    return configuration
def create_new_rds_instance(eb_environment_name):
    """Create the RDS Postgres database for the named Elastic Beanstalk
    environment and block until it reports the "available" status.

    Writes the generated database credentials to disk, creates the database
    security groups, then launches the RDS instance.

    Returns the database info dict (as returned by get_db_info).
    Exits the script if a database for this environment already exists;
    raises on an unrecognized database state during spinup."""
    db_instance_identifier = construct_db_name(eb_environment_name)

    # identify whether there is already a database with this name, we don't
    # want to clobber an existing one.
    try:
        _ = get_db_info(eb_environment_name)
        log.error("There is already a database named %s" % eb_environment_name)
        EXIT()
    except DBInstanceNotFound:
        pass

    database_server_type = get_server_configuration_file(eb_environment_name)['DB_SERVER_TYPE']
    engine = get_most_recent_postgres_engine()

    credentials = generate_valid_postgres_credentials()
    log.info("writing database credentials to disk, database address will be added later.")
    write_rds_credentials(eb_environment_name, credentials, True)

    # There is some weirdness involving security groups. It looks like there is this concept of
    # non-vpc security groups, I am fairly certain that this interacts with cross-vpc, IAM based
    # database access.
    create_rds_security_groups(db_instance_identifier)
    db_sec_grp_id = get_rds_security_groups(db_instance_identifier)['database_sec_grp']['GroupId']

    log.info("Creating RDS Postgres database named %s" % db_instance_identifier)

    rds_client = create_rds_client()
    rds_instance = rds_client.create_db_instance(
        # server details
        DBInstanceIdentifier=db_instance_identifier,
        DBInstanceClass="db." + database_server_type,
        MultiAZ=False,
        PubliclyAccessible=False,
        Port=POSTGRES_PORT,

        # attach the security group that will allow access
        VpcSecurityGroupIds=[db_sec_grp_id],

        # TODO: is this even relevant?
        # providing the subnet is critical, not providing this value causes the db to be non-vpc
        # DBSubnetGroupName='string',

        # db storage
        StorageType='gp2',  # valid options are standard, gp2, io1
        # Iops=1000,  # multiple between 3 and 10 times the storage; only for use with io1.

        # AllocatedStorage has weird constraints:
        # General Purpose (SSD) storage (gp2): Must be an integer from 5 to 6144.
        # Provisioned IOPS storage (io1): Must be an integer from 100 to 6144.
        # Magnetic storage (standard): Must be an integer from 5 to 3072.
        AllocatedStorage=50,  # in gigabytes

        # StorageEncrypted=True | False,  # drive encryption, I think.
        # KmsKeyId='string',
        # TdeCredentialArn='string',  # probably not something we will implement
        # TdeCredentialPassword='******',  # probably not something we will implement

        # Security
        MasterUsername=credentials['RDS_USERNAME'],
        MasterUserPassword=credentials['RDS_PASSWORD'],
        DBName=credentials['RDS_DB_NAME'],
        EnableIAMDatabaseAuthentication=False,
        Engine=engine['Engine'],  # will be "postgres"
        EngineVersion=engine['EngineVersion'],  # most recent postgres version in this region.
        PreferredMaintenanceWindow=MAINTAINANCE_WINDOW,
        PreferredBackupWindow=BACKUP_WINDOW,
        AutoMinorVersionUpgrade=True,  # auto-upgrades are fantastic
        BackupRetentionPeriod=BACKUP_RETENTION_PERIOD_DAYS,
        Tags=[
            {'Key': 'BEIWE-NAME',
             'Value': 'Beiwe postgres database for %s' % eb_environment_name},
        ],

        # Enhanced monitoring, leave disabled
        # MonitoringInterval=5,  # in seconds, Valid Values: 0, 1, 5, 10, 15, 30, 60
        # MonitoringRoleArn='string',  # required for monitoring interval other than 0

        # near as I can tell this is the "insert postgres parameters here" section.
        # DBParameterGroupName='string',

        # AvailabilityZone='string',  # leave as default (random)
        # DBSecurityGroups=['strings'],  # non-vpc rds instance settings
        # LicenseModel='string',
        # CharacterSetName='string',
        # OptionGroupName='string',  # don't think this is required.

        # Domain='string',  # has the phrase "active directory" in the description
        # DomainIAMRoleName='string',

        # CopyTagsToSnapshot=True | False,
        # Timezone='string',  # only used by MSSQL

        # DBClusterIdentifier='string',  #
        # EnablePerformanceInsights=True,  # Aurora specific
        # PerformanceInsightsKMSKeyId='string'  # Aurora specific
        # PromotionTier = 123,  # Aurora specific
    )

    # Poll until the database finishes spinning up; RDS spinup goes
    # creating > backing up > available.
    while True:
        try:
            db = get_db_info(eb_environment_name)
        except DBInstanceNotFound:
            # BUGFIX: the original message contained a %s placeholder but no
            # format argument, so the environment name was never interpolated
            # (the literal "%s" was logged instead).
            log.error("couldn't find database %s, hopefully this is a momentary glitch. Retrying."
                      % eb_environment_name)
            sleep(5)
            continue

        log.info('%s: RDS instance status is %s, waiting until status is "Ready"'
                 % (current_time_string(), db['DBInstanceStatus']))

        if db['DBInstanceStatus'] in ["creating", 'backing-up']:
            sleep(5)
        elif db['DBInstanceStatus'] == "available":
            log.info("Database status is no longer 'creating', it is '%s'" % db['DBInstanceStatus'])
            break
        else:
            raise Exception('encountered unknown database state "%s"' % db['DBInstanceStatus'])

    return db