def announce():
    """Announce the current deployment.

    Validates that the required deployment state is present in the fabric
    env before announcing.
    """
    # Fail fast if any piece of deployment state is missing from env.
    for env_key in ('_deployment_name', '_deployment_confs', '_active_gen'):
        require(env_key)

    # Make sure we're in the git repo
    _get_git_repo()

    _announce_deployment()
def up(
    environment_name,
    configuration_manager,
    resource_tracker,
    generation=ACTIVE,
):
    """
    Make sure that the instances for the specified generation are running
    and have current code. Will update code and deploy new EC2 and RDS
    instances as needed.

    :param environment_name: Name of the environment to bring up.
    :param configuration_manager: Provides per-environment config and the
        optional seed-environment name.
    :param resource_tracker: Accepted for interface compatibility; not used
        directly in this routine.
    :param generation: Which generation to operate on. Defaults to ACTIVE.
    """
    env._active_gen = True

    # Always force the active generation into operation if possible.
    # BUGFIX: previously make_operational was only assigned inside the
    # ACTIVE branch, so any other generation hit a NameError when the
    # value was used during EC2 deployment below.
    make_operational = generation == ACTIVE

    with logs_duration(timer, timer_name='pre_deploy_validation'):
        # TODO: Make this an optional hook that can be registered
        git_conf = {}
        if git_conf.get('enable'):
            repo = _get_git_repo()
            # Force submodules to be updated
            # TODO: Make this an optional hook that can be registered
            with prompt_on_exception("Git submodule update failed"):
                repo.submodule_update(init=True, recursive=True)

            # Optionally require that we deploy from a tagged commit.
            if git_conf.get('require_tag', False):
                logger.info("Enforcing git tag requirement")
                if not _is_unchanged_from_head(repo):
                    logger.critical(
                        "Refusing to deploy, uncommitted changes exist.")
                    exit(1)
                if not _is_tagged_version(repo):
                    logger.critical(
                        "Refusing to deploy from an untagged commit.",
                    )
                    exit(1)
                _push_tags(repo)

        # TODO: Make this an optional hook that can be registered
        pagerduty_conf = {}
        if pagerduty_conf.get('temporarily_become_oncall', False):
            logger.info("Taking Pagerduty, temporarily")
            _take_temporary_pagerduty(
                duration=pagerduty_conf.get('temporary_oncall_duration'),
                api_key=pagerduty_conf.get('api_key'),
                user_id=pagerduty_conf.get('user_id'),
                project_subdomain=pagerduty_conf.get('project_subdomain'),
                schedule_key=pagerduty_conf.get('schedule_key'),
            )

    logger.info("Gathering deployment state")
    with logs_duration(timer, timer_name='gather deployment state'):
        environment_config = configuration_manager.get_environment_config(
            environment_name,
        )
        deployment = Deployment(
            environment_name,
            environment_config.get('ec2', {}),
            environment_config.get('rds', {}),
            environment_config.get('elb', {}),
        )
        # up never deals with old nodes, so just verify pending and active
        # to save HTTP round trips
        deployment.verify_deployment_state(verify_old=False)

    # Gather all of the configurations for each node, including their
    # seed deployment information
    logger.info("Gathering seed deployment state")
    with logs_duration(timer, timer_name='seed_deployment_state'):
        # If this environment has a seed environment, build that
        # environment manager
        seed_deployment = None
        seed_deployment_name = configuration_manager.get_seed_environment_name(
            environment_name,
        )
        if seed_deployment_name:
            seed_config = configuration_manager.get_environment_config(
                seed_deployment_name,
            )
            seed_deployment = Deployment(
                seed_deployment_name,
                seed_config.get('ec2', {}),
                seed_config.get('rds', {}),
                seed_config.get('elb', {}),
            )
            logger.info("Verifying seed deployment state")
            seed_deployment.verify_deployment_state(verify_old=False)

    # Build all of the deployment objects
    logger.info("Building Node deployers")
    with logs_duration(timer, timer_name='build deployers'):
        ec2_deployers = []
        rds_deployers = []

        # All rds and ec2 nodes, rds nodes first.
        # BUGFIX: the aws_type labels were previously paired with the
        # *other* type's config ('rds' with the 'ec2' section and vice
        # versa), so each node was built with the wrong deployer class.
        dep_confs = [
            (
                'rds',
                environment_config.get('rds', {}),
            ),
            (
                'ec2',
                environment_config.get('ec2', {}),
            ),
        ]
        for aws_type, node_confs in dep_confs:
            for node_name, conf in node_confs.items():
                # Get the seed deployment new instances will be copied from
                if seed_deployment and 'seed' in conf:
                    seed_node_name = conf['seed']['unique_id']
                    # NOTE(review): the guard checks 'seed' but this reads
                    # 'seed_node' — if only 'seed' exists this raises
                    # KeyError. Confirm which key the node config schema
                    # actually uses; they should probably match.
                    verify_seed_data = conf['seed_node'].get('verify', False)
                else:
                    logger.info("No seed node configured")
                    seed_node_name = None
                    verify_seed_data = False

                # Dispatch to the deployer class for this AWS service type.
                if aws_type == 'ec2':
                    klass = Ec2NodeDeployment
                elif aws_type == 'rds':
                    klass = RdsNodeDeployment
                deployer = klass(
                    deployment=deployment,
                    seed_deployment=seed_deployment,
                    is_active=env._active_gen,
                    aws_type=aws_type,
                    node_name=node_name,
                    seed_node_name=seed_node_name,
                    seed_verification=verify_seed_data,
                    brain_wrinkles=conf.get('brain_wrinkles', {}),
                    conf=conf,
                )
                if aws_type == 'ec2':
                    ec2_deployers.append(deployer)
                elif aws_type == 'rds':
                    rds_deployers.append(deployer)

    # We don't actually want to do deployments until we have tests.
    # Deliberate development gate: everything below provisions/mutates
    # real AWS resources. NOTE: asserts are stripped under `python -O`,
    # so don't run this tool optimized while the gate is needed.
    assert False

    # Provision the RDS nodes
    with logs_duration(timer, timer_name='initial provision'):
        logger.info("Provisioning RDS nodes")
        for deployer in rds_deployers:
            # Only prompt for seed verification on first-time creation.
            if deployer.seed_verification and deployer.get_node() is None:
                _prompt_for_seed_verification(deployer)
            deployer.ensure_node_created()

        # Provision the EC2 nodes
        logger.info("Provisioning EC2 nodes")
        for deployer in ec2_deployers:
            if deployer.seed_verification and deployer.get_node() is None:
                _prompt_for_seed_verification(deployer)
            deployer.ensure_node_created()

    # Configure the RDS nodes first so EC2 nodes come up against a
    # ready database.
    logger.info("Configuring RDS nodes")
    with logs_duration(timer, timer_name='deploy rds'):
        for deployer in rds_deployers:
            deployer.run()

    logger.info("Determining EC2 node deploy priority")
    ec2_deployers = _order_ec2_deployers_by_priority(ec2_deployers)

    # Configure the EC2 nodes
    logger.info("Deploying to EC2 nodes")
    for deployer in ec2_deployers:
        timer_name = '%s deploy' % deployer.node_name
        with logs_duration(timer, timer_name='full %s' % timer_name):
            node = deployer.get_node()
            # Take the node out of / back into operation seamlessly
            # around the code push when required.
            with seamless_modification(
                node,
                deployer.deployment,
                force_seamless=env._active_gen,
                make_operational_if_not_already=make_operational,
            ):
                pre_deploy_time = datetime.now()
                with logs_duration(
                    timer,
                    timer_name=timer_name,
                    output_result=True,
                ):
                    deployer.run()

            if DT_NOTIFY:
                _send_deployment_done_desktop_notification(
                    pre_deploy_time,
                    deployer,
                )

    _announce_deployment()

    # Dump a per-phase timing breakdown, slowest first.
    time_logger.info("Timing Breakdown:")
    sorted_timers = sorted(
        timer.items(),
        key=lambda x: x[1],
        reverse=True,
    )
    for timer_name, duration in sorted_timers:
        time_logger.info("%02ds- %s", duration, timer_name)