def refresh_cloned_instances(event):
    """Re-clone the source data volume into every matching cloned instance.

    Optionally narrowed by event keys:
      - 'ClonedFrom': only refresh clones of that source deployment
      - 'DeploymentName': only refresh the named clone

    Running clones are stopped first, re-cloned, then started again; clones
    that were already stopped are re-cloned and left stopped.

    NOTE(review): relies on module-level `loop` (an asyncio event loop) and the
    helpers find_instances / stop_instance / clone_volume_into_instance /
    start_instance / get_tag defined elsewhere in this file.
    """
    filters = [{'Name': 'tag-key', 'Values': ['ClonedFrom']}]
    if 'ClonedFrom' in event:
        print('Refreshing instances cloned from ' + event['ClonedFrom'])
        filters.append({
            'Name': 'tag:ClonedFrom',
            'Values': [event['ClonedFrom']]
        })
    if 'DeploymentName' in event:
        print('Refreshing the ' + event['DeploymentName'] + ' clone')
        filters.append({
            'Name': 'tag:DeploymentName',
            'Values': [event['DeploymentName']]
        })

    running_instances = find_instances(filters + [{
        'Name': 'instance-state-name',
        'Values': ['running']
    }])
    stopped_instances = find_instances(filters + [{
        'Name': 'instance-state-name',
        'Values': ['stopped']
    }])
    instances = running_instances + stopped_instances
    if not instances:
        print('No clones to refresh')
        return

    # Stop any running clones before touching their volumes.
    # (The original wrapped each comprehension in sum([[...]], []) — a no-op
    # flatten of a single-element list of lists, removed here.)
    if running_instances:
        stop_tasks = [
            asyncio.ensure_future(stop_instance(instance))
            for instance in running_instances
        ]
        loop.run_until_complete(asyncio.wait(stop_tasks))

    clone_tasks = [
        asyncio.ensure_future(
            clone_volume_into_instance(instance,
                                       get_tag(instance, 'DeploymentType'),
                                       get_tag(instance, 'ClonedFrom')))
        for instance in instances
    ]
    loop.run_until_complete(asyncio.wait(clone_tasks))

    # Restart only the clones that were running when we arrived.
    if running_instances:
        start_tasks = [
            asyncio.ensure_future(start_instance(instance))
            for instance in running_instances
        ]
        loop.run_until_complete(asyncio.wait(start_tasks))
    print('Finished refreshing all clones')
def tear_down_tupaia_deployment(event):
    """Tear down every non-terminated instance of a named deployment.

    Requires 'DeploymentName' in the event (the tupaia.org subdomain, e.g.
    "dev"). Raises if the key is missing or no matching instances exist.
    """
    if 'DeploymentName' not in event:
        raise Exception(
            'You must include "DeploymentName" in the lambda config, which is the subdomain of tupaia.org you want to tear down (e.g. "dev").'
        )

    matching_instances = find_instances([
        {
            'Name': 'tag:DeploymentName',
            'Values': [event['DeploymentName']]
        },
        # ignore terminated instances
        {
            'Name': 'instance-state-name',
            'Values': ['running', 'stopped']
        },
    ])
    if not matching_instances:
        raise Exception('No matching instances found')

    instance_names = ', '.join(
        get_tag(instance, 'Name') for instance in matching_instances)
    print('Tearing down the instances ' + instance_names)
    for instance in matching_instances:
        teardown_instance(instance)
    print('Finished tearing down clone')
def swap_out_tupaia_server(event):
    """Swap a freshly deployed server in behind the gateway, retiring the old one.

    Requires 'DeploymentName' and 'NewInstanceId' in the event. Repoints the
    ELB at the new instance, carries the 'SubdomainsViaGateway' tag across,
    then terminates the instance that was previously behind the gateway.
    """
    # validate input config
    if 'DeploymentName' not in event:
        raise Exception('You must include the key "DeploymentName" in the lambda config, e.g. "dev".')
    if 'NewInstanceId' not in event:
        raise Exception('You must include the key "NewInstanceId" in the lambda config, e.g. "dev".')
    target_deployment = event['DeploymentName']
    replacement_id = event['NewInstanceId']

    replacement = get_instance_by_id(replacement_id)
    if not replacement:
        raise Exception('Could not find new instance to swap in')
    current = get_instance_behind_gateway('tupaia', target_deployment)
    if not current:
        raise Exception('Could not find old instance to swap out')

    # set up ELB from the old instance to point at the new one
    swap_gateway_instance('tupaia', target_deployment, current['InstanceId'],
                          replacement_id)

    # add the subdomain tags that now relate to the new instance
    add_tag(replacement_id, 'SubdomainsViaGateway',
            get_tag(current, 'SubdomainsViaGateway'))

    terminate_instance(current)
    print('Successfully swapped out ' + target_deployment)
def delete_old_deployments(event):
    """Tear down any instance whose 'DeleteAfter' tag timestamp has passed.

    The EC2 filter pre-selects instances whose DeleteAfter tag begins with
    today's date ("%Y-%m-%d*"); the precise "%Y-%m-%d %H:%M" cut-off is then
    checked in Python before tearing the instance down.

    NOTE(review): `datetime.now()` / `datetime.strptime` here treat `datetime`
    as the class (i.e. `from datetime import datetime`), while backup_instances
    in this file uses it as the module (`datetime.date.today()`) — one of the
    two cannot resolve under a single import; confirm against the file header.
    """
    current_datetime = datetime.now()
    current_date = time.strftime("%Y-%m-%d")
    filters = [
        # ignore terminated instances
        {'Name': 'instance-state-name', 'Values': ['running', 'stopped']},
        # pre-filter to tags starting with today's date; the exact time is
        # compared below (the old comment claimed hourly granularity — wrong)
        {'Name': 'tag:DeleteAfter', 'Values': [current_date + '*']},
    ]
    for instance in find_instances(filters):
        delete_after_tag = get_tag(instance, 'DeleteAfter')
        try:
            delete_instance_after = datetime.strptime(delete_after_tag,
                                                      "%Y-%m-%d %H:%M")
        except ValueError:
            # One malformed tag should not abort the whole cleanup sweep.
            print('Skipping instance with unparsable DeleteAfter tag: ' +
                  delete_after_tag)
            continue
        if current_datetime > delete_instance_after:
            teardown_instance(instance)
def redeploy_tupaia_server(event):
    """Launch replacement instances for existing deployments from the gold AMI.

    Selects the instance(s) currently behind the gateway, filtered by
    'DeploymentName' and/or 'Branch' from the event (at least one required;
    production may only track master). For each match, spins up a new
    instance carrying forward its tags; the old instance is later removed by
    the "swap_out_tupaia_server" lambda once the new one is running.

    Returns a list of {"DeploymentName", "NewInstanceId"} dicts.
    Raises Exception on invalid config or when nothing matches.
    """
    instance_filters = [
        # the server that is currently behind the ELB
        {
            'Name': 'tag-key',
            'Values': ['SubdomainsViaGateway']
        },
        # ignore terminated instances
        {
            'Name': 'instance-state-name',
            'Values': ['running', 'stopped']
        },
    ]
    branch = event.get('Branch')
    deployment_name = event.get('DeploymentName')
    if not branch and not deployment_name:
        raise Exception(
            'You must include either "DeploymentName" or "Branch" in the lambda config, e.g. "dev".'
        )
    if deployment_name and branch and deployment_name == 'production' and branch != 'master':
        raise Exception(
            'The production deployment branch should not be changed from master to '
            + branch)
    if branch:
        instance_filters.append({'Name': 'tag:Branch', 'Values': [branch]})
    if deployment_name:
        instance_filters.append({
            'Name': 'tag:DeploymentName',
            'Values': [deployment_name]
        })

    # find current instances
    existing_instances = find_instances(instance_filters)
    if not existing_instances:
        raise Exception(
            'No existing instances found to redeploy, perhaps you want to spin up a new deployment?'
        )

    response = []  # could be multiple deployments to redeploy if just "Branch" was specified
    for existing_instance in existing_instances:
        # Preserve the original deployer, appending who triggered this redeploy.
        original_deployed_by = get_tag(existing_instance, 'DeployedBy')
        if original_deployed_by:
            original_deployed_by = original_deployed_by.split(
                ' (latest redeploy by ', 1)[0]
            deployed_by = (original_deployed_by + ' (latest redeploy by ' +
                           event['User'] + ')')
        else:
            deployed_by = event['User']
        extra_tags = [{'Key': 'DeployedBy', 'Value': deployed_by}]
        delete_after = get_tag(existing_instance, 'DeleteAfter')
        if delete_after != '':
            # (fixed: a stray trailing comma previously made this statement a
            # one-element tuple expression)
            extra_tags.append({'Key': 'DeleteAfter', 'Value': delete_after})

        # launch server instance based on gold master AMI
        # original instance will be deleted by lambda script
        # "swap_out_tupaia_server" once new instance is running
        new_instance = create_tupaia_instance_from_image(
            deployment_name=get_tag(existing_instance, 'DeploymentName'),
            branch=get_tag(existing_instance, 'Branch'),
            instance_type=event.get('InstanceType',
                                    existing_instance['InstanceType']),
            extra_tags=extra_tags + [{
                'Key': 'DeploymentComponent',
                'Value': 'app-server'
            }],  # TODO remove deployment component stuff after move to RDS
            image_code=event.get(
                'ImageCode', None),  # will use id below if not defined in the event
            image_id=existing_instance['ImageId'],
            security_group_code=event.get(
                'SecurityGroupCode',
                None),  # will use id below if not defined in the event
            security_group_id=existing_instance['SecurityGroups'][0]
            ['GroupId'],
            setup_gateway=False,
        )
        deployment_name = get_tag(new_instance, 'DeploymentName')
        print('Successfully deployed ' + deployment_name)
        response.append({
            "DeploymentName": deployment_name,
            "NewInstanceId": new_instance['InstanceId']
        })
    return response
def backup_instances(event):
    """Snapshot every EBS volume of the targeted instances.

    If 'InstanceName' is in the event, backs up only that instance; otherwise
    backs up every instance carrying a 'backup'/'Backup' tag key. Each
    snapshot is tagged with a 'DeleteOn' date derived from the instance's
    'Retention' tag (days, default 7) plus its deployment type/name.

    NOTE(review): `datetime.date.today()` / `datetime.timedelta` treat
    `datetime` as the module (`import datetime`), while delete_old_deployments
    in this file uses it as the class — confirm against the file's imports.
    """
    # ignore terminated instances
    filters = [{
        'Name': 'instance-state-name',
        'Values': ['running', 'stopped']
    }]
    if 'InstanceName' in event:
        print('Only backing up ' + event['InstanceName'])
        filters.append({'Name': 'tag:Name', 'Values': [event['InstanceName']]})
    else:
        # fixed: message previously read 'tagged "Backup' with an unterminated quote
        print('Backing up all instances tagged "Backup"')
        filters.append({'Name': 'tag-key', 'Values': ['backup', 'Backup']})

    reservations = ec.describe_instances(Filters=filters).get(
        'Reservations', [])
    # flat list of all instances across reservations (was sum([[...]], []))
    instances = [i for r in reservations for i in r['Instances']]
    if not instances:
        print(
            'Found no instances to back up. Make sure the instance has the tag "Backup"'
        )
        return

    for instance in instances:
        instance_name = get_tag(instance, 'Name')
        deployment_type = get_tag(instance, 'DeploymentType')
        deployment_name = get_tag(instance, 'DeploymentName')
        retention_days = get_tag(instance, 'Retention')
        if retention_days == '':
            retention_days = '7'  # default to 7 days of snapshot retention
        print('Backing up ' + instance_name)
        for dev in instance['BlockDeviceMappings']:
            ebs = dev.get('Ebs', None)
            if ebs is None:
                continue  # skip devices with no EBS volume (e.g. instance store)
            snap = ec.create_snapshot(
                VolumeId=ebs['VolumeId'],
                Description='Backup created from ' + instance_name,
            )
            delete_date = datetime.date.today() + datetime.timedelta(
                days=int(retention_days))
            ec.create_tags(
                Resources=[snap['SnapshotId']],
                Tags=[
                    {'Key': 'DeleteOn', 'Value': delete_date.strftime('%Y-%m-%d')},
                    {'Key': 'DeploymentType', 'Value': deployment_type},
                    {'Key': 'DeploymentName', 'Value': deployment_name},
                ])