def tunnel(ctx, tunnel_name):
    """
    Tunnel through an EC2 instance in the ECS cluster.

    The parameters for this command should be found in a tunnels: top-level section
    in the yaml file, in the format:

    \b
    tunnels:
      - name: my_tunnel
        service: my_service
        host: config.MY_TUNNEL_DESTINATION_HOST
        port: 3306
        local_port: 8888

    where config.MY_TUNNEL_DESTINATION_HOST is the value of MY_TUNNEL_DESTINATION_HOST
    for this service in the AWS Parameter Store. The host value could also just be a
    hostname.
    """
    config = Config(
        filename=ctx.obj['CONFIG_FILE'],
        env_file=ctx.obj['ENV_FILE'],
        import_env=ctx.obj['IMPORT_ENV'],
        tfe_token=ctx.obj['TFE_TOKEN']
    )
    yml = config.get_category_item('tunnels', tunnel_name)
    service_name = yml['service']
    service = Service(yml=config.get_service(service_name))
    host = _interpolate_tunnel_info(yml['host'], service)
    port = int(_interpolate_tunnel_info(yml['port'], service))
    local_port = int(_interpolate_tunnel_info(yml['local_port'], service))
    interim_port = random.randrange(10000, 64000, 1)
    service.tunnel(host, local_port, interim_port, port)
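# Illustrative sketch only (not deployfish's implementation): the tunnel command above
# accepts two forms for "host" in the tunnels: section, and _interpolate_tunnel_info()
# is what resolves them against the service's Parameter Store config.  The toy function
# below just shows the shape of that decision; the names and values are invented.
def _example_resolve_tunnel_host(host_spec, config_params):
    """Return a literal hostname, or look up 'config.<KEY>' in a dict of config values."""
    if host_spec.startswith('config.'):
        # e.g. 'config.MY_TUNNEL_DESTINATION_HOST' -> the value stored under that key
        return config_params[host_spec[len('config.'):]]
    return host_spec  # already a plain hostname


# Example (made-up values): both of these would be valid "host:" entries.
# _example_resolve_tunnel_host('config.MY_TUNNEL_DESTINATION_HOST', {'MY_TUNNEL_DESTINATION_HOST': 'db.internal'})
# _example_resolve_tunnel_host('db.internal', {})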
def setUp(self):
    current_dir = os.path.dirname(os.path.abspath(__file__))
    config_yml = os.path.join(current_dir, 'interpolate.yml')
    env_file = os.path.join(current_dir, 'env_file.env')
    self.config = Config(filename=config_yml, env_file=env_file, interpolate=False)
class TestContainerDefinition_terraform_statefile_interpolation(unittest.TestCase):

    def setUp(self):
        current_dir = os.path.dirname(os.path.abspath(__file__))
        config_yml = os.path.join(current_dir, 'terraform_interpolate.yml')
        with Replacer() as r:
            self.get_mock = r('deployfish.terraform.Terraform._get_state_file_from_s3', Mock())
            self.get_mock.side_effect = statefile_loader
            self.config = Config(filename=config_yml)

    def test_environment_gets_replaced_for_each_environment(self):
        calls = [
            call('s3://my-qa-statefile', profile=None, region=None),
            call('s3://my-prod-statefile', profile=None, region=None),
        ]
        self.get_mock.assert_has_calls(calls)

    def test_file_interpolation_gets_values_from_correct_statefile(self):
        prod = self.config.get_section_item('services', 'foobar-prod')
        self.assertEqual(prod['cluster'], 'foobar-cluster-prod')
        self.assertEqual(prod['load_balancer']['load_balancer_name'], 'foobar-prod-elb')
        self.assertEqual(prod['task_role_arn'], 'arn:aws:iam::324958023459:role/foobar-prod-task')
        qa = self.config.get_section_item('services', 'foobar-qa')
        self.assertEqual(qa['cluster'], 'foobar-cluster-qa')
        self.assertEqual(qa['load_balancer']['load_balancer_name'], 'foobar-qa-elb')
        self.assertEqual(qa['task_role_arn'], 'arn:aws:iam::324958023459:role/foobar-qa-task')
def _entrypoint(ctx, section, section_name, cluster_name, parameter_prefix, command, dry_run):
    if section_name and cluster_name:
        # The only thing we need out of Config is the names of any config:
        # section variables we might have.  We don't need to do interpolation
        # in the config: section, because we retrieve the values from Parameter
        # Store, and we don't want to use any aws: section that might be in the
        # deployfish.yml to configure our boto3 session because we want to defer
        # to the IAM ECS Task Role.
        config = Config(filename=ctx.obj['CONFIG_FILE'], interpolate=False, use_aws_section=False)
        try:
            section_yml = config.get_section_item(section, section_name)
        except KeyError:
            click.echo(
                "Our container's deployfish config file '{}' does not have section '{}' in '{}'".format(
                    ctx.obj['CONFIG_FILE'] or 'deployfish.yml', section_name, section
                )
            )
            sys.exit(1)
        parameter_store = []
        if 'config' in section_yml:
            parameter_name = parameter_prefix + section_name
            parameter_store = ParameterStore(parameter_name, cluster_name, yml=section_yml['config'])
            parameter_store.populate()
        if not dry_run:
            for param in parameter_store:
                if param.exists:
                    if param.should_exist:
                        os.environ[param.key] = param.aws_value
                    else:
                        print(
                            "event='deploy.entrypoint.parameter.ignored.not_in_deployfish_yml' "
                            "section='{}' parameter='{}'".format(section_name, param.name)
                        )
                else:
                    print(
                        "event='deploy.entrypoint.parameter.ignored.not_in_aws' "
                        "section='{}' parameter='{}'".format(section_name, param.name)
                    )
        else:
            exists = []
            not_exists = []
            for param in parameter_store:
                if param.exists:
                    exists.append(param)
                else:
                    not_exists.append(param)
            click.secho("Would have set these environment variables:", fg="cyan")
            for param in exists:
                click.echo('  {}={}'.format(param.key, param.aws_value))
            click.secho("\nThese parameters are not in AWS:", fg="red")
            for param in not_exists:
                click.echo('  {}'.format(param.key))
    if dry_run:
        click.secho('\n\nCOMMAND: {}'.format(command))
    else:
        subprocess.call(command)
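# Hedged illustration of how the dispatcher above names its ParameterStore: the name is
# simply parameter_prefix + section_name, so callers for different sections can keep
# their parameters separate.  The concrete example values below are invented.
def _example_parameter_store_name(parameter_prefix, section_name):
    # e.g. ('', 'foobar-prod') -> 'foobar-prod'
    #      ('task-', 'nightly-job') -> 'task-nightly-job'
    return parameter_prefix + section_name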
class TestContainerDefinition_load_yaml_no_interpolate(unittest.TestCase):

    def setUp(self):
        current_dir = os.path.dirname(os.path.abspath(__file__))
        state_file = os.path.join(current_dir, 'terraform.tfstate')
        config_yml = os.path.join(current_dir, 'interpolate.yml')
        env_file = os.path.join(current_dir, 'env_file.env')
        with open(state_file) as f:
            tfstate = json.loads(f.read())
        with Replacer() as r:
            get_mock = r('deployfish.terraform.Terraform._get_state_file_from_s3', Mock())
            get_mock.return_value = tfstate
            self.config = Config(filename=config_yml, env_file=env_file, interpolate=False)

    def test_simple_interpolation(self):
        self.assertEqual(self.config.get_service('foobar-prod')['cluster'], '${terraform.cluster_name}')

    def test_nested_dict_interpolation(self):
        self.assertEqual(
            self.config.get_service('foobar-prod')['load_balancer']['load_balancer_name'],
            '${terraform.elb_id}'
        )

    def test_nested_list_interpolation(self):
        self.assertEqual(
            self.config.get_service('foobar-prod')['containers'][0]['environment'][2],
            'SECRETS_BUCKET_NAME=${terraform.secrets_bucket_name}'
        )

    def test_environment_simple_interpolation(self):
        self.assertEqual(self.config.get_service('foobar-prod')['config'][0], 'FOOBAR=${env.FOOBAR_ENV}')
        self.assertEqual(
            self.config.get_service('foobar-prod')['config'][2],
            'FOO_BAR_PREFIX=${env.FOO_BAR_PREFIX_ENV}/test'
        )
def setUp(self):
    current_dir = os.path.dirname(os.path.abspath(__file__))
    config_yml = os.path.join(current_dir, 'terraform_interpolate.yml')
    with Replacer() as r:
        self.get_mock = r('deployfish.terraform.Terraform._get_state_file_from_s3', Mock())
        self.get_mock.side_effect = statefile_loader
        self.config = Config(filename=config_yml)
class TestContainerDefinition_load_yaml(unittest.TestCase):

    def setUp(self):
        current_dir = os.path.dirname(os.path.abspath(__file__))
        state_file = os.path.join(current_dir, 'terraform.tfstate')
        config_yml = os.path.join(current_dir, 'interpolate.yml')
        with open(state_file) as f:
            tfstate = json.loads(f.read())
        os.environ['FOOBAR_ENV'] = "hi_mom"
        with Replacer() as r:
            get_mock = r('deployfish.terraform.Terraform._get_state_file_from_s3', Mock())
            get_mock.return_value = tfstate
            self.config = Config(filename=config_yml)

    def tearDown(self):
        del os.environ['FOOBAR_ENV']

    def test_terraform_simple_interpolation(self):
        self.assertEqual(self.config.get_service('cit-auth-prod')['cluster'], 'foobar-proxy-prod')

    def test_terraform_nested_dict_interpolation(self):
        self.assertEqual(
            self.config.get_service('cit-auth-prod')['load_balancer']['load_balancer_name'],
            'foobar-proxy-prod'
        )

    def test_terraform_nested_list_interpolation(self):
        self.assertEqual(
            self.config.get_service('cit-auth-prod')['containers'][0]['environment'][2],
            'SECRETS_BUCKET_NAME=ac-config-store'
        )

    def test_environment_simple_interpolation(self):
        self.assertEqual(self.config.get_service('cit-auth-prod')['config'][0], 'FOOBAR=hi_mom')
def setUp(self):
    current_dir = os.path.dirname(os.path.abspath(__file__))
    state_file = os.path.join(current_dir, 'terraform.tfstate')
    config_yml = os.path.join(current_dir, 'interpolate.yml')
    env_file = os.path.join(current_dir, 'env_file.env')
    with open(state_file) as f:
        tfstate = json.loads(f.read())
    with Replacer() as r:
        get_mock = r('deployfish.terraform.Terraform._get_state_file_from_s3', Mock())
        get_mock.return_value = tfstate
        self.config = Config(filename=config_yml, env_file=env_file)
class TestContainerDefinition_load_yaml(unittest.TestCase):

    def setUp(self):
        current_dir = os.path.dirname(os.path.abspath(__file__))
        state_file = os.path.join(current_dir, 'terraform.tfstate')
        config_yml = os.path.join(current_dir, 'interpolate.yml')
        env_file = os.path.join(current_dir, 'env_file.env')
        with open(state_file) as f:
            tfstate = json.loads(f.read())
        with Replacer() as r:
            get_mock = r('deployfish.terraform.Terraform._get_state_file_from_s3', Mock())
            get_mock.return_value = tfstate
            self.config = Config(filename=config_yml, env_file=env_file)

    def tearDown(self):
        pass

    def test_terraform_simple_interpolation(self):
        self.assertEqual(self.config.get_service('foobar-prod')['cluster'], 'foobar-cluster-prod')

    def test_terraform_nested_dict_interpolation(self):
        self.assertEqual(
            self.config.get_service('foobar-prod')['load_balancer']['load_balancer_name'],
            'foobar-elb-prod'
        )

    def test_terraform_nested_list_interpolation(self):
        self.assertEqual(
            self.config.get_service('foobar-prod')['containers'][0]['environment'][2],
            'SECRETS_BUCKET_NAME=my-config-store'
        )

    def test_terraform_list_output_interpolation(self):
        self.assertListEqual(
            self.config.get_service('foobar-prod')['vpc_configuration']['security_groups'],
            ['sg-1234567', 'sg-2345678', 'sg-3456789']
        )

    def test_terraform_map_output_interpolation(self):
        self.assertListEqual(
            self.config.get_service('output-test')['vpc_configuration']['subnets'],
            ['subnet-1234567']
        )
        self.assertListEqual(
            self.config.get_service('output-test')['vpc_configuration']['security_groups'],
            ['sg-1234567']
        )
        self.assertEqual(
            self.config.get_service('output-test')['vpc_configuration']['public_ip'],
            'DISABLED'
        )

    def test_environment_simple_interpolation(self):
        self.assertEqual(self.config.get_service('foobar-prod')['config'][0], 'FOOBAR=hi_mom')
        self.assertEqual(self.config.get_service('foobar-prod')['config'][2], 'FOO_BAR_PREFIX=oh_no/test')
def setUp(self):
    current_dir = os.path.dirname(os.path.abspath(__file__))
    filename = os.path.join(current_dir, 'simple.yml')
    self.config = Config(filename=filename, interpolate=False)
    client_mock = Mock()
    client_mock.get_parameters.return_value = {'Parameters': []}
    client_mock.describe_parameters.return_value = {'Parameters': []}
    session_mock = Mock(client=Mock(return_value=client_mock))
    with Replacer() as r:
        r.replace('deployfish.aws.ecs.Service.from_aws', Mock())
        r.replace('deployfish.aws.ecs.TaskDefinition.create', Mock())
        r.replace('deployfish.aws.boto3_session', session_mock)
        self.service = Service('foobar-secrets-ec2', config=self.config)
        self.service.create()
def scale(ctx, service_name, count, dry_run, wait, asg, force_asg):
    """
    Set the desired count for service SERVICE_NAME to COUNT.
    """
    service = Service(
        yml=Config(
            filename=ctx.obj['CONFIG_FILE'],
            env_file=ctx.obj['ENV_FILE'],
            import_env=ctx.obj['IMPORT_ENV'],
            tfe_token=ctx.obj['TFE_TOKEN']
        ).get_service(service_name)
    )
    print()
    manage_asg_count(service, count, asg, force_asg)
    click.secho(
        'Updating desiredCount on "{}" service in cluster "{}" to {}.'.format(
            service.serviceName, service.clusterName, count
        ),
        fg="white"
    )
    if not dry_run:
        service.scale(count)
        if wait:
            click.secho("  Waiting until the service is stable with our new count ...", fg='cyan')
            if service.wait_until_stable():
                click.secho("  Done.", fg='white')
            else:
                click.secho("  FAILURE: the service failed to start.", fg='red')
                sys.exit(1)
class TestService_no_ec2_secrets_in_task_definition_if_no_execution_role(unittest.TestCase):

    def setUp(self):
        current_dir = os.path.dirname(os.path.abspath(__file__))
        filename = os.path.join(current_dir, 'simple.yml')
        self.config = Config(filename=filename, interpolate=False)
        del self.config.raw['services'][4]['execution_role']
        client_mock = Mock()
        client_mock.get_parameters.return_value = {'Parameters': []}
        client_mock.describe_parameters.return_value = {'Parameters': []}
        session_mock = Mock(client=Mock(return_value=client_mock))
        with Replacer() as r:
            r.replace('deployfish.aws.ecs.Service.from_aws', Mock())
            r.replace('deployfish.aws.ecs.TaskDefinition.create', Mock())
            r.replace('deployfish.aws.boto3_session', session_mock)
            self.service = Service('foobar-secrets-ec2', config=self.config)
            self.service.create()

    def test_sanity_check_name(self):
        self.assertEqual(self.service.serviceName, 'foobar-secrets-ec2')

    def test_sanity_check_config(self):
        self.assertEqual(len(self.config.get_service('foobar-secrets-ec2')['config']), 3)

    def test_config_with_execution_role(self):
        self.assertEqual(len(self.service.desired_task_definition.containers[0].secrets), 0)
def delete(ctx, service_name, dry_run):
    """
    Delete the service SERVICE_NAME from AWS.
    """
    service = Service(
        yml=Config(
            filename=ctx.obj['CONFIG_FILE'],
            env_file=ctx.obj['ENV_FILE']
        ).get_service(service_name)
    )
    print()
    click.secho('Deleting service "{}":'.format(service.serviceName), fg="white")
    click.secho('  Service info:', fg="green")
    print_service_info(service)
    click.secho('  Task Definition info:', fg="green")
    print_task_definition(service.active_task_definition)
    print()
    if not dry_run:
        click.echo(
            "If you really want to do this, answer \"{}\" to the question below.\n".format(service.serviceName)
        )
        value = click.prompt("What service do you want to delete? ")
        if value == service.serviceName:
            service.scale(0)
            print("  Waiting for our existing containers to die ...")
            service.wait_until_stable()
            print("  All containers dead.")
            service.delete()
            print("  Deleted service {} from cluster {}.".format(service.serviceName, service.clusterName))
        else:
            click.echo("\nNot deleting service \"{}\"".format(service.serviceName))
def version(ctx, service_name):
    """
    Print the tag of the image in the first container on the service.
    """
    service = Service(
        yml=Config(
            filename=ctx.obj['CONFIG_FILE'],
            env_file=ctx.obj['ENV_FILE'],
            import_env=ctx.obj['IMPORT_ENV']
        ).get_service(service_name)
    )
    print(service.version())
def tunnel(ctx, service_name):
    """
    Tunnel through an EC2 instance in the ECS cluster for service SERVICE_NAME
    to the host defined by the DEPLOYFISH__TUNNEL_HOST environment variable.
    """
    service = Service(
        yml=Config(
            filename=ctx.obj['CONFIG_FILE'],
            env_file=ctx.obj['ENV_FILE']
        ).get_service(service_name)
    )
    host_substring = os.environ.get('DEPLOYFISH__TUNNEL_HOST', None)
    if not host_substring:
        click.secho("\nDEPLOYFISH__TUNNEL_HOST is not defined in your environment.", fg="red")
        return
    host_port = os.environ.get('DEPLOYFISH__TUNNEL_PORT', '3306')
    local_port = os.environ.get('DEPLOYFISH__TUNNEL_LOCAL_PORT', '8888')
    tunnel_host = None
    for param in service.get_config():
        if param.key.endswith(host_substring):
            tunnel_host = param.value
    if not tunnel_host:
        click.secho("\nCould not find the host in your config.", fg="red")
        return
    interim_port = random.randrange(10000, 64000, 1)
    service.tunnel(tunnel_host, local_port, interim_port, host_port)
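# Hedged usage sketch for the tunnel command above (values invented): its settings come
# from the shell environment rather than a tunnels: section.  DEPLOYFISH__TUNNEL_HOST is
# matched as a suffix against the service's config: parameter keys, and the matching
# parameter's value becomes the tunnel destination host.
def _example_tunnel_environment():
    import os
    os.environ['DEPLOYFISH__TUNNEL_HOST'] = 'MY_TUNNEL_DESTINATION_HOST'  # suffix of a config: key
    os.environ['DEPLOYFISH__TUNNEL_PORT'] = '3306'                        # remote port (default 3306)
    os.environ['DEPLOYFISH__TUNNEL_LOCAL_PORT'] = '8888'                  # local listen port (default 8888)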
def write_config(ctx, service_name, dry_run):
    """
    If the service SERVICE_NAME has a "config:" section defined, write all of
    the parameters for the service to AWS Parameter Store.
    """
    service = Service(
        yml=Config(
            filename=ctx.obj['CONFIG_FILE'],
            env_file=ctx.obj['ENV_FILE']
        ).get_service(service_name)
    )
    parameters = service.get_config()
    if len(parameters) == 0:
        click.secho('No parameters found for service "{}":'.format(service_name), fg='white')
    else:
        if not dry_run:
            click.secho('Updating parameters for service "{}":'.format(service_name), fg='white')
        else:
            click.secho('Would update parameters for service "{}" like so:'.format(service_name), fg='white')
        print_sorted_parameters(parameters)
    if not dry_run:
        service.write_config()
    else:
        click.echo('\nDRY RUN: not making changes in AWS')
def info(ctx, service_name):
    """
    Show current AWS information about this service and its task definition.
    """
    service = Service(
        yml=Config(
            filename=ctx.obj['CONFIG_FILE'],
            env_file=ctx.obj['ENV_FILE']
        ).get_service(service_name)
    )
    print()
    if service.exists():
        click.secho('"{}" service live info:'.format(service.serviceName), fg="white")
        click.secho('  Service info:', fg="green")
        print_service_info(service)
        click.secho('  Task Definition:', fg="green")
        print_task_definition(service.active_task_definition)
        if service.tasks:
            click.secho('\n"{}" helper tasks:'.format(service.serviceName), fg='white')
            for key, value in service.tasks.items():
                click.secho("  {}".format(key), fg='green')
                print_task_definition(value.active_task_definition)
    else:
        click.secho('"{}" service is not in AWS yet.'.format(service.serviceName), fg="white")
def validate(ctx, service_name):
    """
    Check that we can connect to the MySQL database for service SERVICE_NAME
    by running a trivial query against it from within the cluster.
    """
    service = Service(
        yml=Config(
            filename=ctx.obj['CONFIG_FILE'],
            env_file=ctx.obj['ENV_FILE']
        ).get_service(service_name)
    )
    host, name, user, passwd, port = _get_db_parameters(service)
    cmd = "/usr/bin/mysql --host={} --user={} --password={} --port={} --execute='select version(), current_date;'"
    cmd = cmd.format(host, user, quote(passwd), port)
    success, output = service.run_remote_script([cmd])
    print(success)
    print(output)
def create(ctx, service_name, update_configs, dry_run, wait, asg, force_asg):
    """
    Create a new ECS service named SERVICE_NAME.
    """
    service = Service(
        yml=Config(
            filename=ctx.obj['CONFIG_FILE'],
            env_file=ctx.obj['ENV_FILE'],
            import_env=ctx.obj['IMPORT_ENV'],
            tfe_token=ctx.obj['TFE_TOKEN']
        ).get_service(service_name)
    )
    print()
    if service.exists():
        click.secho('Service "{}" already exists!'.format(service.serviceName), fg='red')
        sys.exit(1)
    click.secho('Creating service with these attributes:', fg='white')
    click.secho('  Service info:', fg="green")
    print_service_info(service)
    click.secho('  Task Definition:', fg='green')
    print_task_definition(service.desired_task_definition)
    if service.tasks:
        click.secho('\nCreating these helper tasks:', fg='white')
        for key, value in service.tasks.items():
            click.secho("  {}".format(key), fg='green')
            print_task_definition(value.desired_task_definition)
    parameters = service.get_config()
    if update_configs:
        if len(parameters) > 0:
            click.secho('\nUpdating service config parameters like so:', fg='white')
            print_sorted_parameters(parameters)
        else:
            click.secho('\nService has no config parameters defined: SKIPPING', fg='white')
    else:
        if parameters:
            click.secho('\nService has config parameters defined: SKIPPING', fg='red')
            if dry_run:
                click.secho(
                    '  Either run create with the --update-configs flag or do "deploy config write {}"'.format(
                        service_name
                    )
                )
            else:
                click.secho('  To update them in AWS, do "deploy config write {}"'.format(service_name))
    if not dry_run:
        manage_asg_count(service, service.count, asg, force_asg)
        service.create()
        if wait:
            click.secho("\n  Waiting until the service is stable ...", fg='white')
            if service.wait_until_stable():
                click.secho("  Done.", fg='white')
            else:
                click.secho("  FAILURE: the service failed to start.", fg='red')
                sys.exit(1)
def docker_exec(ctx, service_name):
    """
    SSH to an EC2 instance in the cluster defined in the service named
    SERVICE_NAME, then run docker exec on the appropriate container.
    """
    service = Service(
        yml=Config(
            filename=ctx.obj['CONFIG_FILE'],
            env_file=ctx.obj['ENV_FILE']
        ).get_service(service_name)
    )
    service.docker_exec()
def entrypoint(ctx, command, dry_run):
    """
    Use this as the entrypoint for your containers.

    It will look in the shell environment for the environment variables
    DEPLOYFISH_SERVICE_NAME and DEPLOYFISH_CLUSTER_NAME.  If found, it will
    use them to:

    \b
    * download the parameters listed in the "config:" section for service
      DEPLOYFISH_SERVICE_NAME from the AWS Systems Manager Parameter Store
      (which are prefixed by "${DEPLOYFISH_CLUSTER_NAME}.${DEPLOYFISH_SERVICE_NAME}.")
    * set those parameters and their values as environment variables
    * run COMMAND

    If either DEPLOYFISH_SERVICE_NAME or DEPLOYFISH_CLUSTER_NAME is not in
    the environment, just run COMMAND.
    """
    service_name = os.environ.get('DEPLOYFISH_SERVICE_NAME', None)
    cluster_name = os.environ.get('DEPLOYFISH_CLUSTER_NAME', None)
    if service_name and cluster_name:
        service_yml = Config(filename=ctx.obj['CONFIG_FILE'], interpolate=False).get_service(service_name)
        parameter_store = []
        if 'config' in service_yml:
            parameter_store = ParameterStore(service_name, cluster_name, yml=service_yml['config'])
            parameter_store.populate()
        if not dry_run:
            for param in parameter_store:
                if param.exists and param.should_exist:
                    os.environ[param.key] = param.aws_value
                else:
                    print(
                        "event='deploy.entrypoint.parameter.not_in_aws' service='{}' parameter='{}'".format(
                            service_name, param.name
                        )
                    )
        else:
            exists = []
            notexists = []
            for param in parameter_store:
                if param.exists:
                    exists.append(param)
                else:
                    notexists.append(param)
            click.secho("Would have set these environment variables:", fg="cyan")
            for param in exists:
                click.echo('  {}={}'.format(param.key, param.aws_value))
            click.secho("\nThese parameters are not in AWS:", fg="red")
            for param in notexists:
                click.echo('  {}'.format(param.key))
    if dry_run:
        click.secho('\n\nCOMMAND: {}'.format(command))
    else:
        subprocess.call(command)
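# Hedged illustration (not deployfish code) of the Parameter Store naming convention the
# entrypoint docstring above describes: config: keys are stored under names prefixed with
# "${DEPLOYFISH_CLUSTER_NAME}.${DEPLOYFISH_SERVICE_NAME}.".  The helper below only shows
# the shape of those names; the real lookup is done by ParameterStore.populate(), and the
# example values are made up.
def _example_parameter_store_key(cluster_name, service_name, key):
    # e.g. ('my-cluster', 'my-service', 'DB_HOST') -> 'my-cluster.my-service.DB_HOST'
    return '{}.{}.{}'.format(cluster_name, service_name, key)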
def run_task(ctx, service_name, command):
    """
    Run the one-off task COMMAND on SERVICE_NAME.
    """
    service = Service(
        yml=Config(
            filename=ctx.obj['CONFIG_FILE'],
            env_file=ctx.obj['ENV_FILE']
        ).get_service(service_name)
    )
    response = service.run_task(command)
    if response:
        print(response)
def setUp(self):
    current_dir = os.path.dirname(os.path.abspath(__file__))
    filename = os.path.join(current_dir, 'simple.yml')
    self.config = Config(filename=filename, interpolate=False)
    with Replacer() as r:
        r.replace('deployfish.aws.ecs.Service.from_aws', Mock())
        self.service = Service('foobar-prod', config=self.config)
    p = Parameter('foobar-service', 'foobar-cluster', yml='KEY=VALUE')
    self.service.parameter_store.append(p)
    self.service.desired_task_definition.set_parameter_store(self.service.parameter_store)
def docker_exec(ctx, service_name, verbose):
    """
    SSH to an EC2 instance in the cluster defined in the service named
    SERVICE_NAME, then run docker exec on the appropriate container.
    """
    service = Service(
        yml=Config(
            filename=ctx.obj['CONFIG_FILE'],
            env_file=ctx.obj['ENV_FILE'],
            import_env=ctx.obj['IMPORT_ENV'],
            tfe_token=ctx.obj['TFE_TOKEN']
        ).get_service(service_name)
    )
    service.docker_exec(verbose=verbose)
def update(ctx, service_name, dry_run, wait):
    """
    Update our ECS service from what is in deployfish.yml.  This means two things:

    \b
    * Update the task definition
    * Update the scaling policies (if any)

    These things can only be changed by deleting and recreating the service:

    \b
    * service name
    * cluster name
    * load balancer

    If you want to update the desiredCount on the service, use "deploy scale".
    """
    service = Service(
        yml=Config(
            filename=ctx.obj['CONFIG_FILE'],
            env_file=ctx.obj['ENV_FILE'],
            import_env=ctx.obj['IMPORT_ENV'],
            tfe_token=ctx.obj['TFE_TOKEN']
        ).get_service(service_name)
    )
    print()
    click.secho('Updating "{}" service:'.format(service.serviceName), fg="white")
    click.secho('  Current task definition:', fg="yellow")
    print_task_definition(service.active_task_definition)
    click.secho('\n  New task definition:', fg="green")
    print_task_definition(service.desired_task_definition)
    if service.tasks:
        click.secho('\nUpdating "{}" helper tasks to:'.format(service.serviceName), fg='white')
        for key, value in service.tasks.items():
            click.secho("  {}".format(key), fg='green')
            print_task_definition(value.desired_task_definition)
    if service.scaling and service.scaling.needs_update():
        click.secho('\nUpdating "{}" application scaling'.format(service.serviceName), fg='white')
    if not dry_run:
        service.update()
        if wait:
            click.secho("\n  Waiting until the service is stable with our new task def ...", fg='white')
            if service.wait_until_stable():
                click.secho("  Done.", fg='white')
            else:
                click.secho("  FAILURE: the service failed to start.", fg='red')
                sys.exit(1)
def ssh(ctx, service_name, verbose):
    """
    If the service SERVICE_NAME has any running tasks, randomly choose one of
    the container instances on which one of those tasks is running and ssh into it.

    If the service SERVICE_NAME has no running tasks, randomly choose one of
    the container instances in the cluster on which the service is defined.
    """
    service = Service(
        yml=Config(
            filename=ctx.obj['CONFIG_FILE'],
            env_file=ctx.obj['ENV_FILE']
        ).get_service(service_name)
    )
    service.ssh(verbose=verbose)
def restart(ctx, service_name, hard):
    """
    Restart all tasks in the service SERVICE_NAME by killing them off one by
    one.  Kill each task and wait for it to be replaced before killing the
    next one off.
    """
    service = Service(
        yml=Config(
            filename=ctx.obj['CONFIG_FILE'],
            env_file=ctx.obj['ENV_FILE']
        ).get_service(service_name)
    )
    print()
    click.secho('Restarting tasks in "{}" service in cluster "{}"'.format(service.serviceName, service.clusterName))
    service.restart(hard=hard)