def tunnel(ctx, service_name):
    """
    Tunnel through an EC2 instance in the ECS cluster for service SERVICE_NAME
    to the host defined by the DEPLOYFISH__TUNNEL_HOST environment variable.
    """
    config = Config(filename=ctx.obj['CONFIG_FILE'], env_file=ctx.obj['ENV_FILE'])
    service = Service(yml=config.get_service(service_name))
    # DEPLOYFISH__TUNNEL_HOST names a substring of a config parameter key; that
    # parameter's value is the tunnel destination host.
    host_substring = os.environ.get('DEPLOYFISH__TUNNEL_HOST', None)
    if not host_substring:
        click.secho("\nDEPLOYFISH__TUNNEL_HOST is not defined in your environment:", fg="red")
        return
    host_port = os.environ.get('DEPLOYFISH__TUNNEL_PORT', '3306')
    local_port = os.environ.get('DEPLOYFISH__TUNNEL_LOCAL_PORT', '8888')
    tunnel_host = None
    for parameter in service.get_config():
        if parameter.key.endswith(host_substring):
            tunnel_host = parameter.value
    if not tunnel_host:
        click.secho("\nCould not find the host in your config:", fg="red")
        return
    # Random intermediate port for the hop through the bastion instance.
    interim_port = random.randrange(10000, 64000, 1)
    service.tunnel(tunnel_host, local_port, interim_port, host_port)
def info(ctx, service_name):
    """
    Show current AWS information about this service and its task definition
    """
    service = Service(
        yml=Config(filename=ctx.obj['CONFIG_FILE'],
                   env_file=ctx.obj['ENV_FILE']).get_service(service_name))
    # BUG FIX: a bare ``print`` (a Python 2 print statement) is a no-op
    # expression in Python 3 -- it evaluates to the function object and prints
    # nothing.  Call it to emit the intended blank line.
    print()
    if service.exists():
        click.secho('"{}" service live info:'.format(service.serviceName), fg="white")
        click.secho(' Service info:', fg="green")
        print_service_info(service)
        click.secho(' Task Definition:', fg="green")
        print_task_definition(service.active_task_definition)
        # Helper tasks (one-off commands) get their own task definitions.
        if service.tasks:
            click.secho('\n"{}" helper tasks:'.format(service.serviceName), fg='white')
            for key, value in service.tasks.items():
                click.secho(" {}".format(key), fg='green')
                print_task_definition(value.active_task_definition)
    else:
        click.secho('"{}" service is not in AWS yet.'.format(service.serviceName), fg="white")
def scale(ctx, service_name, count, dry_run, wait, asg, force_asg):
    """
    Set the desired count for service SERVICE_NAME to COUNT.
    """
    service = Service(
        yml=Config(filename=ctx.obj['CONFIG_FILE'],
                   env_file=ctx.obj['ENV_FILE'],
                   import_env=ctx.obj['IMPORT_ENV'],
                   tfe_token=ctx.obj['TFE_TOKEN']).get_service(service_name))
    # BUG FIX: a bare ``print`` (a Python 2 print statement) is a no-op
    # expression in Python 3; call it to emit the intended blank line.
    print()
    # Possibly resize the autoscaling group first so the cluster can hold
    # the new task count.
    manage_asg_count(service, count, asg, force_asg)
    click.secho(
        'Updating desiredCount on "{}" service in cluster "{}" to {}.'.format(
            service.serviceName, service.clusterName, count), fg="white")
    if not dry_run:
        service.scale(count)
        if wait:
            click.secho(" Waiting until the service is stable with our new count ...", fg='cyan')
            if service.wait_until_stable():
                click.secho(" Done.", fg='white')
            else:
                click.secho(" FAILURE: the service failed to start.", fg='red')
                sys.exit(1)
class TestService_no_ec2_secrets_in_task_definition_if_no_execution_role(unittest.TestCase):
    """Creating a service with no execution role should attach no container secrets."""

    def setUp(self):
        here = os.path.dirname(os.path.abspath(__file__))
        self.config = Config(filename=os.path.join(here, 'simple.yml'), interpolate=False)
        # Strip the execution role from the service under test.
        del self.config.raw['services'][4]['execution_role']
        # Stub the parameter-store client so no AWS calls are made.
        ssm_client = Mock()
        ssm_client.get_parameters.return_value = {'Parameters': []}
        ssm_client.describe_parameters.return_value = {'Parameters': []}
        fake_session = Mock(client=Mock(return_value=ssm_client))
        with Replacer() as patcher:
            patcher.replace('deployfish.aws.ecs.Service.from_aws', Mock())
            patcher.replace('deployfish.aws.ecs.TaskDefinition.create', Mock())
            patcher.replace('deployfish.aws.boto3_session', fake_session)
            self.service = Service('foobar-secrets-ec2', config=self.config)
            self.service.create()

    def test_sanity_check_name(self):
        self.assertEqual(self.service.serviceName, 'foobar-secrets-ec2')

    def test_sanity_check_config(self):
        self.assertEqual(len(self.config.get_service('foobar-secrets-ec2')['config']), 3)

    def test_config_with_execution_role(self):
        self.assertEqual(len(self.service.desired_task_definition.containers[0].secrets), 0)
def setUp(self):
    """Build a Service for 'foobar-prod3' from the fixture config, with AWS lookups stubbed."""
    here = os.path.dirname(os.path.abspath(__file__))
    config = Config(filename=os.path.join(here, 'simple.yml'), interpolate=False)
    # Prevent any live AWS lookup during Service construction.
    with Replacer() as patcher:
        patcher.replace('deployfish.aws.ecs.Service.from_aws', Mock())
        self.service = Service('foobar-prod3', config=config)
def show_config(ctx, service_name, diff, to_env_file):
    """
    If the service SERVICE_NAME has a "config:" section defined, print a list
    of all parameters for the service and the values they currently have in
    AWS.
    """
    service = Service(service_name, config=ctx.obj['CONFIG'])
    # Headers are suppressed in env-file mode so the output stays parseable.
    if not to_env_file:
        if diff:
            header = 'Diff between local and AWS parameters for service "{}":'
        else:
            header = 'Live values of parameters for service "{}":'
        click.secho(header.format(service_name), fg='white')
    parameters = service.get_config()
    if not parameters:
        click.secho(" No parameters found.")
        return
    if diff:
        print_sorted_parameters(parameters)
        return
    for param in parameters:
        if param.exists:
            if param.should_exist:
                if to_env_file:
                    print("{}={}".format(param.key, param.aws_value))
                else:
                    click.secho(" {}".format(param.display(param.key, param.aws_value)))
        elif not to_env_file:
            click.secho(" {}".format(param.display(param.key, "[NOT IN AWS]")), fg="red")
def docker_exec(ctx, service_name, verbose):
    """
    SSH to an EC2 instance in the cluster defined in the service named
    SERVICE_NAME, then run docker exec on the appropriate container.
    """
    Service(service_name, config=ctx.obj['CONFIG']).docker_exec(verbose=verbose)
def tunnel(ctx, tunnel_name):
    """
    Tunnel through an EC2 instance in the ECS cluster.

    The parameters for this command should be found in a tunnels: top-level
    section in the yaml file, in the format:

    \b
    tunnels:
      - name: my_tunnel
        service: my_service
        host: config.MY_TUNNEL_DESTINATION_HOST
        port: 3306
        local_port: 8888

    where config.MY_TUNNEL_DESTINATION_HOST is the value of
    MY_TUNNEL_DESTINATION_HOST for this service in the AWS Parameter Store.
    The host value could also just be a hostname.
    """
    config = ctx.obj['CONFIG']
    tunnel_yml = config.get_section_item('tunnels', tunnel_name)
    service = Service(tunnel_yml['service'], config=config)
    # host/port/local_port may reference "config.<NAME>" parameters; resolve them.
    target_host = _interpolate_tunnel_info(tunnel_yml['host'], service)
    target_port = int(_interpolate_tunnel_info(tunnel_yml['port'], service))
    listen_port = int(_interpolate_tunnel_info(tunnel_yml['local_port'], service))
    # Random intermediate port for the hop through the bastion instance.
    interim_port = random.randrange(10000, 64000, 1)
    service.tunnel(target_host, listen_port, interim_port, target_port)
def write_config(ctx, service_name, dry_run):
    """
    If the service SERVICE_NAME has a "config:" section defined, write all of
    the parameters for the service to AWS Parameter Store.
    """
    config = Config(filename=ctx.obj['CONFIG_FILE'], env_file=ctx.obj['ENV_FILE'])
    service = Service(yml=config.get_service(service_name))
    parameters = service.get_config()
    if not parameters:
        click.secho('No parameters found for service "{}":'.format(service_name), fg='white')
        return
    if dry_run:
        click.secho('Would update parameters for service "{}" like so:'.format(service_name), fg='white')
    else:
        click.secho('Updating parameters for service "{}":'.format(service_name), fg='white')
    print_sorted_parameters(parameters)
    if dry_run:
        click.echo('\nDRY RUN: not making changes in AWS')
    else:
        service.write_config()
def version(ctx, service_name):
    """Print the tag of the image in the first container on the service"""
    config = Config(filename=ctx.obj['CONFIG_FILE'],
                    env_file=ctx.obj['ENV_FILE'],
                    import_env=ctx.obj['IMPORT_ENV'])
    print(Service(yml=config.get_service(service_name)).version())
def run_task(ctx, service_name, command):
    """
    Run the one-off task COMMAND on SERVICE_NAME.
    """
    service = Service(service_name, config=ctx.obj['CONFIG'])
    result = service.run_task(command)
    # Only print when the task actually produced a response.
    if result:
        print(result)
def validate(ctx, service_name):
    """
    Check connectivity to the database configured for service SERVICE_NAME by
    running a trivial query against it from inside the cluster.
    """
    service = Service(yml=Config(filename=ctx.obj['CONFIG_FILE'],
                                 env_file=ctx.obj['ENV_FILE']).get_service(service_name))
    host, name, user, passwd, port = _get_db_parameters(service)
    cmd = "/usr/bin/mysql --host={} --user={} --password={} --port={} --execute='select version(), current_date;'"
    # The password is shell-quoted because it is interpolated into a command line.
    cmd = cmd.format(host, user, quote(passwd), port)
    success, output = service.run_remote_script([cmd])
    # BUG FIX: ``print success`` / ``print output`` are Python 2 print
    # statements and a SyntaxError in Python 3; use the print() function.
    print(success)
    print(output)
def docker_exec(ctx, service_name):
    """
    SSH to an EC2 instance in the cluster defined in the service named
    SERVICE_NAME, then run docker exec on the appropriate container.
    """
    config = Config(filename=ctx.obj['CONFIG_FILE'], env_file=ctx.obj['ENV_FILE'])
    Service(yml=config.get_service(service_name)).docker_exec()
def setUp(self):
    """Build 'foobar-prod' with its deployment-percent overrides removed, AWS stubbed."""
    here = os.path.dirname(os.path.abspath(__file__))
    config = Config(filename=os.path.join(here, 'simple.yml'), interpolate=False)
    # Drop the overrides so the Service falls back to its defaults.
    del config.raw['services'][0]['maximum_percent']
    del config.raw['services'][0]['minimum_healthy_percent']
    # Prevent any live AWS lookup during Service construction.
    with Replacer() as patcher:
        patcher.replace('deployfish.aws.ecs.Service.from_aws', Mock())
        self.service = Service('foobar-prod', config=config)
def run_task(ctx, service_name, command):
    """
    Run the one-off task COMMAND on SERVICE_NAME.
    """
    config = Config(filename=ctx.obj['CONFIG_FILE'], env_file=ctx.obj['ENV_FILE'])
    service = Service(yml=config.get_service(service_name))
    result = service.run_task(command)
    # Only print when the task actually produced a response.
    if result:
        print(result)
def ssh(ctx, service_name, verbose):
    """
    If the service SERVICE_NAME has any running tasks, randomly choose one of
    the container instances on which one of those tasks is running and ssh
    into it.

    If the service SERVICE_NAME has no running tasks, randomly choose one of
    the container instances in the cluster on which the service is defined.
    """
    Service(service_name, config=ctx.obj['CONFIG']).ssh(verbose=verbose)
def update(ctx, service_name, dry_run, wait, push_image, push_tag, create_if_missing):
    """
    Update the our ECS service from what is in deployfish.yml. This means two things:

    \b
    * Update the task definition
    * Update the scaling policies (if any)

    These things can only be changed by deleting and recreating the service:

    \b
    * service name
    * cluster name
    * load balancer

    If you want to update the desiredCount on the service, use "deploy scale".
    """
    service = Service(service_name, config=ctx.obj['CONFIG'], push_image=push_image, push_tag=push_tag)
    print()
    click.secho('Updating "{}" service:'.format(service.serviceName), fg="white")
    # No active task definition means the service does not exist in AWS yet;
    # optionally hand off to the create flow instead.
    if not service.active_task_definition and create_if_missing:
        click.secho('Service "{}" does not exist, creating...:'.format(service.serviceName), fg="white")
        return _create(
            ctx, service_name, update_configs=False, dry_run=dry_run, wait=wait,
            asg=True, force_asg=False, push_image=push_image, push_tag=push_tag
        )
    click.secho(' Current task definition:', fg="yellow")
    print_task_definition(service.active_task_definition)
    click.secho('\n New task definition:', fg="green")
    print_task_definition(service.desired_task_definition)
    if service.tasks:
        click.secho('\nUpdating "{}" helper tasks to:'.format(service.serviceName), fg='white')
        for task_name, task in service.tasks.items():
            click.secho(" {}".format(task_name), fg='green')
            print_task_definition(task.desired_task_definition)
    if service.scaling and service.scaling.needs_update():
        click.secho('\nUpdating "{}" application scaling'.format(service.serviceName), fg='white')
    if dry_run:
        return
    service.update()
    if not wait:
        return
    click.secho("\n Waiting until the service is stable with our new task def ...", fg='white')
    if service.wait_until_stable():
        click.secho(" Done.", fg='white')
    else:
        click.secho(" FAILURE: the service failed to start.", fg='red')
        sys.exit(1)
def docker_exec(ctx, service_name, verbose):
    """
    SSH to an EC2 instance in the cluster defined in the service named
    SERVICE_NAME, then run docker exec on the appropriate container.
    """
    config = Config(filename=ctx.obj['CONFIG_FILE'],
                    env_file=ctx.obj['ENV_FILE'],
                    import_env=ctx.obj['IMPORT_ENV'],
                    tfe_token=ctx.obj['TFE_TOKEN'])
    Service(yml=config.get_service(service_name)).docker_exec(verbose=verbose)
def restart(ctx, service_name, hard):
    """
    Restart all tasks in the service SERVICE_NAME by killing them off one by
    one.  Kill each task and wait for it to be replaced before killing the
    next one off.
    """
    service = Service(service_name, config=ctx.obj['CONFIG'])
    print()
    message = 'Restarting tasks in "{}" service in cluster "{}"'
    click.secho(message.format(service.serviceName, service.clusterName))
    service.restart(hard=hard)
def cluster_run(ctx, service_name):
    """
    Run a command on each of the individual EC2 systems in the ECS cluster
    running SERVICE_NAME.
    """
    command = click.prompt('Command to run')
    service = Service(service_name, config=ctx.obj['CONFIG'])
    # One (success, output) response per cluster instance.
    for position, reply in enumerate(service.cluster_run([command]), start=1):
        click.echo(click.style("Instance {}".format(position), bold=True))
        click.echo("Success: {}".format(reply[0]))
        click.echo(reply[1])
def update(ctx, service_name, dry_run, wait):
    """
    Update the our ECS service from what is in deployfish.yml. This means two things:

    \b
    * Update the task definition
    * Update the scaling policies (if any)

    These things can only be changed by deleting and recreating the service:

    \b
    * service name
    * cluster name
    * load balancer

    If you want to update the desiredCount on the service, use "deploy scale".
    """
    service = Service(
        yml=Config(filename=ctx.obj['CONFIG_FILE'],
                   env_file=ctx.obj['ENV_FILE'],
                   import_env=ctx.obj['IMPORT_ENV'],
                   tfe_token=ctx.obj['TFE_TOKEN']).get_service(service_name))
    # BUG FIX: a bare ``print`` (a Python 2 print statement) is a no-op
    # expression in Python 3; call it to emit the intended blank line.
    print()
    click.secho('Updating "{}" service:'.format(service.serviceName), fg="white")
    click.secho(' Current task definition:', fg="yellow")
    print_task_definition(service.active_task_definition)
    click.secho('\n New task definition:', fg="green")
    print_task_definition(service.desired_task_definition)
    if service.tasks:
        click.secho('\nUpdating "{}" helper tasks to:'.format(service.serviceName), fg='white')
        for key, value in service.tasks.items():
            click.secho(" {}".format(key), fg='green')
            print_task_definition(value.desired_task_definition)
    if service.scaling and service.scaling.needs_update():
        click.secho('\nUpdating "{}" application scaling'.format(service.serviceName), fg='white')
    if not dry_run:
        service.update()
        if wait:
            click.secho("\n Waiting until the service is stable with our new task def ...", fg='white')
            if service.wait_until_stable():
                click.secho(" Done.", fg='white')
            else:
                click.secho(" FAILURE: the service failed to start.", fg='red')
                sys.exit(1)
def create(ctx, service_name):
    """
    Create the database for service SERVICE_NAME on its configured DB host and
    grant the service's DB user full privileges on it.
    """
    service = Service(yml=Config(filename=ctx.obj['CONFIG_FILE'],
                                 env_file=ctx.obj['ENV_FILE']).get_service(service_name))
    host, name, user, passwd, port = _get_db_parameters(service)
    root = click.prompt('DB root user')
    rootpw = click.prompt('DB root password')
    # NOTE(review): credentials are interpolated unquoted into a shell command
    # here, while the sibling "validate" command shell-quotes the password --
    # confirm whether these values should be wrapped in shlex.quote() too.
    cmd = "/usr/bin/mysql --host={} --user={} --password={} --port={} --execute=\"create database {}; grant all privileges on {}.* to '{}'@'%' identified by '{}';\"".format(host, root, rootpw, port, name, name, user, passwd)
    success, output = service.run_remote_script([cmd])
    # success, output = service.create_db(host, root, rootpw, name, user, passwd, port)
    # BUG FIX: ``print success`` / ``print output`` are Python 2 print
    # statements and a SyntaxError in Python 3; use the print() function.
    print(success)
    print(output)
def restart(ctx, service_name, hard):
    """
    Restart all tasks in the service SERVICE_NAME by killing them off one by
    one.  Kill each task and wait for it to be replaced before killing the
    next one off.
    """
    service = Service(
        yml=Config(filename=ctx.obj['CONFIG_FILE'],
                   env_file=ctx.obj['ENV_FILE']).get_service(service_name))
    # BUG FIX: a bare ``print`` (a Python 2 print statement) is a no-op
    # expression in Python 3; call it to emit the intended blank line.
    print()
    click.secho('Restarting tasks in "{}" service in cluster "{}"'.format(
        service.serviceName, service.clusterName))
    service.restart(hard=hard)
def ssh(ctx, service_name, verbose):
    """
    If the service SERVICE_NAME has any running tasks, randomly choose one of
    the container instances on which one of those tasks is running and ssh
    into it.

    If the service SERVICE_NAME has no running tasks, randomly choose one of
    the container instances in the cluster on which the service is defined.
    """
    config = Config(filename=ctx.obj['CONFIG_FILE'], env_file=ctx.obj['ENV_FILE'])
    Service(yml=config.get_service(service_name)).ssh(verbose=verbose)
def setUp(self):
    """Create 'foobar-secrets-ec2' with all AWS touch-points stubbed out."""
    here = os.path.dirname(os.path.abspath(__file__))
    self.config = Config(filename=os.path.join(here, 'simple.yml'), interpolate=False)
    # Stub the parameter-store client so lookups return nothing and hit no AWS.
    ssm_client = Mock()
    ssm_client.get_parameters.return_value = {'Parameters': []}
    ssm_client.describe_parameters.return_value = {'Parameters': []}
    fake_session = Mock(client=Mock(return_value=ssm_client))
    with Replacer() as patcher:
        patcher.replace('deployfish.aws.ecs.Service.from_aws', Mock())
        patcher.replace('deployfish.aws.ecs.TaskDefinition.create', Mock())
        patcher.replace('deployfish.aws.boto3_session', fake_session)
        self.service = Service('foobar-secrets-ec2', config=self.config)
        self.service.create()
def delete(ctx, service_name, dry_run):
    """
    Delete the service SERVICE_NAME from AWS.
    """
    config = Config(filename=ctx.obj['CONFIG_FILE'], env_file=ctx.obj['ENV_FILE'])
    service = Service(yml=config.get_service(service_name))
    print()
    click.secho('Deleting service "{}":'.format(service.serviceName), fg="white")
    click.secho(' Service info:', fg="green")
    print_service_info(service)
    click.secho(' Task Definition info:', fg="green")
    print_task_definition(service.active_task_definition)
    print()
    if dry_run:
        return
    # Require the user to type the exact service name as confirmation.
    click.echo(
        "If you really want to do this, answer \"{}\" to the question below.\n".format(service.serviceName))
    answer = click.prompt("What service do you want to delete? ")
    if answer != service.serviceName:
        click.echo("\nNot deleting service \"{}\"".format(service.serviceName))
        return
    # Drain the service to zero tasks before deleting it.
    service.scale(0)
    print(" Waiting for our existing containers to die ...")
    service.wait_until_stable()
    print(" All containers dead.")
    service.delete()
    print(" Deleted service {} from cluster {}.".format(service.serviceName, service.clusterName))
def cluster_ssh(ctx, service_name):
    """
    SSH to the specified EC2 system in the ECS cluster running SERVICE_NAME.
    """
    service = Service(service_name, config=ctx.obj['CONFIG'])
    addresses = service.get_host_ips()
    # Offer a 1-based menu of instance IPs to choose from.
    for position, address in enumerate(addresses, start=1):
        click.echo("Instance {}: {}".format(position, address))
    choice = click.prompt("Which instance to ssh to?", type=int)
    if choice > len(addresses):
        click.echo("That is not a valid instance.")
        return
    service.cluster_ssh(addresses[choice - 1])
def cluster_info(ctx, service_name):
    """
    Show information about the individual EC2 systems in the ECS cluster
    running SERVICE_NAME.
    """
    service = Service(service_name, config=ctx.obj['CONFIG'])
    # get_instance_data() yields EC2 reservations; report on the first
    # instance in each.
    for position, reservation in enumerate(service.get_instance_data(), start=1):
        click.echo(click.style("Instance {}".format(position), bold=True))
        host = reservation['Instances'][0]
        print("\tIP: {}".format(host['PrivateIpAddress']))
        print("\tType: {}".format(host['InstanceType']))
        for tag in host['Tags']:
            print("\t{}: {}".format(tag['Key'], tag['Value']))
        print("")
def cluster_run(ctx, service_name):
    """
    Run a command on each of the individual EC2 systems in the ECS cluster
    running SERVICE_NAME.
    """
    command = click.prompt('Command to run')
    config = Config(filename=ctx.obj['CONFIG_FILE'],
                    env_file=ctx.obj['ENV_FILE'],
                    import_env=ctx.obj['IMPORT_ENV'])
    service = Service(yml=config.get_service(service_name))
    # One (success, output) response per cluster instance.
    for position, reply in enumerate(service.cluster_run([command]), start=1):
        click.echo(click.style("Instance {}".format(position), bold=True))
        click.echo("Success: {}".format(reply[0]))
        click.echo(reply[1])
def setUp(self):
    """Build 'foobar-prod' minus its percent overrides, then fake the AWS-side service state."""
    here = os.path.dirname(os.path.abspath(__file__))
    config = Config(filename=os.path.join(here, 'simple.yml'), interpolate=False)
    # Drop the overrides so the Service falls back to its defaults.
    del config.raw['services'][0]['maximum_percent']
    del config.raw['services'][0]['minimum_healthy_percent']
    with Replacer() as patcher:
        patcher.replace('deployfish.aws.ecs.Service.from_aws', Mock())
        self.service = Service('foobar-prod', config=config)
    # This is ugly, but it was the only way I could figure out to
    # simulate the AWS load
    self.service._Service__aws_service = {
        'deploymentConfiguration': {
            'minimumHealthyPercent': 53,
            'maximumPercent': 275
        },
        'placementConstraints': [{
            'type': 'memberOf',
            'expression': 'attribute:ecs.instance-type =~ t2.*'
        }],
        'placementStrategy': [{
            'type': 'binpack',
            'field': 'memory'
        }],
        'networkConfiguration': {
            'awsvpcConfiguration': {
                'subnets': ['subnet-12345678'],
                'security_groups': ['sg-12345678'],
                'assignPublicIp': 'DISABLED'
            }
        }
    }