Example 1
    def __init__(self, serviceName, clusterName, yml={}, aws={}, scaling_policy_arn=None):
        """
        `yml` should be a dict with three keys:

            cpu: >=60
            check_every_seconds: 60
            periods: 5

        In this case, the alarm will examine the ECS Service CPU metric every 60 seconds.
        If the service CPU is >= 60 for 5 * 60 seconds == 300 seconds, the alarm enters ALARM state.

        :param serviceName: the name of the ECS service to monitor
        :type serviceName: string

        :param clusterName: the name of the cluster the service is in
        :type clusterName: string

        :param aws: (optional) the dict returned by ``describe_alarms()`` for this Alarm
        :type aws: dict

        :param scaling_policy_arn: (optional) the ARN of the scaling policy that should be activated when the alarm
                             enters ALARM state.
        :type scaling_policy_arn: string

        """
        self.cloudwatch = get_boto3_session().client('cloudwatch')
        self.serviceName = serviceName
        self.clusterName = clusterName
        self.scaling_policy_arn = scaling_policy_arn
        self.__defaults()
        self.from_yaml(yml)
        self.from_aws(aws)
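
The ``yml`` block described above corresponds to a single CloudWatch metric alarm on the service's CPU. As a rough sketch (not deployfish's actual implementation), the equivalent direct boto3 call could look like this; the alarm name, cluster and service names, and policy ARN are all hypothetical, and in deployfish the client would come from ``get_boto3_session()``:

    import boto3

    cloudwatch = boto3.client('cloudwatch')
    cloudwatch.put_metric_alarm(
        AlarmName='my-service-cpu-high',   # hypothetical alarm name
        Namespace='AWS/ECS',
        MetricName='CPUUtilization',
        Dimensions=[
            {'Name': 'ClusterName', 'Value': 'my-cluster'},
            {'Name': 'ServiceName', 'Value': 'my-service'},
        ],
        Statistic='Average',
        Period=60,            # check_every_seconds
        EvaluationPeriods=5,  # periods
        Threshold=60.0,       # cpu: >=60
        ComparisonOperator='GreaterThanOrEqualToThreshold',
        # Hypothetical Application Auto Scaling policy ARN (scaling_policy_arn).
        AlarmActions=[
            'arn:aws:autoscaling:us-east-1:123456789012:scalingPolicy:00000000'
            ':resource/ecs/service/my-cluster/my-service:policyName/scale-up'
        ],
    )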
Example 2
 def __init__(self, service, cluster, aws={}, yml=None):
     self.ssm = get_boto3_session().client('ssm')
     self.service = service
     self.cluster = cluster
     self.__defaults()
     self.__from_yml(yml)
     self.__from_aws(aws)
Example 3
 def __init__(self, group_name=None, yml=None):
     if yml is None:
         yml = {}
     self.asg = get_boto3_session().client('autoscaling')
     self.__groupName = group_name
     self.from_yaml(yml)
     self.from_aws()
Example 4
 def __init__(self, name, kms_key_id=None):
     self.ssm = get_boto3_session().client('ssm')
     self._defaults(kms_key_id=kms_key_id)
     self._key = None
     # 2020-01-27 rrollins: This is weird, since name is a @property here. But I think it maybe works because of how
     # we subclass this class?
     self.name = name
Example 5
    def __init__(self, task_definition_id=None, yml=None):
        self.ecs = get_boto3_session().client('ecs')

        self.__defaults()
        if task_definition_id:
            self.from_aws(task_definition_id)
        if yml:
            self.from_yaml(yml)
Example 6
 def __create_cw_log_groups(self):
     cw = get_boto3_session().client('logs')
     for g in self.__cw_log_groups:
         tags = flatten_tags(g.get('tags', {}))
         try:
             cw.create_log_group(logGroupName=g['name'], tags=tags)
         except cw.exceptions.ResourceAlreadyExistsException:
             print('Log group {name} already exists, skipping.'.format(
                 name=g['name']))
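
The try/except above makes log-group creation idempotent: creating a group that already exists raises ``ResourceAlreadyExistsException``, which is swallowed so the deploy can be re-run safely. A self-contained sketch of the same pattern against boto3 directly, with a hypothetical group name and tags:

    import boto3

    logs = boto3.client('logs')
    group_name = '/ecs/my-service'            # hypothetical log group name
    tags = {'Project': 'deployfish-example'}  # hypothetical tags

    try:
        logs.create_log_group(logGroupName=group_name, tags=tags)
    except logs.exceptions.ResourceAlreadyExistsException:
        # The group is already there; treat this as success so the call can be repeated.
        print('Log group {} already exists, skipping.'.format(group_name))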
Example 7
    def __init__(self, aws={}, yml={}):
        """
        :param aws: an entry from the `containerDefinitions` list in the
                           dictionary returned by `describe_task_definition()`. If
                           the container has any volumes, this dict will differ from
                           the canonical AWS source in that it will have volume definitions
                           from the task definition added.
        :type aws: dict

        :param yml: a container definition from our deployfish.yml file
        :type yml: dict
        """
        self.ecs = get_boto3_session().client('ecs')
        self.ecr = get_boto3_session().client('ecr')
        self.__aws_container_definition = aws

        self._name = None
        self._cpu = None
        self._image = None
        self._memory = None
        self._memoryReservation = None
        self._command = None
        self._entryPoint = None
        self._essential = True
        self._dockerLabels = {}
        self._volumes = []
        self._extraHosts = []
        self._links = []
        self._ulimits = []
        self._cap_add = []
        self._cap_drop = []
        self._tmpfs = []
        self._environment = {}
        self._portMappings = []
        self._secrets = []
        self.logConfiguration = None
        self.local_image = None
        self.firelensConfiguration = None

        if 'logConfiguration' in aws:
            self.logConfiguration = LogConfiguration(aws['logConfiguration'])

        if yml:
            self.from_yaml(yml)
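
Per the docstring, ``aws`` is one entry from the task definition's ``containerDefinitions`` list (with volume definitions from the task definition merged in by deployfish). A sketch of how such an entry can be fetched with boto3, using a hypothetical task definition name:

    import boto3

    ecs = boto3.client('ecs')
    # 'my-task:1' is a hypothetical family:revision.
    response = ecs.describe_task_definition(taskDefinition='my-task:1')
    container_definitions = response['taskDefinition']['containerDefinitions']
    first_container = container_definitions[0]  # the kind of dict passed in as ``aws``
    print(first_container['name'], first_container.get('image'))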
Example 8
 def __init__(self, service, cluster, aws=None, yml=None):
     if aws is None:
         aws = {}
     self.ssm = get_boto3_session().client('ssm')
     self.service = service
     self.cluster = cluster
     self._defaults()
     self.is_external = False
     self.__from_yml(yml)
     self._from_aws(aws)
Example 9
    def populate(self):
        """
        Lazy loading function to load the values from AWS.
        """
        if self.populated:
            return

        self.ssm = get_boto3_session().client('ssm')
        self.from_yaml(self.yml)
        self.from_aws()
        self.populated = True
Example 10
    def __init__(self, task):
        self.task = task
        self.client = get_boto3_session().client('events')
        if self.task.scheduler_name:
            self.name = task.scheduler_name
        else:
            self.name = "{}-scheduler".format(self.task.taskName)

        if self.task.scheduler_target_name:
            self.target_name = self.task.scheduler_target_name
        else:
            self.target_name = "{}-scheduler-target".format(self.task.taskName)
Example 11
    def __init__(self, serviceName, clusterName, yml=None, aws=None):
        """
        ``yml`` is dict parsed from the ``application-scaling`` section from
        ``deployfish.yml``.  Example:

            {
                'min_capacity': 2,
                'max_capacity': 4,
                'role_arn': 'arn:aws:iam::123445678901:role/ecsServiceRole',
                'scale-up': {
                    'cpu': '>=60',
                    'check_every_seconds': 60,
                    'periods': 5,
                    'cooldown': 60,
                    'scale_by': 1
                },
                'scale-down': {
                    'cpu': '<=30',
                    'check_every_seconds': 60,
                    'periods': 30,
                    'cooldown': 60,
                    'scale_by': -1
                }
            }

        ``aws`` is an entry from the ``ScalableTargets`` list in the response from
        ``boto3.client('application-autoscaling').describe_scalable_targets()``

        :param serviceName: the name of an ECS service in cluster ``clusterName``
        :type serviceName: string

        :param clusterName: the name of an ECS cluster
        :type clusterName: string

        :param yml: scaling config from ``deployfish.yml`` as described above
        :type yml: dict

        :param aws: scalable target AWS dict as described above
        :type aws: dict
        """
        if aws is None:
            aws = {}
        if yml is None:
            yml = {}
        self.scaling = get_boto3_session().client('application-autoscaling')
        self.serviceName = serviceName
        self.clusterName = clusterName
        self.__yml = {}
        self.policies = {}
        self.__defaults()
        self.from_yaml(yml)
        self.from_aws(aws)
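
The ``aws`` argument is one entry from ``ScalableTargets``. A sketch of how that list can be fetched for an ECS service with boto3, using hypothetical cluster and service names:

    import boto3

    scaling = boto3.client('application-autoscaling')
    response = scaling.describe_scalable_targets(
        ServiceNamespace='ecs',
        ResourceIds=['service/my-cluster/my-service'],  # hypothetical cluster/service
        ScalableDimension='ecs:service:DesiredCount',
    )
    scalable_targets = response['ScalableTargets']  # entries shaped like ``aws`` above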
Example 12
    def get_instance_data(self):
        """
        Returns data on the instances in the ECS cluster.

        :return: list
        """
        self._search_hosts()
        instances = self.hosts.values()
        ec2 = get_boto3_session().client('ec2')
        response = ec2.describe_instances(InstanceIds=list(instances))
        if response['Reservations']:
            instances = response['Reservations']
            return instances
        return []
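
Note that this method returns the raw ``Reservations`` entries rather than a flat list of instances; each reservation wraps its own ``Instances`` list. A caller that wants per-instance fields would flatten the structure, roughly like this (the instance ID is hypothetical):

    import boto3

    ec2 = boto3.client('ec2')
    # In get_instance_data() the IDs come from self.hosts; this one is made up.
    response = ec2.describe_instances(InstanceIds=['i-0123456789abcdef0'])

    instances = []
    for reservation in response['Reservations']:
        instances.extend(reservation['Instances'])
    for instance in instances:
        print(instance['InstanceId'], instance.get('PrivateIpAddress'))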
Example 13
    def __init__(self, clusterName, yml={}):
        """
        :param clusterName: the name of the cluster in which we'll run our
                            helper tasks
        :type clusterName: string

        :param yml: the task definition information for the task from our
                    deployfish.yml file
        :type yml: dict
        """
        self.clusterName = clusterName
        self.ecs = get_boto3_session().client('ecs')
        self.commands = {}
        self.from_yaml(yml)
        self.desired_task_definition = TaskDefinition(yml=yml)
        self.active_task_definition = None
Example 14
    def __init__(self, name, service=False, config=None):
        if service:
            yml = config.get_service(name)
        else:
            yml = config.get_task(name)

        self.ecs = get_boto3_session().client('ecs')

        self.taskName = None
        self.clusterName = None
        self.desired_count = 1
        self._launchType = 'EC2'
        self.cluster_specified = False
        self.__defaults()
        self.from_yaml(yml)
        self.from_aws()
        self.scheduler = TaskScheduler(self)
Example 15
    def __init__(self, serviceName, clusterName, yml=None, aws=None):
        """
        ``yml`` is dict parsed from one of the scaling policy subsection of the
        ``application-scaling`` section from ``deployfish.yml``.  Example:

            {
                'cpu': '>=60',
                'check_every_seconds': 60,
                'periods': 5,
                'cooldown': 60,
                'scale_by': 1
            }

        ``aws`` is an entry from the ``ScalingPolicies`` list in the response from
        ``boto3.client('application-autoscaling').describe_scaling_policies()``

        :param serviceName: the name of an ECS service in cluster ``clusterName``
        :type serviceName: string

        :param clusterName: the name of an ECS cluster
        :type clusterName: string

        :param yml: scaling policy from ``deployfish.yml`` as described above
        :type yml: dict

        :param aws: scaling policy AWS dict as described above
        :type aws: dict
        """
        if not yml:
            yml = {}
        if not aws:
            aws = {}
        self.scaling = get_boto3_session().client('application-autoscaling')
        self.serviceName = serviceName
        self.clusterName = clusterName
        self.alarm = None
        self.__defaults()
        self.from_yaml(yml)
        self.from_aws(aws)
        self.alarm = None
        if yml:
            self.alarm = ECSServiceCPUAlarm(self.serviceName,
                                            self.clusterName,
                                            scaling_policy_arn=self.arn,
                                            yml=yml)
Example 16
 def _get_state_file_from_s3(self, state_file_url, profile=None, region=None):
     if profile:
         session = boto3.session.Session(profile_name=profile, region_name=region)
     else:
         session = get_boto3_session()
     s3 = session.resource('s3')
     parts = state_file_url[5:].split('/')
     bucket = parts[0]
     filename = "/".join(parts[1:])
     key = s3.Object(bucket, filename)
     try:
         state_file = key.get()["Body"].read().decode('utf-8')
     except ClientError as ex:
         if ex.response['Error']['Code'] == 'NoSuchKey':
             raise NoSuchStateFile("Could not find Terraform state file {}".format(self.state_file_url))
         else:
             raise ex
     return json.loads(state_file)
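
The method assumes an ``s3://bucket/key`` URL: the first five characters (``s3://``) are stripped, the first path segment is the bucket, and the rest is the key. A standalone sketch of the same parsing and read, with a hypothetical state file URL:

    import json

    import boto3

    state_file_url = 's3://my-bucket/terraform/project.tfstate'  # hypothetical URL
    parts = state_file_url[5:].split('/')   # drop the leading 's3://'
    bucket, key = parts[0], '/'.join(parts[1:])

    s3 = boto3.resource('s3')
    body = s3.Object(bucket, key).get()['Body'].read().decode('utf-8')
    state = json.loads(body)
    print(sorted(state.keys()))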
Example 17
    def new(name):
        """
        Returns a list of UnboundParameters matching ``name``.  If ``name`` ends with "*",
        this could be a long list of parameters.  If there is no "*" in name, there
        will be just one Parameter in the list.

        :param name: the name to search for in AWS SSM Parameter Store

        :return: list of Parameter objects
        """
        m = WILDCARD_RE.search(name)
        if m:
            # This is a wildcard search
            filter_option = "BeginsWith"
            filter_values = [m.group('prefix')]
        else:
            # Get a single parameter
            filter_option = "Equals"
            filter_values = [name]

        ssm = get_boto3_session().client('ssm')
        paginator = ssm.get_paginator('describe_parameters')
        response_iterator = paginator.paginate(
            ParameterFilters=[{
                'Key': 'Name',
                'Option': filter_option,
                'Values': filter_values
            }],
            PaginationConfig={'MaxItems': 100, 'PageSize': 50}
        )
        parms = []
        for r in response_iterator:
            parms.extend(r['Parameters'])
        return [
            UnboundParameter(parm['Name'],
                             kms_key_id=parm.get('KeyId', None) or None)
            for parm in parms
        ]
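
The two branches differ only in the ``describe_parameters`` filter: ``Equals`` for an exact name, ``BeginsWith`` for a wildcard prefix. A minimal sketch of the exact-match case against boto3 directly, with a hypothetical parameter name:

    import boto3

    ssm = boto3.client('ssm')
    paginator = ssm.get_paginator('describe_parameters')

    # Exact-match lookup; a wildcard search would use Option='BeginsWith'
    # with the prefix instead.  '/my-service/DEBUG' is a hypothetical name.
    pages = paginator.paginate(
        ParameterFilters=[{'Key': 'Name', 'Option': 'Equals', 'Values': ['/my-service/DEBUG']}],
        PaginationConfig={'MaxItems': 100, 'PageSize': 50},
    )
    for page in pages:
        for parm in page['Parameters']:
            print(parm['Name'], parm.get('KeyId'))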
Example 18
    def _get_host_bastion(self, instance_id):
        """
        Given an EC2 ``instance_id`` return the private IP address of
        the instance identified by ``instance_id`` and the public
        DNS name of the bastion host you would use to reach it via ssh.

        :param instance_id: an EC2 instance id
        :type instance_id: string

        :rtype: 2-tuple (instance_private_ip_address, bastion_host_dns_name)
        """
        ip = None
        vpc_id = None
        bastion = ''
        ec2 = get_boto3_session().client('ec2')
        response = ec2.describe_instances(InstanceIds=[instance_id])
        if response['Reservations']:
            instances = response['Reservations'][0]['Instances']
            if instances:
                instance = instances[0]
                vpc_id = instance['VpcId']
                ip = instance['PrivateIpAddress']
        if ip and vpc_id:
            response = ec2.describe_instances(
                Filters=[
                    {
                        'Name': 'tag:Name',
                        'Values': ['bastion*']
                    },
                    {
                        'Name': 'vpc-id',
                        'Values': [vpc_id]
                    }
                ]
            )
            if response['Reservations']:
                instances = response['Reservations'][0]['Instances']
                if instances:
                    instance = instances[0]
                    bastion = instance['PublicDnsName']
        return ip, bastion
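
One common use of the returned ``(ip, bastion)`` pair is to ssh to the private instance by hopping through the bastion. A sketch of that, with hypothetical addresses and an assumed ``ec2-user`` login (deployfish itself may build the command differently):

    import subprocess

    # Hypothetical values of the kind _get_host_bastion() returns.
    ip, bastion = '10.0.1.23', 'ec2-203-0-113-1.compute-1.amazonaws.com'

    # Hop through the bastion with ssh's ProxyCommand; the 'ec2-user' login is an assumption.
    subprocess.call([
        'ssh',
        '-o', 'ProxyCommand=ssh -W %h:%p ec2-user@{}'.format(bastion),
        'ec2-user@{}'.format(ip),
    ])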
Example 19
    def new(service, cluster, yml=None):
        """
        Returns a list of Parameter objects for ``service`` in ``cluster``.

        If ``yml`` contains a wildcard ("*"), look up all matching parameter
        names in AWS SSM Parameter Store and build an "external" Parameter for
        each match; otherwise return a single Parameter built directly from
        ``yml``.

        :param service: the name of an ECS service
        :param cluster: the name of the ECS cluster the service runs in
        :param yml: a parameter definition from deployfish.yml
        :return: list of Parameter objects
        """
        if yml:
            m = WILDCARD_RE.search(yml)
            if m:
                parameter_list = []
                ssm = get_boto3_session().client('ssm')
                paginator = ssm.get_paginator('describe_parameters')
                response_iterator = paginator.paginate(
                    ParameterFilters=[{
                        'Key': 'Name',
                        'Option': 'BeginsWith',
                        'Values': [m.group('prefix')]
                    }],
                    PaginationConfig={'MaxItems': 100, 'PageSize': 50}
                )
                parms = []
                for r in response_iterator:
                    parms.extend(r['Parameters'])
                for parm in parms:
                    if parm['Type'] == 'SecureString':
                        line = "{}:external:secure:{}".format(
                            parm['Name'], parm['KeyId'])
                    else:
                        line = "{}:external".format(parm['Name'])
                    parameter_list.append(Parameter(service, cluster,
                                                    yml=line))
                return parameter_list

        return [Parameter(service, cluster, yml=yml)]
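
In the wildcard branch, each matching SSM parameter is rewritten as an ``external`` definition line before being handed to ``Parameter``. A small sketch of just that formatting step, using hypothetical ``describe_parameters`` entries:

    # Hypothetical entries shaped like the ones the wildcard branch iterates over.
    parms = [
        {'Name': '/my-service/DEBUG', 'Type': 'String'},
        {'Name': '/my-service/DB_PASSWORD', 'Type': 'SecureString', 'KeyId': 'alias/aws/ssm'},
    ]

    lines = []
    for parm in parms:
        if parm['Type'] == 'SecureString':
            lines.append('{}:external:secure:{}'.format(parm['Name'], parm['KeyId']))
        else:
            lines.append('{}:external'.format(parm['Name']))

    print(lines)
    # ['/my-service/DEBUG:external',
    #  '/my-service/DB_PASSWORD:external:secure:alias/aws/ssm']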
Example 20
    def __init__(self, service_name, config=None):
        yml = config.get_service(service_name)
        self.ecs = get_boto3_session().client('ecs')
        self.__aws_service = None

        self.asg = None
        self.scaling = None
        self.serviceDiscovery = None
        self.searched_hosts = False
        self.is_running = False
        self.hosts = None
        self.host_ips = None
        self._serviceName = None
        self._clusterName = None
        self._desired_count = 0
        self._minimumHealthyPercent = None
        self._maximumPercent = None
        self._launchType = 'EC2'
        self.__service_discovery = []
        self.__defaults()
        self.from_yaml(yml)
        self.from_aws()
Example 21
    def __init__(self, registry_arn=None, yml={}):
        """
        ``yml`` is dict parsed from the ``service_discovery`` section from
        ``deployfish.yml``.  Example:

            {
                'namespace': 'local',
                'name': 'test',
                'dns_records': [
                    {
                        'type': 'A',
                        'ttl': '60',
                    }
                ],
            }

        :param yml: service discovery config from ``deployfish.yml`` as described above
        :type yml: dict
        """
        self.sd = get_boto3_session().client('servicediscovery')
        self.__defaults()
        self._registry_arn = registry_arn
        self.from_yaml(yml)
Example 22
    def _get_cloudwatch_logs(self):
        """
        Retrieve and display the logs corresponding to our task until there are no more available.
        """
        log_config = self.active_task_definition.containers[0].logConfiguration
        if log_config.driver != 'awslogs':
            return

        prefix = log_config.options['awslogs-stream-prefix']
        group = log_config.options['awslogs-group']
        container = self.active_task_definition.containers[0].name
        task_id = self.taskarn.split(':')[-1][5:]
        stream = "{}/{}/{}".format(prefix, container, task_id)

        log_client = get_boto3_session().client('logs')

        nextToken = None
        kwargs = {
            'logGroupName': group,
            'logStreamName': stream,
            'startFromHead': True
        }

        print("Waiting for logs...\n")
        for i in range(40):
            time.sleep(5)
            response = log_client.get_log_events(**kwargs)
            for event in response['events']:
                print(event['message'])
            token = response['nextForwardToken']
            if token == nextToken:
                return
            nextToken = response['nextForwardToken']
            kwargs['nextToken'] = nextToken
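
``get_log_events`` pages forward with ``nextForwardToken`` and returns the same token again once the stream is exhausted, which is why the loop above stops when the token repeats. A standalone sketch of that polling pattern, with hypothetical group and stream names:

    import time

    import boto3

    logs = boto3.client('logs')
    kwargs = {
        'logGroupName': '/ecs/my-task',            # hypothetical group
        'logStreamName': 'my-prefix/app/abc123',   # hypothetical stream
        'startFromHead': True,
    }

    last_token = None
    while True:
        response = logs.get_log_events(**kwargs)
        for event in response['events']:
            print(event['message'])
        token = response['nextForwardToken']
        if token == last_token:
            break  # the token stopped changing: no more events to read
        last_token = token
        kwargs['nextToken'] = token
        time.sleep(5)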
Example 23
 def put_string(self, data, key):
     s3 = get_boto3_session().client('s3')
     s3.put_object(Bucket=self.config["dest"], Key=key, Body=data)
Example 24
 def delete_object(self, key):
     s3 = get_boto3_session().client('s3')
     s3.delete_object(Bucket=self.config["dest"], Key=key)
Example 25
 def __init__(self, name, kms_key_id=None):
     self.ssm = get_boto3_session().client('ssm')
     self._defaults(kms_key_id=kms_key_id)
     self.name = name
Example 26
    def _show_current_status(self):
        response = self.__get_service()
        # print response
        status = response['status']
        events = response['events']
        desired_count = response['desiredCount']
        if status == 'ACTIVE':
            success = True
        else:
            success = False

        deployments = response['deployments']
        if len(deployments) > 1:
            success = False

        print("Deployment Desired Pending Running")
        for deploy in deployments:
            if deploy['desiredCount'] != deploy['runningCount']:
                success = False
            print(deploy['status'], deploy['desiredCount'], deploy['pendingCount'], deploy['runningCount'])

        print("")

        print("Service:")
        for index, event in enumerate(events):
            if index <= 5:
                print(event['message'])

        if self.load_balancer:
            if isinstance(self.load_balancer, dict):
                lbtype = 'elb'
            else:
                lbtype = 'alb'
        else:
            lbtype = None
        if lbtype == 'elb':
            print("")
            print("Load Balancer")
            elb = get_boto3_session().client('elb')
            response = elb.describe_instance_health(LoadBalancerName=self.load_balancer['load_balancer_name'])
            states = response['InstanceStates']
            if len(states) < desired_count:
                success = False
            for state in states:
                if state['State'] != "InService" or state['Description'] != "N/A":
                    success = False
                print(state['InstanceId'], state['State'], state['Description'])
        elif lbtype == 'alb':
            for target_group in self.load_balancer:
                print("")
                print("Target Group: {}".format(target_group['target_group_arn']))
                alb = get_boto3_session().client('elbv2')
                response = alb.describe_target_health(TargetGroupArn=target_group['target_group_arn'])
                if len(response['TargetHealthDescriptions']) < desired_count:
                    success = False
                for desc in response['TargetHealthDescriptions']:
                    if desc['TargetHealth']['State'] != 'healthy':
                        success = False
                    print(
                        desc['Target']['Id'],
                        desc['TargetHealth']['State'],
                        desc['TargetHealth'].get('Description', '')
                    )
        return success
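
The ALB branch hinges on ``describe_target_health``: the deploy only counts as successful if every registered target is ``healthy`` and at least ``desired_count`` targets are present. A minimal sketch of that check on its own, with a hypothetical target group ARN:

    import boto3

    alb = boto3.client('elbv2')
    # Hypothetical target group ARN.
    target_group_arn = (
        'arn:aws:elasticloadbalancing:us-east-1:123456789012:'
        'targetgroup/example/0123456789abcdef'
    )
    response = alb.describe_target_health(TargetGroupArn=target_group_arn)
    all_healthy = all(
        desc['TargetHealth']['State'] == 'healthy'
        for desc in response['TargetHealthDescriptions']
    )
    print('healthy' if all_healthy else 'not healthy')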