def _update(self):
    """
    Updates the cluster's instance groups or instance fleets. Only InstanceCount (groups) and
    the target capacities (fleets) are updated.
    """
    desired_instances = self.desired_state_definition.get("Instances").get("InstanceGroups")

    # instance pool
    if not desired_instances:
        desired_instances = self.desired_state_definition.get("Instances").get("InstanceFleets")

    # TODO: neither pool nor group
    if not desired_instances:
        return True

    desired_instance_dict = pcf_util.list_to_dict("Name", desired_instances)
    desired_instance_dict = {
        k: pcf_util.param_filter(v, EMRCluster.FILTERED_UPDATE_PARAMS)
        for k, v in desired_instance_dict.items()
    }
    current_instance_dict = {
        k: pcf_util.keep_and_replace_keys(v, EMRCluster.UPDATE_PARAM_CONVERSIONS)
        for k, v in self.current_state_definition.get("Instances").items()
    }

    diff = pcf_util.diff_dict(current_instance_dict, desired_instance_dict)

    for k, v in diff.items():
        curr_instance = self.current_state_definition["Instances"].get(k, {}).get('Id')

        # instance group
        if curr_instance and self.current_state_definition["Instances"].get(k, {}).get('InstanceGroupType'):
            self.client.modify_instance_groups(
                ClusterId=self._get_cluster_id(),
                InstanceGroups=[{
                    "InstanceGroupId": curr_instance,
                    "InstanceCount": v["InstanceCount"]["updated"]
                }])
        # instance pool
        elif curr_instance:
            od_cap = v.get("TargetOnDemandCapacity", {}).get("updated", 0)
            spot_cap = v.get("TargetSpotCapacity", {}).get("updated", 0)
            self.client.modify_instance_fleet(
                ClusterId=self._get_cluster_id(),
                InstanceFleet={
                    "InstanceFleetId": curr_instance,
                    "TargetOnDemandCapacity": od_cap,
                    "TargetSpotCapacity": spot_cap
                })
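# A hedged illustration of the diff shape the loop above consumes. This is an assumption
# inferred from how _update indexes the diff (keyed by Name, each changed field holding an
# "updated" value), not a documented pcf_util.diff_dict contract; names and numbers are
# illustrative only.
example_diff = {
    "MASTER": {"InstanceCount": {"updated": 3}},
    "TASK_FLEET": {
        "TargetOnDemandCapacity": {"updated": 4},
        "TargetSpotCapacity": {"updated": 2},
    },
}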
def _update(self): """ Calls boto3 update_service() Returns: boto3 update_service() response """ new_desired_state_def, diff_dict = pcf_util.update_dict( self.current_state_definition, self.get_desired_state_definition()) update_definition = pcf_util.keep_and_replace_keys( new_desired_state_def, ECSService.UPDATE_PARAM_CONVERSIONS) return self.client.update_service(**update_definition)
def _start(self):
    """
    Calls the boto3 run_task function to create a new task. If successful, the task's arn
    is added to the current_state_definition.
    """
    new_desired_state_def, diff_dict = pcf_util.update_dict(
        self.current_state_definition, self.get_desired_state_definition())
    start_definition = pcf_util.keep_and_replace_keys(
        new_desired_state_def, ECSTask.START_PARAM_CONVERSIONS)
    self.sync_state()

    if self.state == State.stopped and "containers" in self.current_state_definition:
        containers = self.current_state_definition.get("containers")
        for container in containers:
            if container.get("exitCode", -1) != 0:
                logger.warning(
                    "Task {} failed to execute with exit code: {} and reason: {}... setting desired state to {}"
                    .format(self.get_task_arn(), container.get("exitCode"),
                            container.get("reason"), State.stopped))
                self.failure_reason = {
                    "type": "container",
                    "reason": self.current_state_definition.get("stoppedReason", "N/A")
                }
                self.set_desired_state(State.stopped)
                return

    resp = self.client.run_task(**start_definition)
    task = resp.get("tasks", [])
    failures = resp.get("failures", [])

    if len(task) == 1:
        self.current_state_definition["taskArn"] = task[0].get("taskArn")
    elif len(task) == 0 and len(failures) > 0:
        logger.warning(
            "Task {} failed to be placed due to an ECS error: {}... setting desired state to {}"
            .format(self.get_task_arn(), failures[0].get("reason"), State.stopped))
        self.failure_reason = {
            "type": "ecs",
            "reason": failures[0].get("reason")
        }
        self.set_desired_state(State.stopped)
    else:
        raise Exception("ECS Task failed to start")
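# Illustrative run_task response shapes that the branches above handle: exactly one placed
# task, or no tasks plus a placement failure. The arn and failure reason values are
# assumptions used only for this sketch, not captured output.
placed_response = {
    "tasks": [{"taskArn": "arn:aws:ecs:us-east-1:123456789012:task/example"}],
    "failures": [],
}
unplaced_response = {
    "tasks": [],
    "failures": [{"reason": "RESOURCE:MEMORY"}],
}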
def __s3_to_sha256(self):
    """
    Checks whether the zipfile located in S3 has a tag with the latest sha256. If the tag is
    missing, it calculates the hash and adds the tag.

    Returns:
        base64-encoded sha256 hash
    """
    s3_kwargs = pcf_util.keep_and_replace_keys(
        self.desired_state_definition["Code"], LambdaFunction.S3_PARAM_CONVERSIONS)

    # check if tag already contains hashed value
    tags = self.s3client.get_object_tagging(**s3_kwargs)
    for kv in tags["TagSet"]:
        if kv['Key'] == "CodeSha256":
            return kv['Value']

    obj = self.s3client.get_object(**s3_kwargs)
    zfile = obj["Body"].read()
    hashed_zfile = hashlib.sha256(zfile).hexdigest()
    # convert the hex digest to the base64 encoding that Lambda reports as CodeSha256
    hashed_b64 = codecs.encode(codecs.decode(hashed_zfile, 'hex'), 'base64').decode().strip('\n')
    self.s3client.put_object_tagging(
        Tagging={"TagSet": [{"Key": "CodeSha256", "Value": hashed_b64}]}, **s3_kwargs)
    return hashed_b64
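# Minimal equivalence sketch for the hex-to-base64 step above: base64-encoding the raw sha256
# digest produces the same string as decoding the hex digest and re-encoding it with the
# base64 codec. The payload bytes are an assumption used only for illustration.
import base64
import codecs
import hashlib

payload = b"example zip archive bytes"
digest = hashlib.sha256(payload)
assert base64.b64encode(digest.digest()).decode() == \
    codecs.encode(codecs.decode(digest.hexdigest(), 'hex'), 'base64').decode().strip('\n')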
def is_state_definition_equivalent(self):
    """
    Determines if the current state is equivalent to the desired state.

    Returns:
        bool
    """
    desired_instances = self.desired_state_definition.get("Instances").get("InstanceGroups")

    # instance pool
    if not desired_instances:
        desired_instances = self.desired_state_definition.get("Instances").get("InstanceFleets")

    # TODO: neither pool nor group
    if not desired_instances:
        return True

    desired_instance_dict = pcf_util.list_to_dict("Name", desired_instances)
    desired_instance_dict = {
        k: pcf_util.param_filter(v, EMRCluster.FILTERED_UPDATE_PARAMS)
        for k, v in desired_instance_dict.items()
    }
    current_instance_dict = {
        k: pcf_util.keep_and_replace_keys(v, EMRCluster.UPDATE_PARAM_CONVERSIONS)
        for k, v in self.current_state_definition.get("Instances").items()
    }

    # moto bug with instance fleets
    if not current_instance_dict:
        return True

    diff = pcf_util.diff_dict(current_instance_dict, desired_instance_dict)
    return diff == {}