def execute(self, context):
    eks_hook = EKSHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )

    eks_hook.delete_nodegroup(clusterName=self.cluster_name, nodegroupName=self.nodegroup_name)
def execute(self, context):
    eks_hook = EKSHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )

    with eks_hook.generate_config_file(
        eks_cluster_name=self.cluster_name, pod_namespace=self.namespace
    ) as self.config_file:
        return super().execute(context)
def execute(self, context):
    eks_hook = EKSHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )

    eks_hook.delete_fargate_profile(
        clusterName=self.cluster_name, fargateProfileName=self.fargate_profile_name
    )
def poke(self, context):
    eks_hook = EKSHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )

    cluster_state = eks_hook.get_cluster_state(clusterName=self.cluster_name)
    self.log.info("Cluster state: %s", cluster_state)
    return cluster_state == self.target_state
def poke(self, context):
    eks_hook = EKSHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )

    nodegroup_state = eks_hook.get_nodegroup_state(
        clusterName=self.cluster_name, nodegroupName=self.nodegroup_name
    )
    self.log.info("Nodegroup state: %s", nodegroup_state)
    return nodegroup_state == self.target_state
def execute(self, context):
    eks_hook = EKSHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )

    if self.force_delete_compute:
        self.delete_any_nodegroups(eks_hook)
        self.delete_any_fargate_profiles(eks_hook)

    eks_hook.delete_cluster(name=self.cluster_name)
def execute(self, context):
    eks_hook = EKSHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )

    eks_hook.create_fargate_profile(
        clusterName=self.cluster_name,
        fargateProfileName=self.fargate_profile_name,
        podExecutionRoleArn=self.pod_execution_role_arn,
        selectors=self.selectors,
    )
def execute(self, context):
    eks_hook = EKSHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )

    eks_hook.create_nodegroup(
        clusterName=self.cluster_name,
        nodegroupName=self.nodegroup_name,
        subnets=self.nodegroup_subnets,
        nodeRole=self.nodegroup_role_arn,
    )
def execute(self, context):
    eks_hook = EKSHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )

    eks_hook.create_cluster(
        name=self.cluster_name,
        roleArn=self.cluster_role_arn,
        resourcesVpcConfig=self.resources_vpc_config,
    )

    if not self.compute:
        return None

    self.log.info("Waiting for EKS Cluster to provision. This will take some time.")

    countdown = TIMEOUT_SECONDS
    while eks_hook.get_cluster_state(clusterName=self.cluster_name) != ClusterStates.ACTIVE:
        if countdown >= CHECK_INTERVAL_SECONDS:
            countdown -= CHECK_INTERVAL_SECONDS
            self.log.info(
                "Waiting for cluster to start. Checking again in %d seconds", CHECK_INTERVAL_SECONDS
            )
            sleep(CHECK_INTERVAL_SECONDS)
        else:
            message = (
                "Cluster is still inactive after the allocated time limit. "
                "Failed cluster will be torn down."
            )
            self.log.error(message)
            # If there is something preventing the cluster from activating, tear it down and abort.
            eks_hook.delete_cluster(name=self.cluster_name)
            raise RuntimeError(message)

    if self.compute == 'nodegroup':
        eks_hook.create_nodegroup(
            clusterName=self.cluster_name,
            nodegroupName=self.nodegroup_name,
            subnets=self.resources_vpc_config.get('subnetIds'),
            nodeRole=self.nodegroup_role_arn,
        )
    elif self.compute == 'fargate':
        eks_hook.create_fargate_profile(
            clusterName=self.cluster_name,
            fargateProfileName=self.fargate_profile_name,
            podExecutionRoleArn=self.fargate_pod_execution_role_arn,
            selectors=self.fargate_selectors,
        )
def poke(self, context):
    eks_hook = EKSHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )

    cluster_state = eks_hook.get_cluster_state(clusterName=self.cluster_name)
    self.log.info("Cluster state: %s", cluster_state)
    if cluster_state in (CLUSTER_TERMINAL_STATES - {self.target_state}):
        # If we reach a terminal state which is not the target state:
        raise AirflowException(
            UNEXPECTED_TERMINAL_STATE_MSG.format(
                current_state=cluster_state, target_state=self.target_state
            )
        )
    return cluster_state == self.target_state
def main():
    parser = get_parser()
    args = parser.parse_args()
    eks_hook = EKSHook(aws_conn_id=args.aws_conn_id, region_name=args.region_name)
    access_token = eks_hook.fetch_access_token_for_cluster(args.cluster_name)
    access_token_expiration = get_expiration_time()
    exec_credential_object = {
        "kind": "ExecCredential",
        "apiVersion": "client.authentication.k8s.io/v1alpha1",
        "spec": {},
        "status": {"expirationTimestamp": access_token_expiration, "token": access_token},
    }
    print(json.dumps(exec_credential_object))
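# The main() above calls two helpers, get_parser() and get_expiration_time(), that are not
# shown in this section. Below is a minimal sketch of what they might look like; the flag
# names, defaults, and the TOKEN_EXPIRATION_MINUTES constant are assumptions for
# illustration, not the provider's actual code.
import argparse
import datetime

TOKEN_EXPIRATION_MINUTES = 14  # assumed token lifetime


def get_expiration_time():
    # The ExecCredential status expects an RFC 3339 timestamp for expirationTimestamp.
    expiration = datetime.datetime.utcnow() + datetime.timedelta(minutes=TOKEN_EXPIRATION_MINUTES)
    return expiration.strftime("%Y-%m-%dT%H:%M:%SZ")


def get_parser():
    parser = argparse.ArgumentParser(
        description="Fetch a short-lived authentication token for an Amazon EKS cluster."
    )
    parser.add_argument("--cluster-name", required=True, help="Name of the target EKS cluster.")
    parser.add_argument("--aws-conn-id", default="aws_default", help="Airflow connection to use.")
    parser.add_argument("--region-name", default=None, help="AWS region of the cluster.")
    return parser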
def poke(self, context):
    eks_hook = EKSHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )

    nodegroup_state = eks_hook.get_nodegroup_state(
        clusterName=self.cluster_name, nodegroupName=self.nodegroup_name
    )
    self.log.info("Nodegroup state: %s", nodegroup_state)
    if nodegroup_state in (NODEGROUP_TERMINAL_STATES - {self.target_state}):
        # If we reach a terminal state which is not the target state:
        raise AirflowException(
            UNEXPECTED_TERMINAL_STATE_MSG.format(
                current_state=nodegroup_state, target_state=self.target_state
            )
        )
    return nodegroup_state == self.target_state
def poke(self, context):
    eks_hook = EKSHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )

    fargate_profile_state = eks_hook.get_fargate_profile_state(
        clusterName=self.cluster_name, fargateProfileName=self.fargate_profile_name
    )
    self.log.info("Fargate profile state: %s", fargate_profile_state)
    if fargate_profile_state in (FARGATE_TERMINAL_STATES - {self.target_state}):
        # If we reach a terminal state which is not the target state:
        raise AirflowException(
            UNEXPECTED_TERMINAL_STATE_MSG.format(
                current_state=fargate_profile_state, target_state=self.target_state
            )
        )
    return fargate_profile_state == self.target_state
def execute(self, context):
    eks_hook = EKSHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )

    if self.force_delete_compute:
        nodegroups = eks_hook.list_nodegroups(clusterName=self.cluster_name)
        nodegroup_count = len(nodegroups)
        if nodegroup_count:
            self.log.info(
                "A cluster can not be deleted with attached nodegroups. Deleting %d nodegroups.",
                nodegroup_count,
            )
            for group in nodegroups:
                eks_hook.delete_nodegroup(clusterName=self.cluster_name, nodegroupName=group)

            # Scaling up the timeout based on the number of nodegroups that are being processed.
            additional_seconds = 5 * 60
            countdown = TIMEOUT_SECONDS + (nodegroup_count * additional_seconds)
            while eks_hook.list_nodegroups(clusterName=self.cluster_name):
                if countdown >= CHECK_INTERVAL_SECONDS:
                    countdown -= CHECK_INTERVAL_SECONDS
                    sleep(CHECK_INTERVAL_SECONDS)
                    self.log.info(
                        "Waiting for the remaining %s nodegroups to delete. "
                        "Checking again in %d seconds.",
                        nodegroup_count,
                        CHECK_INTERVAL_SECONDS,
                    )
                else:
                    raise RuntimeError(
                        "Nodegroups are still inactive after the allocated time limit. Aborting."
                    )

    self.log.info("No nodegroups remain, deleting cluster.")
    eks_hook.delete_cluster(name=self.cluster_name)
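# Not part of the methods above: a sketch of how the create / wait / delete steps shown in
# this section might be chained in a DAG. The class names and import paths used here
# (EKSCreateClusterOperator, EKSClusterStateSensor, EKSDeleteClusterOperator) are assumptions
# inferred from the execute()/poke() bodies, and the ARNs and subnet IDs are hypothetical.
from datetime import datetime

from airflow import DAG
from airflow.providers.amazon.aws.operators.eks import (
    EKSCreateClusterOperator,
    EKSDeleteClusterOperator,
)
from airflow.providers.amazon.aws.sensors.eks import EKSClusterStateSensor

with DAG(dag_id="example_eks", start_date=datetime(2021, 1, 1), schedule_interval=None) as dag:
    # Create only the control plane; compute=None skips the nodegroup/Fargate branch above.
    create_cluster = EKSCreateClusterOperator(
        task_id="create_cluster",
        cluster_name="my-cluster",
        cluster_role_arn="arn:aws:iam::123456789012:role/eks-cluster-role",
        resources_vpc_config={"subnetIds": ["subnet-aaaa", "subnet-bbbb"]},
        compute=None,
    )

    # Polls cluster state with a poke() like the one above (assumes the sensor defaults to ACTIVE).
    wait_for_cluster = EKSClusterStateSensor(
        task_id="wait_for_cluster",
        cluster_name="my-cluster",
    )

    delete_cluster = EKSDeleteClusterOperator(
        task_id="delete_cluster",
        cluster_name="my-cluster",
    )

    create_cluster >> wait_for_cluster >> delete_cluster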