def execute(self, context: 'Context'):
    """Create an Amazon EKS managed nodegroup on the configured cluster.

    ``nodegroup_subnets`` may arrive as a real list or (e.g. after template
    rendering) as a string holding the repr of a Python list; the string form
    is parsed here so the hook always receives a list.

    :param context: the Airflow task context (unused here).
    """
    if isinstance(self.nodegroup_subnets, str):
        nodegroup_subnets_list: List[str] = []
        if self.nodegroup_subnets != "":
            try:
                nodegroup_subnets_list = cast(
                    List, literal_eval(self.nodegroup_subnets))
            # literal_eval raises ValueError for malformed literal nodes and
            # SyntaxError for strings that do not parse at all (e.g. "[subnet");
            # treat both the same way and fall back to the empty default
            # instead of letting SyntaxError crash the task.
            except (ValueError, SyntaxError):
                self.log.warning(
                    "The nodegroup_subnets should be List or string representing "
                    "Python list and is %s. Defaulting to []",
                    self.nodegroup_subnets,
                )
        self.nodegroup_subnets = nodegroup_subnets_list

    eks_hook = EksHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )

    eks_hook.create_nodegroup(
        clusterName=self.cluster_name,
        nodegroupName=self.nodegroup_name,
        subnets=self.nodegroup_subnets,
        nodeRole=self.nodegroup_role_arn,
        **self.create_nodegroup_kwargs,
    )
def execute(self, context: 'Context'):
    """Delete the Amazon EKS managed nodegroup named by this operator."""
    hook = EksHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )
    hook.delete_nodegroup(
        clusterName=self.cluster_name,
        nodegroupName=self.nodegroup_name,
    )
def execute(self, context: 'Context'):
    """Run the parent pod operator against the EKS cluster.

    A kubeconfig for the cluster is generated on the fly; it is only valid
    inside the context manager, so the parent ``execute`` runs within it.
    """
    hook = EksHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )
    # Bind the generated file to self.config_file so the parent operator
    # picks it up while the config is still alive.
    with hook.generate_config_file(
        eks_cluster_name=self.cluster_name,
        pod_namespace=self.namespace,
    ) as self.config_file:
        return super().execute(context)
def execute(self, context: 'Context'):
    """Delete the AWS Fargate profile named by this operator."""
    hook = EksHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )
    hook.delete_fargate_profile(
        clusterName=self.cluster_name,
        fargateProfileName=self.fargate_profile_name,
    )
def execute(self, context: 'Context'):
    """Delete the EKS cluster, optionally removing attached compute first.

    When ``force_delete_compute`` is set, any existing nodegroups and
    Fargate profiles are deleted before the cluster itself.
    """
    hook = EksHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )

    if self.force_delete_compute:
        self.delete_any_nodegroups(hook)
        self.delete_any_fargate_profiles(hook)

    hook.delete_cluster(name=self.cluster_name)
def execute(self, context):
    """Create an AWS Fargate profile on the configured EKS cluster."""
    profile_kwargs = dict(
        clusterName=self.cluster_name,
        fargateProfileName=self.fargate_profile_name,
        podExecutionRoleArn=self.pod_execution_role_arn,
        selectors=self.selectors,
    )
    hook = EksHook(aws_conn_id=self.aws_conn_id, region_name=self.region)
    hook.create_fargate_profile(**profile_kwargs)
def execute(self, context):
    """Create an Amazon EKS managed nodegroup on the configured cluster."""
    nodegroup_kwargs = dict(
        clusterName=self.cluster_name,
        nodegroupName=self.nodegroup_name,
        subnets=self.nodegroup_subnets,
        nodeRole=self.nodegroup_role_arn,
    )
    hook = EksHook(aws_conn_id=self.aws_conn_id, region_name=self.region)
    hook.create_nodegroup(**nodegroup_kwargs)
def main():
    """Fetch an EKS access token and print it as an ExecCredential document.

    Builds a ``client.authentication.k8s.io/v1alpha1`` ExecCredential JSON
    object containing the token and its expiration timestamp and writes it
    to stdout — presumably for consumption by a kubectl exec credential
    plugin (confirm against the callers of this script).
    """
    args = get_parser().parse_args()

    hook = EksHook(aws_conn_id=args.aws_conn_id, region_name=args.region_name)
    token = hook.fetch_access_token_for_cluster(args.cluster_name)
    expiration = get_expiration_time()

    exec_credential_object = {
        "kind": "ExecCredential",
        "apiVersion": "client.authentication.k8s.io/v1alpha1",
        "spec": {},
        "status": {"expirationTimestamp": expiration, "token": token},
    }
    print(json.dumps(exec_credential_object))
def poke(self, context: 'Context'):
    """Return True once the EKS cluster reaches the target state.

    Raises ``AirflowException`` if the cluster lands in a terminal state
    other than the one being waited for, since no further change is coming.
    """
    hook = EksHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )
    current_state = hook.get_cluster_state(clusterName=self.cluster_name)
    self.log.info("Cluster state: %s", current_state)

    unexpected_terminal_states = CLUSTER_TERMINAL_STATES - {self.target_state}
    if current_state in unexpected_terminal_states:
        raise AirflowException(
            UNEXPECTED_TERMINAL_STATE_MSG.format(
                current_state=current_state, target_state=self.target_state
            )
        )
    return current_state == self.target_state
def poke(self, context: 'Context'):
    """Return True once the EKS nodegroup reaches the target state.

    Raises ``AirflowException`` if the nodegroup lands in a terminal state
    other than the one being waited for, since no further change is coming.
    """
    hook = EksHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )
    current_state = hook.get_nodegroup_state(
        clusterName=self.cluster_name,
        nodegroupName=self.nodegroup_name,
    )
    self.log.info("Nodegroup state: %s", current_state)

    unexpected_terminal_states = NODEGROUP_TERMINAL_STATES - {self.target_state}
    if current_state in unexpected_terminal_states:
        raise AirflowException(
            UNEXPECTED_TERMINAL_STATE_MSG.format(
                current_state=current_state, target_state=self.target_state
            )
        )
    return current_state == self.target_state
def poke(self, context: 'Context'):
    """Return True once the Fargate profile reaches the target state.

    Raises ``AirflowException`` if the profile lands in a terminal state
    other than the one being waited for, since no further change is coming.
    """
    hook = EksHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )
    current_state = hook.get_fargate_profile_state(
        clusterName=self.cluster_name,
        fargateProfileName=self.fargate_profile_name,
    )
    self.log.info("Fargate profile state: %s", current_state)

    unexpected_terminal_states = FARGATE_TERMINAL_STATES - {self.target_state}
    if current_state in unexpected_terminal_states:
        raise AirflowException(
            UNEXPECTED_TERMINAL_STATE_MSG.format(
                current_state=current_state, target_state=self.target_state
            )
        )
    return current_state == self.target_state
def execute(self, context: 'Context'):
    """Create an EKS cluster and, optionally, compute resources on top of it.

    The work is: validate the compute configuration, create the cluster,
    and — when a compute type is requested — wait for the cluster to become
    ACTIVE before creating the nodegroup or Fargate profile. Decomposed into
    private helpers; behavior is unchanged.

    :param context: the Airflow task context (unused here).
    :raises ValueError: on an unsupported or incomplete compute configuration.
    :raises RuntimeError: if the cluster does not activate within the time
        limit (the failed cluster is torn down first).
    """
    self._validate_compute_config()

    eks_hook = EksHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )

    eks_hook.create_cluster(
        name=self.cluster_name,
        roleArn=self.cluster_role_arn,
        resourcesVpcConfig=self.resources_vpc_config,
        **self.create_cluster_kwargs,
    )

    if not self.compute:
        return None

    self._wait_for_cluster_active(eks_hook)
    self._create_compute_resources(eks_hook)

def _validate_compute_config(self) -> None:
    """Reject unsupported compute types and compute configs missing their role ARN."""
    if self.compute:
        if self.compute not in SUPPORTED_COMPUTE_VALUES:
            raise ValueError("Provided compute type is not supported.")
        elif (self.compute == 'nodegroup') and not self.nodegroup_role_arn:
            raise ValueError(
                MISSING_ARN_MSG.format(
                    compute=NODEGROUP_FULL_NAME, requirement='nodegroup_role_arn'))
        elif (self.compute == 'fargate') and not self.fargate_pod_execution_role_arn:
            raise ValueError(
                MISSING_ARN_MSG.format(
                    compute=FARGATE_FULL_NAME,
                    requirement='fargate_pod_execution_role_arn'))

def _wait_for_cluster_active(self, eks_hook) -> None:
    """Poll until the cluster is ACTIVE; on timeout, tear it down and raise."""
    self.log.info(
        "Waiting for EKS Cluster to provision. This will take some time.")

    countdown = TIMEOUT_SECONDS
    while eks_hook.get_cluster_state(
            clusterName=self.cluster_name) != ClusterStates.ACTIVE:
        if countdown >= CHECK_INTERVAL_SECONDS:
            countdown -= CHECK_INTERVAL_SECONDS
            self.log.info(
                "Waiting for cluster to start. Checking again in %d seconds",
                CHECK_INTERVAL_SECONDS)
            sleep(CHECK_INTERVAL_SECONDS)
        else:
            message = (
                "Cluster is still inactive after the allocated time limit. "
                "Failed cluster will be torn down.")
            self.log.error(message)
            # If there is something preventing the cluster from activating,
            # tear it down and abort.
            eks_hook.delete_cluster(name=self.cluster_name)
            raise RuntimeError(message)

def _create_compute_resources(self, eks_hook) -> None:
    """Create the requested nodegroup or Fargate profile on the new cluster."""
    if self.compute == 'nodegroup':
        eks_hook.create_nodegroup(
            clusterName=self.cluster_name,
            nodegroupName=self.nodegroup_name,
            subnets=cast(List[str], self.resources_vpc_config.get('subnetIds')),
            nodeRole=self.nodegroup_role_arn,
            **self.create_nodegroup_kwargs,
        )
    elif self.compute == 'fargate':
        eks_hook.create_fargate_profile(
            clusterName=self.cluster_name,
            fargateProfileName=self.fargate_profile_name,
            podExecutionRoleArn=self.fargate_pod_execution_role_arn,
            selectors=self.fargate_selectors,
            **self.create_fargate_profile_kwargs,
        )
def execute(self, context):
    """Create an EKS cluster; optionally wait for it and attach compute.

    When no compute type is configured, returns immediately after the
    cluster-creation request. Otherwise polls until the cluster is ACTIVE
    (tearing it down and raising ``RuntimeError`` on timeout), then creates
    the requested nodegroup or Fargate profile.
    """
    hook = EksHook(
        aws_conn_id=self.aws_conn_id,
        region_name=self.region,
    )
    hook.create_cluster(
        name=self.cluster_name,
        roleArn=self.cluster_role_arn,
        resourcesVpcConfig=self.resources_vpc_config,
    )

    if not self.compute:
        return None

    self.log.info(
        "Waiting for EKS Cluster to provision. This will take some time.")

    seconds_remaining = TIMEOUT_SECONDS
    while hook.get_cluster_state(
            clusterName=self.cluster_name) != ClusterStates.ACTIVE:
        if seconds_remaining < CHECK_INTERVAL_SECONDS:
            # Out of time: something is preventing the cluster from
            # activating, so tear it down and abort.
            message = (
                "Cluster is still inactive after the allocated time limit. "
                "Failed cluster will be torn down.")
            self.log.error(message)
            hook.delete_cluster(name=self.cluster_name)
            raise RuntimeError(message)
        seconds_remaining -= CHECK_INTERVAL_SECONDS
        self.log.info(
            "Waiting for cluster to start. Checking again in %d seconds",
            CHECK_INTERVAL_SECONDS)
        sleep(CHECK_INTERVAL_SECONDS)

    if self.compute == 'nodegroup':
        hook.create_nodegroup(
            clusterName=self.cluster_name,
            nodegroupName=self.nodegroup_name,
            subnets=self.resources_vpc_config.get('subnetIds'),
            nodeRole=self.nodegroup_role_arn,
        )
    elif self.compute == 'fargate':
        hook.create_fargate_profile(
            clusterName=self.cluster_name,
            fargateProfileName=self.fargate_profile_name,
            podExecutionRoleArn=self.fargate_pod_execution_role_arn,
            selectors=self.fargate_selectors,
        )