Example #1
    def test_get_cluster_id_by_name(self):
        """
        Test that we can resolve cluster id by cluster name.
        """
        hook = EmrHook(aws_conn_id='aws_default', emr_conn_id='emr_default')

        job_flow = hook.create_job_flow({'Name': 'test_cluster',
                                         'Instances': {'KeepJobFlowAliveWhenNoSteps': True}})

        job_flow_id = job_flow['JobFlowId']

        matching_cluster = hook.get_cluster_id_by_name('test_cluster', ['RUNNING', 'WAITING'])

        self.assertEqual(matching_cluster, job_flow_id)

        no_match = hook.get_cluster_id_by_name('foo', ['RUNNING', 'WAITING', 'BOOTSTRAPPING'])

        self.assertIsNone(no_match)
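
The same lookup can be used directly against a hook outside a test. A minimal sketch, assuming a configured 'aws_default' connection and an existing cluster named 'my_cluster' (both placeholders); the EmrHook import path varies by Airflow version, the providers path is shown here:

from airflow.providers.amazon.aws.hooks.emr import EmrHook

# Resolve a cluster id from its name; only clusters in the given states are considered.
hook = EmrHook(aws_conn_id='aws_default')
cluster_id = hook.get_cluster_id_by_name('my_cluster', ['RUNNING', 'WAITING'])
print(cluster_id)  # e.g. a 'j-...' id, or None if nothing matches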
    def execute(self, context):
        attempt = context['ti'].try_number
        self.log.info('attempt: %s', attempt)
        # get_cluster_id_by_name is a hook method, so keep a reference to the hook
        # as well as the underlying boto3 client returned by get_conn().
        emr_hook = EmrHook(aws_conn_id=self.aws_conn_id)
        emr = emr_hook.get_conn()

        job_flow_id = self.job_flow_id

        if not job_flow_id:
            job_flow_id = emr_hook.get_cluster_id_by_name(self.job_flow_name,
                                                          self.cluster_states)

        if self.do_xcom_push:
            context['ti'].xcom_push(key='job_flow_id', value=job_flow_id)

        step_name = self.step_name if attempt == 1 else "{} (attempt {})".format(
            self.step_name, attempt)

        action_on_failure = self.action_on_failure
        if attempt % 3 == 0:
            action_on_failure = 'TERMINATE_JOB_FLOW'

        spark_conf = self.get_spark_params_config(self.spark_params,
                                                  self.spark_conf)

        steps = self.generate_spark_step(step_name, self.main_class,
                                         self.app_name, spark_conf,
                                         self.application_args, self.jar_path,
                                         action_on_failure)
        logging.info("spark_params: " + str(steps))

        self.log.info('Adding steps to %s', job_flow_id)
        response = emr.add_job_flow_steps(JobFlowId=job_flow_id, Steps=steps)

        self.log.info('Running Spark Job %s with JobFlow ID %s',
                      self.task_id, job_flow_id)
        while True:
            step_id = response['StepIds'][0]
            self.log.info('step id - %s', step_id)
            result = self.describe_step(emr, response)
            step_status = result['Step']['Status']['State']
            self.log.info('step status - %s', step_status)
            # step state can be 'PENDING'|'CANCEL_PENDING'|'RUNNING'|'COMPLETED'|'CANCELLED'|'FAILED'|'INTERRUPTED'
            if step_status == 'COMPLETED':
                break
            if step_status not in ('PENDING', 'RUNNING'):
                raise AirflowException('Spark job {} has failed'.format(self.task_id))

            self.log.info("Spark Job '%s' status is %s", self.task_id, step_status)
Example #3
    def execute(self, context):
        # Keep the hook itself: get_cluster_id_by_name is defined on the hook,
        # while add_job_flow_steps is called on the boto3 client from get_conn().
        emr_hook = EmrHook(aws_conn_id=self.aws_conn_id)
        emr = emr_hook.get_conn()

        job_flow_id = self.job_flow_id

        if not job_flow_id:
            job_flow_id = emr_hook.get_cluster_id_by_name(self.job_flow_name, self.cluster_states)

        if self.do_xcom_push:
            context['ti'].xcom_push(key='job_flow_id', value=job_flow_id)

        self.log.info('Adding steps to %s', job_flow_id)
        response = emr.add_job_flow_steps(JobFlowId=job_flow_id, Steps=self.steps)

        if response['ResponseMetadata']['HTTPStatusCode'] != 200:
            raise AirflowException('Adding steps failed: %s' % response)
        else:
            self.log.info('Steps %s added to JobFlow', response['StepIds'])
            return response['StepIds']
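
This execute body closely mirrors Airflow's built-in EmrAddStepsOperator, so the operator can be wired into a DAG roughly as below; the connection id, cluster name, states, and SPARK_STEPS list are assumptions, and the exact import path depends on the Airflow/provider version:

from airflow.providers.amazon.aws.operators.emr import EmrAddStepsOperator

add_steps = EmrAddStepsOperator(
    task_id='add_steps',
    job_flow_name='my_cluster',              # resolved to a cluster id by name
    cluster_states=['RUNNING', 'WAITING'],   # only consider clusters in these states
    aws_conn_id='aws_default',
    steps=SPARK_STEPS,                       # list of step dicts as in the sketch above
)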
Example #4
    def execute(self, context: Dict[str, Any]) -> List[str]:
        emr_hook = EmrHook(aws_conn_id=self.aws_conn_id)

        emr = emr_hook.get_conn()

        job_flow_id = self.job_flow_id or emr_hook.get_cluster_id_by_name(
            str(self.job_flow_name), self.cluster_states
        )

        if not job_flow_id:
            raise AirflowException(f"No cluster found for name: {self.job_flow_name}")

        if self.do_xcom_push:
            context["ti"].xcom_push(key="job_flow_id", value=job_flow_id)

        self.log.info("Adding steps to %s", job_flow_id)

        # steps may arrive as a string representing a list
        # e.g. if we used XCom or a file then: steps="[{ step1 }, { step2 }]"
        steps = self.steps
        if isinstance(steps, str):
            steps = ast.literal_eval(steps)

        response = emr.add_job_flow_steps(JobFlowId=job_flow_id, Steps=steps)

        if not response["ResponseMetadata"]["HTTPStatusCode"] == 200:
            raise AirflowException("Adding steps failed: %s" % response)
        else:
            # Assumption: only a single step is submitted each time.
            step_ids = response["StepIds"]
            step_id = step_ids[0]
            if self.wait_for_completion:
                self.check_status(
                    job_flow_id,
                    step_id,
                    self.describe_step,
                    self.check_interval,
                )
            self.log.info("Steps %s added to JobFlow", response["StepIds"])
            return response["StepIds"]