def execute(self, context, session=None):
        dag_id = context['ti'].dag_id
        tasks = self.xcom_pull(context, self.task_id_collector, dag_id,
                               self.xcom_tasks_key)
        queue_url = self.xcom_pull(context, self.task_id_collector, dag_id,
                                   self.xcom_sqs_queue_url_key)
        sqs_client = SQSHook(aws_conn_id=self.aws_conn_id).get_conn()
        self.log.info('Trying to push %d messages on queue: %s', len(tasks),
                      queue_url)
        entries = [{
            'Id': str(task.id),
            'MessageBody': task.request_data,
            'MessageGroupId': task.task_id,
            'MessageDeduplicationId': str(task.id)
        } for task in tasks]
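        # Note: MessageGroupId/MessageDeduplicationId are FIFO-queue-only
        # fields, and SQS caps send_message_batch at 10 entries per call;
        # this snippet does not chunk larger task lists.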
        try:
            response = sqs_client.send_message_batch(QueueUrl=queue_url,
                                                     Entries=entries)
        except Exception:
            self.log.exception(
                'SQS Send message API failed for "%s" queue!\nRequest Entries: %s',
                queue_url, str(entries))

            self.log.info("Setting the tasks up for reschedule!")
            self._set_task_states([task.id for task in tasks],
                                  State.UP_FOR_RESCHEDULE,
                                  session=session)
            session.commit()

            raise

        success_resps = response.get('Successful', [])
        failed_resps = response.get('Failed', [])
        if success_resps:
            self.log.info('Successfully pushed %d messages!',
                          len(success_resps))
            self._set_task_states([int(resp['Id']) for resp in success_resps],
                                  State.QUEUED,
                                  session=session)
            jobs = [
                ErgoJob(resp['MessageId'], int(resp['Id']))
                for resp in success_resps
            ]
            session.add_all(jobs)
        if failed_resps:
            self.log.error('Failed to push %d messages!', len(failed_resps))
            self._set_task_states([int(resp['Id']) for resp in failed_resps],
                                  State.UP_FOR_RESCHEDULE,
                                  session=session)
        session.commit()
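
The _set_task_states helper called above is not included in this snippet. Judging from how Example #2 below performs the same update inline, it plausibly looks like the sketch that follows; the body is inferred, not taken from the project's code.

def _set_task_states(self, task_ids, state, session=None):
    # Assumed behavior: load the matching ErgoTask rows in the provided
    # session and update their state; the caller commits afterwards.
    tasks = session.query(ErgoTask).filter(ErgoTask.id.in_(task_ids))
    for task in tasks:
        task.state = state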
Example #2
def execute(self, context, session=None):
    dag_id = context['ti'].dag_id
    tasks = self.xcom_pull(context, self.task_id_collector, dag_id,
                           self.xcom_tasks_key)
    sqs_client = SQSHook(aws_conn_id=self.aws_conn_id).get_conn()
    self.log.info(
        'SqsTaskPusherOperator trying to push %d messages on queue: %s',
        len(tasks), self.sqs_queue_url)
    entries = [{
        'Id': str(task.id),
        'MessageBody': task.request_data,
        'MessageGroupId': task.task_id,
        'MessageDeduplicationId': str(task.id)
    } for task in tasks]
    response = sqs_client.send_message_batch(QueueUrl=self.sqs_queue_url,
                                             Entries=entries)
    success_resps = response.get('Successful', [])
    failed_resps = response.get('Failed', [])
    if success_resps:
        self.log.info(
            'SqsTaskPusherOperator successfully pushed %d messages!',
            len(success_resps))
        success_tasks = session.query(ErgoTask).filter(
            ErgoTask.id.in_([int(resp['Id']) for resp in success_resps]))
        for task in success_tasks:
            task.state = State.QUEUED
        jobs = [
            ErgoJob(resp['MessageId'], int(resp['Id']))
            for resp in success_resps
        ]
        session.add_all(jobs)
    if failed_resps:
        self.log.error('SqsTaskPusherOperator failed to push %d messages!',
                       len(failed_resps))
        failed_tasks = session.query(ErgoTask).filter(
            ErgoTask.id.in_([int(resp['Id']) for resp in failed_resps]))
        for task in failed_tasks:
            task.state = State.UP_FOR_RETRY
    session.commit()
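
Both variants rely on the boto3 send_message_batch response shape: each 'Successful' entry echoes the caller-supplied 'Id' alongside the SQS-assigned 'MessageId', while each 'Failed' entry carries 'Id', 'Code', and 'Message'. Because the entries set MessageGroupId and MessageDeduplicationId, the target queue must be a FIFO queue; standard queues reject those fields. For context, a hypothetical DAG wiring of the operator; the constructor arguments are inferred from the attributes execute() reads and may not match the real signature.

from datetime import datetime
from airflow import DAG

with DAG('ergo_pusher_example', start_date=datetime(2021, 1, 1),
         schedule_interval=None) as dag:
    push_tasks = SqsTaskPusherOperator(
        task_id='push_tasks',
        # Assumed: upstream task that xcom-pushes the ErgoTask list
        task_id_collector='collect_tasks',
        # Assumed: must end in .fifo given the FIFO-only message fields
        sqs_queue_url='https://sqs.us-east-1.amazonaws.com/123456789012/ergo-tasks.fifo',
        aws_conn_id='aws_default',
    )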