Example #1
    def post(self, request):
        request_ids = request.data.get('request_ids', [])
        run_ids = request.data.get('run_ids', [])
        job_group_id = request.data.get('job_group_id')
        pipeline_name = request.data['pipeline_name']
        pipeline = get_object_or_404(Pipeline, name=pipeline_name)

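        # Path 1: request ids supplied; queue one Celery job per request id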
        if request_ids:
            for request_id in request_ids:
                logging.info("Submitting requestId %s to pipeline %s" %
                             (request_id, pipeline_name))
                if not job_group_id:
                    job_group = JobGroup.objects.create()
                    job_group_id = str(job_group.id)
                create_jobs_from_request.delay(request_id,
                                               pipeline.operator_id,
                                               job_group_id)
            body = {"details": "Operator Job submitted %s" % str(request_ids)}
        else:
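            # Path 2: run ids supplied; build the operator for those runs and submit synchronously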
            if run_ids:
                operator_model = Operator.objects.get(id=pipeline.operator_id)
                if job_group_id:
                    operator = OperatorFactory.get_by_model(
                        operator_model,
                        run_ids=run_ids,
                        job_group_id=job_group_id)
                    create_jobs_from_operator(operator, job_group_id)
                    body = {
                        "details":
                        "Operator Job submitted to pipeline %s, job group id %s, with runs %s"
                        % (pipeline_name, job_group_id, str(run_ids))
                    }
                else:
                    operator = OperatorFactory.get_by_model(operator_model,
                                                            run_ids=run_ids)
                    create_jobs_from_operator(operator)
                    body = {
                        "details":
                        "Operator Job submitted to pipeline %s with runs %s" %
                        (pipeline_name, str(run_ids))
                    }
            else:
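                # Path 3: no request ids or run ids; run the operator's routine job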
                operator_model = Operator.objects.get(id=pipeline.operator_id)
                if job_group_id:
                    operator = OperatorFactory.get_by_model(
                        operator_model, job_group_id=job_group_id)
                    run_routine_operator_job(operator, job_group_id)
                    body = {
                        "details":
                        "Operator Job submitted to operator %s (JobGroupId: %s)"
                        % (operator, job_group_id)
                    }
                else:
                    operator = OperatorFactory.get_by_model(operator_model)
                    run_routine_operator_job(operator)
                    body = {
                        "details":
                        "Operator Job submitted to operator %s" % operator
                    }
        return Response(body, status=status.HTTP_200_OK)
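
A minimal sketch of calling the Example #1 endpoint with DRF's APIClient. The route, request id and pipeline name below are hypothetical placeholders; only the field names come from the view above, and authentication/URL routing are project-specific and omitted.

from rest_framework.test import APIClient

client = APIClient()
response = client.post(
    "/v0/run/operator/",                    # hypothetical route for this view
    {"request_ids": ["REQUEST_1"],          # placeholder IGO request id
     "pipeline_name": "example-pipeline"},  # must match an existing Pipeline.name
    format="json")
print(response.status_code, response.data)  # 200 with {"details": "Operator Job submitted ..."}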
Example #2
    def post(self, request):
        request_ids = request.data.get('request_ids')
        pipeline_name = request.data.get('pipeline')
        job_group_id = request.data.get('job_group_id', None)
        for_each = request.data.get('for_each', True)

        errors = []
        if not request_ids:
            errors.append('request_ids needs to be specified')
        if not pipeline_name:
            errors.append('pipeline needs to be specified')
        if errors:
            return Response({'details': errors},
                            status=status.HTTP_400_BAD_REQUEST)

        pipeline = get_object_or_404(Pipeline, name=pipeline_name)

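        # No job group supplied: create a fresh JobGroup for each request id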
        if not job_group_id:
            if for_each:
                for req in request_ids:
                    job_group = JobGroup.objects.create()
                    job_group_id = str(job_group.id)
                    logging.info("Submitting requestId %s to pipeline %s" %
                                 (req, pipeline))
                    create_jobs_from_request.delay(req,
                                                   pipeline.operator_id,
                                                   job_group_id,
                                                   pipeline=str(pipeline.id))
            else:
                return Response({'details': 'Not Implemented'},
                                status=status.HTTP_400_BAD_REQUEST)
        else:
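            # Job group supplied: reuse its notifier, or start one if it does not exist yet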
            if for_each:
                for req in request_ids:
                    logging.info("Submitting requestId %s to pipeline %s" %
                                 (req, pipeline))
                    try:
                        job_group_notifier = JobGroupNotifier.objects.get(
                            job_group_id=job_group_id,
                            notifier_type_id=pipeline.operator.notifier_id)
                        job_group_notifier_id = str(job_group_notifier.id)
                    except JobGroupNotifier.DoesNotExist:
                        job_group_notifier_id = notifier_start(
                            job_group_id, req, pipeline.operator)
                    create_jobs_from_request.delay(
                        req,
                        pipeline.operator_id,
                        job_group_id,
                        job_group_notifier_id=job_group_notifier_id,
                        pipeline=str(pipeline.id))
            else:
                return Response({'details': 'Not Implemented'},
                                status=status.HTTP_400_BAD_REQUEST)

        body = {"details": "Operator Job submitted %s" % str(request_ids)}
        return Response(body, status=status.HTTP_202_ACCEPTED)
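
A similar sketch for Example #2, which reads "pipeline" (not "pipeline_name") plus optional "job_group_id" and "for_each". The route and ids are placeholders.

from rest_framework.test import APIClient

client = APIClient()
response = client.post(
    "/v0/run/request/",                                        # hypothetical route
    {"request_ids": ["REQUEST_1", "REQUEST_2"],                # placeholder IGO request ids
     "pipeline": "example-pipeline",
     "job_group_id": "11111111-1111-1111-1111-111111111111"},  # reuse an existing JobGroup
    format="json")
# 202 on success; 400 with a details list when request_ids or pipeline is missing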
Example #3
def request_callback(request_id, job_group=None, job_group_notifier=None):
    jgn = None
    try:
        jgn = JobGroupNotifier.objects.get(id=job_group_notifier)
        logger.debug("[RequestCallback] JobGroupNotifier id: %s",
                     job_group_notifier)
    except JobGroupNotifier.DoesNotExist:
        logger.debug("[RequestCallback] JobGroupNotifier not set")
    job_group_notifier_id = str(jgn.id) if jgn else None
    assays = ETLConfiguration.objects.first()

    recipe = LIMSClient.get_request_samples(request_id).get("recipe", None)

    # Flag WES requests whose igo-complete sample jobs did not all finish
    sample_jobs = Job.objects.filter(job_group=job_group,
                                     run=TYPES["SAMPLE"],
                                     args__igocomplete=True).values("status")
    if (recipe in settings.WES_ASSAYS
            and not all(JobStatus(job["status"]) == JobStatus.COMPLETED
                        for job in sample_jobs)):
        wes_job_failed = WESJobFailedEvent(job_group_notifier_id, recipe)
        send_notification.delay(wes_job_failed.to_dict())

    if not recipe:
        raise FailedToSubmitToOperatorException(
            "Not enough metadata to choose the operator for requestId:%s" %
            request_id)

    if recipe not in assays.all_recipes:
        ci_review_e = SetCIReviewEvent(job_group_notifier_id).to_dict()
        send_notification.delay(ci_review_e)
        set_unknown_assay_label = SetLabelEvent(
            job_group_notifier_id, "unrecognized_assay").to_dict()
        send_notification.delay(set_unknown_assay_label)
        unknown_assay_event = UnknownAssayEvent(job_group_notifier_id,
                                                recipe).to_dict()
        send_notification.delay(unknown_assay_event)
        return []

    if recipe in assays.hold_recipes:
        admin_hold_event = AdminHoldEvent(job_group_notifier_id).to_dict()
        send_notification.delay(admin_hold_event)
        custom_capture_event = CustomCaptureCCEvent(job_group_notifier_id,
                                                    recipe).to_dict()
        send_notification.delay(custom_capture_event)
        return []

    if recipe in assays.disabled_recipes:
        not_for_ci = NotForCIReviewEvent(job_group_notifier_id).to_dict()
        send_notification.delay(not_for_ci)
        disabled_assay_event = DisabledAssayEvent(job_group_notifier_id,
                                                  recipe).to_dict()
        send_notification.delay(disabled_assay_event)
        return []

    if len(FileRepository.filter(metadata={"requestId": request_id},
                                 values_metadata="recipe").all()) == 0:
        no_samples_event = AdminHoldEvent(job_group_notifier_id).to_dict()
        send_notification.delay(no_samples_event)
        return []

    if not all([
            JobStatus(job["status"]) == JobStatus.COMPLETED
            for job in Job.objects.filter(job_group=job_group).values("status")
    ]):
        ci_review_e = SetCIReviewEvent(job_group_notifier_id).to_dict()
        send_notification.delay(ci_review_e)

    lab_head_email = FileRepository.filter(
        metadata={"requestId": request_id},
        values_metadata="labHeadEmail").first()
    try:
        if lab_head_email.split("@")[1] != "mskcc.org":
            event = ExternalEmailEvent(job_group_notifier_id,
                                       request_id).to_dict()
            send_notification.delay(event)
    except Exception:
        logger.error("Failed to check labHeadEmail")

    if len(FileRepository.filter(metadata={"requestId": request_id,
                                           "tumorOrNormal": "Tumor"})) == 0:
        only_normal_samples_event = OnlyNormalSamplesEvent(
            job_group_notifier_id, request_id).to_dict()
        send_notification.delay(only_normal_samples_event)
        if recipe in settings.ASSAYS_ADMIN_HOLD_ONLY_NORMALS:
            admin_hold_event = AdminHoldEvent(job_group_notifier_id).to_dict()
            send_notification.delay(admin_hold_event)
            return []

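    # Hand the request off to every operator registered for this recipe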
    operators = Operator.objects.filter(recipes__overlap=[recipe])

    if not operators:
        # TODO: Import ticket will have CIReviewNeeded
        msg = "No operator defined for requestId %s with recipe %s" % (
            request_id, recipe)
        logger.error(msg)
        e = OperatorRequestEvent(job_group_notifier_id,
                                 "[CIReviewEvent] %s" % msg).to_dict()
        send_notification.delay(e)
        ci_review_e = SetCIReviewEvent(job_group_notifier_id).to_dict()
        send_notification.delay(ci_review_e)
        raise FailedToSubmitToOperatorException(msg)
    for operator in operators:
        if not operator.active:
            msg = "Operator not active: %s" % operator.class_name
            logger.info(msg)
            e = OperatorRequestEvent(job_group_notifier_id,
                                     "[CIReviewEvent] %s" % msg).to_dict()
            send_notification.delay(e)
            error_label = SetLabelEvent(job_group_notifier_id,
                                        "operator_inactive").to_dict()
            send_notification.delay(error_label)
            ci_review_e = SetCIReviewEvent(job_group_notifier_id).to_dict()
            send_notification.delay(ci_review_e)
        else:
            logger.info("Submitting request_id %s to %s operator" %
                        (request_id, operator.class_name))
            if Job.objects.filter(job_group=job_group,
                                  args__request_id=request_id,
                                  run=TYPES["SAMPLE"],
                                  status=JobStatus.FAILED).all():
                partially_complete_event = ETLImportPartiallyCompleteEvent(
                    job_notifier=job_group_notifier_id).to_dict()
                send_notification.delay(partially_complete_event)
            else:
                complete_event = ETLImportCompleteEvent(
                    job_notifier=job_group_notifier_id).to_dict()
                send_notification.delay(complete_event)

            create_jobs_from_request.delay(request_id, operator.id, job_group)
    return []
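
A minimal sketch of invoking request_callback directly. The ids below are placeholders; in the surrounding project the function appears to run after a request's ETL import jobs finish (inferred from the ETLImport* events it emits), so a real call needs an existing JobGroup and JobGroupNotifier.

request_callback(
    "REQUEST_1",                                                 # placeholder request id
    job_group="11111111-1111-1111-1111-111111111111",            # placeholder JobGroup id
    job_group_notifier="22222222-2222-2222-2222-222222222222")   # placeholder notifier id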