Example #1
def fail_job(self,
             run_id,
             error_message,
             lsf_log_location=None,
             input_json_location=None):
    lock_id = "run_lock_%s" % run_id
    with memcache_task_lock(lock_id, self.app.oid) as acquired:
        if acquired:
            run = RunObjectFactory.from_db(run_id)
            if run.run_obj.is_failed:
                logger.info(
                    format_log("Run Fail already processed", obj=run.run_obj))
                return

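            # set_for_restart() yields (run_id, output_directory, execution_id) when the run can be resubmitted; falsy otherwise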
            restart_run = run.run_obj.set_for_restart()

            if not restart_run:
                run.fail(error_message)
                run.to_db()

                job_group_notifier = run.job_group_notifier
                job_group_notifier_id = str(
                    job_group_notifier.id) if job_group_notifier else None

                ci_review = SetCIReviewEvent(job_group_notifier_id).to_dict()
                send_notification.delay(ci_review)

                _upload_qc_report(run.run_obj)
                _job_finished_notify(run, lsf_log_location,
                                     input_json_location)
            else:
                run_id, output_directory, execution_id = restart_run
                submit_job.delay(run_id, output_directory, execution_id)
        else:
            logger.warning("Run %s is processing by another worker" % run_id)
Example #2
    def evaluate_sample_errors(self, error_samples):
        s = list()
        unformatted_s = list()
        unformatted_s.append(
            "IGO Sample ID\tSample Name / Error\tPatient ID\tSpecimen Type\n")
        for sample in error_samples:
            s.append("| " + sample["sample_id"] + " | " +
                     sample["sample_name"] + " |" + sample["patient_id"] +
                     " |" + sample["specimen_type"] + " |")
            unformatted_s.append(sample["sample_id"] + "\t" +
                                 sample["sample_name"] + "\t" +
                                 sample["patient_id"] + "\t" +
                                 sample["specimen_type"] + "\n")

        msg = """
        Number of samples with error: {number_of_errors}

        Error samples (also see error_sample_formatting.txt):
        | IGO Sample ID | Sample Name / Error | Patient ID | Specimen Type |
        {error_sample_names}
        """

        msg = msg.format(number_of_errors=str(len(error_samples)),
                         error_sample_names="\n".join(s))

        self.send_message(msg)

        sample_errors_event = UploadAttachmentEvent(
            self.job_group_notifier_id, "error_sample_formatting.txt",
            "".join(unformatted_s)).to_dict()
        send_notification.delay(sample_errors_event)
Example #3
    def evaluate_sample_errors(self, error_samples):
        s = list()
        unformatted_s = list()
        unformatted_s.append(
            "IGO Sample ID\tSample Name / Error\tPatient ID\tSpecimen Type\n")
        for sample in error_samples:
            sample_name = sample.get('SM', "missingSampleName")
            sample_id = sample.get('sample_id', 'missingSampleId')
            patient_id = sample.get('patient_id', 'missingPatientId')
            specimen_type = sample.get('specimen_type', 'missingSpecimenType')
            s.append("| " + sample_id + " | " + sample_name + " |" +
                     patient_id + " |" + specimen_type + " |")
            unformatted_s.append(sample_id + "\t" + sample_name + "\t" +
                                 patient_id + "\t" + specimen_type + "\n")

        msg = """
        Number of samples with error: {number_of_errors}

        Error samples (also see error_sample_formatting.txt):
        | IGO Sample ID | Sample Name / Error | Patient ID | Specimen Type |
        {error_sample_names}
        """

        msg = msg.format(number_of_errors=str(len(error_samples)),
                         error_sample_names='\n'.join(s))

        self.send_message(msg)

        sample_errors_event = UploadAttachmentEvent(
            self.job_group_notifier_id, 'error_sample_formatting.txt',
            "".join(unformatted_s)).to_dict()
        send_notification.delay(sample_errors_event)
Example #4
    def _job_failed(self, permission_denied=False, recipe=None):
        if self.job.run == TYPES["REQUEST"]:
            if permission_denied:
                cc = settings.PERMISSION_DENIED_CC.get(recipe, "")
                permission_denied_event = PermissionDeniedEvent(
                    self.job.job_group_notifier.id,
                    "Failed to copy files because of the Permission denied issue",
                    cc).to_dict()
                send_notification.delay(permission_denied_event)
                emails = settings.PERMISSION_DENIED_EMAILS.get(recipe, "").split(",")
                for email in emails:
                    content = (
                        "Request failed to be imported because some files don't have proper permissions. "
                        "Check more details on %s/v0/etl/jobs/%s/" %
                        (settings.BEAGLE_URL, str(self.job.id)))
                    email = SendEmailEvent(
                        job_notifier=settings.BEAGLE_NOTIFIER_EMAIL_GROUP,
                        email_to=email,
                        email_from=settings.BEAGLE_NOTIFIER_EMAIL_FROM,
                        subject="Permission Denied for request_id: %s" %
                        self.job.args.get("request_id"),
                        content=content,
                    )
                    send_notification.delay(email.to_dict())
            self._generate_ticket_decription()
Example #5
def _job_finished_notify(run):
    job_group = run.job_group
    job_group_notifier = run.job_group_notifier
    job_group_notifier_id = str(
        job_group_notifier.id) if job_group_notifier else None

    pipeline_name = run.run_obj.app.name
    pipeline_link = run.run_obj.app.pipeline_link

    if run.run_obj.operator_run:
        operator_run_id = str(run.run_obj.operator_run.id)
        total_runs = run.run_obj.operator_run.total_runs
        completed_runs = run.run_obj.operator_run.completed_runs
        failed_runs = run.run_obj.operator_run.failed_runs
        running_runs = run.run_obj.operator_run.running_runs
    else:
        operator_run_id = None
        total_runs = 1
        if run.status == RunStatus.COMPLETED:
            completed_runs, failed_runs = 1, 0
        else:
            completed_runs, failed_runs = 0, 1
        running_runs = 0

    event = RunFinishedEvent(job_group_notifier_id,
                             run.tags.get('requestId', 'UNKNOWN REQUEST'),
                             str(run.run_id), pipeline_name, pipeline_link,
                             run.run_obj.output_directory,
                             RunStatus(run.status).name, run.tags,
                             running_runs, completed_runs, failed_runs,
                             total_runs, operator_run_id)
    e = event.to_dict()
    send_notification.delay(e)
Example #6
def create_request_job(request_id, redelivery=False):
    logger.info(
        "Searching for job: %s for request_id: %s" % (TYPES['REQUEST'], request_id))
    count = Job.objects.filter(run=TYPES['REQUEST'], args__request_id=request_id,
                               status__in=[JobStatus.CREATED, JobStatus.IN_PROGRESS,
                                           JobStatus.WAITING_FOR_CHILDREN]).count()
    request_redelivered = Job.objects.filter(run=TYPES['REQUEST'], args__request_id=request_id).count() > 0

    assays = ETLConfiguration.objects.first()

    if request_redelivered and not (assays.redelivery and redelivery):
        return None, "Request is redelivered, but redelivery deactivated"

    if count == 0:
        job_group = JobGroup()
        job_group.save()
        job_group_notifier_id = notifier_start(job_group, request_id)
        job_group_notifier = JobGroupNotifier.objects.get(id=job_group_notifier_id)
        job = Job(run=TYPES['REQUEST'],
                  args={'request_id': request_id, 'job_group': str(job_group.id),
                        'job_group_notifier': job_group_notifier_id, 'redelivery': request_redelivered},
                  status=JobStatus.CREATED,
                  max_retry=1,
                  children=[],
                  callback=TYPES['REQUEST_CALLBACK'],
                  callback_args={'request_id': request_id, 'job_group': str(job_group.id),
                                 'job_group_notifier': job_group_notifier_id},
                  job_group=job_group,
                  job_group_notifier=job_group_notifier)
        job.save()
        if request_redelivered:
            redelivery_event = RedeliveryEvent(job_group_notifier_id).to_dict()
            send_notification.delay(redelivery_event)
        return job, "Job Created"
Example #7
def generate_description(job_group, job_group_notifier, request):
    files = FileRepository.filter(metadata={
        "requestId": request,
        "igocomplete": True
    })
    if files:
        data = files.first().metadata
        request_id = data["requestId"]
        recipe = data["recipe"]
        a_name = data["dataAnalystName"]
        a_email = data["dataAnalystEmail"]
        i_name = data["investigatorName"]
        i_email = data["investigatorEmail"]
        l_name = data["labHeadName"]
        l_email = data["labHeadEmail"]
        p_email = data["piEmail"]
        pm_name = data["projectManagerName"]
        qc_emails = data["qcAccessEmails"] if "qcAccessEmails" in data else ""
        data_access_emails = data[
            "dataAccessEmails"] if "dataAccessEmails" in data else ""
        other_contact_emails = data[
            "otherContactEmails"] if "otherContactEmails" in data else ""

        num_samples = len(
            files.order_by().values("metadata__cmoSampleName").annotate(
                n=Count("pk")))
        num_tumors = len(
            FileRepository.filter(
                queryset=files, metadata={
                    "tumorOrNormal": "Tumor"
                }).order_by().values("metadata__cmoSampleName").annotate(
                    n=Count("pk")))
        num_normals = len(
            FileRepository.filter(
                queryset=files, metadata={
                    "tumorOrNormal": "Normal"
                }).order_by().values("metadata__cmoSampleName").annotate(
                    n=Count("pk")))
        operator_start_event = OperatorStartEvent(
            job_group_notifier,
            job_group,
            request_id,
            num_samples,
            recipe,
            a_name,
            a_email,
            i_name,
            i_email,
            l_name,
            l_email,
            p_email,
            pm_name,
            qc_emails,
            num_tumors,
            num_normals,
            data_access_emails,
            other_contact_emails,
        ).to_dict()
        send_notification.delay(operator_start_event)
Example #8
    def _job_failed(self):
        if self.job.run == TYPES['REQUEST']:
            e = ETLJobFailedEvent(
                self.job.job_group_notifier.id,
                "[CIReviewEvent] ETL Job failed, likely child job import. Check pooled normal import, might already exist in database."
            ).to_dict()
            send_notification.delay(e)
            self._generate_ticket_decription()
Example #9
def _send_as_notification(val, job_group):
    uri = val.get('location')
    path = FileProcessor.parse_path_from_uri(uri)
    file_name = os.path.basename(path)
    if job_group:
        event = UploadAttachmentEvent(str(job_group.id), file_name, path, download=True)
        send_notification.delay(event.to_dict())
    else:
        logger.info("Can't upload file: %s. JobGroup not specified", path)
    return val
Example #10
def generate_label(job_group_id, request):
    files = FileRepository.filter(metadata={
        'requestId': request,
        'igocomplete': True
    })
    if files:
        data = files.first().metadata
        recipe = data['recipe']
        recipe_label_event = SetLabelEvent(job_group_id, recipe).to_dict()
        send_notification.delay(recipe_label_event)
Example #11
def fetch_samples(request_id, import_pooled_normals=True, import_samples=True, job_group=None, job_group_notifier=None,
                  redelivery=False):
    logger.info("Fetching sampleIds for requestId:%s" % request_id)
    jg = None
    jgn = None
    try:
        jg = JobGroup.objects.get(id=job_group)
        logger.debug("JobGroup found")
    except JobGroup.DoesNotExist:
        logger.debug("No JobGroup Found")
    try:
        jgn = JobGroupNotifier.objects.get(id=job_group_notifier)
        logger.debug("JobGroupNotifier found")
    except JobGroupNotifier.DoesNotExist:
        logger.debug("No JobGroupNotifier found")
    children = set()
    sample_ids = LIMSClient.get_request_samples(request_id)
    if sample_ids['requestId'] != request_id:
        raise ErrorInconsistentDataException(
            "LIMS returned wrong response for request %s. Got %s instead" % (request_id, sample_ids['requestId']))
    request_metadata = {
        "dataAnalystEmail": sample_ids['dataAnalystEmail'],
        "dataAnalystName": sample_ids['dataAnalystName'],
        "investigatorEmail": sample_ids['investigatorEmail'],
        "investigatorName": sample_ids['investigatorName'],
        "labHeadEmail": sample_ids['labHeadEmail'],
        "labHeadName": sample_ids['labHeadName'],
        "otherContactEmails": sample_ids['otherContactEmails'],
        "dataAccessEmails": sample_ids['dataAccessEmails'],
        "qcAccessEmails": sample_ids['qcAccessEmails'],
        "projectManagerName": sample_ids['projectManagerName'],
        "recipe": sample_ids['recipe'],
        "piEmail": sample_ids["piEmail"],
    }
    set_recipe_event = ETLSetRecipeEvent(job_group_notifier, request_metadata['recipe']).to_dict()
    send_notification.delay(set_recipe_event)
    pooled_normals = sample_ids.get("pooledNormals", [])
    if import_pooled_normals and pooled_normals:
        for f in pooled_normals:
            job = get_or_create_pooled_normal_job(f, jg)
            children.add(str(job.id))
    if import_samples:
        if not sample_ids.get('samples', False):
            raise FailedToFetchSampleException("No samples reported for requestId: %s" % request_id)

        for sample in sample_ids.get('samples', []):
            job = create_sample_job(sample['igoSampleId'],
                                    sample['igocomplete'],
                                    request_id,
                                    request_metadata,
                                    redelivery,
                                    jg,
                                    jgn)
            children.add(str(job.id))
    return list(children)
Example #12
def fail_job(run_id, error_message):
    run = RunObject.from_db(run_id)
    run.fail(error_message)
    run.to_db()

    job_group_notifier = run.job_group_notifier
    job_group_notifier_id = str(
        job_group_notifier.id) if job_group_notifier else None

    ci_review = SetCIReviewEvent(job_group_notifier_id).to_dict()
    send_notification.delay(ci_review)

    _job_finished_notify(run)
Example #13
    def _send_notifications(self, job_group_notifier_id, run):
        pipeline_name = run.app.name
        pipeline_version = run.app.version
        pipeline_link = run.app.pipeline_link

        pipeline_description_event = AddPipelineToDescriptionEvent(
            job_group_notifier_id, pipeline_name, pipeline_version,
            pipeline_link).to_dict()
        send_notification.delay(pipeline_description_event)

        run_event = RunStartedEvent(job_group_notifier_id, str(run.id),
                                    run.app.name, run.app.pipeline_link,
                                    run.output_directory, run.tags).to_dict()
        send_notification.delay(run_event)
Example #14
def create_jobs_from_request(request_id,
                             operator_id,
                             job_group_id,
                             job_group_notifier_id=None,
                             pipeline=None):
    logger.info(
        format_log("Creating operator with %s" % operator_id,
                   job_group_id=job_group_id,
                   request_id=request_id))
    operator_model = Operator.objects.get(id=operator_id)

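    # Create a notifier for the job group if one wasn't passed in, and record the request's delivery date on the ticket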
    if not job_group_notifier_id:
        try:
            job_group = JobGroup.objects.get(id=job_group_id)
        except JobGroup.DoesNotExist:
            logger.info(
                format_log("Job group does not exist",
                           job_group_id=job_group_id,
                           request_id=request_id))
            return
        try:
            job_group_notifier_id = notifier_start(job_group,
                                                   request_id,
                                                   operator=operator_model)
            request_obj = Request.objects.filter(request_id=request_id).first()
            if request_obj:
                delivery_date_event = SetDeliveryDateFieldEvent(
                    job_group_notifier_id,
                    str(request_obj.delivery_date)).to_dict()
                send_notification.delay(delivery_date_event)
        except Exception as e:
            logger.info(
                format_log("Failed to instantiate notifier",
                           job_group_id=job_group_id,
                           request_id=request_id))

    operator = OperatorFactory.get_by_model(
        operator_model,
        job_group_id=job_group_id,
        job_group_notifier_id=job_group_notifier_id,
        request_id=request_id,
        pipeline=pipeline,
    )

    _set_link_to_run_ticket(request_id, job_group_notifier_id)

    generate_description(job_group_id, job_group_notifier_id, request_id)
    generate_label(job_group_notifier_id, request_id)
    create_jobs_from_operator(operator, job_group_id, job_group_notifier_id)
Example #15
    def write_to_file(self, fname, s):
        """
        Writes the file to a temporary location, then registers it to the temp file group.
        Also uploads it to the notifier if there is a job group id.
        """
        output = os.path.join(self.OUTPUT_DIR, fname)
        with open(output, "w+") as fh:
            fh.write(s)
        os.chmod(output, 0o777)
        self.register_tmp_file(output)
        if self.job_group_notifier_id:
            upload_file_event = UploadAttachmentEvent(
                self.job_group_notifier_id, fname, s).to_dict()
            send_notification.delay(upload_file_event)
        return {'class': 'File', 'location': "juno://" + output}
Example #16
def approve(modeladmin, request, queryset):
    for req in queryset:
        email = "*****@*****.**" % req.username
        User.objects.create(username=req.username, email=email, first_name=req.first_name, last_name=req.last_name)
        req.approved = True
        req.save()
        content = "Your request to access Voyager is approved"
        email = SendEmailEvent(
            job_notifier=settings.BEAGLE_NOTIFIER_EMAIL_GROUP,
            email_to=email,
            email_from=settings.BEAGLE_NOTIFIER_EMAIL_FROM,
            subject="Registration approved",
            content=content,
        )
        send_notification.delay(email.to_dict())
Example #17
    def save(self, *args, **kwargs):
        if self.pk is None:
            content = "User %s %s, with email %[email protected] requested Voyager access." % (
                self.first_name,
                self.last_name,
                self.username,
            )
            for email in settings.BEAGLE_NOTIFIER_EMAIL_ABOUT_NEW_USERS.split(
                    ','):
                email = SendEmailEvent(
                    job_notifier=settings.BEAGLE_NOTIFIER_EMAIL_GROUP,
                    email_to=email,
                    email_from=settings.BEAGLE_NOTIFIER_EMAIL_FROM,
                    subject='Registration access',
                    content=content)
                send_notification.delay(email.to_dict())
        super(UserRegistrationRequest, self).save(*args, **kwargs)
Example #18
def generate_description(job_group, job_group_notifier, request):
    files = FileRepository.filter(metadata={
        'requestId': request,
        'igocomplete': True
    })
    if files:
        data = files.first().metadata
        request_id = data['requestId']
        recipe = data['recipe']
        a_name = data['dataAnalystName']
        a_email = data['dataAnalystEmail']
        i_name = data['investigatorName']
        i_email = data['investigatorEmail']
        l_name = data['labHeadName']
        l_email = data['labHeadEmail']
        p_email = data['piEmail']
        pm_name = data['projectManagerName']
        qc_emails = data['qcAccessEmails'] if 'qcAccessEmails' in data else ""

        num_samples = len(
            files.order_by().values('metadata__cmoSampleName').annotate(
                n=Count("pk")))
        num_tumors = len(
            FileRepository.filter(
                queryset=files, metadata={
                    'tumorOrNormal': 'Tumor'
                }).order_by().values('metadata__cmoSampleName').annotate(
                    n=Count("pk")))
        num_normals = len(
            FileRepository.filter(
                queryset=files, metadata={
                    'tumorOrNormal': 'Normal'
                }).order_by().values('metadata__cmoSampleName').annotate(
                    n=Count("pk")))
        operator_start_event = OperatorStartEvent(
            job_group_notifier, job_group, request_id, num_samples, recipe,
            a_name, a_email, i_name, i_email, l_name, l_email, p_email,
            pm_name, qc_emails, num_tumors, num_normals).to_dict()
        send_notification.delay(operator_start_event)
Example #19
def _set_link_to_run_ticket(request_id, job_group_notifier_id):
    jira_id = None
    import_job = Job.objects.filter(
        run=TYPES['REQUEST'],
        args__request_id=request_id).order_by('-created_date').first()
    if not import_job:
        logger.error("Could not find Import JIRA ticket")
        return
    try:
        job_group_notifier_job = JobGroupNotifier.objects.get(
            job_group=import_job.job_group.id, notifier_type__default=True)
    except JobGroupNotifier.DoesNotExist:
        logger.error("Could not find Import JIRA ticket")
        return
    try:
        new_jira = JobGroupNotifier.objects.get(id=job_group_notifier_id)
    except JobGroupNotifier.DoesNotExist:
        logger.error("Could not find Import JIRA ticket")
        return
    event = SetRunTicketInImportEvent(
        job_notifier=str(job_group_notifier_job.id),
        run_jira_id=new_jira.jira_id).to_dict()
    send_notification.delay(event)
Example #20
    def on_job_fail(self, run):
        cmo_sample_name = run.tags.get("sampleNameTumor")
        files = FileRepository.filter(
            queryset=self.files, metadata={"cmoSampleName": cmo_sample_name})
        if files:
            qc_report = files[0].metadata["qcReports"]
            sample_id = files[0].metadata["sampleId"]
            """
            {
                "comments": "Suboptimal quantity",
                "qcReportType": "LIBRARY",
                "IGORecommendation": "Try",
                "investigatorDecision": "Continue processing"
            }
            """
            report_str = ""
            for report in qc_report:
                report_str += "{comments}\t{qc_report_type}\t{igo_recommendation}\t{investigator_decision}\n".format(
                    comments=report["comments"],
                    qc_report_type=report["qcReportType"],
                    igo_recommendation=report["IGORecommendation"],
                    investigator_decision=report["investigatorDecision"],
                )
            msg = """
cmoSampleId: {cmo_sample_name}
sampleId: {sample_id}
Comments\tQC Report Type\tIGORecommendation\tInvestigator Decision\n
{report_str}
""".format(cmo_sample_name=cmo_sample_name,
            sample_id=sample_id,
            report_str=report_str)

            file_name = "{cmo_sample_name}_igo_qc_report".format(
                cmo_sample_name=cmo_sample_name)
            sample_errors_event = UploadAttachmentEvent(
                self.job_group_notifier_id, file_name, msg).to_dict()
            send_notification.delay(sample_errors_event)
Example #21
def create_or_update_file(path, request_id, file_group_id, file_type, igocomplete, data, library, run, sample,
                          request_metadata, r, update=False, job_group_notifier=None):
    logger.info("Creating file %s " % path)
    try:
        file_group_obj = FileGroup.objects.get(id=file_group_id)
        file_type_obj = FileType.objects.filter(name=file_type).first()
        lims_metadata = copy.deepcopy(data)
        library_copy = copy.deepcopy(library)
        lims_metadata['requestId'] = request_id
        lims_metadata['igocomplete'] = igocomplete
        lims_metadata['R'] = r
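        # Merge library-, run- and request-level metadata into the LIMS metadata for this file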
        for k, v in library_copy.items():
            lims_metadata[k] = v
        for k, v in run.items():
            lims_metadata[k] = v
        for k, v in request_metadata.items():
            lims_metadata[k] = v
        metadata = format_metadata(lims_metadata)
        # validator = MetadataValidator(METADATA_SCHEMA)
    except Exception as e:
        logger.error("Failed to parse metadata for file %s path" % path)
        raise FailedToFetchSampleException("Failed to create file %s. Error %s" % (path, str(e)))
    try:
        logger.info(lims_metadata)
        # validator.validate(metadata)
    except MetadataValidationException as e:
        logger.error("Failed to create file %s. Error %s" % (path, str(e)))
        raise FailedToFetchSampleException("Failed to create file %s. Error %s" % (path, str(e)))
    else:
        f = FileRepository.filter(path=path).first()
        if not f:
            create_file_object(path, file_group_obj, lims_metadata, metadata, file_type_obj, sample)

            if update:
                message = "File registered: %s" % path
                update = RedeliveryUpdateEvent(job_group_notifier, message).to_dict()
                send_notification.delay(update)
        else:
            if update:
                before = f.file.filemetadata_set.order_by('-created_date').count()
                update_file_object(f.file, path, metadata)
                after = f.file.filemetadata_set.order_by('-created_date').count()
                if after != before:
                    all_metadata = f.file.filemetadata_set.order_by('-created_date')
                    ddiff = DeepDiff(all_metadata[1].metadata,
                                     all_metadata[0].metadata,
                                     ignore_order=True)
                    diff_file_name = "%s_metadata_update.json" % f.file.file_name
                    message = "Updating file metadata: %s, details in file %s\n" % (path, diff_file_name)
                    update = RedeliveryUpdateEvent(job_group_notifier, message).to_dict()
                    diff_details_event = LocalStoreFileEvent(job_group_notifier, diff_file_name, str(ddiff)).to_dict()
                    send_notification.delay(update)
                    send_notification.delay(diff_details_event)
            else:
                raise FailedToFetchSampleException("File %s already exist with id %s" % (path, str(f.id)))
Example #22
def create_or_update_file(
    path,
    request_id,
    file_group_id,
    file_type,
    igocomplete,
    data,
    library,
    run,
    request_metadata,
    r,
    update=False,
    job_group_notifier=None,
):
    logger.info("Creating file %s " % path)
    try:
        file_group_obj = FileGroup.objects.get(id=file_group_id)
        file_type_obj = FileType.objects.filter(name=file_type).first()
        lims_metadata = copy.deepcopy(data)
        library_copy = copy.deepcopy(library)
        lims_metadata["requestId"] = request_id
        lims_metadata["igocomplete"] = igocomplete
        lims_metadata["R"] = r
        for k, v in library_copy.items():
            lims_metadata[k] = v
        for k, v in run.items():
            lims_metadata[k] = v
        for k, v in request_metadata.items():
            lims_metadata[k] = v
        metadata = format_metadata(lims_metadata)
        # validator = MetadataValidator(METADATA_SCHEMA)
    except Exception as e:
        logger.error("Failed to parse metadata for file %s path" % path)
        raise FailedToFetchSampleException(
            "Failed to create file %s. Error %s" % (path, str(e)))
    try:
        logger.info(lims_metadata)
        # validator.validate(metadata)
    except MetadataValidationException as e:
        logger.error("Failed to create file %s. Error %s" % (path, str(e)))
        raise FailedToFetchSampleException(
            "Failed to create file %s. Error %s" % (path, str(e)))
    else:
        recipe = metadata.get("recipe", "")
        new_path = CopyService.remap(recipe, path)  # Get copied file path
        f = FileRepository.filter(path=new_path).first()
        if not f:
            try:
                if path != new_path:
                    CopyService.copy(path, new_path)
            except Exception as e:
                if "Permission denied" in str(e):
                    raise FailedToCopyFilePermissionDeniedException(
                        "Failed to copy file %s. Error %s" % (path, str(e)))
                else:
                    raise FailedToCopyFileException(
                        "Failed to copy file %s. Error %s" % (path, str(e)))
            create_file_object(new_path, file_group_obj, lims_metadata,
                               metadata, file_type_obj)
            if update:
                message = "File registered: %s" % path
                update = RedeliveryUpdateEvent(job_group_notifier,
                                               message).to_dict()
                send_notification.delay(update)
        else:
            if update:
                before = f.file.filemetadata_set.order_by(
                    "-created_date").count()
                update_file_object(f.file, f.file.path, metadata)
                after = f.file.filemetadata_set.order_by(
                    "-created_date").count()
                if after != before:
                    all_metadata = f.file.filemetadata_set.order_by(
                        "-created_date")
                    ddiff = DeepDiff(all_metadata[1].metadata,
                                     all_metadata[0].metadata,
                                     ignore_order=True)
                    diff_file_name = "%s_metadata_update_%s.json" % (
                        f.file.file_name, all_metadata[0].version)
                    message = "Updating file metadata: %s, details in file %s\n" % (
                        path, diff_file_name)
                    update = RedeliveryUpdateEvent(job_group_notifier,
                                                   message).to_dict()
                    diff_details_event = LocalStoreFileEvent(
                        job_group_notifier, diff_file_name,
                        str(ddiff)).to_dict()
                    send_notification.delay(update)
                    send_notification.delay(diff_details_event)
            else:
                raise FailedToFetchSampleException(
                    "File %s already exist with id %s" % (path, str(f.id)))
Example #23
    def send_message(self, msg):
        event = OperatorRequestEvent(self.job_group_notifier_id, msg)
        e = event.to_dict()
        send_notification.delay(e)
Example #24
    def get_jobs(self):
        files = FileRepository.filter(queryset=self.files,
                                      metadata={
                                          "requestId": self.request_id,
                                          "igocomplete": True
                                      })
        argos_jobs = list()

        cnt_tumors = FileRepository.filter(queryset=self.files,
                                           metadata={
                                               "requestId": self.request_id,
                                               "tumorOrNormal": "Tumor",
                                               "igocomplete": True
                                           }).count()
        if cnt_tumors == 0:
            cant_do = CantDoEvent(self.job_group_notifier_id).to_dict()
            send_notification.delay(cant_do)
            all_normals_event = SetLabelEvent(self.job_group_notifier_id,
                                              "all_normals").to_dict()
            send_notification.delay(all_normals_event)
            return argos_jobs

        data = list()
        for f in files:
            sample = dict()
            sample["id"] = f.file.id
            sample["path"] = f.file.path
            sample["file_name"] = f.file.file_name
            sample["metadata"] = f.metadata
            data.append(sample)

        files = list()
        samples = list()
        # group by igoId
        igo_id_group = dict()
        for sample in data:
            igo_id = sample["metadata"]["sampleId"]
            if igo_id not in igo_id_group:
                igo_id_group[igo_id] = list()
            igo_id_group[igo_id].append(sample)

        for igo_id in igo_id_group:
            samples.append(build_sample(igo_id_group[igo_id]))

        argos_inputs, error_samples = construct_argos_jobs(samples)
        number_of_inputs = len(argos_inputs)

        sample_pairing = ""
        sample_mapping = ""
        pipeline = self.get_pipeline_id()

        try:
            pipeline_obj = Pipeline.objects.get(id=pipeline)
        except Pipeline.DoesNotExist:
            pass

        for i, job in enumerate(argos_inputs):
            tumor_sample_name = job["pair"][0]["ID"]
            for p in job["pair"][0]["R1"]:
                filepath = FileProcessor.parse_path_from_uri(p["location"])
                if filepath not in files:
                    sample_mapping += "\t".join([tumor_sample_name, filepath
                                                 ]) + "\n"
                    files.append(filepath)
            for p in job["pair"][0]["R2"]:
                filepath = FileProcessor.parse_path_from_uri(p["location"])
                if filepath not in files:
                    sample_mapping += "\t".join([tumor_sample_name, filepath
                                                 ]) + "\n"
                    files.append(filepath)
            for p in job["pair"][0]["zR1"]:
                filepath = FileProcessor.parse_path_from_uri(p["location"])
                if filepath not in files:
                    sample_mapping += "\t".join([tumor_sample_name, filepath
                                                 ]) + "\n"
                    files.append(filepath)
            for p in job["pair"][0]["zR2"]:
                filepath = FileProcessor.parse_path_from_uri(p["location"])
                if filepath not in files:
                    sample_mapping += "\t".join([tumor_sample_name, filepath
                                                 ]) + "\n"
                    files.append(filepath)

            normal_sample_name = job["pair"][1]["ID"]
            for p in job["pair"][1]["R1"]:
                filepath = FileProcessor.parse_path_from_uri(p["location"])
                if filepath not in files:
                    sample_mapping += "\t".join([normal_sample_name, filepath
                                                 ]) + "\n"
                    files.append(filepath)
            for p in job["pair"][1]["R2"]:
                filepath = FileProcessor.parse_path_from_uri(p["location"])
                if filepath not in files:
                    sample_mapping += "\t".join([normal_sample_name, filepath
                                                 ]) + "\n"
                    files.append(filepath)
            for p in job["pair"][1]["zR1"]:
                filepath = FileProcessor.parse_path_from_uri(p["location"])
                if filepath not in files:
                    sample_mapping += "\t".join([normal_sample_name, filepath
                                                 ]) + "\n"
                    files.append(filepath)
            for p in job["pair"][1]["zR2"]:
                filepath = FileProcessor.parse_path_from_uri(p["location"])
                if filepath not in files:
                    sample_mapping += "\t".join([normal_sample_name, filepath
                                                 ]) + "\n"
                    files.append(filepath)

            for p in job["pair"][1]["bam"]:
                filepath = FileProcessor.parse_path_from_uri(p["location"])
                if filepath not in files:
                    sample_mapping += "\t".join([normal_sample_name, filepath
                                                 ]) + "\n"
                    files.append(filepath)

            name = "ARGOS %s, %i of %i" % (self.request_id, i + 1,
                                           number_of_inputs)
            assay = job["assay"]
            pi = job["pi"]
            pi_email = job["pi_email"]

            sample_pairing += "\t".join(
                [normal_sample_name, tumor_sample_name]) + "\n"

            tags = {
                "requestId": self.request_id,
                "sampleNameTumor": tumor_sample_name,
                "sampleNameNormal": normal_sample_name,
                "labHeadName": pi,
                "labHeadEmail": pi_email,
            }
            argos_jobs.append(
                RunCreator(app=pipeline, inputs=job, name=name, tags=tags))

        operator_run_summary = UploadAttachmentEvent(
            self.job_group_notifier_id, "sample_pairing.txt",
            sample_pairing).to_dict()
        send_notification.delay(operator_run_summary)

        mapping_file_event = UploadAttachmentEvent(self.job_group_notifier_id,
                                                   "sample_mapping.txt",
                                                   sample_mapping).to_dict()
        send_notification.delay(mapping_file_event)

        data_clinical = generate_sample_data_content(
            files,
            pipeline_name=pipeline_obj.name,
            pipeline_github=pipeline_obj.github,
            pipeline_version=pipeline_obj.version,
        )
        sample_data_clinical_event = UploadAttachmentEvent(
            self.job_group_notifier_id, "sample_data_clinical.txt",
            data_clinical).to_dict()
        send_notification.delay(sample_data_clinical_event)

        self.evaluate_sample_errors(error_samples)
        self.summarize_pairing_info(argos_inputs)

        return argos_jobs
Example #25
def request_callback(request_id, job_group=None, job_group_notifier=None):
    jg = None
    jgn = None
    try:
        jgn = JobGroupNotifier.objects.get(id=job_group_notifier)
        logger.debug("[RequestCallback] JobGroup id: %s", job_group)
    except JobGroupNotifier.DoesNotExist:
        logger.debug("[RequestCallback] JobGroup not set")
    job_group_notifier_id = str(jgn.id) if jgn else None
    assays = ETLConfiguration.objects.first()

    recipe = LIMSClient.get_request_samples(request_id).get("recipe", None)

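    # If any igocomplete sample job in this group is not completed and the recipe is a WES assay, send a WES job-failed notification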
    if (not all([
            JobStatus(job["status"]) == JobStatus.COMPLETED for job in
            Job.objects.filter(job_group=job_group,
                               run=TYPES["SAMPLE"],
                               args__igocomplete=True).values("status")
    ]) and recipe in settings.WES_ASSAYS):
        wes_job_failed = WESJobFailedEvent(job_group_notifier_id, recipe)
        send_notification.delay(wes_job_failed.to_dict())

    if not recipe:
        raise FailedToSubmitToOperatorException(
            "Not enough metadata to choose the operator for requestId:%s" %
            request_id)

    if not all(item in assays.all_recipes for item in [recipe]):
        ci_review_e = SetCIReviewEvent(job_group_notifier_id).to_dict()
        send_notification.delay(ci_review_e)
        set_unknown_assay_label = SetLabelEvent(
            job_group_notifier_id, "unrecognized_assay").to_dict()
        send_notification.delay(set_unknown_assay_label)
        unknown_assay_event = UnknownAssayEvent(job_group_notifier_id,
                                                recipe).to_dict()
        send_notification.delay(unknown_assay_event)
        return []

    if any(item in assays.hold_recipes for item in [
            recipe,
    ]):
        admin_hold_event = AdminHoldEvent(job_group_notifier_id).to_dict()
        send_notification.delay(admin_hold_event)
        custom_capture_event = CustomCaptureCCEvent(job_group_notifier_id,
                                                    recipe).to_dict()
        send_notification.delay(custom_capture_event)
        return []

    if any(item in assays.disabled_recipes for item in [
            recipe,
    ]):
        not_for_ci = NotForCIReviewEvent(job_group_notifier_id).to_dict()
        send_notification.delay(not_for_ci)
        disabled_assay_event = DisabledAssayEvent(job_group_notifier_id,
                                                  recipe).to_dict()
        send_notification.delay(disabled_assay_event)
        return []

    if len(
            FileRepository.filter(metadata={
                "requestId": request_id
            },
                                  values_metadata="recipe").all()) == 0:
        no_samples_event = AdminHoldEvent(job_group_notifier_id).to_dict()
        send_notification.delay(no_samples_event)
        return []

    if not all([
            JobStatus(job["status"]) == JobStatus.COMPLETED
            for job in Job.objects.filter(job_group=job_group).values("status")
    ]):
        ci_review_e = SetCIReviewEvent(job_group_notifier_id).to_dict()
        send_notification.delay(ci_review_e)

    lab_head_email = FileRepository.filter(
        metadata={
            "requestId": request_id
        }, values_metadata="labHeadEmail").first()
    try:
        if lab_head_email.split("@")[1] != "mskcc.org":
            event = ExternalEmailEvent(job_group_notifier_id,
                                       request_id).to_dict()
            send_notification.delay(event)
    except Exception:
        logger.error("Failed to check labHeadEmail")

    if len(
            FileRepository.filter(metadata={
                "requestId": request_id,
                "tumorOrNormal": "Tumor"
            })) == 0:
        only_normal_samples_event = OnlyNormalSamplesEvent(
            job_group_notifier_id, request_id).to_dict()
        send_notification.delay(only_normal_samples_event)
        if recipe in settings.ASSAYS_ADMIN_HOLD_ONLY_NORMALS:
            admin_hold_event = AdminHoldEvent(job_group_notifier_id).to_dict()
            send_notification.delay(admin_hold_event)
            return []

    operators = Operator.objects.filter(recipes__overlap=[recipe])

    if not operators:
        # TODO: Import ticket will have CIReviewNeeded
        msg = "No operator defined for requestId %s with recipe %s" % (
            request_id, recipe)
        logger.error(msg)
        e = OperatorRequestEvent(job_group_notifier_id,
                                 "[CIReviewEvent] %s" % msg).to_dict()
        send_notification.delay(e)
        ci_review_e = SetCIReviewEvent(job_group_notifier_id).to_dict()
        send_notification.delay(ci_review_e)
        raise FailedToSubmitToOperatorException(msg)
    for operator in operators:
        if not operator.active:
            msg = "Operator not active: %s" % operator.class_name
            logger.info(msg)
            e = OperatorRequestEvent(job_group_notifier_id,
                                     "[CIReviewEvent] %s" % msg).to_dict()
            send_notification.delay(e)
            error_label = SetLabelEvent(job_group_notifier_id,
                                        "operator_inactive").to_dict()
            send_notification.delay(error_label)
            ci_review_e = SetCIReviewEvent(job_group_notifier_id).to_dict()
            send_notification.delay(ci_review_e)
        else:
            logger.info("Submitting request_id %s to %s operator" %
                        (request_id, operator.class_name))
            if Job.objects.filter(job_group=job_group,
                                  args__request_id=request_id,
                                  run=TYPES["SAMPLE"],
                                  status=JobStatus.FAILED).all():
                partialy_complete_event = ETLImportPartiallyCompleteEvent(
                    job_notifier=job_group_notifier_id).to_dict()
                send_notification.delay(partialy_complete_event)
            else:
                complete_event = ETLImportCompleteEvent(
                    job_notifier=job_group_notifier_id).to_dict()
                send_notification.delay(complete_event)

            create_jobs_from_request.delay(request_id, operator.id, job_group)
    return []
Example #26
    def get_jobs(self):

        argos_jobs = list()

        if self.request_id:
            files = FileRepository.filter(queryset=self.files,
                                          metadata={
                                              'requestId': self.request_id,
                                              'igocomplete': True
                                          },
                                          filter_redact=True)

            cnt_tumors = FileRepository.filter(queryset=self.files,
                                               metadata={
                                                   'requestId':
                                                   self.request_id,
                                                   'tumorOrNormal': 'Tumor',
                                                   'igocomplete': True
                                               },
                                               filter_redact=True).count()
        elif self.pairing:
            files, cnt_tumors = self.get_files_for_pairs()

        if cnt_tumors == 0:
            cant_do = CantDoEvent(self.job_group_notifier_id).to_dict()
            send_notification.delay(cant_do)
            all_normals_event = SetLabelEvent(self.job_group_notifier_id,
                                              'all_normals').to_dict()
            send_notification.delay(all_normals_event)
            return argos_jobs

        data = list()
        for f in files:
            sample = dict()
            sample['id'] = f.file.id
            sample['path'] = f.file.path
            sample['file_name'] = f.file.file_name
            sample['metadata'] = f.metadata
            data.append(sample)

        files = list()
        samples = list()
        # group by igoId
        igo_id_group = dict()
        for sample in data:
            igo_id = sample['metadata']['sampleId']
            if igo_id not in igo_id_group:
                igo_id_group[igo_id] = list()
            igo_id_group[igo_id].append(sample)

        for igo_id in igo_id_group:
            samples.append(build_sample(igo_id_group[igo_id]))

        argos_inputs, error_samples = construct_argos_jobs(
            samples, self.pairing)
        number_of_inputs = len(argos_inputs)

        sample_pairing = ""
        sample_mapping = ""
        pipeline = self.get_pipeline_id()

        try:
            pipeline_obj = Pipeline.objects.get(id=pipeline)
        except Pipeline.DoesNotExist:
            pass

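        # Remember mapping lines already emitted so duplicate files don't produce duplicate rows in sample_mapping.txt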
        check_for_duplicates = list()
        for i, job in enumerate(argos_inputs):
            tumor_sample_name = job['pair'][0]['ID']
            for p in job['pair'][0]['R1']:
                filepath = FileProcessor.parse_path_from_uri(p['location'])
                file_str = "\t".join([tumor_sample_name, filepath]) + "\n"
                if file_str not in check_for_duplicates:
                    check_for_duplicates.append(file_str)
                    sample_mapping += file_str
                if filepath not in files:
                    files.append(filepath)
            for p in job['pair'][0]['R2']:
                filepath = FileProcessor.parse_path_from_uri(p['location'])
                file_str = "\t".join([tumor_sample_name, filepath]) + "\n"
                if file_str not in check_for_duplicates:
                    check_for_duplicates.append(file_str)
                    sample_mapping += file_str
                if filepath not in files:
                    files.append(filepath)
            for p in job['pair'][0]['zR1']:
                filepath = FileProcessor.parse_path_from_uri(p['location'])
                file_str = "\t".join([tumor_sample_name, filepath]) + "\n"
                if file_str not in check_for_duplicates:
                    check_for_duplicates.append(file_str)
                    sample_mapping += file_str
                if filepath not in files:
                    files.append(filepath)
            for p in job['pair'][0]['zR2']:
                filepath = FileProcessor.parse_path_from_uri(p['location'])
                file_str = "\t".join([tumor_sample_name, filepath]) + "\n"
                if file_str not in check_for_duplicates:
                    check_for_duplicates.append(file_str)
                    sample_mapping += file_str
                if filepath not in files:
                    files.append(filepath)

            normal_sample_name = job['pair'][1]['ID']
            for p in job['pair'][1]['R1']:
                filepath = FileProcessor.parse_path_from_uri(p['location'])
                file_str = "\t".join([normal_sample_name, filepath]) + "\n"
                if file_str not in check_for_duplicates:
                    check_for_duplicates.append(file_str)
                    sample_mapping += file_str
                if filepath not in files:
                    files.append(filepath)
            for p in job['pair'][1]['R2']:
                filepath = FileProcessor.parse_path_from_uri(p['location'])
                file_str = "\t".join([normal_sample_name, filepath]) + "\n"
                if file_str not in check_for_duplicates:
                    check_for_duplicates.append(file_str)
                    sample_mapping += file_str
                if filepath not in files:
                    files.append(filepath)
            for p in job['pair'][1]['zR1']:
                filepath = FileProcessor.parse_path_from_uri(p['location'])
                file_str = "\t".join([normal_sample_name, filepath]) + "\n"
                if file_str not in check_for_duplicates:
                    check_for_duplicates.append(file_str)
                    sample_mapping += file_str
                if filepath not in files:
                    files.append(filepath)
            for p in job['pair'][1]['zR2']:
                filepath = FileProcessor.parse_path_from_uri(p['location'])
                file_str = "\t".join([normal_sample_name, filepath]) + "\n"
                if file_str not in check_for_duplicates:
                    check_for_duplicates.append(file_str)
                    sample_mapping += file_str
                if filepath not in files:
                    files.append(filepath)

            for p in job['pair'][1]['bam']:
                filepath = FileProcessor.parse_path_from_uri(p['location'])
                file_str = "\t".join([normal_sample_name, filepath]) + "\n"
                if file_str not in check_for_duplicates:
                    check_for_duplicates.append(file_str)
                    sample_mapping += file_str
                if filepath not in files:
                    files.append(filepath)

            name = "ARGOS %s, %i of %i" % (self.request_id, i + 1,
                                           number_of_inputs)
            assay = job['assay']
            pi = job['pi']
            pi_email = job['pi_email']

            sample_pairing += "\t".join(
                [normal_sample_name, tumor_sample_name]) + "\n"

            argos_jobs.append((APIRunCreateSerializer(
                data={
                    'app': pipeline,
                    'inputs': job,
                    'name': name,
                    'tags': {
                        'requestId': self.request_id,
                        'sampleNameTumor': tumor_sample_name,
                        'sampleNameNormal': normal_sample_name,
                        'labHeadName': pi,
                        'labHeadEmail': pi_email
                    }
                }), job))

        operator_run_summary = UploadAttachmentEvent(
            self.job_group_notifier_id, 'sample_pairing.txt',
            sample_pairing).to_dict()
        send_notification.delay(operator_run_summary)

        mapping_file_event = UploadAttachmentEvent(self.job_group_notifier_id,
                                                   'sample_mapping.txt',
                                                   sample_mapping).to_dict()
        send_notification.delay(mapping_file_event)

        data_clinical = generate_sample_data_content(
            files,
            pipeline_name=pipeline_obj.name,
            pipeline_github=pipeline_obj.github,
            pipeline_version=pipeline_obj.version)
        sample_data_clinical_event = UploadAttachmentEvent(
            self.job_group_notifier_id, 'sample_data_clinical.txt',
            data_clinical).to_dict()
        send_notification.delay(sample_data_clinical_event)

        self.evaluate_sample_errors(error_samples)
        self.summarize_pairing_info(argos_inputs)

        return argos_jobs
Example #27
def fetch_samples(
    request_id,
    import_pooled_normals=True,
    import_samples=True,
    job_group=None,
    job_group_notifier=None,
    redelivery=False,
):
    logger.info("Fetching sampleIds for requestId:%s" % request_id)
    jg = None
    jgn = None
    try:
        jg = JobGroup.objects.get(id=job_group)
        logger.debug("JobGroup found")
    except JobGroup.DoesNotExist:
        logger.debug("No JobGroup Found")
    try:
        jgn = JobGroupNotifier.objects.get(id=job_group_notifier)
        logger.debug("JobGroupNotifier found")
    except JobGroupNotifier.DoesNotExist:
        logger.debug("No JobGroupNotifier found")
    children = set()
    sample_ids = LIMSClient.get_request_samples(request_id)
    if sample_ids["requestId"] != request_id:
        raise ErrorInconsistentDataException(
            "LIMS returned wrong response for request %s. Got %s instead" %
            (request_id, sample_ids["requestId"]))
    request_metadata = {
        "dataAnalystEmail": sample_ids["dataAnalystEmail"],
        "dataAnalystName": sample_ids["dataAnalystName"],
        "investigatorEmail": sample_ids["investigatorEmail"],
        "investigatorName": sample_ids["investigatorName"],
        "labHeadEmail": sample_ids["labHeadEmail"],
        "labHeadName": sample_ids["labHeadName"],
        "otherContactEmails": sample_ids["otherContactEmails"],
        "dataAccessEmails": sample_ids["dataAccessEmails"],
        "qcAccessEmails": sample_ids["qcAccessEmails"],
        "projectManagerName": sample_ids["projectManagerName"],
        "recipe": sample_ids["recipe"],
        "piEmail": sample_ids["piEmail"],
    }
    set_recipe_event = ETLSetRecipeEvent(job_group_notifier,
                                         request_metadata["recipe"]).to_dict()
    send_notification.delay(set_recipe_event)
    pooled_normals = sample_ids.get("pooledNormals", [])
    if import_pooled_normals and pooled_normals:
        for f in pooled_normals:
            job = get_or_create_pooled_normal_job(f,
                                                  jg,
                                                  jgn,
                                                  redelivery=redelivery)
            children.add(str(job.id))
    if import_samples:
        if not sample_ids.get("samples", False):
            raise FailedToFetchSampleException(
                "No samples reported for requestId: %s" % request_id)

        for sample in sample_ids.get("samples", []):
            sampleMetadata = LIMSClient.get_sample_manifest(
                sample["igoSampleId"])
            try:
                data = sampleMetadata[0]
            except Exception:
                logger.error(
                    "Failed to fetch SampleManifest for sample %s" %
                    sample["igoSampleId"])
                continue
            patient_id = format_patient_id(data.get("cmoPatientId"))

            if not Patient.objects.filter(patient_id=patient_id):
                Patient.objects.create(patient_id=patient_id)

            sample_name = data.get("cmoSampleName", None)
            specimen_type = data.get("specimenType", None)
            cmo_sample_name = format_sample_name(sample_name, specimen_type)

            if not Sample.objects.filter(sample_id=sample["igoSampleId"],
                                         sample_name=sample_name,
                                         cmo_sample_name=cmo_sample_name):
                Sample.objects.create(sample_id=sample["igoSampleId"],
                                      sample_name=sample_name,
                                      cmo_sample_name=cmo_sample_name)

            job = create_sample_job(sample["igoSampleId"],
                                    sample["igoComplete"], request_id,
                                    request_metadata, redelivery, jg, jgn)
            children.add(str(job.id))
    return list(children)
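A minimal invocation sketch for fetch_samples (the request id below is hypothetical, and the call assumes a configured Django/Celery application with LIMS access):

child_job_ids = fetch_samples(
    "10001_B",                 # hypothetical LIMS request id
    import_pooled_normals=True,
    import_samples=True,
    job_group=None,            # or str(existing_job_group.id)
    job_group_notifier=None,   # or str(existing_notifier.id)
    redelivery=False,
)
# The returned ids are the child jobs created for pooled normals and samples.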
Exemple #28
0
def create_request_job(request_id, redelivery=False):
    logger.info("Searching for job: %s for request_id: %s" %
                (TYPES["REQUEST"], request_id))
    count = Job.objects.filter(
        run=TYPES["REQUEST"],
        args__request_id=request_id,
        status__in=[
            JobStatus.CREATED, JobStatus.IN_PROGRESS,
            JobStatus.WAITING_FOR_CHILDREN
        ],
    ).count()
    request_redelivered = Job.objects.filter(
        run=TYPES["REQUEST"], args__request_id=request_id).count() > 0

    delivery_date = None
    try:
        request_from_lims = LIMSClient.get_request_samples(request_id)
        delivery_date = datetime.fromtimestamp(
            request_from_lims["deliveryDate"] / 1000)
    except Exception:
        logger.error("Failed to retrieve deliveryDate for request %s" %
                     request_id)

    if not Request.objects.filter(request_id=request_id):
        Request.objects.create(request_id=request_id,
                               delivery_date=delivery_date)
    assays = ETLConfiguration.objects.first()

    if request_redelivered and not (assays.redelivery and redelivery):
        return None, "Request is redelivered, but redelivery deactivated"

    if count == 0:
        job_group = JobGroup()
        job_group.save()
        job_group_notifier_id = notifier_start(job_group, request_id)
        job_group_notifier = JobGroupNotifier.objects.get(
            id=job_group_notifier_id)
        job = Job(
            run=TYPES["REQUEST"],
            args={
                "request_id": request_id,
                "job_group": str(job_group.id),
                "job_group_notifier": job_group_notifier_id,
                "redelivery": request_redelivered,
            },
            status=JobStatus.CREATED,
            max_retry=1,
            children=[],
            callback=TYPES["REQUEST_CALLBACK"],
            callback_args={
                "request_id": request_id,
                "job_group": str(job_group.id),
                "job_group_notifier": job_group_notifier_id,
            },
            job_group=job_group,
            job_group_notifier=job_group_notifier,
        )
        job.save()
        if request_redelivered:
            redelivery_event = RedeliveryEvent(job_group_notifier_id).to_dict()
            send_notification.delay(redelivery_event)
        request_obj = Request.objects.filter(request_id=request_id).first()
        if request_obj:
            delivery_date_event = SetDeliveryDateFieldEvent(
                job_group_notifier_id,
                str(request_obj.delivery_date)).to_dict()
            send_notification.delay(delivery_date_event)
        return job, "Job Created"
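A sketch of how a caller might consume create_request_job's result (caller code and request id are hypothetical). Note that when an active job already exists for the request, the function falls through without an explicit return and yields None rather than a (job, message) tuple, so the result should be guarded before unpacking:

result = create_request_job("10001_B", redelivery=False)
if result:
    job, message = result
    if job:
        logger.info("Request job %s: %s" % (job.id, message))
    else:
        logger.info(message)
else:
    logger.info("An active job already exists for this request")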
Exemple #29
0
def create_operator_run_from_jobs(operator,
                                  jobs,
                                  job_group_id=None,
                                  job_group_notifier_id=None):
    jg = None
    jgn = None
    try:
        jg = JobGroup.objects.get(id=job_group_id)
        logger.info("JobGroup id: %s", job_group_id)
    except JobGroup.DoesNotExist:
        logger.info("JobGroup not set")
    try:
        jgn = JobGroupNotifier.objects.get(id=job_group_notifier_id)
    except JobGroupNotifier.DoesNotExist:
        logger.info("JobGroupNotifier not set")
    valid_jobs, invalid_jobs = [], []
    for job in jobs:
        if job[0].is_valid():
            valid_jobs.append(job)
        else:
            invalid_jobs.append(job)

    operator_run = OperatorRun.objects.create(operator=operator.model,
                                              num_total_runs=len(valid_jobs),
                                              job_group=jg,
                                              job_group_notifier=jgn)
    run_ids = []
    pipeline_id = None

    try:
        pipeline_id = operator.get_pipeline_id()
        p = Pipeline.objects.get(id=pipeline_id)
        pipeline_name = p.name
        pipeline_version = p.version
        pipeline_link = p.pipeline_link
    except Pipeline.DoesNotExist:
        pipeline_name = ""
        pipeline_link = ""
        pipeline_version = ""

    pipeline_description_event = AddPipelineToDescriptionEvent(
        job_group_notifier_id, pipeline_name, pipeline_version,
        pipeline_link).to_dict()
    send_notification.delay(pipeline_description_event)

    set_pipeline_field = SetPipelineFieldEvent(job_group_notifier_id,
                                               pipeline_name).to_dict()
    send_notification.delay(set_pipeline_field)

    for job in valid_jobs:
        logger.info("Creating Run object")
        run = job[0].save(operator_run_id=operator_run.id,
                          job_group_id=job_group_id,
                          job_group_notifier_id=job_group_notifier_id)
        logger.info("Run object created with id: %s" % str(run.id))
        run_ids.append({
            "run_id": str(run.id),
            'tags': run.tags,
            'output_directory': run.output_directory
        })
        output_directory = run.output_directory
        if not pipeline_name and not pipeline_link:
            logger.info(
                "Run [ id: %s ] failed as the pipeline [ id: %s ] was not found",
                run.id, pipeline_id)
            error_message = dict(
                details="Pipeline [ id: %s ] was not found." % pipeline_id)
            fail_job(run.id, error_message)
        else:
            create_run_task.delay(str(run.id), job[1], output_directory)

    if job_group_id:
        event = OperatorRunEvent(job_group_notifier_id, operator.request_id,
                                 pipeline_name, pipeline_link, run_ids,
                                 str(operator_run.id)).to_dict()
        send_notification.delay(event)

    for job in invalid_jobs:
        # TODO: Report this to JIRA ticket also
        logger.error("Job invalid: %s" % str(job[0].errors))

    operator_run.status = RunStatus.RUNNING
    operator_run.save()
Exemple #30
0
def process_triggers():
    operator_runs = OperatorRun.objects.prefetch_related(
        'runs', 'operator__from_triggers').exclude(
            status__in=[RunStatus.COMPLETED, RunStatus.FAILED])

    for operator_run in operator_runs:
        created_chained_job = False
        job_group = operator_run.job_group
        job_group_id = str(job_group.id) if job_group else None
        job_group_notifier = operator_run.job_group_notifier
        job_group_notifier_id = str(
            job_group_notifier.id) if job_group_notifier else None
        try:
            for trigger in operator_run.operator.from_triggers.all():
                trigger_type = trigger.run_type

                if trigger_type == TriggerRunType.AGGREGATE:
                    condition = trigger.aggregate_condition
                    if condition == TriggerAggregateConditionType.ALL_RUNS_SUCCEEDED:
                        if operator_run.percent_runs_succeeded == 100.0:
                            created_chained_job = True
                            create_jobs_from_chaining.delay(
                                trigger.to_operator_id,
                                trigger.from_operator_id,
                                list(
                                    operator_run.runs.order_by(
                                        'id').values_list('id', flat=True)),
                                job_group_id=job_group_id,
                                job_group_notifier_id=job_group_notifier_id)
                            continue
                    elif condition == TriggerAggregateConditionType.NINTY_PERCENT_SUCCEEDED:
                        if operator_run.percent_runs_succeeded >= 90.0:
                            created_chained_job = True
                            create_jobs_from_chaining.delay(
                                trigger.to_operator_id,
                                trigger.from_operator_id,
                                list(
                                    operator_run.runs.order_by(
                                        'id').values_list('id', flat=True)),
                                job_group_id=job_group_id,
                                job_group_notifier_id=job_group_notifier_id)
                            continue

                    if operator_run.percent_runs_finished == 100.0:
                        logger.info("Condition never met for operator run %s" %
                                    operator_run.id)

                elif trigger_type == TriggerRunType.INDIVIDUAL:
                    if operator_run.percent_runs_finished == 100.0:
                        operator_run.complete()

            if operator_run.percent_runs_finished == 100.0:
                if operator_run.percent_runs_succeeded == 100.0:
                    operator_run.complete()
                    if not created_chained_job and job_group_notifier_id:
                        completed_event = SetPipelineCompletedEvent(
                            job_group_notifier_id).to_dict()
                        send_notification.delay(completed_event)
                else:
                    operator_run.fail()
                    if job_group_notifier_id:
                        e = OperatorRequestEvent(
                            job_group_notifier_id,
                            "[CIReviewEvent] Operator Run %s failed" %
                            str(operator_run.id)).to_dict()
                        send_notification.delay(e)
                        ci_review_event = SetCIReviewEvent(
                            job_group_notifier_id).to_dict()
                        send_notification.delay(ci_review_event)

        except Exception as e:
            logger.exception("Trigger for operator run %s failed: %s" %
                             (operator_run.id, str(e)))
            operator_run.fail()
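The aggregate-trigger branch above reduces to a threshold check on percent_runs_succeeded. A standalone restatement of that rule (function and default constant names here are illustrative, not the project's API):

def aggregate_condition_met(condition, percent_succeeded,
                            all_succeeded="ALL_RUNS_SUCCEEDED",
                            ninety_succeeded="NINTY_PERCENT_SUCCEEDED"):
    # Mirrors the two TriggerAggregateConditionType branches above:
    # ALL_RUNS_SUCCEEDED requires 100% of runs to succeed,
    # NINTY_PERCENT_SUCCEEDED requires at least 90%.
    if condition == all_succeeded:
        return percent_succeeded == 100.0
    if condition == ninety_succeeded:
        return percent_succeeded >= 90.0
    return False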