Code example #1
File: file_view.py (project: mskcc/beagle)
 def destroy(self, request, *args, **kwargs):
     try:
         FileRepository.delete(kwargs["pk"])
     except FileNotFoundException as e:
         return Response({"details": str(e)},
                         status=status.HTTP_404_NOT_FOUND)
     return Response(status=status.HTTP_204_NO_CONTENT)
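FileRepository itself is not shown on this page. As a hedged sketch of the contract the view above relies on (the File model and FileNotFoundException are assumed to be defined elsewhere in beagle; this is a reconstruction, not the project's actual implementation):

class FileRepository:
    @staticmethod
    def delete(pk):
        # Translate Django's DoesNotExist into the domain exception
        # that the views on this page catch.
        try:
            File.objects.get(id=pk).delete()
        except File.DoesNotExist:
            raise FileNotFoundException("File with id %s not found" % pk)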
Code example #2
def generate_description(job_group, job_group_notifier, request):
    files = FileRepository.filter(metadata={
        "requestId": request,
        "igocomplete": True
    })
    if files:
        data = files.first().metadata
        request_id = data["requestId"]
        recipe = data["recipe"]
        a_name = data["dataAnalystName"]
        a_email = data["dataAnalystEmail"]
        i_name = data["investigatorName"]
        i_email = data["investigatorEmail"]
        l_name = data["labHeadName"]
        l_email = data["labHeadEmail"]
        p_email = data["piEmail"]
        pm_name = data["projectManagerName"]
        qc_emails = data["qcAccessEmails"] if "qcAccessEmails" in data else ""
        data_access_emails = data[
            "dataAccessEmails"] if "dataAccessEmails" in data else ""
        other_contact_emails = data[
            "otherContactEmails"] if "otherContactEmails" in data else ""

        num_samples = len(
            files.order_by().values("metadata__cmoSampleName").annotate(
                n=Count("pk")))
        num_tumors = len(
            FileRepository.filter(
                queryset=files, metadata={
                    "tumorOrNormal": "Tumor"
                }).order_by().values("metadata__cmoSampleName").annotate(
                    n=Count("pk")))
        num_normals = len(
            FileRepository.filter(
                queryset=files, metadata={
                    "tumorOrNormal": "Normal"
                }).order_by().values("metadata__cmoSampleName").annotate(
                    n=Count("pk")))
        operator_start_event = OperatorStartEvent(
            job_group_notifier,
            job_group,
            request_id,
            num_samples,
            recipe,
            a_name,
            a_email,
            i_name,
            i_email,
            l_name,
            l_email,
            p_email,
            pm_name,
            qc_emails,
            num_tumors,
            num_normals,
            data_access_emails,
            other_contact_emails,
        ).to_dict()
        send_notification.delay(operator_start_event)
Code example #3
File: file_view.py (project: mskcc/beagle)
 def update(self, request, *args, **kwargs):
     try:
         f = FileRepository.get(id=kwargs.get("pk"))
     except FileNotFoundException:
         return Response({"details": "Not Found"},
                         status=status.HTTP_404_NOT_FOUND)
     serializer = UpdateFileSerializer(f.file,
                                       data=request.data,
                                       context={"request": request},
                                       partial=request.method == "PATCH")
     if serializer.is_valid():
         serializer.save()
         f = FileRepository.get(id=kwargs.get("pk"))
         response = FileSerializer(f)
         return Response(response.data, status=status.HTTP_200_OK)
     return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
Code example #4
 def test_zero_samples_igocomplete_false(self, mock_get_sample):
     mock_get_sample.return_value = MockResponse(
         json_data=self.data_0_fastq, status_code=200)
     with self.assertRaises(MissingDataException):
         fetch_sample_metadata('igoId_000', False, 'sampleName_000', {})
     count_files = FileRepository.all().count()
     self.assertEqual(count_files, 0)
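MockResponse is likewise not part of these snippets; judging from its call sites, a minimal stand-in (an assumption, not the project's real test helper) could be:

class MockResponse:
    # Mimics just the parts of requests.Response that the mocked
    # LIMS client appears to use in the tests above.
    def __init__(self, json_data=None, status_code=200):
        self._json_data = json_data
        self.status_code = status_code

    def json(self):
        return self._json_data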
Code example #5
File: tasks.py (project: mskcc/beagle)
def notifier_start(job_group, request_id, operator=None, metadata=None):
    if settings.NOTIFIER_ACTIVE:
        metadata = metadata or {}  # avoid sharing a mutable default argument
        notifier = Notifier.objects.get(default=True)
        if operator:
            # filter().first() returns None instead of raising
            # Notifier.DoesNotExist, so check the result explicitly and
            # keep the default notifier when the operator has none.
            operator_notifier = Notifier.objects.filter(operator__id=operator.id).first()
            if operator_notifier:
                notifier = operator_notifier
        job_group_notifier = JobGroupNotifier.objects.create(
            job_group=job_group, request_id=request_id, notifier_type=notifier
        )
        eh = event_handler(job_group_notifier.id)
        notifier_id = eh.start(request_id)
        job_group_notifier.jira_id = notifier_id
        if notifier_id.startswith(settings.JIRA_PREFIX):
            file_obj = FileRepository.filter(metadata={"requestId": request_id}).first()
            if file_obj:
                job_group_notifier.PI = file_obj.metadata.get("labHeadName")
                job_group_notifier.investigator = file_obj.metadata.get("investigatorName")
                job_group_notifier.assay = file_obj.metadata.get("recipe")
            else:
                job_group_notifier.PI = metadata.get("labHeadName")
                job_group_notifier.investigator = metadata.get("investigatorName")
                job_group_notifier.assay = metadata.get("recipe")
        job_group_notifier.save()
        return str(job_group_notifier.id)
    logger.info("Notifier Inactive")
    return None
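A hedged usage sketch (identifiers below are illustrative placeholders, not call sites from the project): the ETL layer would create a JobGroup, start the notifier once per request, and tolerate a None result when notifications are disabled.

# Illustrative only; in beagle this would run inside a Celery task.
job_group = JobGroup.objects.create()
notifier_id = notifier_start(job_group, request_id="<requestId>")
if notifier_id is None:
    logger.info("Notifier inactive; proceeding without a ticket")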
Code example #6
File: file_view.py (project: mskcc/beagle)
 def retrieve(self, request, *args, **kwargs):
     try:
         f = FileRepository.get(kwargs["pk"])
     except FileNotFoundException as e:
         return Response({"details": str(e)},
                         status=status.HTTP_404_NOT_FOUND)
     serializer = FileSerializer(f)
     return Response(serializer.data, status=status.HTTP_200_OK)
Code example #7
 def test_import_sample_two_fastq_files(self, mock_get_sample):
     mock_get_sample.return_value = MockResponse(
         json_data=self.data_2_fastq, status_code=200)
     fetch_sample_metadata('igoId_002', True, 'sampleName_002', {})
     count_files = FileRepository.filter(path_in=[
         "/path/to/sample/08/sampleName_002-d_IGO_igoId_002_S134_L008_R2_001.fastq.gz",
         "/path/to/sample/08/sampleName_002-d_IGO_igoId_002_S134_L008_R1_001.fastq.gz"
     ]).count()
     self.assertEqual(count_files, 2)
Code example #8
def generate_label(job_group_id, request):
    files = FileRepository.filter(metadata={
        'requestId': request,
        'igocomplete': True
    })
    if files:
        data = files.first().metadata
        recipe = data['recipe']
        recipe_label_event = SetLabelEvent(job_group_id, recipe).to_dict()
        send_notification.delay(recipe_label_event)
Code example #9
 def test_import_sample_six_fastq_files(self, mock_get_sample):
     mock_get_sample.return_value = MockResponse(
         json_data=self.data_6_fastq, status_code=200)
     fetch_sample_metadata('igoId_006', True, 'sampleName_006', {})
     count_files = FileRepository.filter(path_in=[
         "/path/to/sample/01/sampleName_006_IGO_igoId_006_S64_L007_R2_001.fastq.gz",
         "/path/to/sample/01/sampleName_006_IGO_igoId_006_S64_L007_R1_001.fastq.gz",
         "/path/to/sample/01/sampleName_006_IGO_igoId_006_S64_L006_R1_001.fastq.gz",
         "/path/to/sample/01/sampleName_006_IGO_igoId_006_S64_L006_R2_001.fastq.gz",
         "/path/to/sample/02/sampleName_006_IGO_igoId_006_S54_L003_R1_001.fastq.gz",
         "/path/to/sample/02/sampleName_006_IGO_igoId_006_S54_L003_R2_001.fastq.gz"
     ]).count()
     self.assertEqual(count_files, 6)
Code example #10
def generate_description(job_group, job_group_notifier, request):
    files = FileRepository.filter(metadata={
        'requestId': request,
        'igocomplete': True
    })
    if files:
        data = files.first().metadata
        request_id = data['requestId']
        recipe = data['recipe']
        a_name = data['dataAnalystName']
        a_email = data['dataAnalystEmail']
        i_name = data['investigatorName']
        i_email = data['investigatorEmail']
        l_name = data['labHeadName']
        l_email = data['labHeadEmail']
        p_email = data['piEmail']
        pm_name = data['projectManagerName']
        qc_emails = data.get('qcAccessEmails', '')

        num_samples = len(
            files.order_by().values('metadata__cmoSampleName').annotate(
                n=Count("pk")))
        num_tumors = len(
            FileRepository.filter(
                queryset=files, metadata={
                    'tumorOrNormal': 'Tumor'
                }).order_by().values('metadata__cmoSampleName').annotate(
                    n=Count("pk")))
        num_normals = len(
            FileRepository.filter(
                queryset=files, metadata={
                    'tumorOrNormal': 'Normal'
                }).order_by().values('metadata__cmoSampleName').annotate(
                    n=Count("pk")))
        operator_start_event = OperatorStartEvent(
            job_group_notifier, job_group, request_id, num_samples, recipe,
            a_name, a_email, i_name, i_email, l_name, l_email, p_email,
            pm_name, qc_emails, num_tumors, num_normals).to_dict()
        send_notification.delay(operator_start_event)
Code example #11
 def test_file_conflict(self, mock_get_sample):
     file_conflict = File.objects.create(
         path=
         "/path/to/sample/08/sampleName_002-d_IGO_igoId_002_S134_L008_R2_001.fastq.gz",
         file_type=self.fastq,
         file_group=self.file_group,
     )
     file_metadata = FileMetadata.objects.create(file=file_conflict,
                                                 version=1,
                                                 metadata={})
     mock_get_sample.return_value = MockResponse(
         json_data=self.data_2_fastq, status_code=200)
     with self.assertRaises(ErrorInconsistentDataException) as e:
         fetch_sample_metadata('igoId_002', True, 'sampleName_002', {})
     # Assert after the context manager exits; inside the block this check
     # would never run, since the raised exception aborts it.
     self.assertTrue('Conflict of fastq file(s)' in str(e.exception))
     count_files = FileRepository.filter(path_in=[
         "/path/to/sample/08/sampleName_002-d_IGO_igoId_002_S134_L008_R2_001.fastq.gz",
     ]).count()
     self.assertEqual(count_files, 1)
Code example #12
File: file_view.py (project: mskcc/beagle)
    def post(self, request):
        patch_files = request.data.get("patch_files", [])
        sid = transaction.savepoint()
        current_file_id = None
        current_file_data = None
        file_count = len(patch_files)
        try:
            for single_file_patch in patch_files:
                current_file_id = single_file_patch["id"]
                current_file_data = single_file_patch["patch"]
                f = FileRepository.get(id=current_file_id)
                serializer = UpdateFileSerializer(f.file,
                                                  data=current_file_data,
                                                  partial=True)
                if serializer.is_valid():
                    serializer.save()
                else:
                    transaction.savepoint_rollback(sid)
                    return Response(serializer.errors,
                                    status=status.HTTP_400_BAD_REQUEST)
            transaction.savepoint_commit(sid)
        except FileNotFoundException:
            transaction.savepoint_rollback(sid)
            error_message = "File {} not found".format(current_file_id)
            return Response({"details": error_message},
                            status=status.HTTP_404_NOT_FOUND)
        except IntegrityError:
            transaction.savepoint_rollback(sid)
            error_message = "Integrity error"
            return Response({"details": error_message},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        except Exception as e:
            transaction.savepoint_rollback(sid)
            error_message = "An unexpected error occured: " + repr(e)
            return Response({"details": error_message},
                            status=status.HTTP_400_BAD_REQUEST)

        success_message = "Successfully updated {} files".format(file_count)
        return Response(success_message, status=status.HTTP_200_OK)
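From the loop above, the endpoint expects a patch_files list pairing each file id with a partial update. A request body would be shaped like this (the ids and the keys inside each patch are placeholders, not fields confirmed from UpdateFileSerializer):

# Hypothetical payload for the batch-patch endpoint above.
payload = {
    "patch_files": [
        {"id": "<file-uuid-1>", "patch": {"metadata": {"igocomplete": True}}},
        {"id": "<file-uuid-2>", "patch": {"path": "/new/path/sample_R1.fastq.gz"}},
    ]
}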
Code example #13
File: file_view.py (project: mskcc/beagle)
 def list(self, request, *args, **kwargs):
     query_list_types = [
         "file_group",
         "path",
         "metadata",
         "metadata_regex",
         "filename",
         "file_type",
         "values_metadata",
         "exclude_null_metadata",
     ]
     fixed_query_params = fix_query_list(request.query_params,
                                         query_list_types)
     serializer = FileQuerySerializer(data=fixed_query_params)
     if serializer.is_valid():
         queryset = FileRepository.all()
         queryset = time_filter(FileMetadata,
                                request.query_params,
                                time_modal="modified_date",
                                previous_queryset=queryset)
         file_group = fixed_query_params.get("file_group")
         path = fixed_query_params.get("path")
         metadata = fixed_query_params.get("metadata")
         metadata_regex = fixed_query_params.get("metadata_regex")
         path_regex = fixed_query_params.get("path_regex")
         filename = fixed_query_params.get("filename")
         filename_regex = fixed_query_params.get("filename_regex")
         file_type = fixed_query_params.get("file_type")
         values_metadata = fixed_query_params.get("values_metadata")
         exclude_null_metadata = fixed_query_params.get(
             "exclude_null_metadata")
         order_by = fixed_query_params.get("order_by")
         distinct_metadata = fixed_query_params.get("distinct_metadata")
         kwargs = {"queryset": queryset}
         if file_group:
             if len(file_group) == 1:
                 kwargs["file_group"] = file_group[0]
             else:
                 kwargs["file_group_in"] = file_group
         if path:
             if len(path) == 1:
                 kwargs["path"] = path[0]
             else:
                 kwargs["path_in"] = path
         if metadata:
             filter_query = dict()
             for val in metadata:
                 k, v = val.split(":")
                 metadata_field = k.strip()
                 if metadata_field not in filter_query:
                     filter_query[metadata_field] = [v.strip()]
                 else:
                     filter_query[metadata_field].append(v.strip())
             if filter_query:
                 kwargs["metadata"] = filter_query
         if metadata_regex:
             filter_query = []
             for single_regex_query in metadata_regex:
                 single_value = single_regex_query.split("|")
                 single_regex_filters = []
                 for val in single_value:
                     k, v = val.split(":")
                     single_regex_filters.append((k.strip(), v.strip()))
                 filter_query.append(single_regex_filters)
             if filter_query:
                 kwargs["metadata_regex"] = filter_query
         if path_regex:
             kwargs["path_regex"] = path_regex
         if filename:
             if len(filename) == 1:
                 kwargs["file_name"] = filename[0]
             else:
                 kwargs["file_name_in"] = filename
         if filename_regex:
             kwargs["file_name_regex"] = filename_regex
         if file_type:
             if len(file_type) == 1:
                 kwargs["file_type"] = file_type[0]
             else:
                 kwargs["file_type_in"] = file_type
         if exclude_null_metadata:
             kwargs["exclude"] = exclude_null_metadata
         if order_by:
             kwargs["order_by"] = order_by
         if distinct_metadata:
             kwargs["distinct"] = distinct_metadata
         if values_metadata:
             if len(values_metadata) == 1:
                 kwargs["values_metadata"] = values_metadata[0]
             else:
                 kwargs["values_metadata_list"] = values_metadata
         try:
             queryset = FileRepository.filter(**kwargs)
         except Exception as e:
             return Response({"details": str(e)},
                             status=status.HTTP_400_BAD_REQUEST)
         page = self.paginate_queryset(queryset)
         if page is not None:
             if values_metadata:
                 return self.get_paginated_response(page)
             serializer = FileSerializer(page,
                                         many=True,
                                         context={"request": request})
             return self.get_paginated_response(serializer.data)
         # paginate_queryset returns None when pagination is disabled;
         # fall back to a plain response so the view never implicitly
         # returns None.
         if values_metadata:
             return Response(list(queryset), status=status.HTTP_200_OK)
         serializer = FileSerializer(queryset,
                                     many=True,
                                     context={"request": request})
         return Response(serializer.data, status=status.HTTP_200_OK)
     else:
         return Response(serializer.errors,
                         status=status.HTTP_400_BAD_REQUEST)
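fix_query_list is not shown on this page; judging from how the result is used (len(file_group) == 1, iteration over metadata), it appears to normalize Django's QueryDict so that each key in query_list_types always maps to a list. A minimal sketch under that assumption:

def fix_query_list(query_params, query_list_types):
    # Hypothetical reconstruction: force the listed keys to lists and
    # copy everything else through as scalars.
    fixed = {}
    for key in query_params.keys():
        if key in query_list_types:
            fixed[key] = query_params.getlist(key)
        else:
            fixed[key] = query_params.get(key)
    return fixed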
Code example #14
    def _generate_ticket_decription(self):
        samples_completed = set()
        samples_failed = set()
        all_jobs = []
        request_jobs = []
        sample_jobs = []
        pooled_normal_jobs = []

        jobs = Job.objects.filter(job_group=self.job.job_group.id).all()

        for job in jobs:
            if job.run == TYPES['SAMPLE']:
                if job.status == JobStatus.COMPLETED:
                    samples_completed.add(job.args['sample_id'])
                elif job.status == JobStatus.FAILED:
                    samples_failed.add(job.args['sample_id'])

            if job.run == TYPES['SAMPLE']:
                sample_jobs.append((str(job.id), JobStatus(job.status).name, self.get_key(job.run), job.message or "",
                                   job.args.get('sample_id', '')))
            elif job.run == TYPES['REQUEST']:
                request_jobs.append(
                    (str(job.id), '', self.get_key(job.run), job.message or "", ''))
            elif job.run == TYPES['POOLED_NORMAL']:
                pooled_normal_jobs.append(
                    (str(job.id), JobStatus(job.status).name, self.get_key(job.run), job.message or "",
                     job.args.get('sample_id', '')))

        all_jobs.extend(request_jobs)
        all_jobs.extend(sample_jobs)
        all_jobs.extend(pooled_normal_jobs)

        request_metadata = Job.objects.filter(args__request_id=self.job.args['request_id'],
                                              run=TYPES['SAMPLE']).order_by('-created_date').first()

        number_of_tumors = FileRepository.filter(
            metadata={'requestId': self.job.args['request_id'], 'tumorOrNormal': 'Tumor'}, values_metadata='sampleId').count()
        number_of_normals = FileRepository.filter(
            metadata={'requestId': self.job.args['request_id'], 'tumorOrNormal': 'Normal'}, values_metadata='sampleId').count()

        data_analyst_email = ""
        data_analyst_name = ""
        investigator_email = ""
        investigator_name = ""
        lab_head_email = ""
        lab_head_name = ""
        pi_email = ""
        project_manager_name = ""
        recipe = ""
        qc_access_emails = ""

        if request_metadata:
            metadata = request_metadata.args.get('request_metadata', {})
            recipe = metadata['recipe']
            data_analyst_email = metadata['dataAnalystEmail']
            data_analyst_name = metadata['dataAnalystName']
            investigator_email = metadata['investigatorEmail']
            investigator_name = metadata['investigatorName']
            lab_head_email = metadata['labHeadEmail']
            lab_head_name = metadata['labHeadName']
            pi_email = metadata['piEmail']
            project_manager_name = metadata['projectManagerName']
            qc_access_emails = metadata['qcAccessEmails']

        event = ETLImportEvent(str(self.job.job_group_notifier.id),
                               str(self.job.job_group.id),
                               self.job.args['request_id'],
                               list(samples_completed),
                               list(samples_failed),
                               recipe,
                               data_analyst_email,
                               data_analyst_name,
                               investigator_email,
                               investigator_name,
                               lab_head_email,
                               lab_head_name,
                               pi_email,
                               project_manager_name,
                               qc_access_emails,
                               number_of_tumors,
                               number_of_normals,
                               len(pooled_normal_jobs)
                               )
        e = event.to_dict()
        send_notification.delay(e)

        etl_event = ETLJobsLinksEvent(str(self.job.job_group_notifier.id),
                                      self.job.args['request_id'],
                                      all_jobs)
        etl_e = etl_event.to_dict()
        send_notification.delay(etl_e)
Code example #15
    def _generate_ticket_decription(self):
        samples_completed = set()
        samples_failed = set()
        all_jobs = []
        request_jobs = []
        sample_jobs = []
        pooled_normal_jobs = []

        jobs = Job.objects.filter(job_group=self.job.job_group.id).all()

        for job in jobs:
            if job.run == TYPES["SAMPLE"]:
                if job.status == JobStatus.COMPLETED:
                    samples_completed.add(job.args["sample_id"])
                elif job.status == JobStatus.FAILED:
                    samples_failed.add(job.args["sample_id"])

            if job.run == TYPES["SAMPLE"]:
                sample_jobs.append((
                    str(job.id),
                    JobStatus(job.status).name,
                    self.get_key(job.run),
                    job.message or "",
                    job.args.get("sample_id", ""),
                ))
            elif job.run == TYPES["REQUEST"]:
                request_jobs.append(
                    (str(job.id), "", self.get_key(job.run),
                     job.message or "", ""))
            elif job.run == TYPES["POOLED_NORMAL"]:
                pooled_normal_jobs.append((
                    str(job.id),
                    JobStatus(job.status).name,
                    self.get_key(job.run),
                    job.message or "",
                    job.args.get("sample_id", ""),
                ))

        all_jobs.extend(request_jobs)
        all_jobs.extend(sample_jobs)
        all_jobs.extend(pooled_normal_jobs)

        request_metadata = (Job.objects.filter(
            args__request_id=self.job.args["request_id"],
            run=TYPES["SAMPLE"]).order_by("-created_date").first())

        number_of_tumors = FileRepository.filter(
            metadata={
                "requestId": self.job.args["request_id"],
                "tumorOrNormal": "Tumor"
            },
            values_metadata="sampleId").count()
        number_of_normals = FileRepository.filter(
            metadata={
                "requestId": self.job.args["request_id"],
                "tumorOrNormal": "Normal"
            },
            values_metadata="sampleId").count()

        data_analyst_email = ""
        data_analyst_name = ""
        investigator_email = ""
        investigator_name = ""
        lab_head_email = ""
        lab_head_name = ""
        pi_email = ""
        project_manager_name = ""
        recipe = ""
        qc_access_emails = ""
        data_access_emails = ""
        other_contact_emails = ""

        if request_metadata:
            metadata = request_metadata.args.get("request_metadata", {})
            recipe = metadata["recipe"]
            data_analyst_email = metadata["dataAnalystEmail"]
            data_analyst_name = metadata["dataAnalystName"]
            investigator_email = metadata["investigatorEmail"]
            investigator_name = metadata["investigatorName"]
            lab_head_email = metadata["labHeadEmail"]
            lab_head_name = metadata["labHeadName"]
            pi_email = metadata["piEmail"]
            project_manager_name = metadata["projectManagerName"]
            qc_access_emails = metadata["qcAccessEmails"]
            data_access_emails = metadata["dataAccessEmails"]
            other_contact_emails = metadata["otherContactEmails"]

        event = ETLImportEvent(
            str(self.job.job_group_notifier.id),
            str(self.job.job_group.id),
            self.job.args["request_id"],
            list(samples_completed),
            list(samples_failed),
            recipe,
            data_analyst_email,
            data_analyst_name,
            investigator_email,
            investigator_name,
            lab_head_email,
            lab_head_name,
            pi_email,
            project_manager_name,
            qc_access_emails,
            number_of_tumors,
            number_of_normals,
            len(pooled_normal_jobs),
            data_access_emails,
            other_contact_emails,
        )
        e = event.to_dict()
        send_notification.delay(e)

        etl_event = ETLJobsLinksEvent(str(self.job.job_group_notifier.id),
                                      self.job.args["request_id"], all_jobs)
        etl_e = etl_event.to_dict()
        send_notification.delay(etl_e)