def upload_metadata_ms_run(request):
    """Provide autocomplete data for the MS-run metadata upload form.

    Pre-fills "run" from the request parameters (used when forwarded from
    the orphan-run table on the home page) and collects distinct values of
    several MsRun/Source columns for client-side autocompletion.

    Returns a dict for the renderer, or a 500 plain-text Response on
    database errors.
    """
    result_dict = dict()
    # fill out the "filename" if forwarded for orphan run table on home
    if "run" in request.params:
        result_dict["run"] = request.params["run"]
    else:
        result_dict["run"] = ""
    try:
        # query data for autocomplete; GROUP BY yields the distinct values
        allowed_elements = {"used_share": MsRun.used_share, "source_id": Source.source_id,
                            "sample_mass": MsRun.sample_mass, "sample_volume": MsRun.sample_volume,
                            "antibody_set": MsRun.antibody_set, "antibody_mass": MsRun.antibody_mass}

        # .items() instead of Py2-only .iteritems() (works on both 2 and 3)
        for k, v in allowed_elements.items():
            query = DBSession.query(v)
            query = query.group_by(v)
            query_result = js_list_creator(query.all())
            result_dict[k] = query_result

        # filenames restricted to orphan (no source) non-trashed runs
        allowed_elements = {"filename": MsRun.filename}
        for k, v in allowed_elements.items():
            query = DBSession.query(v)
            query = query.filter(MsRun.source_source_id == None)
            query = query.filter(MsRun.flag_trash == 0)
            query = query.group_by(v)
            query_result = js_list_creator(query.all())
            result_dict[k] = query_result
    except Exception:
        # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt propagate
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return result_dict
# Beispiel #2
def unblacklist_ms_run(request):
    """Provide autocomplete data for the un-blacklist (restore) form.

    Pre-fills "run" from the request parameters and collects distinct
    persons plus the filenames of currently blacklisted (trashed) runs.

    Returns a dict for the renderer, or a 500 plain-text Response on
    database errors.
    """
    result_dict = dict()
    if "run" in request.params:
        result_dict["run"] = request.params["run"]
    else:
        result_dict["run"] = ""
    try:
        # Query Data for autocomplete

        # Person
        query = DBSession.query(Source.person.distinct())
        person = js_list_creator(query.all())
        result_dict["person"] = person

        # MS Runs: only trashed runs are candidates for restoring
        query = DBSession.query(MsRun.filename.distinct())
        query = query.filter(MsRun.flag_trash == 1)
        filename = js_list_creator(query.all())
        result_dict["filename"] = filename

    except Exception:
        # narrowed from a bare "except:"
        return Response(conn_err_msg,
                        content_type='text/plain',
                        status_int=500)
    return result_dict
# Beispiel #3
def protein_page(request):
    """Render data for a single protein page.

    Looks up the protein named in the URL, collects the distinct peptide
    sequences observed for it (only hits with an assigned source from
    non-trashed runs), and maps each peptide onto its position within the
    protein sequence for the coverage display.
    """
    try:
        query = DBSession.query(Protein.name,
                                Protein.organism,
                                Protein.description,
                                Protein.sequence,
                                Protein.gene_name)
        query = query.filter(Protein.name == request.matchdict["protein"])
        temp_statistics = query.all()
        statistics = json.dumps(temp_statistics)

        query = DBSession.query(SpectrumHit.sequence.distinct())
        query = query.join(t_spectrum_protein_map)
        query = query.join(Protein)
        query = query.join(MsRun)
        query = query.filter(Protein.name == request.matchdict["protein"])
        query = query.filter(SpectrumHit.source_source_id != None)
        query = query.filter(MsRun.flag_trash == 0)
        sequences = query.all()

        # Guard against an unknown protein: previously temp_statistics[0]
        # raised IndexError, which the bare except misreported as a DB error.
        protein_sequence = temp_statistics[0][3] if temp_statistics else ""
        sequence_start = list()
        sequence_end = list()
        for seq in sequences:
            pos = protein_sequence.find(seq[0])
            if pos > -1:
                sequence_start.append(pos)
                sequence_end.append(pos + len(seq[0]))
        sequence_start = json.dumps(sequence_start)
        sequence_end = json.dumps(sequence_end)
        sequences = js_list_creator_dataTables(sequences)

    except Exception:
        # narrowed from a bare "except:"
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {"statistics": statistics,
            "protein": request.matchdict["protein"],
            "sequence_start": sequence_start, "sequence_end": sequence_end, "sequences": sequences}
# Beispiel #4
def msrun_page(request):
    """Render statistics and metadata for a single MS run.

    Computes hit / distinct-peptide / distinct-protein counts for the run
    id in the URL, then fetches the run's source metadata including the
    aggregated HLA typing string.
    """
    try:
        query = DBSession.query(func.count(SpectrumHit.spectrum_hit_id).label("count_hits"),
                                func.count(SpectrumHit.sequence.distinct()).label("count_pep"),
                                func.count(Protein.name.distinct()).label("count_prot")
        )

        query = query.join(MsRun, SpectrumHit.ms_run_ms_run_id == MsRun.ms_run_id)
        query = query.join(t_spectrum_protein_map)
        query = query.join(Protein)
        query = query.filter(MsRun.ms_run_id == request.matchdict["msrun"])
        statistics = json.dumps(query.all())

        # run + source metadata; ms_run_date must be cast to String so the
        # row is serializable by json.dumps
        query = DBSession.query(MsRun.filename,
                                func.group_concat(
                                    (HlaType.hla_string.distinct().op('order by')(HlaType.hla_string))).label(
                                    'hla_typing'),
                                Source.histology, Source.source_id, Source.patient_id, Source.organ,
                                Source.comment, Source.dignity, Source.celltype, Source.location,
                                Source.metastatis, Source.person, Source.organism, Source.treatment, Source.comment.label("comment"),
                                func.cast(MsRun.ms_run_date, String).label("ms_run_date"), MsRun.used_share,
                                MsRun.comment.label("msrun_comment"),
                                MsRun.sample_mass, MsRun.sample_volume, MsRun.antibody_set,
                                MsRun.antibody_mass)
        query = query.join(Source)
        query = query.join(t_hla_map)
        query = query.join(HlaType)
        query = query.filter(MsRun.ms_run_id == request.matchdict["msrun"])
        metadata = json.dumps(query.all())

    except Exception:
        # narrowed from a bare "except:"
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {"statistics": statistics, "metadata": metadata, "msrun": request.matchdict["msrun"]}
def blacklist_ms_run(request):
    """Provide autocomplete data for the blacklist form.

    Pre-fills "run" from the request parameters and collects distinct
    persons, the filenames of non-trashed runs, and previously used
    trash reasons.

    Returns a dict for the renderer, or a 500 plain-text Response on
    database errors.
    """
    result_dict = dict()
    if "run" in request.params:
        result_dict["run"] = request.params["run"]
    else:
        result_dict["run"] = ""
    try:

        # Query Data for autocomplete
        # Person
        query = DBSession.query(Source.person.distinct())
        person = js_list_creator(query.all())
        result_dict["person"] = person

        # MS Runs: only runs not yet trashed can be blacklisted
        query = DBSession.query(MsRun.filename.distinct())
        query = query.filter(MsRun.flag_trash == 0)
        filename = js_list_creator(query.all())
        result_dict["filename"] = filename

        # Reason
        query = DBSession.query(MsRun.trash_reason.distinct())
        trash_reason = js_list_creator(query.all())
        result_dict["trash_reason"] = trash_reason

    except Exception:
        # narrowed from a bare "except:"
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return result_dict
def blacklist_ms_run_post(request):
    """Blacklist the MS runs posted as a Python-literal list in "ms_runs".

    Each entry with a real filename is written to the audit log, its
    flag_trash set and its metadata cleared; every update is committed
    immediately.  Returns an empty dict, or a 500 Response on errors.
    """
    try:
        # literal_eval (not eval) keeps the posted payload inert
        blacklist = ast.literal_eval(request.params["ms_runs"])

        for row in blacklist:
            if row['filename'] != " ":
                # audit trail: timestamp, filename, acting person, reason
                log_writer("blacklist",
                           strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                           row['filename'] + "\t" +
                           row['person'] + "\t" +
                           row['trash_reason']
                           )

                # flag the run and wipe its metadata so it no longer shows up
                DBSession.query(MsRun).filter(MsRun.filename == row['filename']).update(
                    {"flag_trash": 1,
                     'trash_reason': row['trash_reason'],
                     'ms_run_date': None,
                     'used_share': None,
                     'comment': None,
                     'sample_mass': None,
                     'antibody_set': '',
                     'antibody_mass': None,
                     'sample_volume': None
                    })

                transaction.commit()
    except Exception:
        # narrowed from a bare "except:"
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return dict()
# Beispiel #7
def source_page(request):
    """Render statistics, aggregated metadata and runs for one source.

    The metadata query group_concats every descriptive column per
    patient_id because one patient can map to several Source rows.
    """
    try:
        # Catch if there are no peptides!!
        query = DBSession.query(func.count(SpectrumHit.spectrum_hit_id).label("count_hits"),
                                func.count(SpectrumHit.sequence.distinct()).label("count_pep"),
                                func.count(Protein.name.distinct()).label("count_prot")
        )
        query = query.join(Source)
        query = query.join(t_spectrum_protein_map)
        query = query.join(Protein)
        query = query.filter(Source.patient_id == request.matchdict["source"])
        statistics = json.dumps(query.all())

        # aggregate all descriptive columns per patient via group_concat
        query = DBSession.query(Source.patient_id,
                                func.group_concat(Source.histology.distinct().op('order by')(Source.histology)).label(
                                    'histology'),
                                func.group_concat(Source.source_id.distinct().op('order by')(Source.source_id)).label(
                                    'source_id'),
                                func.group_concat(Source.organ.distinct().op('order by')(Source.organ)).label(
                                    'organ'), func.group_concat(
                (Source.comment.distinct().op('order by')(Source.comment))).label(
                'comment'), func.group_concat(
                (Source.dignity.distinct().op('order by')(Source.dignity))).label(
                'dignity'),
                                func.group_concat(
                                    (Source.celltype.distinct().op('order by')(Source.celltype))).label(
                                    'celltype')
                                , func.group_concat(
                (Source.location.distinct().op('order by')(Source.location))).label(
                'location')
                                , func.group_concat(
                (Source.metastatis.distinct().op('order by')(Source.metastatis))).label(
                'metastatis'),
                                func.group_concat(
                                    (Source.person.distinct().op('order by')(Source.person))).label(
                                    'person'),
                                func.group_concat(
                                    (Source.organism.distinct().op('order by')(Source.organism))).label(
                                    'organism'),
                                func.group_concat(
                                    (HlaType.hla_string.distinct().op('order by')(HlaType.hla_string))).label(
                                    'hla_typing'),
                                func.group_concat(
                                    (Source.treatment.distinct().op('order by')(Source.treatment))).label(
                                    'treatment')
        )
        query = query.join(t_hla_map)
        query = query.join(HlaType)
        query = query.filter(Source.patient_id == request.matchdict["source"])
        query = query.group_by(Source.patient_id)
        metadata = json.dumps(query.all())

        # all MS runs belonging to this patient
        query = DBSession.query(MsRun.ms_run_id, MsRun.filename).join(Source).filter(
            Source.patient_id == request.matchdict["source"])
        runs = json.dumps(query.all())

    except Exception:
        # narrowed from a bare "except:"
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {"statistic": statistics, "metadata": metadata, "runs": runs, "source": request.matchdict["source"]}
def update_metadata_source(request):
    """Render the source-metadata update form.

    With an "id" request parameter: returns the current metadata and HLA
    typing of that source plus autocomplete lists for every editable
    column.  Without it: renders the source-picker page with an explicit
    template and returns it as a Response.
    """
    if "id" in request.params:
        result_dict = dict()
        result_dict["id"] = request.params['id']
        # current values; prep_date cast to String for json.dumps
        query = DBSession.query(Source.patient_id, Source.organ,
                                Source.organism, Source.histology,
                                Source.dignity, Source.celltype,
                                Source.location, Source.metastatis,
                                Source.treatment, Source.person,
                                func.cast(Source.prep_date, String).label("prep_date")
        ).filter(Source.source_id == request.params["id"])

        source = json.dumps(query.all())
        result_dict['source'] = source
        # HLA typing of the selected source
        query = DBSession.query(Source.source_id, HlaType.hla_string).join(t_hla_map).join(HlaType).filter(Source.source_id == request.params["id"])
        hla = json.dumps(query.all())
        result_dict['hla'] = hla

        # getting autocomplete items
        allowed_elements = {"patient_id": Source.patient_id, "organ": Source.organ,
                            "organism": Source.organism, "histology": Source.histology,
                            "dignity": Source.dignity, "celltype": Source.celltype,
                            "location": Source.location, "metastatis": Source.metastatis,
                            "treatment": Source.treatment, "person": Source.person,
                            "comment": Source.comment, "typing": HlaType.hla_string}
        # .items() instead of Py2-only .iteritems()
        for k, v in allowed_elements.items():
            query = DBSession.query(v)
            query = query.group_by(v)
            query_result = js_list_creator(query.all())
            result_dict[k] = query_result

        return result_dict

    else:
        try:
            # query data for autocomplete
            result_dict = dict()
            allowed_elements = {"source_id": Source.source_id}

            for k, v in allowed_elements.items():
                query = DBSession.query(v)
                query = query.group_by(v)
                query_result = js_list_creator(query.all())
                result_dict[k] = query_result
            # setting a different renderer
            result = render('../templates/upload_templates/update_metadata_source.pt',
                            result_dict,
                            request=request)
            response = Response(result)
            return response
        except Exception:
            # narrowed from a bare "except:"
            return Response(conn_err_msg, content_type='text/plain', status_int=500)
# Beispiel #9
def update_metadata_msrun(request):
    """Render the MS-run metadata update form.

    With a "filename" request parameter: returns the current metadata of
    that run plus autocomplete lists for every editable column.  Without
    it: renders the run-picker page with an explicit template.

    NOTE(review): a second def with this name appears later in the module
    and shadows this one at import time — confirm which is intended.
    """
    if "filename" in request.params:
        result_dict = dict()
        result_dict["filename"] = request.params['filename']
        # current values; ms_run_date cast to String for json.dumps
        query = DBSession.query(
            MsRun.filename, MsRun.used_share, MsRun.comment,
            MsRun.source_source_id, MsRun.sample_mass, MsRun.sample_volume,
            MsRun.antibody_set, MsRun.antibody_mass,
            func.cast(MsRun.ms_run_date, String).label("ms_run_date")).filter(
                MsRun.filename == request.params["filename"])

        ms_run = json.dumps(query.all())
        result_dict['ms_run'] = ms_run

        # getting autocomplete items
        allowed_elements = {
            "used_share": MsRun.used_share,
            "comment": MsRun.comment,
            "sample_mass": MsRun.sample_mass,
            "antibody_set": MsRun.antibody_set,
            "antibody_mass": MsRun.antibody_mass,
            "sample_volume": MsRun.sample_volume,
            'source_source_id': Source.source_id
        }
        # .items() instead of Py2-only .iteritems()
        for k, v in allowed_elements.items():
            query = DBSession.query(v)
            query = query.group_by(v)
            query_result = js_list_creator(query.all())
            result_dict[k] = query_result

        return result_dict

    else:
        try:
            # query data for autocomplete (non-trashed runs only)
            result_dict = dict()
            result_dict["filename"] = js_list_creator(
                DBSession.query(MsRun.filename).filter(
                    MsRun.flag_trash == 0).group_by(MsRun.filename).all())

            # setting a different renderer
            result = render(
                '../templates/upload_templates/update_metadata_msrun.pt',
                result_dict,
                request=request)
            response = Response(result)
            return response
        except Exception:
            # narrowed from a bare "except:"
            return Response(conn_err_msg,
                            content_type='text/plain',
                            status_int=500)
def unblacklist_ms_run_post(request):
    """Restore blacklisted MS runs posted as a Python-literal list in "ms_runs".

    Each entry with a real filename is written to the audit log and its
    flag_trash/trash_reason reset; every update is committed immediately.
    Returns an empty dict, or a 500 Response on errors.
    """
    try:
        # literal_eval (not eval) keeps the posted payload inert
        unblacklist = ast.literal_eval(request.params["ms_runs"])

        for row in unblacklist:
            if row['filename'] != " ":
                # audit trail: timestamp, filename and acting person
                log_writer("unblacklist", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + row['filename'] + "\t" + row['person'])
                DBSession.query(MsRun).filter(MsRun.filename == row['filename']).update({"flag_trash": 0, 'trash_reason': None})
                transaction.commit()

    except Exception:
        # narrowed from a bare "except:"; also fixes the mis-indented return
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return dict()
# Beispiel #11
def treatment_page(request):
    """Render sources and peptide count for one treatment.

    Lists all sources with the treatment given in the URL and the number
    of distinct peptide sequences observed for them.
    """
    try:
        query = DBSession.query(Source.organ, Source.source_id,
                                Source.treatment, Source.patient_id)
        query = query.filter(Source.treatment == request.matchdict["treatment"])
        sources = json.dumps(query.all())

        query = DBSession.query(func.count(SpectrumHit.sequence.distinct()).label("pep_count"))
        query = query.join(Source)
        query = query.filter(Source.treatment == request.matchdict["treatment"])
        statistic = json.dumps(query.all())
    except Exception:
        # narrowed from a bare "except:"
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {"sources": sources, "treatment": request.matchdict["treatment"], "statistic": statistic}
# Beispiel #12
def upload_metadata_source(request):
    """Provide autocomplete data for the source metadata upload form.

    Collects distinct values of every editable Source column (plus HLA
    typing strings) for client-side autocompletion.

    Returns a dict for the renderer, or a 500 plain-text Response on
    database errors.
    """
    try:
        # query data for autocomplete
        result_dict = dict()
        allowed_elements = {
            "patient_id": Source.patient_id,
            "organ": Source.organ,
            "organism": Source.organism,
            "histology": Source.histology,
            "dignity": Source.dignity,
            "celltype": Source.celltype,
            "location": Source.location,
            "metastatis": Source.metastatis,
            "treatment": Source.treatment,
            "person": Source.person,
            "typing": HlaType.hla_string,
            'comment': Source.comment
        }

        # .items() instead of Py2-only .iteritems(); GROUP BY = distinct values
        for k, v in allowed_elements.items():
            query = DBSession.query(v)
            query = query.group_by(v)
            query_result = js_list_creator(query.all())
            result_dict[k] = query_result
    except Exception:
        # narrowed from a bare "except:"
        return Response(conn_err_msg,
                        content_type='text/plain',
                        status_int=500)
    return result_dict
# Beispiel #13
def person_page(request):
    """Render all sources and MS runs belonging to one person."""
    try:
        query = DBSession.query(Source.histology, Source.source_id, Source.patient_id, Source.organ,
                                Source.comment, Source.dignity, Source.celltype, Source.location,
                                Source.metastatis, Source.person, Source.organism)
        query = query.filter(Source.person == request.matchdict["person"])
        query = query.group_by(Source.source_id)
        sources = json.dumps(query.all())

        query = DBSession.query(MsRun.ms_run_id, MsRun.filename).join(Source).filter(
            Source.person == request.matchdict["person"])
        runs = json.dumps(query.all())

    except Exception:
        # narrowed from a bare "except:"
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {"sources": sources, "runs": runs, "person": request.matchdict["person"]}
def venn_analysis(request):
    """Provide autocomplete lists for the Venn-analysis page.

    Collects distinct patient ids, source ids, run filenames and antibody
    sets for the client-side selection widgets.
    """
    try:
        # getting source patient_id for autocomplete
        query = DBSession.query(Source.patient_id.distinct())
        patient_id = js_list_creator(query.all())
        # getting source.source_id for auto complete
        query = DBSession.query(Source.source_id.distinct())
        source_id = js_list_creator(query.all())
        # getting ms_runs for autocomplete
        query = DBSession.query(MsRun.filename.distinct())
        ms_runs = js_list_creator(query.all())
        # Antibody for autocomplete
        query = DBSession.query(MsRun.antibody_set.distinct()).order_by(MsRun.antibody_set)
        antibody = js_list_creator(query.all())
    except DBAPIError:
        # re-enabled: this handler was commented out, leaving DB errors to
        # surface unhandled instead of the 500 every sibling view returns
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {"patient_id": patient_id, "source_id": source_id, "ms_runs": ms_runs, "antibody": antibody}
def source_overview(request):
    """Return every source with its descriptive metadata as a JSON string."""
    try:
        # TODO: Update
        # prep_date is cast to String so json.dumps can serialize the rows
        columns = (Source.source_id, Source.patient_id, Source.organ,
                   Source.dignity, Source.celltype, Source.histology,
                   Source.location, Source.metastatis, Source.organism,
                   Source.treatment, Source.person,
                   func.cast(Source.prep_date, String).label("prep_date"))
        rows = DBSession.query(*columns).all()
        your_json = json.dumps(rows)
    except DBAPIError:
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {'project': your_json}
def orphan_run_overview(request):
    """Return MS runs with no assigned source that are not trashed."""
    try:
        # get orphan runs (one row per ms_run_id)
        orphan_query = (DBSession.query(MsRun.ms_run_id, MsRun.filename)
                        .filter(MsRun.source_source_id == None)
                        .filter(MsRun.flag_trash == 0)
                        .group_by(MsRun.ms_run_id))
        your_json = json.dumps(orphan_query.all())

    except DBAPIError:
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {'project': your_json}
def update_metadata_msrun(request):
    """Render the MS-run metadata update form.

    With a "filename" request parameter: returns the current metadata of
    that run plus autocomplete lists for every editable column.  Without
    it: renders the run-picker page with an explicit template.

    NOTE(review): duplicates an earlier def of the same name in this
    module (this one wins at import time) — confirm and remove one.
    """
    if "filename" in request.params:
        result_dict = dict()
        result_dict["filename"] = request.params['filename']
        # current values; ms_run_date cast to String for json.dumps
        query = DBSession.query(MsRun.filename, MsRun.used_share,
                                MsRun.comment, MsRun.source_source_id,
                                MsRun.sample_mass, MsRun.sample_volume,
                                MsRun.antibody_set, MsRun.antibody_mass,
                                func.cast(MsRun.ms_run_date, String).label("ms_run_date")
        ).filter(MsRun.filename == request.params["filename"])

        ms_run = json.dumps(query.all())
        result_dict['ms_run'] = ms_run

        # getting autocomplete items
        allowed_elements = {"used_share": MsRun.used_share, "comment": MsRun.comment,
                            "sample_mass": MsRun.sample_mass, "antibody_set": MsRun.antibody_set,
                            "antibody_mass": MsRun.antibody_mass, "sample_volume": MsRun.sample_volume, 'source_source_id': Source.source_id}
        # .items() instead of Py2-only .iteritems()
        for k, v in allowed_elements.items():
            query = DBSession.query(v)
            query = query.group_by(v)
            query_result = js_list_creator(query.all())
            result_dict[k] = query_result

        return result_dict

    else:
        try:
            # query data for autocomplete (non-trashed runs only)
            result_dict = dict()
            result_dict["filename"] = js_list_creator(DBSession.query(MsRun.filename).filter(MsRun.flag_trash == 0).group_by(MsRun.filename).all())

            # setting a different renderer
            result = render('../templates/upload_templates/update_metadata_msrun.pt',
                            result_dict,
                            request=request)
            response = Response(result)
            return response
        except Exception:
            # narrowed from a bare "except:"
            return Response(conn_err_msg, content_type='text/plain', status_int=500)
def run_overview(request):
    """Return every MS run joined with its source as a JSON string."""
    try:
        # ms_run_date has to be cast to string, otherwise json.dumps can
        # not create the json object
        run_query = DBSession.query(
            MsRun.ms_run_id, MsRun.filename, Source.patient_id,
            Source.organ, Source.dignity,
            func.cast(MsRun.ms_run_date, String).label("ms_run_date"),
            MsRun.antibody_set, MsRun.used_share, MsRun.sample_mass)
        run_query = run_query.join(Source).group_by(MsRun.ms_run_id)
        your_json = json.dumps(run_query.all())

    except DBAPIError:
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {'project': your_json}
# Beispiel #19
def hla_page(request):
    """Render sources and peptide count for one HLA typing string."""
    try:
        query = DBSession.query(Source.organ, Source.source_id, Source.dignity,
                                Source.histology, Source.patient_id)
        query = query.join(t_hla_map)
        query = query.join(HlaType)
        query = query.filter(HlaType.hla_string == request.matchdict["hla"])
        # * --> %2A cause * is reserved character
        # : --> %3A
        sources = json.dumps(query.all())

        query = DBSession.query(func.count(SpectrumHit.sequence.distinct()).label("pep_count"))
        query = query.join(Source)
        query = query.join(t_hla_map)
        query = query.join(HlaType)
        query = query.filter(HlaType.hla_string == request.matchdict["hla"])
        statistic = json.dumps(query.all())

    except Exception:
        # narrowed from a bare "except:"
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {"sources": sources, "hla": request.matchdict["hla"], "statistic": statistic}
# Beispiel #20
def blacklist_ms_run_post(request):
    """Blacklist the MS runs posted as a Python-literal list in "ms_runs".

    Each entry with a real filename is written to the audit log, its
    flag_trash set and its metadata cleared; every update is committed
    immediately.  Returns an empty dict, or a 500 Response on errors.

    NOTE(review): duplicates an earlier def of the same name in this
    module — confirm and remove one.
    """
    try:
        # literal_eval (not eval) keeps the posted payload inert
        blacklist = ast.literal_eval(request.params["ms_runs"])

        for row in blacklist:
            if row['filename'] != " ":
                # audit trail: timestamp, filename, acting person, reason
                log_writer(
                    "blacklist",
                    strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                    row['filename'] + "\t" + row['person'] + "\t" +
                    row['trash_reason'])

                # flag the run and wipe its metadata
                DBSession.query(MsRun).filter(
                    MsRun.filename == row['filename']).update({
                        "flag_trash": 1,
                        'trash_reason': row['trash_reason'],
                        'ms_run_date': None,
                        'used_share': None,
                        'comment': None,
                        'sample_mass': None,
                        'antibody_set': '',
                        'antibody_mass': None,
                        'sample_volume': None
                    })

                transaction.commit()
    except Exception:
        # narrowed from a bare "except:"
        return Response(conn_err_msg,
                        content_type='text/plain',
                        status_int=500)
    return dict()
# Beispiel #21
def my_view(request):
    """Build the statistics shown on the main (home) page.

    Collects run/source/trash counts, the ten most recent orphan runs,
    and an organ histogram capped at six entries plus an "others" bucket.
    """
    try:
        # query statistics for the main page
        result_dict = dict()
        result_dict["orphan_msrun_count"] = \
            DBSession.query(func.count(distinct(MsRun.filename))).filter(MsRun.source_source_id == None).filter(MsRun.flag_trash == 0).one()[0]
        result_dict["all_msrun_count"] = DBSession.query(func.count(distinct(MsRun.filename))).one()[0]
        result_dict["sources_count"] = DBSession.query(func.count(distinct(Source.sample_id))).one()[0]
        result_dict["trash_count"] = DBSession.query(func.count(distinct(MsRun.filename))).filter(MsRun.flag_trash == 1).one()[0]

        # ten most recent orphan runs for the home-page table
        result_dict["orphan_msrun"] = json.dumps(
        DBSession.query(distinct(MsRun.filename).label("orphan_ms_run")).filter(MsRun.source_source_id == None).filter(MsRun.flag_trash == 0).order_by(MsRun.filename.desc()).limit(10).all())

        # SELECT (organ), count(organ) from Source group by organ
        sources = DBSession.query(Source.organ, func.count(Source.organ)).group_by(Source.organ).order_by(func.count(Source.organ).desc()).all()
        # keep the six most frequent organs, fold the rest into "others"
        # (slicing replaces the old index loop over range(len(sources)))
        merged_sources = dict(sources[:6])
        merged_sources["others"] = sum(count for _, count in sources[6:])
        result_dict["sources"] = json.dumps(merged_sources)

        return result_dict
    except Exception:
        # narrowed from a bare "except:"
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
# Beispiel #22
def peptide_page(request):
    """Render proteins and source patients for one peptide sequence.

    Only spectrum hits from non-trashed runs count toward the source
    list.
    """
    try:
        query = DBSession.query(Protein.name.label("protein"), Protein.gene_name.label("gene_name"))
        query = query.join(t_spectrum_protein_map)
        query = query.join(SpectrumHit)
        query = query.filter(SpectrumHit.sequence == request.matchdict["peptide"])
        query = query.group_by(Protein.name)
        proteins = json.dumps(query.all())

        query = DBSession.query(Source.patient_id)
        query = query.join(SpectrumHit)
        query = query.join(MsRun)
        query = query.filter(MsRun.flag_trash == 0)
        query = query.filter(SpectrumHit.sequence == request.matchdict["peptide"])
        query = query.group_by(Source.patient_id)
        sources = js_list_creator_dataTables(query.all())

    except Exception:
        # narrowed from a bare "except:"
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {"proteins": proteins, "sources": sources,
            "peptide": request.matchdict["peptide"]}
def orphan_run_overview(request):
    """Return MS runs with no assigned source that are not trashed."""
    try:
        # orphan runs: no source assigned and not blacklisted
        query = DBSession.query(MsRun.ms_run_id, MsRun.filename)
        query = query.filter(MsRun.source_source_id == None)
        query = query.filter(MsRun.flag_trash == 0)
        query = query.group_by(MsRun.ms_run_id)
        your_json = json.dumps(query.all())

    except DBAPIError:
        return Response(conn_err_msg, content_type="text/plain", status_int=500)
    return {"project": your_json}
# Beispiel #24
def upload_metadata_ms_run(request):
    """Provide autocomplete data for the MS-run metadata upload form.

    Pre-fills "run" from the request parameters (used when forwarded from
    the orphan-run table on the home page) and collects distinct values of
    several MsRun/Source columns for client-side autocompletion.

    NOTE(review): duplicates an earlier def of the same name in this
    module — confirm and remove one.
    """
    result_dict = dict()
    # fill out the "filename" if forwarded for orphan run table on home
    if "run" in request.params:
        result_dict["run"] = request.params["run"]
    else:
        result_dict["run"] = ""
    try:
        # query data for autocomplete
        allowed_elements = {
            "used_share": MsRun.used_share,
            "source_id": Source.source_id,
            "sample_mass": MsRun.sample_mass,
            "sample_volume": MsRun.sample_volume,
            "antibody_set": MsRun.antibody_set,
            "antibody_mass": MsRun.antibody_mass
        }

        # .items() instead of Py2-only .iteritems()
        for k, v in allowed_elements.items():
            query = DBSession.query(v)
            query = query.group_by(v)
            query_result = js_list_creator(query.all())
            result_dict[k] = query_result

        # filenames restricted to orphan (no source) non-trashed runs
        allowed_elements = {"filename": MsRun.filename}
        for k, v in allowed_elements.items():
            query = DBSession.query(v)
            query = query.filter(MsRun.source_source_id == None)
            query = query.filter(MsRun.flag_trash == 0)
            query = query.group_by(v)
            query_result = js_list_creator(query.all())
            result_dict[k] = query_result
    except Exception:
        # narrowed from a bare "except:"
        return Response(conn_err_msg,
                        content_type='text/plain',
                        status_int=500)
    return result_dict
# Beispiel #25
def venn_analysis(request):
    """Provide autocomplete lists for the Venn-analysis page.

    Collects distinct patient ids, source ids, run filenames and antibody
    sets for the client-side selection widgets.

    NOTE(review): duplicates an earlier def of the same name in this
    module — confirm and remove one.
    """
    try:
        # getting source patient_id for autocomplete
        query = DBSession.query(Source.patient_id.distinct())
        patient_id = js_list_creator(query.all())
        # getting source.source_id for auto complete
        query = DBSession.query(Source.source_id.distinct())
        source_id = js_list_creator(query.all())
        # getting ms_runs for autocomplete
        query = DBSession.query(MsRun.filename.distinct())
        ms_runs = js_list_creator(query.all())
        # Antibody for autocomplete
        query = DBSession.query(MsRun.antibody_set.distinct()).order_by(
            MsRun.antibody_set)
        antibody = js_list_creator(query.all())
    except DBAPIError:
        # re-enabled: this handler was commented out, leaving DB errors to
        # surface unhandled instead of the 500 every sibling view returns
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return {
        "patient_id": patient_id,
        "source_id": source_id,
        "ms_runs": ms_runs,
        "antibody": antibody
    }
# Beispiel #26
def unblacklist_ms_run_post(request):
    """Restore blacklisted MS runs posted as a Python-literal list in "ms_runs".

    Each entry with a real filename is written to the audit log and its
    flag_trash/trash_reason reset; every update is committed immediately.

    NOTE(review): duplicates an earlier def of the same name in this
    module — confirm and remove one.
    """
    try:
        # literal_eval (not eval) keeps the posted payload inert
        unblacklist = ast.literal_eval(request.params["ms_runs"])

        for row in unblacklist:
            if row['filename'] != " ":
                # audit trail: timestamp, filename and acting person
                log_writer(
                    "unblacklist",
                    strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                    row['filename'] + "\t" + row['person'])
                DBSession.query(MsRun).filter(
                    MsRun.filename == row['filename']).update({
                        "flag_trash": 0,
                        'trash_reason': None
                    })
                transaction.commit()

    except Exception:
        # narrowed from a bare "except:"
        return Response(conn_err_msg,
                        content_type='text/plain',
                        status_int=500)
    return dict()
def upload_metadata_source(request):
    """Provide distinct existing column values for the source-upload
    autocomplete fields.

    Returns a dict mapping each form field name to a js_list_creator()
    string of the distinct values already stored for that column, or a
    500 text response with conn_err_msg if the database cannot be
    queried.
    """
    try:
        # query data for autocomplete: form field name -> backing column
        result_dict = dict()
        allowed_elements = {"patient_id": Source.patient_id, "organ": Source.organ,
                            "organism": Source.organism, "histology": Source.histology, "dignity": Source.dignity,
                            "celltype": Source.celltype, "location": Source.location, "metastatis": Source.metastatis,
                            "treatment": Source.treatment, "person": Source.person, "typing": HlaType.hla_string,
                            'comment': Source.comment}

        # .items() instead of the Python-2-only .iteritems() so the view
        # keeps working under both Python 2 and Python 3
        for field, column in allowed_elements.items():
            query = DBSession.query(column).group_by(column)
            result_dict[field] = js_list_creator(query.all())
    except Exception:
        # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are no longer swallowed
        return Response(conn_err_msg, content_type='text/plain', status_int=500)
    return result_dict
def peptide_query(request):
    """Collect distinct values for every peptide-query autocomplete
    field and return them keyed by form field name.
    """

    def _distinct(column, ordered=True):
        # one distinct-value list per filterable column; most fields are
        # returned sorted, antibody keeps database order
        query = DBSession.query(column.distinct())
        if ordered:
            query = query.order_by(column)
        return js_list_creator(query.all())

    # queries issued in the same order as before
    patient_id = _distinct(Source.patient_id)
    antibody = _distinct(MsRun.antibody_set, ordered=False)
    dignity = _distinct(Source.dignity)
    organ = _distinct(Source.organ)
    histology = _distinct(Source.histology)
    celltype = _distinct(Source.celltype)
    hla = _distinct(HlaType.hla_string)
    treatment = _distinct(Source.treatment)

    return {
        "antibody": antibody,
        "dignity": dignity,
        "organ": organ,
        "histology": histology,
        "celltype": celltype,
        "hla": hla,
        'patient_id': patient_id,
        'treatment': treatment
    }
def peptide_query(request):
    """Return the distinct column values backing the peptide-query form,
    one js_list_creator() list per autocomplete field.
    """
    # (field name, column, sort the values?) — in original query order;
    # antibody is the only unsorted field
    specs = [
        ("patient_id", Source.patient_id, True),
        ("antibody", MsRun.antibody_set, False),
        ("dignity", Source.dignity, True),
        ("organ", Source.organ, True),
        ("histology", Source.histology, True),
        ("celltype", Source.celltype, True),
        ("hla", HlaType.hla_string, True),
        ("treatment", Source.treatment, True),
    ]

    result = {}
    for field, column, ordered in specs:
        query = DBSession.query(column.distinct())
        if ordered:
            query = query.order_by(column)
        result[field] = js_list_creator(query.all())
    return result
def source_overview(request):
    """Serialize all Source rows for the overview table.

    prep_date is cast to a string because json.dumps cannot encode raw
    date objects.  Returns {"project": <json>} or a 500 response on a
    database error.
    """
    try:
        # TODO: Update
        query = DBSession.query(
            Source.source_id,
            Source.patient_id,
            Source.organ,
            Source.dignity,
            Source.celltype,
            Source.histology,
            Source.location,
            Source.metastatis,
            Source.organism,
            Source.treatment,
            Source.person,
            func.cast(Source.prep_date, String).label("prep_date"))
        sources_json = json.dumps(query.all())
    except DBAPIError:
        return Response(conn_err_msg, content_type="text/plain", status_int=500)
    return {"project": sources_json}
def run_overview(request):
    """Serialize all MS runs (joined with their source) for the run
    overview table.

    Important: ms_run_date has to be cast to a string, otherwise
    json.dumps cannot create the json object.
    """
    try:
        query = DBSession.query(
            MsRun.ms_run_id,
            MsRun.filename,
            Source.patient_id,
            Source.organ,
            Source.dignity,
            func.cast(MsRun.ms_run_date, String).label("ms_run_date"),
            MsRun.antibody_set,
            MsRun.used_share,
            MsRun.sample_mass)
        query = query.join(Source)
        query = query.group_by(MsRun.ms_run_id)
        runs_json = json.dumps(query.all())
    except DBAPIError:
        return Response(conn_err_msg, content_type="text/plain", status_int=500)
    return {"project": runs_json}
Beispiel #32
0
def my_view(request):
    """Build the statistics shown on the main page.

    Returns run/source/trash counts, the ten newest orphan runs and a
    per-organ source histogram (top six organs, rest merged as
    "others"), or a 500 text response with conn_err_msg on failure.
    """
    try:
        result_dict = dict()
        # "== None" is required by SQLAlchemy (generates IS NULL);
        # "is None" would not build the filter
        result_dict["orphan_msrun_count"] = \
            DBSession.query(func.count(distinct(MsRun.filename))).filter(MsRun.source_source_id == None).filter(MsRun.flag_trash == 0).one()[0]
        result_dict["all_msrun_count"] = DBSession.query(
            func.count(distinct(MsRun.filename))).one()[0]
        result_dict["sources_count"] = DBSession.query(
            func.count(distinct(Source.sample_id))).one()[0]
        result_dict["trash_count"] = DBSession.query(
            func.count(distinct(
                MsRun.filename))).filter(MsRun.flag_trash == 1).one()[0]

        # ten newest (by filename, descending) runs without a source
        result_dict["orphan_msrun"] = json.dumps(
            DBSession.query(distinct(
                MsRun.filename).label("orphan_ms_run")).filter(
                    MsRun.source_source_id == None).filter(
                        MsRun.flag_trash == 0).order_by(
                            MsRun.filename.desc()).limit(10).all())

        # SELECT (organ), count(organ) from Source group by organ
        sources = DBSession.query(Source.organ, func.count(
            Source.organ)).group_by(Source.organ).order_by(
                func.count(Source.organ).desc()).all()
        # keep the six most frequent organs, aggregate the rest under
        # "others" (slicing replaces the old index loop over range(len))
        merged_sources = dict(sources[:6])
        merged_sources["others"] = sum(count for _, count in sources[6:])
        result_dict["sources"] = json.dumps(merged_sources)

        return result_dict
    except Exception:
        # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # are no longer swallowed
        return Response(conn_err_msg,
                        content_type='text/plain',
                        status_int=500)
def update_metadata_msrun_post(request):
    """Update the metadata of an existing MS run.

    request.params["ms_runs"] is a Python-literal dict describing one
    run; empty-string values mean "leave this field unchanged".  When
    the source id changes, dependent SpectrumHit and PeptideRun rows
    are re-pointed to the new source as well.  Redirects back to the
    update form on success, returns a 500 text response on failure.
    """
    ms_run = ast.literal_eval(request.params["ms_runs"])
    # update if already in DB (without metadata included)
    try:
        # str(ms_run): ms_run is a dict, so "str + dict" raised
        # TypeError here before, making every request fail with a 500
        log_writer("ms_run_update_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + str(ms_run))
        ms_run_update = DBSession.query(MsRun).filter(MsRun.filename == ms_run["filename"]).all()
    except Exception:
        log_writer("ms_run_update_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + "MS Run update failed!")
        return Response(conn_err_msg + " \n MsRun insert failed", content_type='text/plain', status_int=500)
    if len(ms_run_update) > 0:
        # copy every non-empty field from the submitted form onto the row
        if ms_run['ms_run_date'] != "":
            ms_run_update[0].ms_run_date = ms_run['ms_run_date']
        if ms_run['used_share'] != "":
            ms_run_update[0].used_share = ms_run['used_share']
        if ms_run['comment'] != "":
            ms_run_update[0].comment = ms_run['comment']
        if ms_run['sample_mass'] != "":
            ms_run_update[0].sample_mass = ms_run['sample_mass']
        if ms_run['antibody_set'] != "":
            ms_run_update[0].antibody_set = ms_run['antibody_set']
        if ms_run['antibody_mass'] != "":
            ms_run_update[0].antibody_mass = ms_run['antibody_mass']
        if ms_run['sample_volume'] != "":
            ms_run_update[0].sample_volume = ms_run['sample_volume']

        if ms_run['source_source_id'] != "":
            # re-point dependent rows before changing the run's source id
            try:
                DBSession.query(SpectrumHit)\
                    .filter(SpectrumHit.source_source_id == ms_run_update[0].source_source_id).update({'source_source_id': ms_run['source_source_id']})
            except Exception:
                log_writer("ms_run_update_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + "SpectrumHit update failed!")
                DBSession.rollback()
                return Response("SpectrumHit update failed!",
                                content_type='text/plain', status_int=500)
            try:
                DBSession.query(PeptideRun) \
                    .filter(PeptideRun.source_source_id == ms_run_update[0].source_source_id).update({'source_source_id': ms_run['source_source_id']})
            except Exception:
                log_writer("ms_run_update_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + "Peptide Run update failed!")
                DBSession.rollback()
                return Response("Peptide Run update failed!",
                                content_type='text/plain', status_int=500)

            ms_run_update[0].source_source_id = ms_run['source_source_id']

    try:
        transaction.commit()
        DBSession.flush()
        # str(ms_run): same str + dict TypeError fix as above
        log_writer("ms_run_update", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + str(ms_run))
    except Exception:
        log_writer("ms_run_update_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + "MS Run update failed!")
        DBSession.rollback()
        return Response("MS Run update failed!",
                        content_type='text/plain', status_int=500)
    return HTTPFound(location="/update_metadata_ms_run?filename=%s" % ms_run["filename"])
def upload_metadata_source_post(request):
    """Insert new sources (and their HLA typing) submitted from the
    upload form.

    request.params["sources"] is a Python-literal list of dicts.  The
    whole upload is aborted with a 500 response if any submitted source
    already exists; otherwise each source is inserted together with its
    HLA-type mappings.
    """
    source_upload = ast.literal_eval(request.params["sources"])
    # Check if source already in DB
    for source in source_upload:
        try:
            query = DBSession.query(Source.source_id) \
                .filter(Source.patient_id == source['patient_id']) \
                .filter(Source.organ == source['organ']) \
                .filter(Source.organism == source['organism']) \
                .filter(Source.histology == source['histology']) \
                .filter(Source.dignity == source['dignity']) \
                .filter(Source.location == source['location']) \
                .filter(Source.treatment == source['treatment']) \
                .filter(Source.metastatis == source['metastatis']) \
                .filter(Source.celltype == source['celltype']) \
                .filter(Source.comment == source['comment']) \
                .filter(Source.person == source['person']) \
                .filter(Source.prep_date == source['prep_date'])

            test_source = query.all()
        except DBAPIError:
            return Response(conn_err_msg, content_type='text/plain', status_int=500)
        # if in DB abort whole upload
        if len(test_source) > 0:
            # str(source): source is a dict, "str + dict" raised
            # TypeError here before
            log_writer("source_metadata_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + str(source))
            log_writer("source_metadata_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + "The source is already in the Database. Aborted whole upload!")
            return Response("The source is already in the Database. Aborted whole upload!",
                            content_type='text/plain', status_int=500)

    # upload each source
    for source in source_upload:

        # ####################################################
        # Source:                                           #
        # ####################################################

        try:
            # composite human-readable id for the sample
            sample_id = "_".join([source["patient_id"], source["organ"],
                                  source['dignity'], source['histology'],
                                  source['celltype'], source['location'],
                                  source['treatment'], source['prep_date']])

            source_insert = Source(patient_id=source['patient_id'], organ=source['organ'], organism=source['organism'],
                                   histology=source['histology'], dignity=source['dignity'],
                                   location=source['location'], treatment=source['treatment'],
                                   metastatis=source['metastatis'], celltype=source['celltype'],
                                   comment=source['comment'], prep_date=source['prep_date'],
                                   person=source['person'], sample_id=sample_id)
        except DBAPIError:
            return Response(conn_err_msg + "\n Insert into Source failed!",
                            content_type='text/plain', status_int=500)

        # "!=" instead of "is not": identity comparison against a string
        # literal is unreliable (and a SyntaxWarning on modern Python)
        if source['typing'] != "":
            # ###############
            # hla_types    #
            # ###############
            hla_alleles = source['typing'].split(";")
            for hla_typing in hla_alleles:
                hla_typing_split = hla_typing.strip().split(":")
                for i in range(0, len(hla_typing_split)):
                    # every prefix of the allele is stored as its own type
                    sub_type = ":".join(hla_typing_split[0:i + 1])
                    try:
                        query = DBSession.query(HlaType.hla_types_id).filter(HlaType.hla_string == sub_type)
                        hla_types_id = query.all()
                    except DBAPIError:
                        return Response(conn_err_msg, content_type='text/plain', status_int=500)
                    # unknown hla_lookup -> create it on the fly
                    if len(hla_types_id) == 0:
                        try:
                            hla_type = HlaType(hla_string=sub_type, digits=hla_digits_extractor(sub_type))
                            DBSession.add(hla_type)
                            DBSession.flush()
                            hla_types_id = hla_type.hla_types_id
                        except DBAPIError:
                            return Response(conn_err_msg + "\n Insert into Hla-Types failed!",
                                            content_type='text/plain', status_int=500)
                    else:
                        hla_types_id = hla_types_id[0]
                        hla_type = DBSession.query(HlaType).filter(HlaType.hla_string == sub_type).all()[0]
                    # ###############
                    # hla_map      #
                    # ###############

                    try:
                        # link via the relationship collection; Source has
                        # no .append(), so the original raised
                        # AttributeError here (matches the usage in
                        # update_metadata_source_post) — TODO confirm
                        source_insert.hlatypes.append(hla_type)

                    except DBAPIError:
                        return Response(conn_err_msg + "\n Insert into Hla-Map failed!",
                                        content_type='text/plain', status_int=500)

            try:
                # str(source): same str + dict TypeError fix as above
                log_writer("source_metadata", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + str(source))
                log_writer("source_metadata_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + str(source))
                DBSession.add(source_insert)
                DBSession.flush()
            except DBAPIError:
                return Response(conn_err_msg + "\n Insert into Source failed!",
                                content_type='text/plain', status_int=500)

        else:
            log_writer("source_metadata", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + str(source))
            log_writer("source_metadata_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + str(source))
            DBSession.add(source_insert)
            DBSession.flush()

    return dict()
Beispiel #35
0
def upload_metadata_source_post(request):
    """Insert new sources (and their HLA typing) submitted from the
    upload form.

    request.params["sources"] is a Python-literal list of dicts.  The
    whole upload is aborted with a 500 response if any submitted source
    already exists; otherwise each source is inserted together with its
    HLA-type mappings.
    """
    source_upload = ast.literal_eval(request.params["sources"])
    # Check if source already in DB
    for source in source_upload:
        try:
            query = DBSession.query(Source.source_id) \
                .filter(Source.patient_id == source['patient_id']) \
                .filter(Source.organ == source['organ']) \
                .filter(Source.organism == source['organism']) \
                .filter(Source.histology == source['histology']) \
                .filter(Source.dignity == source['dignity']) \
                .filter(Source.location == source['location']) \
                .filter(Source.treatment == source['treatment']) \
                .filter(Source.metastatis == source['metastatis']) \
                .filter(Source.celltype == source['celltype']) \
                .filter(Source.comment == source['comment']) \
                .filter(Source.person == source['person']) \
                .filter(Source.prep_date == source['prep_date'])

            test_source = query.all()
        except DBAPIError:
            return Response(conn_err_msg,
                            content_type='text/plain',
                            status_int=500)
        # if in DB abort whole upload
        if len(test_source) > 0:
            # str(source): source is a dict, "str + dict" raised
            # TypeError here before
            log_writer("source_metadata_complete",
                       strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                       str(source))
            log_writer(
                "source_metadata_complete",
                strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                "The source is already in the Database. Aborted whole upload!")
            return Response(
                "The source is already in the Database. Aborted whole upload!",
                content_type='text/plain',
                status_int=500)

    # upload each source
    for source in source_upload:

        # ####################################################
        # Source:                                           #
        # ####################################################

        try:
            # composite human-readable id for the sample
            sample_id = "_".join([source["patient_id"], source["organ"],
                                  source['dignity'], source['histology'],
                                  source['celltype'], source['location'],
                                  source['treatment'], source['prep_date']])

            source_insert = Source(patient_id=source['patient_id'],
                                   organ=source['organ'],
                                   organism=source['organism'],
                                   histology=source['histology'],
                                   dignity=source['dignity'],
                                   location=source['location'],
                                   treatment=source['treatment'],
                                   metastatis=source['metastatis'],
                                   celltype=source['celltype'],
                                   comment=source['comment'],
                                   prep_date=source['prep_date'],
                                   person=source['person'],
                                   sample_id=sample_id)
        except DBAPIError:
            return Response(conn_err_msg + "\n Insert into Source failed!",
                            content_type='text/plain',
                            status_int=500)

        # "!=" instead of "is not": identity comparison against a string
        # literal is unreliable (and a SyntaxWarning on modern Python)
        if source['typing'] != "":
            # ###############
            # hla_types    #
            # ###############
            hla_alleles = source['typing'].split(";")
            for hla_typing in hla_alleles:
                hla_typing_split = hla_typing.strip().split(":")
                for i in range(0, len(hla_typing_split)):
                    # every prefix of the allele is stored as its own type
                    sub_type = ":".join(hla_typing_split[0:i + 1])
                    try:
                        query = DBSession.query(HlaType.hla_types_id).filter(
                            HlaType.hla_string == sub_type)
                        hla_types_id = query.all()
                    except DBAPIError:
                        return Response(conn_err_msg,
                                        content_type='text/plain',
                                        status_int=500)
                    # unknown hla_lookup -> create it on the fly
                    if len(hla_types_id) == 0:
                        try:
                            hla_type = HlaType(
                                hla_string=sub_type,
                                digits=hla_digits_extractor(sub_type))
                            DBSession.add(hla_type)
                            DBSession.flush()
                            hla_types_id = hla_type.hla_types_id
                        except DBAPIError:
                            return Response(conn_err_msg +
                                            "\n Insert into Hla-Types failed!",
                                            content_type='text/plain',
                                            status_int=500)
                    else:
                        hla_types_id = hla_types_id[0]
                        hla_type = DBSession.query(HlaType).filter(
                            HlaType.hla_string == sub_type).all()[0]
                    # ###############
                    # hla_map      #
                    # ###############

                    try:
                        # link via the relationship collection; Source has
                        # no .append(), so the original raised
                        # AttributeError here (matches the usage in
                        # update_metadata_source_post) — TODO confirm
                        source_insert.hlatypes.append(hla_type)

                    except DBAPIError:
                        return Response(conn_err_msg +
                                        "\n Insert into Hla-Map failed!",
                                        content_type='text/plain',
                                        status_int=500)

            try:
                # str(source): same str + dict TypeError fix as above
                log_writer(
                    "source_metadata",
                    strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                    str(source))
                log_writer(
                    "source_metadata_complete",
                    strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                    str(source))
                DBSession.add(source_insert)
                DBSession.flush()
            except DBAPIError:
                return Response(conn_err_msg + "\n Insert into Source failed!",
                                content_type='text/plain',
                                status_int=500)

        else:
            log_writer("source_metadata",
                       strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                       str(source))
            log_writer("source_metadata_complete",
                       strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                       str(source))
            DBSession.add(source_insert)
            DBSession.flush()

    return dict()
Beispiel #36
0
def update_metadata_msrun_post(request):
    """Update the metadata of an existing MS run.

    request.params["ms_runs"] is a Python-literal dict describing one
    run; empty-string values mean "leave this field unchanged".  When
    the source id changes, dependent SpectrumHit and PeptideRun rows
    are re-pointed to the new source as well.  Redirects back to the
    update form on success, returns a 500 text response on failure.
    """
    ms_run = ast.literal_eval(request.params["ms_runs"])
    # update if already in DB (without metadata included)
    try:
        # str(ms_run): ms_run is a dict, so "str + dict" raised
        # TypeError here before, making every request fail with a 500
        log_writer("ms_run_update_complete",
                   strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                   str(ms_run))
        ms_run_update = DBSession.query(MsRun).filter(
            MsRun.filename == ms_run["filename"]).all()
    except Exception:
        log_writer(
            "ms_run_update_complete",
            strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
            "MS Run update failed!")
        return Response(conn_err_msg + " \n MsRun insert failed",
                        content_type='text/plain',
                        status_int=500)
    if len(ms_run_update) > 0:
        # copy every non-empty field from the submitted form onto the row
        if ms_run['ms_run_date'] != "":
            ms_run_update[0].ms_run_date = ms_run['ms_run_date']
        if ms_run['used_share'] != "":
            ms_run_update[0].used_share = ms_run['used_share']
        if ms_run['comment'] != "":
            ms_run_update[0].comment = ms_run['comment']
        if ms_run['sample_mass'] != "":
            ms_run_update[0].sample_mass = ms_run['sample_mass']
        if ms_run['antibody_set'] != "":
            ms_run_update[0].antibody_set = ms_run['antibody_set']
        if ms_run['antibody_mass'] != "":
            ms_run_update[0].antibody_mass = ms_run['antibody_mass']
        if ms_run['sample_volume'] != "":
            ms_run_update[0].sample_volume = ms_run['sample_volume']

        if ms_run['source_source_id'] != "":
            # re-point dependent rows before changing the run's source id
            try:
                DBSession.query(SpectrumHit)\
                    .filter(SpectrumHit.source_source_id == ms_run_update[0].source_source_id).update({'source_source_id': ms_run['source_source_id']})
            except Exception:
                log_writer(
                    "ms_run_update_complete",
                    strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                    "SpectrumHit update failed!")
                DBSession.rollback()
                return Response("SpectrumHit update failed!",
                                content_type='text/plain',
                                status_int=500)
            try:
                DBSession.query(PeptideRun) \
                    .filter(PeptideRun.source_source_id == ms_run_update[0].source_source_id).update({'source_source_id': ms_run['source_source_id']})

            except Exception:
                log_writer(
                    "ms_run_update_complete",
                    strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                    "Peptide Run update failed!")
                DBSession.rollback()
                return Response("Peptide Run update failed!",
                                content_type='text/plain',
                                status_int=500)

            ms_run_update[0].source_source_id = ms_run['source_source_id']

    try:
        transaction.commit()
        DBSession.flush()
        # str(ms_run): same str + dict TypeError fix as above
        log_writer("ms_run_update",
                   strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                   str(ms_run))
    except Exception:
        log_writer(
            "ms_run_update_complete",
            strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
            "MS Run update failed!")
        DBSession.rollback()
        return Response("MS Run update failed!",
                        content_type='text/plain',
                        status_int=500)
    return HTTPFound(location="/update_metadata_ms_run?filename=%s" %
                     ms_run["filename"])
def update_metadata_source_post(request):
    """Apply edits to an existing source and rebuild its HLA mapping.

    request.params["sources"] is a Python-literal dict; empty-string
    values mean "leave this field unchanged".  The sample_id is rebuilt
    from the updated columns.  Redirects back to the update form on
    success, returns a 500 text response on failure.
    """
    source = ast.literal_eval(request.params["sources"])
    try:
        # str(source): source is a dict, so "str + dict" raised
        # TypeError here before, making every request fail with a 500
        log_writer("source_update_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + str(source))
        source_update = DBSession.query(Source).join(t_hla_map).join(HlaType).filter(Source.source_id == source["source_id"]).all()
    except Exception:
        log_writer("source_update_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + " Source update failed!")
        return Response(conn_err_msg + " \n Source update failed", content_type='text/plain', status_int=500)
    if len(source_update) > 0:
        # copy every non-empty field from the submitted form onto the row
        if source['patient_id'] != "":
            source_update[0].patient_id = source['patient_id']
        if source['organ'] != "":
            source_update[0].organ = source['organ']
        if source['organism'] != "":
            # fixed typo: was "orgnaism", which silently set a new ad-hoc
            # attribute instead of updating the organism column
            source_update[0].organism = source['organism']
        if source['comment'] != "":
            source_update[0].comment = source['comment']
        if source['histology'] != "":
            source_update[0].histology = source['histology']
        if source['dignity'] != "":
            source_update[0].dignity = source['dignity']
        if source['celltype'] != "":
            source_update[0].celltype = source['celltype']
        if source['person'] != "":
            source_update[0].person = source['person']
        if source['location'] != "":
            source_update[0].location = source['location']
        if source['metastatis'] != "":
            source_update[0].metastatis = source['metastatis']
        if source['treatment'] != "":
            source_update[0].treatment = source['treatment']
        if source['prep_date'] != "":
            source_update[0].prep_date = source['prep_date']
        # rebuild the composite human-readable sample id
        source_update[0].sample_id = "_".join([
            source_update[0].patient_id, source_update[0].organ,
            source_update[0].dignity, source_update[0].histology,
            source_update[0].celltype, source_update[0].location,
            source_update[0].treatment, source_update[0].prep_date])

        if source['typing'] != "":
            # remove all mappings, then re-create them from the form value
            source_update[0].hlatypes[:] = []
            hla_split = source['typing'].split(";")
            for hla_typing in hla_split:
                hla_typing_split = hla_typing.strip().split(":")
                for i in range(0, len(hla_typing_split)):
                    # every prefix of the allele is stored as its own type
                    sub_type = ":".join(hla_typing_split[0:i + 1])
                    try:
                        query = DBSession.query(HlaType.hla_types_id).filter(HlaType.hla_string == sub_type)
                        hla_types_id = query.all()
                    except DBAPIError:
                        return Response(conn_err_msg, content_type='text/plain', status_int=500)
                    # unknown hla_lookup -> create it on the fly
                    if len(hla_types_id) == 0:
                        try:
                            hla_type = HlaType(hla_string=sub_type, digits=hla_digits_extractor(sub_type))
                            DBSession.add(hla_type)
                            DBSession.flush()
                            hla_types_id = hla_type.hla_types_id
                        except DBAPIError:
                            return Response(conn_err_msg + "\n Insert into Hla-Types failed!",
                                            content_type='text/plain', status_int=500)
                    else:
                        hla_types_id = hla_types_id[0]
                        # dropped the confusing "hla_type = query = ..."
                        # double assignment
                        hla_type = DBSession.query(HlaType).filter(HlaType.hla_string == sub_type).all()[0]
                    try:
                        # add the hla type
                        source_update[0].hlatypes.append(hla_type)

                    except DBAPIError:
                        return Response(conn_err_msg + "\n Insert into Hla-Map failed!",
                                        content_type='text/plain', status_int=500)

    try:
        transaction.commit()
        DBSession.flush()
        # str(source): same str + dict TypeError fix as above
        log_writer("source_update", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + str(source))
    except Exception:
        log_writer("source_update_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + "Source update failed!")
        DBSession.rollback()
        return Response("Source update failed!",
                        content_type='text/plain', status_int=500)
    return HTTPFound(location="/update_metadata_source?id=%s" % source["source_id"])
def _join_run_tables(query, hit_cls, protein_map):
    """Join the Source/MsRun/HLA/Protein tables common to every grouping branch.

    hit_cls is PeptideRun or SpectrumHit and supplies the ms_run foreign key;
    protein_map is the association table linking that hit class to Protein.
    """
    query = query.join(Source)
    query = query.join(MsRun, hit_cls.ms_run_ms_run_id == MsRun.ms_run_id)
    query = query.join(t_hla_map)
    query = query.join(HlaType)
    query = query.join(protein_map)
    query = query.join(Protein)
    return query


def _apply_common_filters(query, params, hit_cls, protein_fk,
                          include_antibody=True, length2_set=False):
    """Apply the request-parameter filters shared by all grouping branches.

    hit_cls supplies the ``sequence``/``length`` columns (PeptideRun or
    SpectrumHit); protein_fk is the matching protein relationship for the
    'protein' filter.  include_antibody / length2_set encode the two places
    where the historical branches differed (the source_psm branch omitted
    the antibody filter and passed set=True for length_2).
    """
    query = create_filter(query, 'sequence', params, "sequence",
                          hit_cls, 'sequence_rule', True, set=False)
    query = create_filter(query, 'patient_id', params, "patient_id",
                          Source, 'patient_id_rule', True, set=False)
    query = create_filter(query, 'source_id', params, "source_id",
                          Source, 'source_id_rule', True, set=False)
    query = create_filter(query, 'run_name', params, "filename",
                          MsRun, 'run_name_rule', True, set=False)
    query = create_filter(query, 'organ', params, "organ",
                          Source, 'organ_rule', False, set=False)
    query = create_filter(query, 'histology', params, "histology",
                          Source, 'histology_rule', False, set=False)
    query = create_filter(query, 'dignity', params, "dignity",
                          Source, 'dignity_rule', False, set=False)
    # TODO: check if it works without fk (fk=HlaLookup.fk_hla_typess)
    query = create_filter(query, 'hla_typing', params, "hla_string",
                          HlaType, 'hla_typing_rule', False, set=False)
    query = create_filter(query, 'digits', params, 'digits',
                          HlaType, None, False, set=False)
    query = create_filter(query, 'protein', params, "name",
                          Protein, 'protein_rule', False, set=False,
                          fk=protein_fk)
    query = create_filter(query, 'length_1', params, 'length',
                          hit_cls, ">", False, set=False)
    # NOTE(review): the source_psm branch historically passed set=True here
    # while every other branch passed set=False — looks like a copy/paste
    # slip, but behavior is preserved; confirm against create_filter().
    query = create_filter(query, 'length_2', params, 'length',
                          hit_cls, "<", False, set=length2_set)
    if include_antibody:
        query = create_filter(query, 'antibody', params, "antibody_set",
                              MsRun, 'antibody_rule', False, set=True)
    query = create_filter(query, 'celltype', params, "celltype",
                          Source, 'celltype_rule', False, set=False)
    query = create_filter(query, 'treatment', params, "treatment",
                          Source, 'treatment_rule', False, set=False)
    return query


def peptide_query_result(request):
    """Run the peptide query and return the JSON-serialized result set.

    The 'grouping' request parameter selects one of five aggregation modes
    (peptide, run, source, source_psm, protein).  Redirects back to the
    query form when no search parameter is supplied; returns a plain-text
    500 Response on database errors or an unknown grouping value.
    """
    # Check if one of these parameters is set, if not forward to peptide_query
    params_check_dict = [
        'sequence', 'source_id', 'patient_id', 'run_name', 'organ',
        'histology', 'dignity', 'hla_typing', 'protein', 'length_1',
        'length_2', 'antibody', 'celltype', 'treatment'
    ]
    input_check = any(
        len(request.params.get(param, "")) > 0 for param in params_check_dict)

    # if there is no input forward to peptide_query
    if not input_check:
        raise HTTPFound(request.route_url("peptide_query"))

    grouping = request.params['grouping']
    try:
        if grouping == "peptide":
            # Group by peptide sequence, concatenating per-peptide metadata.
            query = DBSession.query(
                PeptideRun.sequence,
                func.group_concat(Protein.name.distinct().op('order by')(
                    Protein.name)).label("protein"),
                func.group_concat(Source.source_id.distinct().op('order by')(
                    Source.source_id)).label("source_id"),
                func.group_concat(Source.patient_id.distinct().op('order by')(
                    Source.patient_id)).label("patient_id"),
                func.group_concat(Source.dignity.distinct().op('order by')(
                    Source.dignity)).label("dignity"),
                func.group_concat(
                    (HlaType.hla_string.distinct().op('order by')(
                        HlaType.hla_string))).label('hla_typing'))
            query = _join_run_tables(query, PeptideRun, t_peptide_protein_map)
            query = _apply_common_filters(query, request.params, PeptideRun,
                                          PeptideRun.protein_proteins)
            query = query.group_by(PeptideRun.sequence)
        elif grouping == "run":
            # One row per peptide-run, keeping run-level score/RT columns.
            query = DBSession.query(
                PeptideRun.peptide_run_id, PeptideRun.sequence,
                PeptideRun.minRT, PeptideRun.maxRT, PeptideRun.minScore,
                PeptideRun.maxScore, PeptideRun.minE, PeptideRun.maxE,
                PeptideRun.minQ, PeptideRun.maxQ, PeptideRun.PSM,
                func.group_concat(HlaType.hla_string.distinct().op('order by')(
                    HlaType.hla_string)).label('hla_typing'),
                func.group_concat(Protein.name.distinct().op('order by')(
                    Protein.name)).label("protein"), Source.histology,
                Source.patient_id, Source.source_id, MsRun.filename,
                MsRun.ms_run_id)
            query = _join_run_tables(query, PeptideRun, t_peptide_protein_map)
            query = _apply_common_filters(query, request.params, PeptideRun,
                                          PeptideRun.protein_proteins)
            query = query.group_by(PeptideRun.peptide_run_id)
        elif grouping == "source":
            # Group by source (without PSM counts), min/max over run scores.
            query = DBSession.query(
                PeptideRun.peptide_run_id, PeptideRun.sequence,
                func.min(PeptideRun.minRT).label("minRT"),
                func.max(PeptideRun.maxRT).label("maxRT"),
                func.min(PeptideRun.minScore).label("minScore"),
                func.max(PeptideRun.maxScore).label("maxScore"),
                func.min(PeptideRun.minE).label("minE"),
                func.max(PeptideRun.maxE).label("maxE"),
                func.min(PeptideRun.minQ).label("minQ"),
                func.max(PeptideRun.maxQ).label("maxQ"),
                func.group_concat(HlaType.hla_string.distinct().op('order by')(
                    HlaType.hla_string)).label('hla_typing'),
                func.group_concat(Protein.name.distinct().op('order by')(
                    Protein.name)).label("protein"), Source.histology,
                Source.patient_id, Source.source_id)
            query = _join_run_tables(query, PeptideRun, t_peptide_protein_map)
            query = _apply_common_filters(query, request.params, PeptideRun,
                                          PeptideRun.protein_proteins)
            query = query.group_by(Source.source_id, PeptideRun.sequence)
        elif grouping == "source_psm":
            # Group by source with PSM counts from SpectrumHit.
            query = DBSession.query(
                SpectrumHit.sequence,
                func.min(SpectrumHit.ionscore).label("minScore"),
                func.max(SpectrumHit.ionscore).label("maxScore"),
                func.min(SpectrumHit.e_value).label("minE"),
                func.max(SpectrumHit.e_value).label("maxE"),
                func.min(SpectrumHit.q_value).label("minQ"),
                func.max(SpectrumHit.q_value).label("maxQ"),
                func.count(
                    SpectrumHit.spectrum_hit_id.distinct()).label("PSM"),
                func.group_concat(HlaType.hla_string.distinct().op('order by')(
                    HlaType.hla_string)).label('hla_typing'),
                func.group_concat(Protein.name.distinct().op('order by')(
                    Protein.name)).label("protein"), Source.histology,
                Source.patient_id, Source.source_id)
            query = _join_run_tables(query, SpectrumHit,
                                     t_spectrum_protein_map)
            # Historically this branch skipped the antibody filter and used
            # set=True for length_2; both quirks are preserved.
            query = _apply_common_filters(query, request.params, SpectrumHit,
                                          SpectrumHit.protein_proteins,
                                          include_antibody=False,
                                          length2_set=True)
            query = query.group_by(Source.source_id, SpectrumHit.sequence)
        elif grouping == "protein":
            # Group by protein.
            # TODO: a whole protein query from kidney takes 8 min...
            query = DBSession.query(
                func.group_concat(
                    PeptideRun.sequence.distinct().op('order by')(
                        PeptideRun.sequence)).label("peptide"),
                Protein.name.label("protein"),
                func.group_concat(Source.patient_id.distinct().op('order by')(
                    Source.patient_id)).label("patient_id"),
                func.group_concat(Source.source_id.distinct().op('order by')(
                    Source.source_id)).label("source_id"),
                func.group_concat(Source.dignity.distinct().op('order by')(
                    Source.dignity)).label("dignity"))
            query = _join_run_tables(query, PeptideRun, t_peptide_protein_map)
            query = _apply_common_filters(query, request.params, PeptideRun,
                                          PeptideRun.protein_proteins)
            query = query.group_by(Protein)
        else:
            # Previously an unknown grouping fell through and raised a
            # NameError on the return statement; fail explicitly instead.
            return Response("Unknown grouping: %s" % grouping,
                            content_type='text/plain',
                            status_int=500)

        your_json = json.dumps(query.all())
    except DBAPIError:
        return Response(conn_err_msg,
                        content_type='text/plain',
                        status_int=500)

    return {'project': your_json, 'grouping': grouping}
# Beispiel #39
def upload_metadata_ms_run_post(request):
    """Attach metadata to MS runs that are already registered in the DB.

    request.params["ms_runs"] holds the Python-literal repr of a list of
    dicts (one per run), parsed with ast.literal_eval.  If ANY run already
    has a source attached the whole upload is aborted; otherwise every
    orphan run (source_source_id IS NULL) is updated in place and its
    SpectrumHit / PeptideRun rows are re-linked to the new source.

    Returns an empty dict on success or a text/plain 500 Response on error.
    """
    ms_run_upload = ast.literal_eval(request.params["ms_runs"])
    # Check if MS run is already in database with METADATA
    for ms_run in ms_run_upload:
        try:
            ms_runs = DBSession.query(MsRun.ms_run_id).filter(
                MsRun.filename == ms_run['filename']).filter(
                    MsRun.source_source_id != None).all()
        except DBAPIError:
            return Response(conn_err_msg,
                            content_type='text/plain',
                            status_int=500)
        if len(ms_runs) > 0:
            # if in MS run with Metadata in DB, abort whole Upload
            return Response(
                "The source " + ms_run['source'] +
                " is already in the Database. Aborted whole upload!",
                content_type='text/plain',
                status_int=500)
    # upload the each MS run
    for ms_run in ms_run_upload:
        source_id = ms_run["source_id"]

        # update if already in DB (without metadata included)
        try:
            ms_run_update = DBSession.query(MsRun).filter(
                MsRun.filename == ms_run["filename"]).filter(
                    MsRun.source_source_id == None).all()
        except Exception:  # was a bare except; kept broad but explicit
            return Response(conn_err_msg + " \n MsRun insert failed",
                            content_type='text/plain',
                            status_int=500)

        if len(ms_run_update) > 0:
            ms_run_update[0].source_source_id = source_id
            # Optional fields are only overwritten when a value was sent.
            if ms_run['date'] != "":
                ms_run_update[0].ms_run_date = ms_run['date']
            if ms_run['used_share'] != "" and ms_run['used_share'] != "None":
                ms_run_update[0].used_share = ms_run['used_share']
            if ms_run['comment'] != "":
                ms_run_update[0].comment = ms_run['comment']
            if ms_run['sample_mass'] != "" and ms_run['sample_mass'] != "None":
                ms_run_update[0].sample_mass = ms_run['sample_mass']
            if ms_run['sample_volume'] != "" and ms_run[
                    'sample_volume'] != "None":
                ms_run_update[0].sample_volume = ms_run['sample_volume']
            ms_run_update[0].antibody_set = ms_run['antibody_set'].replace(
                " ", "")
            if ms_run['antibody_mass'] != "" and ms_run[
                    'antibody_mass'] != "None":
                ms_run_update[0].antibody_mass = ms_run['antibody_mass']

            # Updating all crossreferences for the source_id
            # NOTE(review): the bulk updates below use
            # ms_run['source_source_id'] while the run row itself gets
            # ms_run['source_id'] -- confirm both keys are always present
            # and consistent in the uploaded payload.
            try:
                spectrum_hits = DBSession.query(SpectrumHit) \
                    .filter(SpectrumHit.ms_run_ms_run_id == ms_run_update[0].ms_run_id).update(
                    {'source_source_id': ms_run['source_source_id']})
            except Exception:
                log_writer(
                    "ms_run_upload_complete",
                    strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                    "SpectrumHit update failed!")
                DBSession.rollback()
                return Response("SpectrumHit update failed!",
                                content_type='text/plain',
                                status_int=500)
            try:
                peptide_runs = DBSession.query(PeptideRun) \
                    .filter(PeptideRun.ms_run_ms_run_id == ms_run_update[0].ms_run_id).update(
                    {'source_source_id': ms_run['source_source_id']})

            except Exception:
                log_writer("ms_run_update_complete",
                           "Peptide Run update failed!")
                DBSession.rollback()
                return Response("Peptide Run update failed!",
                                content_type='text/plain',
                                status_int=500)
            transaction.commit()
            DBSession.flush()
            # BUG FIX: ms_run is a dict -- concatenating it to a str raised
            # TypeError on every successful upload; stringify before logging.
            log_writer("ms_run_metadata_complete",
                       strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                       str(ms_run))
            log_writer("ms_run_metadata",
                       strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                       str(ms_run))
        else:
            log_writer(
                "ms_run_metadata_complete",
                strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
                "MsRun insert failed! Only already registered MS Runs can be uploaded."
            )
            DBSession.rollback()
            return Response(
                conn_err_msg +
                " \n MsRun insert failed! Only already registered MS Runs can be uploaded.",
                content_type='text/plain',
                status_int=500)

    return dict()
def multi_peptide_result(request):
    """Analysis view: normalised HLA counts for a pasted peptide list.

    request.params["peptide_input"] is a CRLF-separated list of peptide
    sequences.  For each HLA type the per-peptide hit counts are summed,
    divided by the number of distinct sources that contributed hits, and
    normalised by the database-wide run count for that HLA.

    NOTE(review): results are only printed to stdout, never returned --
    the view always answers with an empty dict.  Uses dict.iteritems()
    and the print statement, i.e. this module targets Python 2.
    """
    # TODO: remove empty peptide at end
    if request.params["peptide_input"] != '':
        split_peptides = request.params["peptide_input"].split("\r\n")

        # Baseline: number of MS runs per HLA type over the whole DB
        # (used below as the normalisation denominator).
        query = DBSession.query(func.count(MsRun.ms_run_id).label("source_count"), HlaType.hla_string)
        # query = DBSession.query(func.count(Source.source_id).label("source_count"), HlaType.hla_string)
        query = query.join(Source)
        query = query.join(HlaLookup)
        query = query.join(t_hla_map)
        query = query.join(HlaType)
        query = query.group_by(HlaType.hla_string)
        source_count = query.all()
        # hla_string -> total run count
        hla_dict_count = dict()
        for s in source_count:
            hla_dict_count[s[1]] = s[0]


        # per peptide
        # Count, per (peptide sequence, HLA string) pair, how many spectrum
        # hits carry that HLA among the requested peptides.
        query = DBSession.query(SpectrumHit.sequence,
                                Source.name.label("name"),
                                HlaType.hla_string, func.count(HlaType.hla_string))
        # func.group_concat(HlaType.hla_string).label("hla"))
        query = query.join(Source)
        query = query.join(HlaLookup)
        query = query.join(t_hla_map)
        query = query.join(HlaType)
        query = query.filter(or_(*[(SpectrumHit.sequence == pep) for pep in split_peptides]))
        query = query.group_by(SpectrumHit.sequence, HlaType.hla_string)
        result = query.all()
        # iterate over result and count each HLA
        # TODO: maybe query first only the hla and then get also the source name and a distinct hla for each peptide
        # Distinct source names contributing at least one hit.
        sources = set()
        for r in result:
            sources.add(r[1])

        # Sum the per-peptide counts for each HLA string.
        hla_dict = dict()
        for r in result:
            if r[2] in hla_dict:
                hla_dict[r[2]] += r[3]
            else:
                hla_dict[r[2]] = r[3]

        # Normalise by number of distinct sources and by the per-HLA
        # database-wide baseline computed above.
        hla_dict_normalized = dict()
        for k, v in hla_dict.iteritems():
            hla_dict_normalized[k] = (float(v) / len(sources)) / hla_dict_count[k]

        for k, v in hla_dict_normalized.iteritems():
            print str(k) + "\t" + str(v)


            # # per source
            # query = DBSession.query(SpectrumHit.sequence,
            # Source.name.label("name"),
            # HlaType.hla_string)
            # # func.group_concat(HlaType.hla_string).label("hla"))
            # query = query.join(Source)
            # query = query.join(HlaLookup)
            # query = query.join(t_hla_map)
            # query = query.join(HlaType)
            # query = query.filter(or_(*[(SpectrumHit.sequence == pep) for pep in split_peptides]))
            # result = query.all()
            # # iterate over result and count each HLA
            # # TODO: maybe query first only the hla and then get also the source name and a distinct hla for each peptide
            # sources = set()
            # for r in result:
            # sources.add(r[1])
            #
            # hla_dict = dict()
            # for r in result:
            #     if r[2] in hla_dict:
            #         hla_dict[r[2]] += 1
            #     else:
            #         hla_dict[r[2]] = 1
            #
            # hla_dict_normalized = dict()
            # for k, v in hla_dict.iteritems():
            #     hla_dict_normalized[k] = (float(v) / len(sources)) / hla_dict_count[k]
            #
            # for k, v in hla_dict_normalized.iteritems():
            #     print str(k) + "\t" + str(v)

            # # per Spectrum hit
            # query = DBSession.query(SpectrumHit.spectrum_hit_id ,SpectrumHit.sequence,
            #                         Source.name.label("name"),
            #                         HlaType.hla_string)
            #                         #func.group_concat(HlaType.hla_string).label("hla"))
            # query = query.join(Source)
            # query = query.join(HlaLookup)
            # query = query.join(t_hla_map)
            # query = query.join(HlaType)
            # query = query.filter(or_(*[(SpectrumHit.sequence == pep) for pep in split_peptides]))
            # result = query.all()
            # # iterate over result and count each HLA
            # # TODO: maybe query first only the hla and then get also the source name and a distinct hla for each peptide
            # sources = set()
            # spectra = set()
            # for r in result:
            #     sources.add(r[2])
            #     spectra.add(r[0])
            #
            # hla_dict = dict()
            # for r in result:
            #     if r[3] in hla_dict:
            #         hla_dict[r[3]] += 1
            #     else:
            #         hla_dict[r[3]] = 1
            #
            # hla_dict_normalized = dict()
            # for k, v in hla_dict.iteritems():
            #     hla_dict_normalized[k] = (float(v)/len(spectra))/hla_dict_count[k]
            #
            # for k,v in hla_dict_normalized.iteritems():
            #     print str(k) + "\t" + str(v)
    return dict()
Beispiel #41
0
    Base,
    PeptideRun
)
# For usage with Database if incorporated in ligando project
import paste.deploy
from sqlalchemy import engine_from_config

# Location to save logos
# SET OUTPUT DIR
# Project root: one directory above the directory containing this file.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
output_dir = BASE_DIR + "/static/seqlogo/"

# Load the Pyramid app settings straight from development.ini so this
# module can run outside the normal WSGI startup path.
settings = paste.deploy.appconfig('config:' + os.path.join(BASE_DIR, '../', 'development.ini'))

# Bind the global SQLAlchemy session and model metadata to the configured DB.
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.bind = engine


# Method to create Peptide binding motif by a list of peptides and filename
# peptides need the same length
def seq2logo_by_peptide_list(peptides, file_name):
    """Render a sequence-logo PNG for *peptides* via the external Seq2Logo tool.

    peptides: iterable of sequences; they need the same length (trim to e.g.
    peptide[1:9] before calling if the query has not already done so).
    file_name: output name; the PNG ends up under output_dir.

    Writes the list to ./peptide.txt in the working directory, then shells
    out to ./Seq2Logo.py.
    """
    # Context manager guarantees the file is flushed/closed before Seq2Logo
    # reads it, even if a write fails (the original leaked the handle on error).
    with open('peptide.txt', 'w') as peptide_file:
        for peptide in peptides:
            peptide_file.write("%s\n" % peptide)

    # SECURITY NOTE: file_name is interpolated into a shell command without
    # escaping -- do not pass untrusted input here (shell injection risk).
    os.system("./Seq2Logo.py  -I 1 -H ends --format PNG -f peptide.txt -o " + output_dir + file_name)
def upload_metadata_ms_run_post(request):
    """Attach metadata to MS runs that are already registered in the DB.

    Duplicate of the earlier upload_metadata_ms_run_post snippet: parses
    request.params["ms_runs"] with ast.literal_eval, aborts if any run
    already has a source, otherwise updates each orphan run and re-links
    its SpectrumHit / PeptideRun rows to the new source.

    Returns an empty dict on success or a text/plain 500 Response on error.
    """
    ms_run_upload = ast.literal_eval(request.params["ms_runs"])
    # Check if MS run is already in database with METADATA
    for ms_run in ms_run_upload:
        try:
            ms_runs = DBSession.query(MsRun.ms_run_id).filter(MsRun.filename == ms_run['filename']).filter(
                MsRun.source_source_id != None).all()
        except DBAPIError:
            return Response(conn_err_msg, content_type='text/plain', status_int=500)
        if len(ms_runs) > 0:
            # if in MS run with Metadata in DB, abort whole Upload
            return Response("The source " + ms_run['source'] + " is already in the Database. Aborted whole upload!",
                            content_type='text/plain', status_int=500)
    # upload the each MS run
    for ms_run in ms_run_upload:
        source_id = ms_run["source_id"]

        # update if already in DB (without metadata included)
        try:
            ms_run_update = DBSession.query(MsRun).filter(MsRun.filename == ms_run["filename"]).filter(
                MsRun.source_source_id == None).all()
        except Exception:  # was a bare except; kept broad but explicit
            return Response(conn_err_msg + " \n MsRun insert failed", content_type='text/plain', status_int=500)

        if len(ms_run_update) > 0:
            ms_run_update[0].source_source_id = source_id
            # Optional fields are only overwritten when a value was sent.
            if ms_run['date'] != "":
                ms_run_update[0].ms_run_date = ms_run['date']
            if ms_run['used_share'] != "" and ms_run['used_share'] != "None":
                ms_run_update[0].used_share = ms_run['used_share']
            if ms_run['comment'] != "":
                ms_run_update[0].comment = ms_run['comment']
            if ms_run['sample_mass'] != "" and ms_run['sample_mass'] != "None":
                ms_run_update[0].sample_mass = ms_run['sample_mass']
            if ms_run['sample_volume'] != "" and ms_run['sample_volume'] != "None":
                ms_run_update[0].sample_volume = ms_run['sample_volume']
            ms_run_update[0].antibody_set = ms_run['antibody_set'].replace(" ", "")
            if ms_run['antibody_mass'] != "" and ms_run['antibody_mass'] != "None":
                ms_run_update[0].antibody_mass = ms_run['antibody_mass']

            # Updating all crossreferences for the source_id
            # NOTE(review): the bulk updates use ms_run['source_source_id']
            # while the run row gets ms_run['source_id'] -- confirm both keys
            # are always present and consistent in the payload.
            try:
                spectrum_hits = DBSession.query(SpectrumHit) \
                    .filter(SpectrumHit.ms_run_ms_run_id == ms_run_update[0].ms_run_id).update(
                    {'source_source_id': ms_run['source_source_id']})
            except Exception:
                log_writer("ms_run_upload_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + "SpectrumHit update failed!")
                DBSession.rollback()
                return Response("SpectrumHit update failed!",
                                content_type='text/plain', status_int=500)
            try:
                peptide_runs = DBSession.query(PeptideRun) \
                    .filter(PeptideRun.ms_run_ms_run_id == ms_run_update[0].ms_run_id).update(
                    {'source_source_id': ms_run['source_source_id']})

            except Exception:
                log_writer("ms_run_update_complete", "Peptide Run update failed!")
                DBSession.rollback()
                return Response("Peptide Run update failed!",
                                content_type='text/plain', status_int=500)
            transaction.commit()
            DBSession.flush()
            # BUG FIX: ms_run is a dict -- concatenating it to a str raised
            # TypeError on every successful upload; stringify before logging.
            log_writer("ms_run_metadata_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + str(ms_run))
            log_writer("ms_run_metadata", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + str(ms_run))
        else:
            log_writer("ms_run_metadata_complete", strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + "MsRun insert failed! Only already registered MS Runs can be uploaded.")
            DBSession.rollback()
            return Response(conn_err_msg + " \n MsRun insert failed! Only already registered MS Runs can be uploaded.", content_type='text/plain', status_int=500)

    return dict()
def _venn_member_sets(request, param_prefix, join_entity, filter_column, use_antibody):
    """Collect distinct peptide sequences (or protein names) for each of the
    six venn slots named param_prefix + "1".."6" in request.params.

    Empty slots map to "" (the JS front end expects that placeholder);
    filled slots map to a plain list of strings.  Peptide vs. protein mode
    is chosen by request.params["prot_pep"].  When use_antibody is True and
    the "antibody" param is neither "" nor "all", results are restricted to
    MS runs with that antibody set.
    """
    member_sets = dict()
    want_peptides = request.params["prot_pep"] == "Peptide"
    antibody = request.params["antibody"] if use_antibody else None
    for i in range(1, 7):
        value = request.params[param_prefix + str(i)]
        if value == "":
            member_sets[i] = ""
            continue
        if want_peptides:
            query = DBSession.query(SpectrumHit.sequence.distinct())
        else:
            # Proteins are reached via the spectrum->protein mapping table.
            query = DBSession.query(Protein.name.distinct())
            query = query.join(t_spectrum_protein_map)
            query = query.join(SpectrumHit)
        query = query.join(join_entity)
        query = query.filter(filter_column == value)
        if use_antibody and antibody != "all" and antibody != "":
            query = query.join(MsRun)
            query = query.filter(MsRun.antibody_set == antibody)
        member_sets[i] = [row[0] for row in query.all()]
    return member_sets
def venn_analysis_result(request):
    """Build the data for the venn-diagram view.

    The mode is decided by the FIRST slot index i (1..6) with a non-empty
    field: ms_run_i wins over patient_id_i; if neither ever matches, the
    source_id_* params are used.  Returns a dict with json-encoded member
    lists ("result"), display names ("names") and -- for the MS-run mode
    only -- the real filenames ("real_names"; 0 in the other modes because
    the front end needs some value).
    """
    # Check if MS runs or Source_id or Patient_id are selected.
    # NOTE(review): the break fires on the first slot index that is
    # non-empty in either field, so e.g. patient_id_1 set + ms_run_2 set
    # selects the patient mode.  Preserved as-is; confirm this precedence
    # is intended.
    ms_run_search = False
    patient_id_search = False
    for i in range(1, 7):
        if request.params["ms_run_" + str(i)] != "":
            ms_run_search = True
            break
        elif request.params["patient_id_" + str(i)] != "":
            patient_id_search = True
            break

    result = dict()
    if ms_run_search:
        result["result"] = json.dumps(
            _venn_member_sets(request, "ms_run_", MsRun, MsRun.filename, False))
        real_names = dict()
        alias = dict()
        for i in range(1, 7):
            alias[i] = "Run " + str(i)
            real_names[i] = request.params["ms_run_" + str(i)]
        result["names"] = json.dumps(alias)
        result["real_names"] = json.dumps(real_names)
    elif patient_id_search:
        result["result"] = json.dumps(
            _venn_member_sets(request, "patient_id_", Source, Source.patient_id, True))
        names = dict()
        for i in range(1, 7):
            names[i] = request.params["patient_id_" + str(i)]
        result["names"] = json.dumps(names)
        # Not setting or setting to None did not work...
        result["real_names"] = 0
    else:
        result["result"] = json.dumps(
            _venn_member_sets(request, "source_id_", Source, Source.source_id, True))
        names = dict()
        for i in range(1, 7):
            names[i] = request.params["source_id_" + str(i)]
        result["names"] = json.dumps(names)
        # Not setting or setting to None did not work...
        result["real_names"] = 0

    return result
Beispiel #44
0
def update_metadata_source_post(request):
    """Update an existing Source row (and its HLA typing) from form data.

    request.params["sources"] is the Python-literal repr of a dict keyed by
    the editable Source columns plus "typing" (';'-separated HLA strings).
    Empty-string fields leave the stored value untouched; sample_id is then
    rebuilt from the (possibly updated) components.  On success redirects
    back to the edit page; on failure returns a text/plain 500 Response.
    """
    source = ast.literal_eval(request.params["sources"])
    try:
        # BUG FIX: source is a dict -- concatenating it to a str raised
        # TypeError on every call; stringify it before logging.
        log_writer("source_update_complete",
                   strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + str(source))
        source_update = DBSession.query(Source).join(t_hla_map).join(
            HlaType).filter(Source.source_id == source["source_id"]).all()
    except Exception:  # was a bare except; kept broad but explicit
        log_writer(
            "source_update_complete",
            strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
            " Source update failed!")
        return Response(conn_err_msg + " \n Source update failed",
                        content_type='text/plain',
                        status_int=500)
    if len(source_update) > 0:
        # Only overwrite columns for which the form sent a non-empty value.
        if source['patient_id'] != "":
            source_update[0].patient_id = source['patient_id']
        if source['organ'] != "":
            source_update[0].organ = source['organ']
        if source['organism'] != "":
            # BUG FIX: attribute was misspelled "orgnaism", so the organism
            # column was silently never persisted (the mapped attribute is
            # Source.organism, cf. the search queries in this module).
            source_update[0].organism = source['organism']
        if source['comment'] != "":
            source_update[0].comment = source['comment']
        if source['histology'] != "":
            source_update[0].histology = source['histology']
        if source['dignity'] != "":
            source_update[0].dignity = source['dignity']
        if source['celltype'] != "":
            source_update[0].celltype = source['celltype']
        if source['person'] != "":
            source_update[0].person = source['person']
        if source['location'] != "":
            source_update[0].location = source['location']
        if source['metastatis'] != "":
            source_update[0].metastatis = source['metastatis']
        if source['treatment'] != "":
            source_update[0].treatment = source['treatment']
        if source['prep_date'] != "":
            source_update[0].prep_date = source['prep_date']
        # Rebuild the composite sample_id from its components.
        source_update[0].sample_id = "_".join([
            source_update[0].patient_id, source_update[0].organ,
            source_update[0].dignity, source_update[0].histology,
            source_update[0].celltype, source_update[0].location,
            source_update[0].treatment, source_update[0].prep_date])

        if source['typing'] != "":
            # remove all mappings, then re-create every hla link
            source_update[0].hlatypes[:] = []
            hla_split = source['typing'].split(";")
            for hla_typing in hla_split:
                hla_typing_split = hla_typing.strip().split(":")
                # Register every prefix of the typing (A*02, A*02:01, ...).
                for i in range(0, len(hla_typing_split)):
                    sub_type = ":".join(hla_typing_split[0:i + 1])
                    try:
                        query = DBSession.query(HlaType.hla_types_id).filter(
                            HlaType.hla_string == sub_type)
                        hla_types_id = query.all()
                    except DBAPIError:
                        return Response(conn_err_msg,
                                        content_type='text/plain',
                                        status_int=500)
                    if len(hla_types_id) == 0:
                        # Unknown hla type: create it on the fly.
                        try:
                            hla_type = HlaType(
                                hla_string=sub_type,
                                digits=hla_digits_extractor(sub_type))
                            DBSession.add(hla_type)
                            DBSession.flush()
                        except DBAPIError:
                            return Response(conn_err_msg +
                                            "\n Insert into Hla-Types failed!",
                                            content_type='text/plain',
                                            status_int=500)
                    else:
                        # Known type: fetch the mapped object for the link.
                        hla_type = DBSession.query(HlaType).filter(
                            HlaType.hla_string == sub_type).all()[0]
                    try:
                        # add the hla type
                        source_update[0].hlatypes.append(hla_type)

                    except DBAPIError:
                        return Response(conn_err_msg +
                                        "\n Insert into Hla-Map failed!",
                                        content_type='text/plain',
                                        status_int=500)

    try:
        transaction.commit()
        DBSession.flush()
        # BUG FIX: stringify the dict before concatenating (see above).
        log_writer("source_update",
                   strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" + str(source))
    except Exception:
        log_writer(
            "source_update_complete",
            strftime("%Y.%m.%d %H:%M:%S", gmtime()) + "\t" +
            "Source update failed!")
        DBSession.rollback()
        return Response("Source update failed!",
                        content_type='text/plain',
                        status_int=500)
    return HTTPFound(location="/update_metadata_source?id=%s" %
                     source["source_id"])
Beispiel #45
0
def search_result(request):
    """Exact-match search of one term across peptides, sources, HLA types,
    MS runs and proteins.

    The single search term comes from request.params["search_all"].  Each
    result-dict entry is a json-encoded list of distinct matches; proteins
    fall back to a gene-name lookup when no protein-name match exists.
    """
    term = request.params["search_all"]
    result = dict()

    # Peptide sequences -- trashed runs and orphan hits are excluded.
    # TODO: Vllt lieber peptide_run benutzen?
    peptide_query = DBSession.query(
        SpectrumHit.sequence.distinct().label('peptide')).join(MsRun)
    peptide_query = peptide_query.filter(SpectrumHit.sequence == term)
    peptide_query = peptide_query.filter(MsRun.flag_trash == 0)
    peptide_query = peptide_query.filter(SpectrumHit.source_source_id != None)
    result["peptide"] = json.dumps(peptide_query.all())

    # Source columns all follow the same distinct-exact-match pattern.
    # (patient_id keeps its historical result label 'source'.)
    for key, column, label in [
            ("patient_id", Source.patient_id, "source"),
            ("dignity", Source.dignity, "dignity"),
            ("histology", Source.histology, "histology"),
            ("organ", Source.organ, "organ"),
            ("organism", Source.organism, "organism"),
            ("celltype", Source.celltype, "celltype"),
            ("person", Source.person, "person"),
            ("location", Source.location, "location"),
            ("treatment", Source.treatment, "treatment")]:
        rows = DBSession.query(column.distinct().label(label)).filter(
            column == term).all()
        result[key] = json.dumps(rows)

    # HLA types
    hla_rows = DBSession.query(
        HlaType.hla_string.distinct().label('hla')).filter(
        HlaType.hla_string == term).all()
    result["hla"] = json.dumps(hla_rows)

    # MS runs (trash excluded)
    msrun_rows = DBSession.query(
        MsRun.filename.distinct().label('msrun')).filter(
        MsRun.filename == term).filter(MsRun.flag_trash == 0).all()
    result["msrun"] = json.dumps(msrun_rows)

    # Proteins by name, falling back to gene name when nothing matched.
    protein_rows = DBSession.query(
        Protein.name.distinct().label('protein')).filter(
        Protein.name == term).all()
    result["protein"] = json.dumps(protein_rows)
    if result["protein"] == "[]":
        gene_rows = DBSession.query(
            Protein.name.distinct().label('protein')).filter(
            Protein.gene_name == term).all()
        result["protein"] = json.dumps(gene_rows)

    return result
Beispiel #46
0
def _venn_item_query(want_peptides):
    """Return the base query for one Venn set.

    Distinct peptide sequences when *want_peptides* is true, otherwise
    distinct protein names reached through the spectrum/protein map.
    """
    if want_peptides:
        return DBSession.query(SpectrumHit.sequence.distinct())
    query = DBSession.query(Protein.name.distinct())
    query = query.join(t_spectrum_protein_map)
    query = query.join(SpectrumHit)
    return query


def _venn_sets_by_run(request, want_peptides):
    """Collect the six Venn sets keyed by slot index, one per selected MS run.

    Empty slots map to "" so the template can distinguish "slot unused"
    from "run selected but no hits".
    """
    sets = dict()
    for slot in range(1, 7):
        run_name = request.params["ms_run_" + str(slot)]
        if run_name == "":
            sets[slot] = ""
            continue
        query = _venn_item_query(want_peptides)
        query = query.join(MsRun)
        query = query.filter(MsRun.filename == run_name)
        sets[slot] = [row[0] for row in query.all()]
    return sets


def _venn_sets_by_source(request, want_peptides, param_prefix, column):
    """Collect the six Venn sets filtered on a Source column.

    *param_prefix* is "patient_id_" or "source_id_"; *column* is the
    matching Source column to filter on. When a specific antibody is
    selected (neither "all" nor empty) the sets are restricted to runs
    with that antibody.
    """
    antibody = request.params["antibody"]
    restrict_antibody = not (antibody == "all" or antibody == "")
    sets = dict()
    for slot in range(1, 7):
        value = request.params[param_prefix + str(slot)]
        if value == "":
            sets[slot] = ""
            continue
        query = _venn_item_query(want_peptides)
        query = query.join(Source)
        query = query.filter(column == value)
        if restrict_antibody:
            query = query.join(MsRun)
            query = query.filter(MsRun.antibody_set == antibody)
        sets[slot] = [row[0] for row in query.all()]
    return sets


def venn_analysis_result(request):
    """Build the data for the Venn diagram page.

    Exactly one of three selection modes is used, decided by the first
    non-empty slot: MS runs, then patient ids, then source ids as the
    fallback. For the chosen mode up to six sets of distinct peptide
    sequences (or protein names, depending on "prot_pep") are queried
    and returned JSON-encoded together with display names.
    """
    # Decide the search mode from the first non-empty slot.
    # NOTE(review): a patient id in slot 1 takes precedence over an MS run
    # in slot 2 -- this mirrors the original slot-by-slot scan.
    ms_run_search = False
    patient_id_search = False
    for i in range(1, 7):
        if request.params["ms_run_" + str(i)] != "":
            ms_run_search = True
            break
        elif request.params["patient_id_" + str(i)] != "":
            patient_id_search = True
            break

    want_peptides = request.params["prot_pep"] == "Peptide"
    result = dict()

    if ms_run_search:
        result["result"] = json.dumps(_venn_sets_by_run(request, want_peptides))
        alias = dict()
        real_names = dict()
        for i in range(1, 7):
            alias[i] = "Run " + str(i)
            real_names[i] = request.params["ms_run_" + str(i)]
        result["names"] = json.dumps(alias)
        result["real_names"] = json.dumps(real_names)
    elif patient_id_search:
        result["result"] = json.dumps(
            _venn_sets_by_source(request, want_peptides, "patient_id_",
                                 Source.patient_id))
        names = dict()
        for i in range(1, 7):
            names[i] = request.params["patient_id_" + str(i)]
        result["names"] = json.dumps(names)
        # 0 is the template's sentinel for "no alias mapping"
        # (not setting it, or setting it to None, did not work)
        result["real_names"] = 0
    else:
        result["result"] = json.dumps(
            _venn_sets_by_source(request, want_peptides, "source_id_",
                                 Source.source_id))
        names = dict()
        for i in range(1, 7):
            names[i] = request.params["source_id_" + str(i)]
        result["names"] = json.dumps(names)
        # same sentinel as the patient_id branch
        result["real_names"] = 0

    return result
def _join_peptide_tables(query):
    """Join the tables shared by every PeptideRun-based grouping."""
    query = query.join(Source)
    query = query.join(MsRun, PeptideRun.ms_run_ms_run_id == MsRun.ms_run_id)
    query = query.join(t_hla_map)
    query = query.join(HlaType)
    query = query.join(t_peptide_protein_map)
    query = query.join(Protein)
    return query


def _join_spectrum_tables(query):
    """Join the tables for the SpectrumHit-based (source_psm) grouping."""
    query = query.join(Source)
    query = query.join(MsRun, SpectrumHit.ms_run_ms_run_id == MsRun.ms_run_id)
    query = query.join(t_hla_map)
    query = query.join(HlaType)
    query = query.join(t_spectrum_protein_map)
    query = query.join(Protein)
    return query


def _apply_peptide_filters(query, params):
    """Apply the user's filter parameters for PeptideRun-based groupings.

    One create_filter call per form field; identical across the peptide,
    run, source and protein groupings.
    """
    query = create_filter(query, "sequence", params, "sequence", PeptideRun, "sequence_rule", True, set=False)
    query = create_filter(query, "patient_id", params, "patient_id", Source, "patient_id_rule", True, set=False)
    query = create_filter(query, "source_id", params, "source_id", Source, "source_id_rule", True, set=False)
    query = create_filter(query, "run_name", params, "filename", MsRun, "run_name_rule", True, set=False)
    query = create_filter(query, "organ", params, "organ", Source, "organ_rule", False, set=False)
    query = create_filter(query, "histology", params, "histology", Source, "histology_rule", False, set=False)
    query = create_filter(query, "dignity", params, "dignity", Source, "dignity_rule", False, set=False)
    query = create_filter(query, "hla_typing", params, "hla_string", HlaType, "hla_typing_rule", False, set=False)
    # TODO: check if it works without fk, fk=HlaLookup.fk_hla_typess
    query = create_filter(query, "digits", params, "digits", HlaType, None, False, set=False)
    query = create_filter(query, "protein", params, "name", Protein, "protein_rule", False, set=False,
                          fk=PeptideRun.protein_proteins)
    query = create_filter(query, "length_1", params, "length", PeptideRun, ">", False, set=False)
    query = create_filter(query, "length_2", params, "length", PeptideRun, "<", False, set=False)
    query = create_filter(query, "antibody", params, "antibody_set", MsRun, "antibody_rule", False, set=True)
    query = create_filter(query, "celltype", params, "celltype", Source, "celltype_rule", False, set=False)
    query = create_filter(query, "treatment", params, "treatment", Source, "treatment_rule", False, set=False)
    return query


def _apply_spectrum_filters(query, params):
    """Apply the user's filter parameters for the source_psm grouping.

    Mirrors _apply_peptide_filters but targets SpectrumHit columns; this
    grouping historically has no antibody filter.
    """
    query = create_filter(query, "sequence", params, "sequence", SpectrumHit, "sequence_rule", True, set=False)
    query = create_filter(query, "patient_id", params, "patient_id", Source, "patient_id_rule", True, set=False)
    query = create_filter(query, "source_id", params, "source_id", Source, "source_id_rule", True, set=False)
    query = create_filter(query, "run_name", params, "filename", MsRun, "run_name_rule", True, set=False)
    query = create_filter(query, "organ", params, "organ", Source, "organ_rule", False, set=False)
    query = create_filter(query, "histology", params, "histology", Source, "histology_rule", False, set=False)
    query = create_filter(query, "dignity", params, "dignity", Source, "dignity_rule", False, set=False)
    query = create_filter(query, "hla_typing", params, "hla_string", HlaType, "hla_typing_rule", False, set=False)
    # TODO: check if it works without fk, fk=HlaLookup.fk_hla_typess
    query = create_filter(query, "digits", params, "digits", HlaType, None, False, set=False)
    query = create_filter(query, "protein", params, "name", Protein, "protein_rule", False, set=False,
                          fk=SpectrumHit.protein_proteins)
    query = create_filter(query, "length_1", params, "length", SpectrumHit, ">", False, set=False)
    # NOTE(review): set=True here diverges from every other length filter
    # (all use set=False); kept as-is to preserve behavior -- confirm intent.
    query = create_filter(query, "length_2", params, "length", SpectrumHit, "<", False, set=True)
    query = create_filter(query, "celltype", params, "celltype", Source, "celltype_rule", False, set=False)
    query = create_filter(query, "treatment", params, "treatment", Source, "treatment_rule", False, set=False)
    return query


def peptide_query_result(request):
    """Run the peptide query and return {"project": <json rows>, "grouping": <mode>}.

    The row shape depends on request.params["grouping"]:
      - "peptide":    one row per sequence, attributes group-concatenated
      - "run":        one row per peptide/run pair with per-run statistics
      - "source":     one row per source/sequence with min/max aggregates
      - "source_psm": like "source" but computed from SpectrumHit with PSM counts
      - "protein":    one row per protein with its peptides/sources concatenated
    Redirects back to the query form when no filter is given or the
    grouping value is unknown; DB failures return a plain-text 500.
    """
    # at least one of these parameters must be non-empty to run a search
    params_check_dict = [
        "sequence",
        "source_id",
        "patient_id",
        "run_name",
        "organ",
        "histology",
        "dignity",
        "hla_typing",
        "protein",
        "length_1",
        "length_2",
        "antibody",
        "celltype",
        "treatment",
    ]
    input_check = False
    for param in params_check_dict:
        if param in request.params and len(request.params[param]) > 0:
            input_check = True
            break

    # if there is no input forward to peptide_query
    if not input_check:
        raise HTTPFound(request.route_url("peptide_query"))

    grouping = request.params["grouping"]

    # Group by peptide
    if grouping == "peptide":
        try:
            query = DBSession.query(
                PeptideRun.sequence,
                func.group_concat(Protein.name.distinct().op("order by")(Protein.name)).label("protein"),
                func.group_concat(Source.source_id.distinct().op("order by")(Source.source_id)).label("source_id"),
                func.group_concat(Source.patient_id.distinct().op("order by")(Source.patient_id)).label("patient_id"),
                func.group_concat(Source.dignity.distinct().op("order by")(Source.dignity)).label("dignity"),
                func.group_concat(HlaType.hla_string.distinct().op("order by")(HlaType.hla_string)).label(
                    "hla_typing"
                ),
            )
            query = _join_peptide_tables(query)
            query = _apply_peptide_filters(query, request.params)
            query = query.group_by(PeptideRun.sequence)
            your_json = json.dumps(query.all())
        except DBAPIError:
            return Response(conn_err_msg, content_type="text/plain", status_int=500)

    # MS run group by
    elif grouping == "run":
        try:
            query = DBSession.query(
                PeptideRun.peptide_run_id,
                PeptideRun.sequence,
                PeptideRun.minRT,
                PeptideRun.maxRT,
                PeptideRun.minScore,
                PeptideRun.maxScore,
                PeptideRun.minE,
                PeptideRun.maxE,
                PeptideRun.minQ,
                PeptideRun.maxQ,
                PeptideRun.PSM,
                func.group_concat(HlaType.hla_string.distinct().op("order by")(HlaType.hla_string)).label("hla_typing"),
                func.group_concat(Protein.name.distinct().op("order by")(Protein.name)).label("protein"),
                Source.histology,
                Source.patient_id,
                Source.source_id,
                MsRun.filename,
                MsRun.ms_run_id,
            )
            query = _join_peptide_tables(query)
            query = _apply_peptide_filters(query, request.params)
            query = query.group_by(PeptideRun.peptide_run_id)
            your_json = json.dumps(query.all())
        except DBAPIError:
            return Response(conn_err_msg, content_type="text/plain", status_int=500)

    # source without PSM group by
    elif grouping == "source":
        try:
            query = DBSession.query(
                PeptideRun.peptide_run_id,
                PeptideRun.sequence,
                func.min(PeptideRun.minRT).label("minRT"),
                func.max(PeptideRun.maxRT).label("maxRT"),
                func.min(PeptideRun.minScore).label("minScore"),
                func.max(PeptideRun.maxScore).label("maxScore"),
                func.min(PeptideRun.minE).label("minE"),
                func.max(PeptideRun.maxE).label("maxE"),
                func.min(PeptideRun.minQ).label("minQ"),
                func.max(PeptideRun.maxQ).label("maxQ"),
                func.group_concat(HlaType.hla_string.distinct().op("order by")(HlaType.hla_string)).label("hla_typing"),
                func.group_concat(Protein.name.distinct().op("order by")(Protein.name)).label("protein"),
                Source.histology,
                Source.patient_id,
                Source.source_id,
            )
            query = _join_peptide_tables(query)
            query = _apply_peptide_filters(query, request.params)
            query = query.group_by(Source.source_id, PeptideRun.sequence)
            your_json = json.dumps(query.all())
        except DBAPIError:
            return Response(conn_err_msg, content_type="text/plain", status_int=500)

    # source with PSM group by (computed from individual spectrum hits)
    elif grouping == "source_psm":
        try:
            query = DBSession.query(
                SpectrumHit.sequence,
                func.min(SpectrumHit.ionscore).label("minScore"),
                func.max(SpectrumHit.ionscore).label("maxScore"),
                func.min(SpectrumHit.e_value).label("minE"),
                func.max(SpectrumHit.e_value).label("maxE"),
                func.min(SpectrumHit.q_value).label("minQ"),
                func.max(SpectrumHit.q_value).label("maxQ"),
                func.count(SpectrumHit.spectrum_hit_id.distinct()).label("PSM"),
                func.group_concat(HlaType.hla_string.distinct().op("order by")(HlaType.hla_string)).label("hla_typing"),
                func.group_concat(Protein.name.distinct().op("order by")(Protein.name)).label("protein"),
                Source.histology,
                Source.patient_id,
                Source.source_id,
            )
            query = _join_spectrum_tables(query)
            query = _apply_spectrum_filters(query, request.params)
            query = query.group_by(Source.source_id, SpectrumHit.sequence)
            your_json = json.dumps(query.all())
        except DBAPIError:
            return Response(conn_err_msg, content_type="text/plain", status_int=500)

    # Group by protein
    elif grouping == "protein":
        # TODO: a whole protein query from kidney takes 8 min...
        try:
            query = DBSession.query(
                func.group_concat(PeptideRun.sequence.distinct().op("order by")(PeptideRun.sequence)).label("peptide"),
                Protein.name.label("protein"),
                func.group_concat(Source.patient_id.distinct().op("order by")(Source.patient_id)).label("patient_id"),
                func.group_concat(Source.source_id.distinct().op("order by")(Source.source_id)).label("source_id"),
                func.group_concat(Source.dignity.distinct().op("order by")(Source.dignity)).label("dignity"),
            )
            query = _join_peptide_tables(query)
            query = _apply_peptide_filters(query, request.params)
            query = query.group_by(Protein)
            your_json = json.dumps(query.all())
        except DBAPIError:
            return Response(conn_err_msg, content_type="text/plain", status_int=500)

    else:
        # Previously an unknown grouping value fell through to the return
        # statement with "your_json" unbound and died with a NameError;
        # forward back to the query form instead, like the no-input case.
        raise HTTPFound(request.route_url("peptide_query"))

    return {"project": your_json, "grouping": grouping}
Beispiel #48
0
def update_metadata_source(request):
    """Serve the "update source metadata" page.

    With an "id" request parameter: return the stored metadata and HLA
    typing of that source plus autocomplete lists for every editable
    column (rendered by the route's default template).
    Without "id": render the source-picker template with a source_id
    autocomplete list only; DB failures return a plain-text 500.
    """
    if "id" in request.params:
        result_dict = dict()
        result_dict["id"] = request.params['id']
        # current metadata of the selected source (prep_date cast to string
        # so it JSON-serializes)
        query = DBSession.query(
            Source.patient_id, Source.organ, Source.organism, Source.histology,
            Source.dignity, Source.celltype, Source.location,
            Source.metastatis, Source.treatment, Source.person,
            func.cast(Source.prep_date, String).label("prep_date")).filter(
                Source.source_id == request.params["id"])
        result_dict['source'] = json.dumps(query.all())

        # HLA typing attached to the selected source
        query = DBSession.query(
            Source.source_id,
            HlaType.hla_string).join(t_hla_map).join(HlaType).filter(
                Source.source_id == request.params["id"])
        result_dict['hla'] = json.dumps(query.all())

        # autocomplete items: every distinct value currently in the DB,
        # one JS-ready list per editable column
        allowed_elements = {
            "patient_id": Source.patient_id,
            "organ": Source.organ,
            "organism": Source.organism,
            "histology": Source.histology,
            "dignity": Source.dignity,
            "celltype": Source.celltype,
            "location": Source.location,
            "metastatis": Source.metastatis,
            "treatment": Source.treatment,
            "person": Source.person,
            "comment": Source.comment,
            "typing": HlaType.hla_string
        }
        for k, v in allowed_elements.iteritems():
            query = DBSession.query(v)
            query = query.group_by(v)
            result_dict[k] = js_list_creator(query.all())

        return result_dict

    else:
        try:
            # query data for the source_id autocomplete only
            result_dict = dict()
            allowed_elements = {"source_id": Source.source_id}

            for k, v in allowed_elements.iteritems():
                query = DBSession.query(v)
                query = query.group_by(v)
                result_dict[k] = js_list_creator(query.all())
            # this branch renders a different template than the route's
            # default, so build the Response by hand
            result = render(
                '../templates/upload_templates/update_metadata_source.pt',
                result_dict,
                request=request)
            return Response(result)
        except DBAPIError:
            # narrowed from a bare "except:" (which also swallowed
            # KeyboardInterrupt/SystemExit); only DB errors produce the
            # connection-error page, matching the file's other views
            return Response(conn_err_msg,
                            content_type='text/plain',
                            status_int=500)