def from_analysis(context, database_connection, analysis_connection,
                  analysis_identifier, reduction, derivatization, name):
    database_connection = DatabaseBoundOperation(database_connection)
    if name is not None:
        name = validate_glycan_hypothesis_name(
            context, database_connection._original_connection, name)
        click.secho("Building Glycan Hypothesis %s" % name, fg='cyan')
    reduction = validate_reduction(context, reduction)
    derivatization = validate_derivatization(context, derivatization)

    analysis_connection = DatabaseBoundOperation(analysis_connection)
    analysis = get_by_name_or_id(analysis_connection.session, Analysis,
                                 analysis_identifier)
    if analysis.analysis_type == AnalysisTypeEnum.glycan_lc_ms:
        job = GlycanAnalysisHypothesisSerializer(
            analysis_connection._original_connection,
            analysis.id,
            name,
            output_connection=database_connection._original_connection)
        job.display_header()
        job.start()
    elif analysis.analysis_type == AnalysisTypeEnum.glycopeptide_lc_msms:
        job = GlycopeptideAnalysisGlycanCompositionExtractionHypothesisSerializer(
            analysis_connection._original_connection,
            analysis.id,
            name,
            output_connection=database_connection._original_connection)
        job.display_header()
        job.start()
    else:
        click.secho("Analysis Type %r could not be converted" %
                    (analysis.analysis_type.name, ),
                    fg='red')
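
Both serializer classes above take the same constructor arguments (input connection, analysis id, hypothesis name, and an output connection), so the if/elif dispatch can equally be written as a lookup table. A minimal sketch under that assumption; the table and helper names are illustrative, not part of the library:

_SERIALIZER_BY_ANALYSIS_TYPE = {
    AnalysisTypeEnum.glycan_lc_ms: GlycanAnalysisHypothesisSerializer,
    AnalysisTypeEnum.glycopeptide_lc_msms:
        GlycopeptideAnalysisGlycanCompositionExtractionHypothesisSerializer,
}

def _serializer_for(analysis):
    # Returns None for analysis types that cannot be converted
    return _SERIALIZER_BY_ANALYSIS_TYPE.get(analysis.analysis_type)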
Example 2
    def __init__(self,
                 input_connection,
                 analysis_id,
                 hypothesis_name,
                 output_connection=None):
        if output_connection is None:
            output_connection = input_connection
        self.input_connection = DatabaseBoundOperation(input_connection)
        self.output_connection = DatabaseBoundOperation(output_connection)
        GlycanHypothesisSerializerBase.__init__(self, output_connection,
                                                hypothesis_name)
        self.analysis_id = analysis_id
        self.seen_cache = set()
def _copy_analysis_across_file_boundaries(database_connection,
                                          source,
                                          hypothesis_name,
                                          identifier=None):
    source_handle = DatabaseBoundOperation(source)
    source_analysis_id = None
    source_analysis_name = None
    try:
        analysis_id = int(identifier)
        inst = source_handle.query(Analysis).get(analysis_id)
        if inst is not None:
            source_analysis_id = analysis_id
            source_analysis_name = inst.name

    except (TypeError, ValueError):
        # A non-numeric identifier is treated as an analysis name
        hypothesis_name = identifier
        inst = source_handle.query(Analysis).filter(
            Analysis.name == hypothesis_name).first()
        if inst is not None:
            source_analysis_id = inst.id
            source_analysis_name = inst.name
    if hypothesis_name is None:
        hypothesis_name = source_analysis_name
    mover = GlycanAnalysisHypothesisSerializer(source, source_analysis_id,
                                               hypothesis_name,
                                               database_connection)
    mover.run()
    return mover.hypothesis_id
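
The try/except around int(identifier) is the same name-or-id resolution that get_by_name_or_id performs elsewhere in this listing. A standalone sketch of the pattern; the helper name and query shape are assumptions, not the library's actual implementation:

def resolve_by_name_or_id(handle, model, identifier):
    # Numeric identifiers are primary keys; anything else is a name.
    try:
        record_id = int(identifier)
    except (TypeError, ValueError):
        return handle.query(model).filter(model.name == identifier).first()
    return handle.query(model).get(record_id)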
Example 4
def glycopeptide_hypothesis(database_connection, hypothesis_identifier, output_path, multifasta=False):
    '''Write each theoretical glycopeptide in CSV format
    '''
    database_connection = DatabaseBoundOperation(database_connection)
    session = database_connection.session()
    hypothesis = get_by_name_or_id(session, GlycopeptideHypothesis, hypothesis_identifier)

    def generate():
        interval = 100000
        i = 0
        while True:
            session.expire_all()
            chunk = hypothesis.glycopeptides.slice(i, i + interval).all()
            if len(chunk) == 0:
                break
            for glycopeptide in chunk:
                yield glycopeptide
            i += interval
    if output_path is None:
        output_stream = ctxstream(click.get_binary_stream('stdout'))
    else:
        output_stream = open(output_path, 'wb')
    with output_stream:
        job = GlycopeptideHypothesisCSVSerializer(output_stream, generate())
        job.run()
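
The generate() closure pages through the glycopeptide table in fixed-size windows, calling session.expire_all() between windows so already-yielded rows can be garbage collected instead of accumulating in the identity map. A self-contained sketch of the same chunked-iteration pattern against an in-memory SQLite database; the Item model is illustrative:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Item(Base):
    __tablename__ = 'item'
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Item(name='item-%d' % i) for i in range(250)])
session.commit()

def iter_in_chunks(query, session, interval=100):
    # Yield rows window by window; expire_all() keeps memory bounded.
    i = 0
    while True:
        session.expire_all()
        chunk = query.slice(i, i + interval).all()
        if not chunk:
            break
        for row in chunk:
            yield row
        i += interval

assert sum(1 for _ in iter_in_chunks(session.query(Item), session)) == 250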
def mass_search_dispatch(uuid):
    try:
        arguments, state = request_arguments_and_context()
        record = _locate_hypothesis(uuid)
        handle = DatabaseBoundOperation(record.path)
        hypothesis = handle.query(GlycanHypothesis).filter(
            GlycanHypothesis.uuid == record.uuid).first()

        if hypothesis is not None:
            return search_glycan_hypothesis(hypothesis.uuid, arguments['mass'],
                                            arguments['tolerance'])

        hypothesis = handle.query(GlycopeptideHypothesis).filter(
            GlycopeptideHypothesis.uuid == record.uuid).first()
        if hypothesis is not None:
            return search_glycopeptide_hypothesis(hypothesis.uuid,
                                                  arguments['mass'],
                                                  arguments['tolerance'])

        return jsonify()
    except Exception as e:
        logging.exception("An exception occurred for %r",
                          request.get_json(),
                          exc_info=e)
        return jsonify()
Example 6
def index_database(database_connection, channel):
    try:
        channel.send(Message("Analyzing Database", 'update'))
        handle = DatabaseBoundOperation(database_connection)

        for (table, ix_name) in index_updates:
            session = handle.session()
            connection = session.connection()
            index = find_index_by_name(table, ix_name)
            if index is None:
                continue
            try:
                index.create(connection)
            except Exception:
                # The index may already exist; discard the failed DDL
                session.rollback()
            session.commit()

        handle._analyze_database()
        session = handle.session()
        for query in target_queries:
            result = session.execute("EXPLAIN QUERY PLAN " + query)
            channel.log("%s:\n\t%r" % (query, ' '.join(map(str, result))))

        channel.send(Message("Indexing Complete", 'update'))
    except Exception:
        channel.send(Message.traceback())
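
find_index_by_name is not part of this listing; a plausible implementation, assuming `table` is a SQLAlchemy Table object whose associated Index objects carry the name being looked up:

def find_index_by_name(table, ix_name):
    # Table.indexes is a set of sqlalchemy.Index objects
    for index in table.indexes:
        if index.name == ix_name:
            return index
    return None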
Example 7
def glycopeptide_mzidentml(database_connection,
                           analysis_identifier,
                           output_path=None,
                           mzml_path=None,
                           embed_protein_sequences=True):
    '''Write identified glycopeptides as mzIdentML file, and associated MSn spectra
    to a paired mzML file if the matched data are available. If an mzML file is written
    it will also contain the extracted ion chromatograms for each glycopeptide with an
    extracted elution profile.
    '''
    database_connection = DatabaseBoundOperation(database_connection)
    session = database_connection.session()  # pylint: disable=not-callable
    analysis = get_by_name_or_id(session, Analysis, analysis_identifier)
    if analysis.analysis_type != AnalysisTypeEnum.glycopeptide_lc_msms:
        click.secho("Analysis %r is of type %r." %
                    (str(analysis.name), str(analysis.analysis_type)),
                    fg='red',
                    err=True)
        raise click.Abort()
    loader = AnalysisDeserializer(database_connection._original_connection,
                                  analysis_id=analysis.id)
    click.echo("Loading Identifications")
    # glycopeptides = loader.load_identified_glycopeptides()
    glycopeptides = loader.query(IdentifiedGlycopeptide).filter(
        IdentifiedGlycopeptide.analysis_id == analysis.id).all()
    if output_path is None:
        output_stream = ctxstream(click.get_binary_stream('stdout'))
    else:
        output_stream = open(output_path, 'wb')
    with output_stream:
        writer = MzIdentMLSerializer(
            output_stream,
            glycopeptides,
            analysis,
            loader,
            source_mzml_path=mzml_path,
            embed_protein_sequences=embed_protein_sequences)
        writer.run()
def _copy_hypothesis_across_file_boundaries(database_connection,
                                            source,
                                            hypothesis_name,
                                            identifier=None):
    source_handle = DatabaseBoundOperation(source)
    source_hypothesis_id = None
    source_hypothesis_name = None

    try:
        hypothesis_id = int(identifier)
        inst = source_handle.query(GlycanHypothesis).get(hypothesis_id)
        if inst is not None:
            source_hypothesis_id = hypothesis_id
            source_hypothesis_name = inst.name

    except (TypeError, ValueError):
        # A non-numeric identifier is treated as a hypothesis name
        hypothesis_name = identifier
        inst = source_handle.query(GlycanHypothesis).filter(
            GlycanHypothesis.name == hypothesis_name).first()
        if inst is not None:
            source_hypothesis_id = inst.id
            source_hypothesis_name = inst.name

    if source == database_connection:
        return source_hypothesis_id

    mover = GlycanHypothesisCopier(database_connection,
                                   [(source, source_hypothesis_id)],
                                   hypothesis_name=source_hypothesis_name)
    mover.run()
    return mover.hypothesis_id
Example 9
def export_identified_glycans_from_glycopeptides(database_connection,
                                                 analysis_identifier,
                                                 output_path):
    database_connection = DatabaseBoundOperation(database_connection)
    session = database_connection.session()  # pylint: disable=not-callable
    analysis = get_by_name_or_id(session, Analysis, analysis_identifier)
    if analysis.analysis_type != AnalysisTypeEnum.glycopeptide_lc_msms:
        click.secho("Analysis %r is of type %r." %
                    (str(analysis.name), str(analysis.analysis_type)),
                    fg='red',
                    err=True)
        raise click.Abort()
    glycans = session.query(GlycanComposition).join(
        GlycanCombinationGlycanComposition).join(GlycanCombination).join(
            Glycopeptide,
            Glycopeptide.glycan_combination_id == GlycanCombination.id).join(
                IdentifiedGlycopeptide,
                IdentifiedGlycopeptide.structure_id == Glycopeptide.id).filter(
                    IdentifiedGlycopeptide.analysis_id == analysis.id).all()
    if output_path is None:
        output_stream = ctxstream(click.get_binary_stream('stdout'))
    else:
        output_stream = open(output_path, 'wb')
    with output_stream:
        job = ImportableGlycanHypothesisCSVSerializer(output_stream, glycans)
        job.run()
Example 10
def validate_unique_name(context, database_connection, name, klass):
    handle = DatabaseBoundOperation(database_connection)
    obj = handle.query(klass).filter(klass.name == name).first()
    if obj is not None:
        return klass.make_unique_name(handle.session, name)
    else:
        return name
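
make_unique_name is a method on the model classes and its implementation is not shown here. One plausible strategy is to append an incrementing suffix until no existing row claims the candidate name; a hypothetical standalone sketch:

def make_unique_name(session, klass, base_name):
    # Probe 'name (1)', 'name (2)', ... until a free name is found
    counter = 0
    candidate = base_name
    while session.query(klass).filter(klass.name == candidate).first() is not None:
        counter += 1
        candidate = '%s (%d)' % (base_name, counter)
    return candidate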
Example 11
    def __init__(self, database_path, analysis_id, mzml_path=None):
        self.database_connection = DatabaseBoundOperation(database_path)
        self.analysis_id = analysis_id
        self.analysis = self.session.query(serialize.Analysis).get(
            self.analysis_id)
        self.mzml_path = mzml_path
        self.scan_loader = None
        self._make_scan_loader()
Example 12
def validate_database_unlocked(database_connection):
    try:
        db = DatabaseBoundOperation(database_connection)
        db.session.add(GlycanHypothesis(name="_____not_real_do_not_use______"))
        db.session.flush()  # emit the INSERT so a locked database raises here
        db.session.rollback()
        return True
    except OperationalError:
        return False
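
The check stages an INSERT and immediately rolls it back, so a SQLite file locked by another writer surfaces as OperationalError without leaving any data behind. A hypothetical pre-flight use before kicking off a long-running build:

if not validate_database_unlocked('sqlite:///analyses.db'):
    click.secho('Database is locked by another process', fg='red')
    raise click.Abort()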
Example 13
    def __init__(self,
                 database_connection,
                 source,
                 source_type,
                 source_identifier=None):
        super(GlycanAnalysisGlycanSourceValidator,
              self).__init__(database_connection, source, source_type,
                             source_identifier)
        self.handle = DatabaseBoundOperation(source)
Example 14
def glycan_composition_identification(database_connection, analysis_identifier, output_path=None,
                                      threshold=0, report=False):
    '''Write each glycan chromatogram in CSV format
    '''
    database_connection = DatabaseBoundOperation(database_connection)
    session = database_connection.session()
    analysis = get_by_name_or_id(session, Analysis, analysis_identifier)
    if analysis.analysis_type != AnalysisTypeEnum.glycan_lc_ms:
        click.secho("Analysis %r is of type %r." % (
            str(analysis.name), str(analysis.analysis_type)), fg='red', err=True)
        raise click.Abort()
    analysis_id = analysis.id
    if output_path is None:
        output_stream = ctxstream(click.get_binary_stream('stdout'))
    else:
        output_stream = open(output_path, 'wb')

    if report:
        with output_stream:
            job = GlycanChromatogramReportCreator(
                database_connection._original_connection,
                analysis_id, output_stream, threshold=threshold)
            job.run()
    else:
        def generate():
            i = 0
            interval = 100
            query = session.query(GlycanCompositionChromatogram).filter(
                GlycanCompositionChromatogram.analysis_id == analysis_id,
                GlycanCompositionChromatogram.score > threshold)

            while True:
                session.expire_all()
                chunk = query.slice(i, i + interval).all()
                if len(chunk) == 0:
                    break
                for gcs in chunk:
                    yield gcs.convert()
                i += interval

            i = 0
            query = session.query(UnidentifiedChromatogram).filter(
                UnidentifiedChromatogram.analysis_id == analysis_id,
                UnidentifiedChromatogram.score > threshold)

            while True:
                session.expire_all()
                chunk = query.slice(i, i + interval).all()
                if len(chunk) == 0:
                    break
                for gcs in chunk:
                    yield gcs.convert()
                i += interval

        with output_stream:
            job = GlycanLCMSAnalysisCSVSerializer(output_stream, generate())
            job.run()
Example 15
def sql_shell(database_connection, script=None):
    db = DatabaseBoundOperation(database_connection)
    session = db.session()  # pylint: disable=not-callable
    interpreter = SQLShellInterpreter(session)
    if script is None:
        interpreter.cmdloop()
    else:
        result = session.execute(script)
        interpreter._to_csv(list(result), sys.stdout)
Example 16
def merge_glycan_hypotheses(context, database_connection,
                            hypothesis_specification, name):
    database_connection = DatabaseBoundOperation(database_connection)
    hypothesis_ids = []
    for connection, ident in hypothesis_specification:
        hypothesis = get_by_name_or_id(DatabaseBoundOperation(connection),
                                       GlycanHypothesis, ident)
        hypothesis_ids.append((connection, hypothesis.id))

    if name is not None:
        name = validate_glycan_hypothesis_name(
            context, database_connection._original_connection, name)
        click.secho("Building Glycan Hypothesis %s" % name, fg='cyan')

    task = GlycanCompositionHypothesisMerger(
        database_connection._original_connection, hypothesis_ids, name)
    task.display_header()
    task.start()
    def stream_from_hypotheses(self, connection, hypothesis_id):
        self.log("Streaming from %s for hypothesis %d" % (connection, hypothesis_id))
        connection = DatabaseBoundOperation(connection)
        session = connection.session()
        for db_composition in session.query(DBGlycanComposition).filter(
                DBGlycanComposition.hypothesis_id == hypothesis_id):
            structure_classes = list(db_composition.structure_classes)
            if len(structure_classes) > 0:
                yield db_composition, [sc.name for sc in structure_classes]
            else:
                yield db_composition, [None]
Example 18
def prebuilt_glycan(context, database_connection, recipe_name, name, reduction,
                    derivatization):
    database_connection = DatabaseBoundOperation(database_connection)
    reduction = validate_reduction(context, reduction)
    derivatization = validate_derivatization(context, derivatization)
    if name is not None:
        name = validate_glycan_hypothesis_name(
            context, database_connection._original_connection, name)
    recipe = prebuilt_hypothesis_register[recipe_name]()
    recipe(database_connection._original_connection,
           hypothesis_name=name,
           reduction=reduction,
           derivatization=derivatization)
    def task(self):
        database = DatabaseBoundOperation(self.connection)
        session = database.session
        has_work = True

        digestor = self.digestor
        acc = []
        if self.message_handler is None:
            self.message_handler = lambda x: None
        while has_work:
            try:
                work_items = self.input_queue.get(timeout=5)
                if work_items is None:
                    has_work = False
                    continue
            except Exception:
                if self.done_event.is_set():
                    has_work = False
                continue
            proteins = slurp(session, Protein, work_items, flatten=False)
            acc = []

            threshold_size = 3000

            for protein in proteins:
                size = len(protein.protein_sequence)
                if size > threshold_size:
                    self.message_handler("Started digesting %s (%d)" %
                                         (protein.name, size))
                i = 0
                for peptide in digestor.process_protein(protein):
                    acc.append(peptide)
                    i += 1
                    if len(acc) > self.chunk_size:
                        session.bulk_save_objects(acc)
                        session.commit()
                        acc = []
                    if i % 10000 == 0:
                        self.message_handler(
                            "Digested %d peptides from %r (%d)" %
                            (i, protein.name, size))
                if size > threshold_size:
                    self.message_handler("Finished digesting %s (%d)" %
                                         (protein.name, size))
            session.bulk_save_objects(acc)
            session.commit()
            acc = []
        if acc:
            session.bulk_save_objects(acc)
            session.commit()
            acc = []
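
This task() method (and the similar ones later in this listing) consumes a multiprocessing queue with a shared shutdown protocol: block on get() with a timeout, treat None as an explicit stop sentinel, and when the queue stays quiet, stop only if done_event says the producer has finished. A self-contained sketch of just that control flow; the worker body is illustrative:

import multiprocessing

def consumer(input_queue, done_event):
    has_work = True
    while has_work:
        try:
            work_item = input_queue.get(timeout=5)
            if work_item is None:
                # Explicit shutdown sentinel from the producer
                has_work = False
                continue
        except Exception:
            # Timed out; stop only when the producer is known to be done
            if done_event.is_set():
                has_work = False
            continue
        print('processing %r' % (work_item,))

if __name__ == '__main__':
    queue = multiprocessing.Queue()
    done = multiprocessing.Event()
    worker = multiprocessing.Process(target=consumer, args=(queue, done))
    worker.start()
    for item in range(3):
        queue.put(item)
    queue.put(None)
    worker.join()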
Example 20
def glycopeptide_identification(database_connection, analysis_identifier, output_path=None,
                                report=False, mzml_path=None, threshold=0):
    '''Write each distinct identified glycopeptide in CSV format
    '''
    database_connection = DatabaseBoundOperation(database_connection)
    session = database_connection.session()
    analysis = get_by_name_or_id(session, Analysis, analysis_identifier)
    if analysis.analysis_type != AnalysisTypeEnum.glycopeptide_lc_msms:
        click.secho("Analysis %r is of type %r." % (
            str(analysis.name), str(analysis.analysis_type)), fg='red', err=True)
        raise click.Abort()
    analysis_id = analysis.id
    if output_path is None:
        output_stream = ctxstream(click.get_binary_stream('stdout'))
    else:
        output_stream = open(output_path, 'wb')
    if report:
        with output_stream:
            if mzml_path is None:
                mzml_path = analysis.parameters['sample_path']
                if not os.path.exists(mzml_path):
                    raise click.ClickException(
                        ("Sample path {} not found. Pass the path to"
                         " this file as `-m/--mzml-path` for this command.").format(
                            mzml_path))
            GlycopeptideDatabaseSearchReportCreator(
                database_connection._original_connection, analysis_id,
                stream=output_stream, threshold=threshold,
                mzml_path=mzml_path).run()
    else:
        query = session.query(Protein.id, Protein.name).join(Protein.glycopeptides).join(
            IdentifiedGlycopeptide).filter(
            IdentifiedGlycopeptide.analysis_id == analysis.id)
        protein_index = dict(query)

        def generate():
            i = 0
            interval = 100
            query = session.query(IdentifiedGlycopeptide).filter(
                IdentifiedGlycopeptide.analysis_id == analysis_id)
            while True:
                session.expire_all()
                chunk = query.slice(i, i + interval).all()
                if len(chunk) == 0:
                    break
                for glycopeptide in chunk:
                    yield glycopeptide.convert()
                i += interval
        with output_stream:
            job = GlycopeptideLCMSMSAnalysisCSVSerializer(output_stream, generate(), protein_index)
            job.run()
Example 21
def glycan_hypothesis(database_connection, hypothesis_identifier, output_path=None, importable=False):
    '''Write each theoretical glycan composition in CSV format
    '''
    database_connection = DatabaseBoundOperation(database_connection)
    hypothesis = get_by_name_or_id(database_connection, GlycanHypothesis, hypothesis_identifier)
    if importable:
        task_type = ImportableGlycanHypothesisCSVSerializer
    else:
        task_type = GlycanHypothesisCSVSerializer
    if output_path is None:
        output_stream = ctxstream(click.get_binary_stream('stdout'))
    else:
        output_stream = open(output_path, 'wb')
    with output_stream:
        job = task_type(output_stream, hypothesis.glycans)
        job.run()
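
ctxstream appears throughout these commands so that `with output_stream:` can treat stdout and a freshly opened file uniformly: the file is closed on exit, while stdout survives the block. Its implementation is not included in this listing; a minimal stand-in consistent with how it is used would be a delegating wrapper whose __exit__ is a no-op:

class ctxstream(object):
    # Hypothetical stand-in: delegate to the wrapped stream but never
    # close it, unlike a file object used directly as a context manager.
    def __init__(self, stream):
        self.stream = stream

    def __getattr__(self, name):
        return getattr(self.stream, name)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        return False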
Example 22
def glycopeptide_training_mgf(database_connection, analysis_identifier, output_path=None,
                              mzml_path=None, threshold=None):
    database_connection = DatabaseBoundOperation(database_connection)
    session = database_connection.session()
    analysis = get_by_name_or_id(session, Analysis, analysis_identifier)
    if analysis.analysis_type != AnalysisTypeEnum.glycopeptide_lc_msms:
        click.secho("Analysis %r is of type %r." % (
            str(analysis.name), str(analysis.analysis_type)), fg='red', err=True)
        raise click.Abort()
    if output_path is None:
        output_stream = ctxstream(click.get_binary_stream('stdout'))
    else:
        output_stream = open(output_path, 'wb')
    with output_stream:
        TrainingMGFExporter.from_analysis(
            database_connection, analysis.id, output_stream, mzml_path, threshold).run()
Example 23
def glycopeptide_chromatogram_records(database_connection,
                                      analysis_identifier,
                                      output_path,
                                      apex_time_range=None):
    if apex_time_range is None:
        apex_time_range = (0, float('inf'))
    database_connection = DatabaseBoundOperation(database_connection)
    session = database_connection.session()  # pylint: disable=not-callable
    analysis = get_by_name_or_id(session, Analysis, analysis_identifier)
    if analysis.analysis_type != AnalysisTypeEnum.glycopeptide_lc_msms:
        click.secho("Analysis %r is of type %r." %
                    (str(analysis.name), str(analysis.analysis_type)),
                    fg='red',
                    err=True)
        raise click.Abort()
    if output_path is None:
        fh = click.get_binary_stream('stdout')
    else:
        fh = open(output_path, 'wb')
    idgps = session.query(IdentifiedGlycopeptide).filter(
        IdentifiedGlycopeptide.analysis_id == analysis.id).all()
    n = len(idgps)
    from glycan_profiling.scoring.elution_time_grouping import GlycopeptideChromatogramProxy
    cases = []
    analysis_name = analysis.name
    start_time, stop_time = apex_time_range
    for i, idgp in enumerate(idgps):
        if i % 50 == 0:
            click.echo("%d/%d Records Processed" % (i, n), err=True)
        if idgp.chromatogram is None:
            continue
        if idgp.ms1_score < 0:
            continue
        obj = GlycopeptideChromatogramProxy.from_obj(
            idgp,
            ms1_score=idgp.ms1_score,
            ms2_score=idgp.ms2_score,
            q_value=idgp.q_value,
            analysis_name=analysis_name,
            mass_shifts=';'.join(
                [m.name for m in idgp.chromatogram.mass_shifts]))
        if obj.apex_time < start_time or obj.apex_time > stop_time:
            continue
        cases.append(obj)
    click.echo("Writing %d Records" % (len(cases), ), err=True)
    with fh:
        GlycopeptideChromatogramProxy.to_csv(cases, csv_stream(fh))
Example 24
    def task(self):
        database = DatabaseBoundOperation(self.connection)
        self.session = database.session
        has_work = True

        glycosylator = PeptideGlycosylator(database.session,
                                           self.hypothesis_id,
                                           glycan_offset=self.glycan_offset,
                                           glycan_limit=self.glycan_limit)
        result_accumulator = []

        n = 0
        n_gps = 0
        while has_work:
            try:
                work_items = self.input_queue.get(timeout=5)
                if work_items is None:
                    has_work = False
                    continue
            except Exception:
                if self.done_event.is_set():
                    has_work = False
                continue
            peptides = self.load_peptides(work_items)
            n += len(peptides)
            for peptide in peptides:
                for gp in glycosylator.handle_peptide(peptide):
                    result_accumulator.append(gp)
                    if len(result_accumulator) > self.chunk_size:
                        n_gps += len(result_accumulator)
                        self.process_result(result_accumulator)
                        result_accumulator = []
            if len(result_accumulator) > 0:
                n_gps += len(result_accumulator)
                self.process_result(result_accumulator)
                result_accumulator = []
        self.work_done_event.set()
        # It seems there is no public API to force the process to check if it is done
        # but the internal method is invoked when creating a Process `repr` on Python 2.
        # This problem supposedly doesn't exist in Python 3.
        repr(self)
        self.log_handler(
            "Process %r completed. (%d peptides, %d glycopeptides)" %
            (self.pid, n, n_gps))
Example 25
def glycopeptide_spectrum_matches(database_connection,
                                  analysis_identifier,
                                  output_path=None):
    '''Write each matched glycopeptide spectrum in CSV format
    '''
    database_connection = DatabaseBoundOperation(database_connection)
    session = database_connection.session()  # pylint: disable=not-callable
    analysis = get_by_name_or_id(session, Analysis, analysis_identifier)
    if analysis.analysis_type != AnalysisTypeEnum.glycopeptide_lc_msms:
        click.secho("Analysis %r is of type %r." %
                    (str(analysis.name), str(analysis.analysis_type)),
                    fg='red',
                    err=True)
        raise click.Abort()
    analysis_id = analysis.id
    query = session.query(Protein.id, Protein.name).join(
        Protein.glycopeptides).join(GlycopeptideSpectrumMatch).filter(
            GlycopeptideSpectrumMatch.analysis_id == analysis.id)
    protein_index = dict(query)

    def generate():
        i = 0
        interval = 100000
        query = session.query(GlycopeptideSpectrumMatch).filter(
            GlycopeptideSpectrumMatch.analysis_id == analysis_id).order_by(
                GlycopeptideSpectrumMatch.scan_id)
        while True:
            session.expire_all()
            chunk = query.slice(i, i + interval).all()
            if len(chunk) == 0:
                break
            for glycopeptide in chunk:
                yield glycopeptide.convert()
            i += interval

    if output_path is None:
        output_stream = ctxstream(click.get_binary_stream('stdout'))
    else:
        output_stream = open(output_path, 'wb')
    with output_stream:
        job = GlycopeptideSpectrumMatchAnalysisCSVSerializer(
            output_stream, generate(), protein_index)
        job.run()
Example 26
    def task(self):
        database = DatabaseBoundOperation(self.connection)
        session = database.session
        has_work = True

        sharer = PeptideSharer(self.connection, self.hypothesis_id)

        while has_work:
            try:
                work_items = self.input_queue.get(timeout=5)
                if work_items is None:
                    has_work = False
                    continue
            except Exception:
                if self.done_event.is_set():
                    has_work = False
                continue
            proteins = slurp(session, Protein, work_items, flatten=False)
            for protein in proteins:
                sharer.find_contained_peptides(protein)
Example 27
def glycan_network(context, database_connection, hypothesis_identifier,
                   edge_strategy, output_path):
    conn = DatabaseBoundOperation(database_connection)
    hypothesis = get_by_name_or_id(conn, GlycanHypothesis,
                                   hypothesis_identifier)
    if output_path is None:
        output_stream = ctxstream(click.get_binary_stream('stdout'))
    else:
        output_stream = open(output_path, 'wb')
    with output_stream:
        db = GlycanCompositionDiskBackedStructureDatabase(
            database_connection, hypothesis.id)
        glycans = list(db)
        graph = CompositionGraph(glycans)
        if edge_strategy == 'manhattan':
            graph.create_edges(1)
        else:
            raise click.ClickException("Could not find edge strategy %r" %
                                       (edge_strategy, ))
        GraphWriter(graph, output_stream)
Example 28
def annotate_matched_spectra(database_connection,
                             analysis_identifier,
                             output_path,
                             mzml_path=None):
    database_connection = DatabaseBoundOperation(database_connection)
    session = database_connection.session()  # pylint: disable=not-callable
    analysis = get_by_name_or_id(session, Analysis, analysis_identifier)
    if analysis.analysis_type != AnalysisTypeEnum.glycopeptide_lc_msms:
        click.secho("Analysis %r is of type %r." %
                    (str(analysis.name), str(analysis.analysis_type)),
                    fg='red',
                    err=True)
        raise click.Abort()
    if output_path is None:
        output_path = os.path.dirname(database_connection._original_connection)

    task = SpectrumAnnotatorExport(database_connection._original_connection,
                                   analysis.id, output_path, mzml_path)
    task.display_header()
    task.start()
Example 29
    def __init__(self, database_connection, base_path=None, validate=False):
        if base_path is None:
            base_path = os.getcwd()
        Project.__init__(self, base_path, validate=validate)

        self.load_configuration()

        self.database_connection = DatabaseBoundOperation(
            database_connection)

        self.task_manager = TaskManager(
            self.task_dir)

        self.task_manager.register_event_handler("new-sample-run", self.handle_new_sample_run)
        self.task_manager.register_event_handler("new-hypothesis", self.handle_new_hypothesis)
        self.task_manager.register_event_handler("new-analysis", self.handle_new_analysis)

        logger = logging.getLogger()
        logger.addHandler(
            logging.FileHandler(
                self.application_log_path, mode='a'))
def view_hypothesis_dispatch(uuid):
    try:
        arguments, state = request_arguments_and_context()
        record = _locate_hypothesis(uuid)
        handle = DatabaseBoundOperation(record.path)
        hypothesis = handle.query(GlycanHypothesis).filter(
            GlycanHypothesis.uuid == record.uuid).first()

        if hypothesis is not None:
            return handle_glycan_hypothesis(hypothesis)

        hypothesis = handle.query(GlycopeptideHypothesis).filter(
            GlycopeptideHypothesis.uuid == record.uuid).first()
        if hypothesis is not None:
            return handle_glycopeptide_hypothesis(hypothesis)

        return Response("<h2>%s</h2>" % record.name)
    except Exception as e:
        logging.exception("An exception occurred for %r",
                          request.get_json(), exc_info=e)
    return Response("<h2>No display method is implemented for %s </h2>" % request.get_json())