Example 1
    def test(self):
        tmp = tempfile.mkdtemp()  # requires `import tempfile` in the test module
        self.simple_annotate_instance \
            = Annotate(tmp,
                       True, True, True, True, True, True, True, True, True,  # annotate with all databases
                       1e-05, 0, 0.3, 0.7, 0.7, 0.7,  # cutoffs: evalue, bit, id, aln_query, aln_reference, c
                       False, False, False, False, False, False, True,  # HMM cutoff flags (cut_ga/nc/tc for Pfam and TIGRFAM, cut_ko)
                       5,  # inflation
                       4, 2500,  # chunk number, chunk max
                       False, 1, 1, '.fna', False)  # count_domains, threads, parallel, suffix, light
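
For context, a self-contained version of this constructor test might look as follows. This is a sketch only: the import path enrichm.annotate and the tearDown clean-up are assumptions, not part of the original snippet.

import shutil
import tempfile
import unittest

from enrichm.annotate import Annotate  # assumed import path, not shown in the original

class TestAnnotateConstruction(unittest.TestCase):

    def setUp(self):
        # Temporary output directory handed to Annotate
        self.tmp = tempfile.mkdtemp()

    def tearDown(self):
        # Clean up the temporary directory after each test
        shutil.rmtree(self.tmp)

    def test_construction(self):
        annotate = Annotate(self.tmp,
                            True, True, True, True, True, True, True, True, True,  # all annotation types
                            1e-05, 0, 0.3, 0.7, 0.7, 0.7,                           # cutoffs
                            False, False, False, False, False, False, True,         # HMM cutoff flags
                            5,                                                      # inflation
                            4, 2500,                                                # chunk number, chunk max
                            False, 1, 1, '.fna', False)                             # count_domains, threads, parallel, suffix, light
        self.assertIsInstance(annotate, Annotate)

if __name__ == '__main__':
    unittest.main()
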
Example 2
    def run_annotate(self, args):
        self._check_annotate(args)

        annotate = Annotate(  # Define inputs and outputs
            args.output,
            # Define type of annotation to be carried out
            args.ko,
            args.ko_hmm,
            args.pfam,
            args.tigrfam,
            args.clusters,
            args.orthologs,
            args.cazy,
            args.ec,
            args.orthogroup,
            # Cutoffs
            args.evalue,
            args.bit,
            args.id,
            args.aln_query,
            args.aln_reference,
            args.c,
            args.cut_ga_pfam,
            args.cut_nc_pfam,
            args.cut_tc_pfam,
            args.cut_ga_tigrfam,
            args.cut_nc_tigrfam,
            args.cut_tc_tigrfam,
            args.cut_ko,
            args.inflation,
            args.chunk_number,
            args.chunk_max,
            args.count_domains,
            # Parameters
            args.threads,
            args.parallel,
            args.suffix,
            args.light)

        annotate.annotate_pipeline(args.genome_directory,
                                   args.protein_directory, args.genome_files,
                                   args.protein_files)
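
Because run_annotate only reads attributes from args, it can be driven without a full command-line parse. The sketch below fills an argparse.Namespace with the attributes the method uses; the values are illustrative (mirroring Example 1), the runner parameter stands in for an instance of whatever class defines run_annotate, and _check_annotate may expect additional attributes not shown here.

from argparse import Namespace

def drive_annotate(runner, genome_directory):
    # runner: instance of the class that defines run_annotate and _check_annotate
    args = Namespace(
        output='annotate_output',
        # annotation types
        ko=True, ko_hmm=False, pfam=True, tigrfam=True, clusters=False,
        orthologs=False, cazy=False, ec=False, orthogroup=False,
        # cutoffs
        evalue=1e-05, bit=0, id=0.3, aln_query=0.7, aln_reference=0.7, c=0.7,
        cut_ga_pfam=False, cut_nc_pfam=False, cut_tc_pfam=False,
        cut_ga_tigrfam=False, cut_nc_tigrfam=False, cut_tc_tigrfam=False,
        cut_ko=True,
        inflation=5, chunk_number=4, chunk_max=2500, count_domains=False,
        # parameters
        threads=1, parallel=1, suffix='.fna', light=False,
        # inputs consumed by annotate_pipeline
        genome_directory=genome_directory, protein_directory=None,
        genome_files=None, protein_files=None)
    runner.run_annotate(args)
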
Example 3
    def run_enrichm(self, args, command):
        '''
        Run the pipeline selected by args.subparser_name.

        Parameters
        ----------
        args : argparse.Namespace
            Parsed command line arguments for the chosen subcommand.
        command : list
            The raw command line tokens, logged before the run starts.

        Output
        ------
        None. Results are written to disk by the selected pipeline.
        '''
        self._check_general(args)
        self._logging_setup(args)

        logging.info("Command: %s" % ' '.join(command))
        logging.info("Running the %s pipeline" % args.subparser_name)

        if args.subparser_name == self.DATA:
            d = Data()
            d.do(args.uninstall, args.dry)

        if args.subparser_name == self.ANNOTATE:
            self._check_annotate(args)
            annotate = Annotate(# Define inputs and outputs
                                args.output,
                                # Define type of annotation to be carried out
                                args.ko, args.ko_hmm, args.pfam, args.tigrfam,
                                args.clusters, args.orthologs, args.cazy,
                                args.ec,
                                # Cutoffs
                                args.evalue, args.bit, args.id, args.aln_query,
                                args.aln_reference, args.c, args.cut_ga, 
                                args.cut_nc, args.cut_tc, args.cut_ko,
                                args.inflation, args.chunk_number, args.chunk_max,
                                args.count_domains,
                                # Parameters
                                args.threads, args.parallel, args.suffix, args.light)

            annotate.annotate_pipeline(args.genome_directory,
                                       args.protein_directory,
                                       args.genome_files,
                                       args.protein_files)

        elif args.subparser_name == self.CLASSIFY:
            self._check_classify(args)
            classify = Classify()
            classify.classify_pipeline(args.custom_modules, args.cutoff, args.aggregate,
                                       args.genome_and_annotation_matrix, args.output)

        elif args.subparser_name == self.ENRICHMENT:
            self._check_enrichment(args)
            enrichment = Enrichment()
            enrichment.enrichment_pipeline(# Input options
                                           args.annotate_output, args.annotation_matrix,
                                           args.metadata, args.abundance, args.abundance_metadata,
                                           args.transcriptome, args.transcriptome_metadata,
                                           # Runtime options
                                           args.pval_cutoff, args.proportions_cutoff, 
                                           args.threshold, args.multi_test_correction, 
                                           args.batchfile, args.processes, 
                                           args.allow_negative_values, args.ko, args.pfam, 
                                           args.tigrfam, args.cluster, args.ortholog, args.cazy,
                                           args.ec, args.ko_hmm,
                                           # Outputs
                                           args.output)

        elif (args.subparser_name == NetworkAnalyser.PATHWAY or
              args.subparser_name == NetworkAnalyser.EXPLORE):
            self._check_network(args)
            network_analyser = NetworkAnalyser()
            network_analyser.network_pipeline(args.subparser_name, args.matrix, 
                                              args.genome_metadata, args.tpm_values,
                                              args.tpm_metadata, args.abundance, 
                                              args.abundance_metadata, args.metabolome,
                                              args.enrichment_output, args.depth, args.filter,
                                              args.limit, args.queries, args.output)

        if args.subparser_name == self.PREDICT:
            self._check_predict(args)
            predict = Predict()
            predict.predict_pipeline(args.forester_model_directory,
                                     args.input_matrix,
                                     args.output)

        elif args.subparser_name == self.GENERATE:
            self._check_generate(args)
            generate_model = GenerateModel()
            generate_model.generate_pipeline(args.input_matrix,
                                             args.groups,
                                             args.model_type,
                                             args.testing_portion,
                                             args.grid_search,
                                             args.threads,
                                             args.output)

        elif args.subparser_name == self.USES:
            self._check_uses(args)
            uses = Uses()
            uses.uses_pipeline(args.compounds_list,
                               args.annotation_matrix,
                               args.metadata,
                               args.output,
                               args.count)

        logging.info('Finished running EnrichM')
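
run_enrichm expects both the parsed arguments and the raw command line (the latter only for logging). A minimal entry-point sketch is shown below; the parser and runner are passed in as parameters because their construction is not part of this example.

import sys

def main(parser, runner):
    # parser: an argparse.ArgumentParser defining the subcommands referenced above
    # runner: an instance of the class that defines run_enrichm
    args = parser.parse_args(sys.argv[1:])
    runner.run_enrichm(args, sys.argv)
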
Example 4
File: run.py Project: yxxue/enrichM
    def main(self, args, command):
        '''
        Run the pipeline selected by args.subparser_name.

        Parameters
        ----------
        args : argparse.Namespace
            Parsed command line arguments for the chosen subcommand.
        command : list
            The raw command line tokens, logged before the run starts.

        Output
        ------
        None. Results are written to disk by the selected pipeline.
        '''

        self._check_general(args)
        self._logging_setup(args)

        logging.info("Running command: %s" % ' '.join(command))

        if args.subparser_name == self.DATA:
            d = Data()
            d.do(args.uninstall)
        
        if args.subparser_name == self.ANNOTATE:
            self._check_annotate(args)
            a = Annotate(# Define inputs and outputs
                         args.output,
                         # Define type of annotation to be carried out
                         args.ko,
                         args.pfam,
                         args.tigrfam,
                         args.hypothetical,
                         args.cazy,
                         # Cutoffs
                         args.evalue,
                         args.bit,
                         args.id,
                         args.aln_query, 
                         args.aln_reference, 
                         args.c,
                         args.cut_ga,
                         args.cut_nc,
                         args.cut_tc,
                         args.inflation,
                         args.chunk_number,
                         args.chunk_max,
                         # Parameters
                         args.threads,
                         args.parallel,
                         args.suffix,
                         args.light)
            a.do(args.genome_directory,
                 args.protein_directory, 
                 args.genome_files,
                 args.protein_files)

        elif args.subparser_name == self.CLASSIFY:
            self._check_classify(args)
            c = Classify()
            c.do(args.custom_modules, 
                 args.cutoff,
                 args.genome_and_annotation_file,
                 args.genome_and_annotation_matrix,
                 args.output)

        elif args.subparser_name == self.ENRICHMENT: 
            self._check_enrichment(args)
            e = Enrichment()
            e.do(# Input options
                 args.annotate_output,
                 args.metadata,
                 args.modules,
                 args.abundances,
                 # Runtime options
                 args.genomes_to_compare_with_group,
                 args.pval_cutoff,
                 args.proportions_cutoff,
                 args.threshold,
                 args.multi_test_correction,
                 args.batchfile,
                 args.processes,
                 args.ko,
                 args.pfam,
                 args.tigrfam,
                 args.hypothetical,
                 args.cazy,
                 # Outputs
                 args.output)

        elif args.subparser_name == self.CONNECT:
            self._check_connect(args)
            c = Connect()
            c.do(args.annotate_output,
                 args.metadata,
                 args.custom_modules,
                 args.cutoff,
                 args.output)

        elif (args.subparser_name == NetworkAnalyser.PATHWAY or
              args.subparser_name == NetworkAnalyser.EXPLORE or
              args.subparser_name == NetworkAnalyser.TRAVERSE):
            self._check_network(args)
            na = NetworkAnalyser(args.metadata)
            na.do(args.matrix,
                  args.transcriptome,
                  args.metabolome,
                  args.depth,
                  args.filter,
                  args.limit,
                  args.queries,
                  args.subparser_name,
                  args.starting_compounds, 
                  args.steps,
                  args.number_of_queries,
                  args.output)
        
        if args.subparser_name == self.PREDICT:
            self._check_predict(args)
            p = Predict()
            p.do(args.forester_model_directory,
                 args.input_matrix,
                 args.output)

        elif args.subparser_name == self.GENERATE:
            self._check_generate(args)
            gm = GenerateModel()
            gm.do(args.input_matrix,
                  args.groups,
                  args.model_type,
                  args.testing_portion,
                  args.grid_search,
                  args.threads,
                  args.output)
        
        logging.info('Done!')
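
Note that Example 4 appears to come from an earlier version (or fork) of EnrichM: its Annotate constructor takes a shorter list of annotation-type flags (ko, pfam, tigrfam, hypothetical, cazy, with no ko_hmm, ec or orthogroup), each pipeline object is invoked through do() rather than the *_pipeline methods used in Examples 2 and 3, NetworkAnalyser is constructed with args.metadata, and a TRAVERSE subcommand is handled alongside PATHWAY and EXPLORE.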