def run_annotate(self, args):
    self._check_annotate(args)
    annotate = Annotate(# Define inputs and outputs
                        args.output,
                        # Define type of annotation to be carried out
                        args.ko, args.ko_hmm, args.pfam, args.tigrfam,
                        args.clusters, args.orthologs, args.cazy, args.ec,
                        args.orthogroup,
                        # Cutoffs
                        args.evalue, args.bit, args.id, args.aln_query,
                        args.aln_reference, args.c,
                        args.cut_ga_pfam, args.cut_nc_pfam, args.cut_tc_pfam,
                        args.cut_ga_tigrfam, args.cut_nc_tigrfam, args.cut_tc_tigrfam,
                        args.cut_ko, args.inflation, args.chunk_number,
                        args.chunk_max, args.count_domains,
                        # Parameters
                        args.threads, args.parallel, args.suffix, args.light)

    annotate.annotate_pipeline(args.genome_directory,
                               args.protein_directory,
                               args.genome_files,
                               args.protein_files)

def run_enrichm(self, args, command):
    '''
    Dispatch to the pipeline selected by the chosen subcommand.

    Parameters
    ----------
    args : namespace of parsed command-line arguments for the selected
        subcommand (args.subparser_name identifies which pipeline to run).
    command : list of the raw command-line tokens; logged for reproducibility.

    Output
    ------
    Results are written to args.output by the selected pipeline.
    '''
    self._check_general(args)
    self._logging_setup(args)

    logging.info("Command: %s" % ' '.join(command))
    logging.info("Running the %s pipeline" % args.subparser_name)

    if args.subparser_name == self.DATA:
        d = Data()
        d.do(args.uninstall, args.dry)

    if args.subparser_name == self.ANNOTATE:
        self._check_annotate(args)
        annotate = Annotate(# Define inputs and outputs
                            args.output,
                            # Define type of annotation to be carried out
                            args.ko, args.ko_hmm, args.pfam, args.tigrfam,
                            args.clusters, args.orthologs, args.cazy, args.ec,
                            # Cutoffs
                            args.evalue, args.bit, args.id, args.aln_query,
                            args.aln_reference, args.c,
                            args.cut_ga, args.cut_nc, args.cut_tc,
                            args.cut_ko, args.inflation, args.chunk_number,
                            args.chunk_max, args.count_domains,
                            # Parameters
                            args.threads, args.parallel, args.suffix, args.light)

        annotate.annotate_pipeline(args.genome_directory,
                                   args.protein_directory,
                                   args.genome_files,
                                   args.protein_files)

    elif args.subparser_name == self.CLASSIFY:
        self._check_classify(args)
        classify = Classify()
        classify.classify_pipeline(args.custom_modules, args.cutoff,
                                   args.aggregate,
                                   args.genome_and_annotation_matrix,
                                   args.output)

    elif args.subparser_name == self.ENRICHMENT:
        self._check_enrichment(args)
        enrichment = Enrichment()
        enrichment.enrichment_pipeline(# Input options
                                       args.annotate_output, args.annotation_matrix,
                                       args.metadata, args.abundance,
                                       args.abundance_metadata,
                                       args.transcriptome, args.transcriptome_metadata,
                                       # Runtime options
                                       args.pval_cutoff, args.proportions_cutoff,
                                       args.threshold, args.multi_test_correction,
                                       args.batchfile, args.processes,
                                       args.allow_negative_values,
                                       args.ko, args.pfam, args.tigrfam, args.cluster,
                                       args.ortholog, args.cazy, args.ec, args.ko_hmm,
                                       # Outputs
                                       args.output)

    elif args.subparser_name in (NetworkAnalyser.PATHWAY,
                                 NetworkAnalyser.EXPLORE):
        self._check_network(args)
        network_analyser = NetworkAnalyser()
        network_analyser.network_pipeline(args.subparser_name,
                                          args.matrix, args.genome_metadata,
                                          args.tpm_values, args.tpm_metadata,
                                          args.abundance, args.abundance_metadata,
                                          args.metabolome,
                                          args.enrichment_output,
                                          args.depth, args.filter,
                                          args.limit, args.queries,
                                          args.output)

    if args.subparser_name == self.PREDICT:
        self._check_predict(args)
        predict = Predict()
        predict.predict_pipeline(args.forester_model_directory,
                                 args.input_matrix,
                                 args.output)

    elif args.subparser_name == self.GENERATE:
        self._check_generate(args)
        generate_model = GenerateModel()
        generate_model.generate_pipeline(args.input_matrix,
                                         args.groups,
                                         args.model_type,
                                         args.testing_portion,
                                         args.grid_search,
                                         args.threads,
                                         args.output)

    elif args.subparser_name == self.USES:
        self._check_uses(args)
        uses = Uses()
        uses.uses_pipeline(args.compounds_list,
                           args.annotation_matrix,
                           args.metadata,
                           args.output,
                           args.count)

    logging.info('Finished running EnrichM')
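
# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not EnrichM's actual CLI wiring).
# It assumes an argparse-based front end in which each subcommand stores its
# name in `subparser_name`; that is the attribute run_enrichm() above uses to
# decide which pipeline to dispatch to. The parser, the single option shown,
# and the `Run` class name are assumptions made for this example.
#
#   import argparse, sys
#
#   parser = argparse.ArgumentParser(prog='enrichm')
#   subparsers = parser.add_subparsers(dest='subparser_name')
#   annotate_parser = subparsers.add_parser('annotate')
#   annotate_parser.add_argument('--output')
#   # ... remaining subcommands and their options omitted ...
#
#   args = parser.parse_args(sys.argv[1:])
#   Run().run_enrichm(args, sys.argv)   # raw argv is logged, then dispatched
# ---------------------------------------------------------------------------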