Example #1
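# Note: these snippets are taken from a larger package. They assume the standard
# imports (os, logging, gzip, numpy, pandas, and a timer() such as timeit.default_timer)
# plus the package's own helper modules (Utilities, PredictionModel, PrediXcanAssociation,
# etc.) are available in the enclosing module.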
def run(args):
    start = timer()
    if os.path.exists(args.output):
        logging.info("%s already exists, you have to move it or delete it if you want it done again", args.output)
        return

    if (args.hdf5_expression_file and args.expression_file) or \
        (not args.hdf5_expression_file and not args.expression_file):
        logging.info("Provide either hdf5 expression file or plain text expression file")
        return

    with PrediXcanUtilities.p_context_from_args(args) as context:
        genes = context.get_genes()
        n_genes = len(genes)
        reporter = Utilities.PercentReporter(logging.INFO, n_genes)
        reporter.update(0, "%d %% of model's genes processed so far", force=True)
        results = []
        for i,gene in enumerate(genes):
            logging.log(7, "Processing gene %s", gene)
            r = PrediXcanAssociation.predixcan_association(gene, context)
            results.append(r)
            reporter.update(i, "%d %% of model's genes processed so far")
        reporter.update(i, "%d %% of model's genes processed so far")
        results = PrediXcanAssociation.dataframe_from_results(results)
        results = results.fillna("NA")
        results = results.sort_values(by="pvalue")

        Utilities.save_dataframe(results, args.output)

    end = timer()
    logging.info("Ran multi tissue predixcan in %s seconds" % (str(end - start)))
Example #2
def run(args):
    if os.path.exists(args.snp_covariance_output):
        logging.info("%s already exists, you have to move it or delete it if you want it done again", args.snp_covariance_output)
        return

    start = timer()

    logging.info("Loading models...")
    model_manager = PredictionModel.load_model_manager(args.models_folder, name_pattern=args.models_pattern)
    all_snps = model_manager.get_rsids()

    logging.info("processing genotype")
    for chromosome, metadata, dosage in GenotypeUtilities.genotype_by_chromosome_from_args(args, all_snps):
        logging.log(9, "Processing chromosome %s", str(chromosome))
        covariance_results = pandas.DataFrame()

        context = GenotypeAnalysis.GenotypeAnalysisContext(metadata, dosage, model_manager)
        genes = context.get_genes()
        reporter = Utilities.PercentReporter(9, len(genes))
        reporter.update(0, "%d %% of genes processed so far in chromosome " + str(chromosome))
        for i,gene in enumerate(genes):
            logging.log(6, "%d/%d:%s", i+1, len(genes), gene)
            cov_data = GenotypeAnalysis.get_prediction_covariance(context, gene)
            cov_data = MatrixManager._flatten_matrix_data([cov_data])
            cov_data = Utilities.to_dataframe(cov_data, GenotypeAnalysis.COVARIANCE_COLUMNS, to_numeric="ignore", fill_na="NA")
            covariance_results = pandas.concat([covariance_results, cov_data])

            reporter.update(i, "%d %% of genes processed so far in chromosome "+str(chromosome))

        reporter.update(len(genes), "%d %% of genes processed so far in chromosome " + str(chromosome))

        logging.log(9, "writing chromosome results")
        Utilities.save_dataframe(covariance_results, args.snp_covariance_output,
                                 mode="w" if chromosome == 1 else "a",
                                 header=chromosome == 1)

    end = timer()
    logging.info("Ran covariance builder in %s seconds" % (str(end - start)))
Example #3
def run(args):
    start = timer()
    if os.path.exists(args.output):
        logging.info(
            "%s already exists, you have to move it or delete it if you want it done again",
            args.output)
        return

    if (args.hdf5_expression_folder and args.expression_folder) or \
        (not args.hdf5_expression_folder and not args.expression_folder):
        logging.info(
            "Provide either hdf5 expression folder or plain text expression folder"
        )
        return

    with MultiPrediXcanUtilities.mp_context_from_args(args) as context:
        genes = context.get_genes()
        n_genes = len(genes)
        reporter = Utilities.PercentReporter(logging.INFO, n_genes)
        reporter.update(0,
                        "%d %% of model's genes processed so far",
                        force=True)

        results = []
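        # optional callbacks collect per-gene coefficients and loadings when the corresponding outputs are requested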
        callbacks = {}
        if args.coefficient_output:
            callbacks["coefficient"] = MultiPrediXcanAssociation.SaveCoefs()
        if args.loadings_output:
            callbacks["loadings"] = MultiPrediXcanAssociation.SaveLoadings()

        for i, gene in enumerate(genes):
            logging.log(7, "Processing gene %i/%i: %s", i + 1, n_genes, gene)
            r = MultiPrediXcanAssociation.multi_predixcan_association(
                gene, context, callbacks.values())
            results.append(r)
            reporter.update(i, "%d %% of model's genes processed so far")
        reporter.update(i, "%d %% of model's genes processed so far")
        results = MultiPrediXcanAssociation.dataframe_from_results(
            results, context)
        results = results.fillna("NA")
        results = results.sort_values(by="pvalue")

        Utilities.save_dataframe(results, args.output)
        if args.coefficient_output:
            Utilities.save_dataframe(callbacks["coefficient"].get(),
                                     args.coefficient_output)
        if args.loadings_output:
            Utilities.save_dataframe(callbacks["loadings"].get(),
                                     args.loadings_output)

    end = timer()
    logging.info("Ran multi tissue predixcan in %s seconds" %
                 (str(end - start)))
Example #4
def run(args):
    start = timer()

    folder, prefix = os.path.split(args.output_prefix)
    results_name = args.output_prefix + "__mt_results.txt"
    predixcan_results_name = args.output_prefix + "__p_results.txt"
    additional_name = args.output_prefix + "__additional.txt"

    if os.path.exists(results_name):
        logging.info(
            "%s already exists, you have to move it or delete it if you want it done again",
            results_name)
        return

    #for reproducibility
    numpy.random.seed(100)

    results = []
    additional = []
    predixcan_results = []

    n_max = args.max_n_results
    logging.info("Acquiring context")
    with MultiPredixcanSimulations.context_from_args(args) as context:
        logging.info("processing")
        _c, _cp, _e = context.get_mp_simulation(None)
        for i, gene in enumerate(context.get_genes()):
            if n_max and i + 1 > n_max:
                logging.info("Max runs met")
                break
            logging.log(9, "%d Gene %s", i, gene)
            r, add, p = MultiPredixcanSimulations.simulate(gene, context)
            if r is None:
                logging.log(9, "%s could not be simulated", gene)
                continue
            results.append(r)
            additional.append(add)

            if p is not None:
                predixcan_results.append(p)

    results = MultiPrediXcanAssociation.dataframe_from_results(
        results, _c).sort_values(by="pvalue")
    additional = pandas.concat(additional)

    Utilities.ensure_requisite_folders(results_name)
    Utilities.save_dataframe(results, results_name)
    Utilities.save_dataframe(additional, additional_name)

    if len(predixcan_results):
        predixcan_results = pandas.concat(predixcan_results)
        Utilities.save_dataframe(predixcan_results, predixcan_results_name)
    logging.info("Finished")
Example #5
def run(args):
    start = timer()
    if args.prediction_output:
        if os.path.exists(args.prediction_output[0]):
            logging.info(
                "Prediction output exists. Move or remove if you want this ran again."
            )
            return
        Utilities.ensure_requisite_folders(args.prediction_output[0])

    if args.prediction_summary_output:
        if os.path.exists(args.prediction_summary_output):
            logging.info(
                "Summary output exists. Move or remove if you want this ran again."
            )
            return
        Utilities.ensure_requisite_folders(args.prediction_summary_output)

    logging.info("Loading samples")
    samples = load_samples(args)

    logging.info("Loading model")
    model, weights, extra = model_structure(args)

    variant_mapping = get_variant_mapping(args, weights)

    logging.info("Preparing genotype dosages")
    dosage_source = dosage_generator(args, variant_mapping, weights)

    logging.info("Processing genotypes")
    dcapture = []
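    # progress is reported as the fraction of the models' distinct rsids found in the genotype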
    reporter = Utilities.PercentReporter(logging.INFO,
                                         len(set(weights.rsid.values)))
    snps_found = set()
    with prepare_prediction(args, extra, samples) as results:

        for i, e in enumerate(dosage_source):
            if args.stop_at_variant and i > args.stop_at_variant:
                break
            var_id = e[GF.RSID]

            logging.log(8, "variant %i:%s", i, var_id)
            if var_id in model:
                s = model[var_id]
                ref_allele, alt_allele = e[GF.REF_ALLELE], e[GF.ALT_ALLELE]

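                # align the genotype's alleles with the model's; variants whose alleles cannot be matched are skipped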
                allele_align, strand_align = GWASAndModels.match_alleles(
                    ref_allele, alt_allele, s[0], s[1])
                if not allele_align or not strand_align:
                    continue

                dosage = e[GF.FIRST_DOSAGE:]
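                # when the alleles are swapped relative to the model, flip the dosage (2 - x)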
                if allele_align == -1:
                    dosage = tuple(map(lambda x: 2 - x, dosage))
                dosage = numpy.array(dosage, dtype=numpy.float64)  # numpy.float alias was removed in NumPy 1.24

                snps_found.add(var_id)

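                # add this variant's weighted dosage to every gene whose model uses it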
                for gene, weight in s[2].items():
                    results.update(gene, dosage, weight)
                    if args.capture:
                        dcapture.append((gene, weight, var_id, s[0], s[1],
                                         ref_allele, alt_allele, strand_align,
                                         allele_align) + e[GF.FIRST_DOSAGE:])

                reporter.update(len(snps_found), "%d %% of models' snps used")

    reporter.update(len(snps_found), "%d %% of models' snps used", force=True)

    if args.capture:
        logging.info("Saving data capture")
        Utilities.ensure_requisite_folders(args.capture)
        with gzip.open(args.capture, "w") as f:
            header = "gene\tweight\tvariant_id\tref_allele\teff_allele\ta0\ta1\tstrand_align\tallele_align\t" + "\t".join(
                samples.IID.values) + "\n"
            f.write(header.encode())
            for c in dcapture:
                l = "\t".join(map(str, c)) + "\n"
                f.write(l.encode())

    if args.prediction_output and len(args.prediction_output) < 2:
        logging.info("Storing prediction")
        results.store_prediction()

    if args.prediction_summary_output:
        logging.info("Saving summary")
        summary = results.summary()
        Utilities.save_dataframe(summary, args.prediction_summary_output)

    end = timer()
    logging.info("Successfully predicted expression in %s seconds" %
                 (str(end - start)))

    return results
Example #6
def store_prediction(self):
    logging.info("Saving prediction as a text file")
    d = pandas.DataFrame(self.genes)
    result = pandas.concat([self.samples, d], axis=1, sort=False)
    Utilities.save_dataframe(result, self.output_path)