def generate_multi_backend(args, variant_key):
    logging.info("Processing Genotype")
    dosage_conversion = GenotypeUtilities.impute_to_mean_conversion if args.impute_to_mean else None
    dosage_filter = get_filter(args, variant_key)
    metadata = []

    for genotype, individual_ids in ModelTraining.load_genotype_file_by_chromosome(
            args.input_genotype_file, variant_key, dosage_conversion,
            dosage_filter):
        if args.simplify_individual_id:
            logging.info("simplifying individual id")
            individual_ids = [x.split("_")[0] for x in individual_ids]

        _m = genotype.get_variants_metadata()

        metadata.append(_m)
        _chr = _m.chromosome.values[0]
        logging.log(9, "Processing {}".format(_chr))
        _o = args.output_prefix + ".chr{}".format(_chr) + ".variants.parquet"
        Parquet.save_variants(_o, genotype, individual_ids)

    logging.info("Saving metadata")
    metadata = pandas.concat(metadata)
    path_metadata_variant = args.output_prefix + ".variants_metadata.parquet"
    Parquet._save_metadata(path_metadata_variant, metadata)
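
# A minimal invocation sketch (an assumption, not this repository's CLI): build an
# `args`-like namespace carrying only the attributes generate_multi_backend reads.
# Note that get_filter may consume additional flags not shown here.
from types import SimpleNamespace

_example_args = SimpleNamespace(
    input_genotype_file="genotype.txt.gz",   # hypothetical path
    output_prefix="output/study",            # hypothetical prefix
    impute_to_mean=True,
    simplify_individual_id=False,
)
# variant_key maps variant ids to rsids; see load_annotation below for one way to build it.
# generate_multi_backend(_example_args, variant_key)
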
def run(args):
    start = timer()
    Utilities.ensure_requisite_folders(args.output_prefix)
    logging.info("Loading SNP annotation")
    snp_key = KeyedDataSource.load_data(args.snp_annotation_file,
                                        "varID",
                                        "rsid_dbSNP150",
                                        should_skip=KeyedDataSource.skip_na)

    logging.info("Loading Genotype")
    genotype, individual_ids = ModelTraining.load_genotype_folder(
        args.input_genotype_folder, args.input_genotype_file_pattern, snp_key)

    logging.info("Saving Genotype")
    path_variant = args.output_prefix + ".variants.parquet"
    Parquet.save_variants(path_variant, genotype, individual_ids)

    path_metadata_variant = args.output_prefix + ".variants_metadata.parquet"
    Parquet.save_metadata(path_metadata_variant, genotype)

    logging.info("Processing Expression Phenotype")
    expression_logic = Utilities.file_logic(
        args.input_phenotype_folder, args.input_phenotype_expression_pattern)
    for row in expression_logic.itertuples():
        logging.info("Phenotype: %s", row.name)
        process_phenotype(row.path, row.name, args.output_prefix)
    end = timer()
    logging.info("Finished in %s", str(end - start))
def run(args):
    logging.info("Starting")
    Utilities.ensure_requisite_folders(args.output)

    logging.info("Read covariate")
    covariate = pq.read_table(args.covariate).to_pandas()
    logging.info("Read data")
    data = pq.read_table(args.data).to_pandas()

    logging.info("Processing")
    covariate_names = covariate.columns.values[1:]
    results = {"individual": data.individual.values}
    variables = [x for x in data.columns.values[1:]]
    for i, column in enumerate(variables):
        logging.log(9, "%i/%i:%s", i, len(variables), column)
        d = data[["individual", column]].rename(columns={
            column: "y"
        }).merge(covariate, on="individual", how="inner").drop("individual",
                                                               axis=1)
        y, X = dmatrices("y ~ {}".format(" + ".join(covariate_names)),
                         data=d,
                         return_type="dataframe")
        model = sm.OLS(y, X)
        result = model.fit()
        results[column] = result.resid
    results = pandas.DataFrame(results)[["individual"] + variables]
    Parquet.save_variable(args.output, results)
    logging.info("Finished")
def check_missing(args, data, features):
    m = None
    if args.missing_individuals:
        logging.info("Instructed to check for individuals missing from the genotype")
        p = Parquet._read(data, ["individual"])
        g = Parquet._read(features, ["individual"])
        # Keep individuals present in both files; a set makes each membership test O(1).
        g_individuals = set(g["individual"])
        m = [x for x in p["individual"] if x in g_individuals]
        logging.info("Found %d individuals", len(m))
    return m
def run(args):
    start = timer()
    Utilities.ensure_requisite_folders(args.parquet_output)
    logging.info("Loading variable")
    variables = ModelTraining.load_variable_file(args.variable_file)
    logging.info("Saving")
    Parquet.save_variable(args.parquet_output, variables)
    end = timer()
    logging.info("Finished in %s", str(end-start))
def process(w, s, c, data, data_annotation_, features, features_metadata, x_weights, summary_fields, train, postfix=None, nested_folds=10, use_individuals=None):
    gene_id_ = data_annotation_.gene_id if postfix is None else "{}-{}".format(data_annotation_.gene_id, postfix)
    logging.log(8, "loading data")
    d_ = Parquet._read(data, [data_annotation_.gene_id], specific_individuals=use_individuals)
    features_ = Genomics.entries_for_gene_annotation(data_annotation_, args.window, features_metadata)

    if x_weights is not None:
        x_w = features_[["id"]].merge(x_weights[x_weights.gene_id == data_annotation_.gene_id], on="id")
        features_ = features_[features_.id.isin(x_w.id)]
        x_w = robjects.FloatVector(x_w.w.values)
    else:
        x_w = None

    if features_.shape[0] == 0:
        logging.log(9, "No features available")
        return

    features_data_ = Parquet._read(features, [x for x in features_.id.values],
                                   specific_individuals=[x for x in d_["individual"]])

    logging.log(8, "training")
    weights, summary = train(features_data_, features_, d_, data_annotation_, x_w, not args.dont_prune, nested_folds)

    if weights.shape[0] == 0:
        logging.log(9, "no weights, skipping")
        return

    logging.log(8, "saving")
    weights = weights.assign(gene=data_annotation_.gene_id). \
        merge(features_.rename(columns={"id": "feature", "allele_0": "ref_allele", "allele_1": "eff_allele"}), on="feature"). \
        rename(columns={"feature": "varID"}). \
        assign(gene=gene_id_)

    weights = weights[["gene", "rsid", "varID", "ref_allele", "eff_allele", "weight"]]
    if args.output_rsids:
        weights.loc[weights.rsid == "NA", "rsid"] = weights.loc[weights.rsid == "NA", "varID"]
    w.write(weights.to_csv(sep="\t", index=False, header=False, na_rep="NA").encode())

    summary = summary. \
        assign(gene=gene_id_, genename=data_annotation_.gene_name,
               gene_type=data_annotation_.gene_type). \
        rename(columns={"n_features": "n_snps_in_window", "n_features_in_model": "n.snps.in.model",
                        "zscore_pval": "pred.perf.pval", "rho_avg_squared": "pred.perf.R2",
                        "cv_converged":"nested_cv_converged"})
    summary["pred.perf.qval"] = None
    summary = summary[summary_fields]
    s.write(summary.to_csv(sep="\t", index=False, header=False, na_rep="NA").encode())

    var_ids = [x for x in weights.varID.values]
    cov = numpy.cov([features_data_[k] for k in var_ids], ddof=1)
    ids = [x for x in weights.rsid.values] if args.output_rsids else var_ids
    cov = matrices._flatten_matrix_data([(gene_id_, ids, cov)])
    for cov_ in cov:
        l = "{} {} {} {}\n".format(cov_[0], cov_[1], cov_[2], cov_[3]).encode()
        c.write(l)
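
# Hedged sketch of the covariance flattening used above: one "GENE ID1 ID2 VALUE"
# row per upper-triangular entry. This mirrors the written format, but it is an
# assumption about the internal matrices._flatten_matrix_data helper.
import numpy as np

def _flatten_covariance(gene, ids, cov):
    cov = np.atleast_2d(cov)
    for i in range(len(ids)):
        for j in range(i, len(ids)):
            yield (gene, ids[i], ids[j], cov[i, j])

# Two variants -> three rows: two variances plus one covariance.
for _row in _flatten_covariance("GENE_X", ["rs1", "rs2"], np.array([[1.0, 0.2], [0.2, 1.0]])):
    print("{} {} {} {}".format(*_row))
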
def generate_single_backend(args, variant_key):
    logging.info("Loading Genotype")
    dosage_conversion = GenotypeUtilities.impute_to_mean_conversion if args.impute_to_mean else None
    dosage_filter = get_filter(args, variant_key)
    genotype, individual_ids = ModelTraining.load_genotype_file(args.input_genotype_file, variant_key, dosage_conversion, dosage_filter)

    logging.info("Saving Genotype")
    path_variant = args.output_prefix + ".variants.parquet"
    Parquet.save_variants(path_variant, genotype, individual_ids)

    logging.info("Saving Genotype")
    path_metadata_variant = args.output_prefix + ".variants_metadata.parquet"
    Parquet.save_metadata(path_metadata_variant, genotype)
def run(args):
    if os.path.exists(args.output):
        logging.info("Output already exists, either delete it or move it")
        return

    logging.info("Getting parquet genotypes")
    file_map = get_file_map(args)

    logging.info("Getting genes")
    with sqlite3.connect(args.model_db) as connection:
        extra = pandas.read_sql("SELECT * FROM EXTRA", connection)

    logging.info("Processing")
    with gzip.open(args.output, "w") as f:
        f.write("GENE RSID1 RSID2 VALUE\n".encode())
        with sqlite3.connect(args.model_db) as connection:
            for i,t in enumerate(extra.itertuples()):
                g_ = t.gene
                logging.log(9, "Proccessing %i:%s", i, g_)
                w = pandas.read_sql("select * from weights where gene = '{}';".format(g_), connection)

                chr_ = w.varID.values[0].split("_")[0].split("chr")[1]
                dosage = file_map[int(chr_)]
                d = Parquet._read(dosage, columns=w.varID.values, skip_individuals=True)
                var_ids = list(d.keys())
                rsids = pandas.DataFrame({"varID":var_ids}).merge(w[["varID", "rsid"]], on="varID").rsid
                c = numpy.cov([d[x] for x in var_ids])
                c = matrices._flatten_matrix_data([(w.gene.values[0], rsids, c)])
                for entry in c:
                    l = "{} {} {} {}\n".format(entry[0], entry[1], entry[2], entry[3])
                    f.write(l.encode())
    logging.info("Finished building covariance.")
def by_chromosome(context, chromosome):
    vm = context.vmf.read_row_group(chromosome - 1).to_pandas()
    if args.frequency_filter:
        vm = filter_by_frequency(vm, args.frequency_filter)

    g = context.get_genotype_file(chromosome)

    regions = context.regions
    regions = regions[regions.chr == "chr{}".format(chromosome)]

    for i, region in enumerate(regions.itertuples()):
        logging.log(9, "Processing region in chr %d: %d/%d", chromosome, i + 1,
                    regions.shape[0])
        vmw = Genomics.entries_for_window(chromosome,
                                          region.start - args.window,
                                          region.stop + args.window, vm)
        ids = vmw.id.values
        logging.log(9, "%d variants", len(ids))
        d = Parquet._read(g, columns=ids, skip_individuals=True)
        d = numpy.array([d[x] for x in ids], dtype=numpy.float32)
        if context.args.standardise_geno:
            # corrcoef standardizes internally; numpy deprecated (and ignores) its ddof argument.
            cov = numpy.corrcoef(d).astype(numpy.float32, copy=False)
        else:
            cov = numpy.cov(d).astype(numpy.float32, copy=False)
        logging.log(9, "%d rows", cov.shape[0])
        context.sink(cov, ids, region)
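
# Quick illustration of the two branches above, assuming nothing beyond numpy:
# numpy.corrcoef yields the covariance of standardized rows (unit diagonal),
# while numpy.cov yields a raw covariance whose scale depends on allele frequencies.
import numpy as np

_dosages = np.random.binomial(2, 0.3, size=(3, 500)).astype(np.float32)  # 3 variants x 500 people
_raw_cov = np.cov(_dosages)
_corr = np.corrcoef(_dosages)
assert np.allclose(np.diag(_corr), 1.0)
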
def load_annotation(args):
    if args.snp_annotation:
        logging.info("Loading SNP annotation file for variant-rsid mapping")
        return GTExMisc.load_gtex_variant_to_rsid(args.snp_annotation)
    elif args.snp_annotation_from_parquet_metadata:
        logging.info("Loading Parquet metadata for variant-rsid mapping")
        return Parquet.variant_key_value_from_metadata(args.snp_annotation_from_parquet_metadata)

    raise Exceptions.ReportableException("Provide a file annotation")
def run(args):
    if os.path.exists(args.output):
        logging.info("Output already exists, either delete it or move it")
        return

    logging.info("Getting parquet genotypes")
    file_map = get_file_map(args)

    logging.info("Getting variants")
    gene_variants = get_gene_variant_list(args.model_db_folder,
                                          args.model_db_file_pattern)
    genes = list(gene_variants.gene.drop_duplicates())

    Utilities.ensure_requisite_folders(args.output)

    logging.info("Processing")
    with gzip.open(args.output, "w") as f:
        f.write("GENE RSID1 RSID2 VALUE\n".encode())
        for i, g in enumerate(gene_variants.gene.drop_duplicates()):
            logging.log(9, "Proccessing %i/%i:%s", i + 1, len(genes), g)
            w = gene_variants[gene_variants.gene == g]
            chr_ = w.varID.values[0].split("_")[0].split("chr")[1]
            if not n_.search(chr_):
                logging.log(9, "Unsupported chromosome: %s", chr_)
                continue

            dosage = file_map[int(chr_)]
            d = Parquet._read(dosage,
                              columns=w.varID.values,
                              skip_individuals=True)
            var_ids = list(d.keys())
            if args.output_rsids:
                ids = [
                    x for x in pandas.DataFrame({
                        "varID": var_ids
                    }).merge(w[["varID", "rsid"]], on="varID").rsid.values
                ]
            else:
                ids = var_ids
            c = numpy.cov([d[x] for x in var_ids])
            c = matrices._flatten_matrix_data([(w.gene.values[0], ids, c)])
            for entry in c:
                l = "{} {} {} {}\n".format(entry[0], entry[1], entry[2],
                                           entry[3])
                f.write(l.encode())
    logging.info("Finished building covariance.")
def run(args):
    if not (args.bimbam_output_prefix or args.parquet_output_prefix or args.sbam_output_folder):
        raise RuntimeError("Need output argument")

    # Reproducibility. TODO: add an argument for a different seed.
    Simulate.reset_seed()

    study, selected_snps, gene_annotation = Simulate.simulate_bslmm_study(args.snps_per_chromosome)

    if args.bimbam_output_prefix:
        _save = lambda study: BIMBAM.save_study(study, args.bimbam_output_prefix)
        save_study(study, selected_snps, gene_annotation, args.bimbam_output_prefix, _save)
    if args.parquet_output_prefix:
        _save = lambda study: Parquet.save_study(study, args.parquet_output_prefix)
        save_study(study, selected_snps, gene_annotation, args.parquet_output_prefix, _save)
    if args.sbam_output_folder:
        _save = lambda study: SBAM.save_study(study, args.sbam_output_folder)
        save_study(study, selected_snps, gene_annotation, os.path.join(args.sbam_output_folder, "_"), _save)
def _run_dap(region, features, features_metadata, summary_stats,
             intermediate_folder, output_folder, options, dap_command):
    logging.log(9, "fetching and prepatring data")
    os.makedirs(_intermediate_folder(intermediate_folder, region))
    s = summary_stats[summary_stats.region_id == region.region_id]
    #m = features_metadata[features_metadata.id.isin(s.variant_id)]
    x = Parquet._read(features, [x for x in s.variant_id.values])
    c = numpy.corrcoef([x[k] for k in s.variant_id.values])
    del x

    numpy.savetxt(_cor_path(intermediate_folder, region), c)
    s[["variant_id", "z"]].rename(columns={
        "variant_id": "snp_name_i",
        "z": "z_i"
    }).to_csv(_stats_path(intermediate_folder, region), index=False, sep="\t")

    del s
    del c

    command = _dap_command(region, intermediate_folder, output_folder, options,
                           dap_command)

    logging.log(9, "running")
    script_path = _script_path(intermediate_folder, region)
    with open(script_path, "w") as script:
        script.write(command)

    _o = os.path.join(_intermediate_folder(intermediate_folder, region),
                      "dap.o")
    _e = os.path.join(_intermediate_folder(intermediate_folder, region),
                      "dap.e")
    with open(_o, "w") as o:
        with open(_e, "w") as e:
            call(["bash", script_path], stderr=e, stdout=o)
    shutil.move(_o, _output(output_folder, region))
    logging.log(9, "executed dap")
def run(args):
    if os.path.exists(args.output):
        logging.info("Output already exists, either delete it or move it")
        return

    logging.info("Loading group")
    groups = pandas.read_table(args.group)
    groups = groups.assign(chromosome = groups.gtex_intron_id.str.split(":").str.get(0))
    groups = groups.assign(position=groups.gtex_intron_id.str.split(":").str.get(1))
    groups = Genomics.sort(groups)

    logging.info("Getting parquet genotypes")
    file_map = get_file_map(args)

    logging.info("Getting genes")
    with sqlite3.connect(args.model_db_group_key) as connection:
        # Pay heed to the order. This avoids arbitrariness in sqlite3 loading of results.
        extra = pandas.read_sql("SELECT * FROM EXTRA order by gene", connection)
        extra = extra[extra["n.snps.in.model"] > 0]

    individuals = TextFileTools.load_list(args.individuals) if args.individuals else None

    logging.info("Processing")
    Utilities.ensure_requisite_folders(args.output)

    genes_ = groups[["chromosome", "position", "gene_id"]].drop_duplicates()
    with gzip.open(args.output, "w") as f:
        f.write("GENE RSID1 RSID2 VALUE\n".encode())
        with sqlite3.connect(args.model_db_group_key) as db_group_key:
            with sqlite3.connect(args.model_db_group_values) as db_group_values:
                for i,t_ in enumerate(genes_.itertuples()):
                    g_ = t_.gene_id
                    chr_ = t_.chromosome.split("chr")[1]
                    logging.log(8, "Proccessing %i/%i:%s", i+1, len(genes_), g_)

                    if not n_.search(chr_):
                        logging.log(9, "Unsupported chromosome: %s", chr_)
                        continue
                    dosage = file_map[int(chr_)]

                    group = groups[groups.gene_id == g_]
                    wg = []
                    for value in group.intron_id:
                        wk = pandas.read_sql("select * from weights where gene = '{}';".format(value), db_group_values)
                        if wk.shape[0] == 0:
                            continue
                        wg.append(wk)

                    if len(wg) == 0:
                        # Every non-empty weights frame was appended to wg, so an
                        # empty wg means there is no data at all for this group.
                        logging.log(8, "No data, skipping")
                        continue
                    w = pandas.concat(wg)[["varID", "rsid"]].drop_duplicates()

                    if individuals:
                        d = Parquet._read(dosage, columns=w.varID.values, specific_individuals=individuals)
                        del d["individual"]
                    else:
                        d = Parquet._read(dosage, columns=w.varID.values, skip_individuals=True)

                    var_ids = list(d.keys())
                    if len(var_ids) == 0:
                        if len(w.varID.values) == 1:
                            logging.log(9, "workaround for single missing genotype at %s", g_)
                            d = {w.varID.values[0]: [0, 1]}
                            var_ids = list(d.keys())  # refresh so the workaround entry is used
                        else:
                            logging.log(9, "No genotype available for %s, skipping", g_)
                            continue  # a bare `next` is a no-op in Python; `continue` actually skips

                    if args.output_rsids:
                        ids = [x for x in pandas.DataFrame({"varID": var_ids}).merge(w[["varID", "rsid"]], on="varID").rsid.values]
                    else:
                        ids = var_ids

                    c = numpy.cov([d[x] for x in var_ids])
                    c = matrices._flatten_matrix_data([(g_, ids, c)])
                    for entry in c:
                        l = "{} {} {} {}\n".format(entry[0], entry[1], entry[2], entry[3])
                        f.write(l.encode())
    logging.info("Finished building covariance.")
def run(args):
    Utilities.maybe_create_folder(args.intermediate_folder)
    Utilities.ensure_requisite_folders(args.output_prefix)

    logging.info("Opening data")
    p_ = re.compile(args.data_name_pattern)
    f = [x for x in sorted(os.listdir(args.data_folder)) if p_.search(x)]
    tissue_names = [p_.search(x).group(1) for x in f]
    data = []
    for i in range(0, len(tissue_names)):
        logging.info("Loading %s", tissue_names[i])
        data.append((tissue_names[i],
                     pq.ParquetFile(os.path.join(args.data_folder, f[i]))))
    data = collections.OrderedDict(data)
    available_data = {
        x
        for p in data.values() for x in p.metadata.schema.names
    }

    logging.info("Preparing output")
    WEIGHTS_FIELDS = [
        "gene", "rsid", "varID", "ref_allele", "eff_allele", "weight"
    ]
    SUMMARY_FIELDS = [
        "gene", "genename", "gene_type", "alpha", "n_snps_in_window",
        "n.snps.in.model", "rho_avg", "pred.perf.R2", "pred.perf.pval"
    ]

    Utilities.ensure_requisite_folders(args.output_prefix)

    if args.skip_regression:
        weights, summaries, covariances = None, None, None
    else:
        weights, summaries, covariances = setup_output(args.output_prefix,
                                                       tissue_names,
                                                       WEIGHTS_FIELDS,
                                                       SUMMARY_FIELDS)

    logging.info("Loading data annotation")
    data_annotation = StudyUtilities._load_gene_annotation(
        args.data_annotation)
    data_annotation = data_annotation[data_annotation.gene_id.isin(
        available_data)]
    if args.chromosome or (args.sub_batches and args.sub_batch):
        data_annotation = StudyUtilities._filter_gene_annotation(
            data_annotation, args.chromosome, args.sub_batches, args.sub_batch)
    logging.info("Kept %i entries", data_annotation.shape[0])

    logging.info("Opening features annotation")
    if not args.chromosome:
        features_metadata = pq.read_table(args.features_annotation).to_pandas()
    else:
        features_metadata = pq.ParquetFile(
            args.features_annotation).read_row_group(args.chromosome -
                                                     1).to_pandas()

    if args.chromosome and args.sub_batches:
        logging.info("Trimming variants")
        features_metadata = StudyUtilities.trim_variant_metadata_on_gene_annotation(
            features_metadata, data_annotation, args.window)

    if args.rsid_whitelist:
        logging.info("Filtering features annotation")
        whitelist = TextFileTools.load_list(args.rsid_whitelist)
        whitelist = set(whitelist)
        features_metadata = features_metadata[features_metadata.rsid.isin(
            whitelist)]

    logging.info("Opening features")
    features = pq.ParquetFile(args.features)

    logging.info("Setting R seed")
    seed = numpy.random.randint(1e8)

    if args.run_tag:
        d = pandas.DataFrame({
            "run": [args.run_tag],
            "cv_seed": [seed]
        })[["run", "cv_seed"]]
        for t in tissue_names:
            Utilities.save_dataframe(
                d, "{}_{}_runs.txt.gz".format(args.output_prefix, t))

    failed_run = False
    try:
        for i, data_annotation_ in enumerate(data_annotation.itertuples()):
            logging.log(9, "processing %i/%i:%s", i + 1,
                        data_annotation.shape[0], data_annotation_.gene_id)
            logging.log(8, "loading data")
            d_ = {}
            for k, v in data.items():
                d_[k] = Parquet._read(v, [data_annotation_.gene_id],
                                      to_pandas=True)
            features_ = Genomics.entries_for_gene_annotation(
                data_annotation_, args.window, features_metadata)

            if features_.shape[0] == 0:
                logging.log(9, "No features available")
                continue

            features_data_ = Parquet._read(features,
                                           [x for x in features_.id.values],
                                           to_pandas=True)
            features_data_["id"] = range(1, features_data_.shape[0] + 1)
            features_data_ = features_data_[["individual", "id"] +
                                            [x for x in features_.id.values]]

            logging.log(8, "training")
            prepare_ctimp(args.script_path, seed, args.intermediate_folder,
                          data_annotation_, features_, features_data_, d_)
            del features_data_
            del d_
            if args.skip_regression:
                continue

            subprocess.call([
                "bash",
                _execution_script(args.intermediate_folder,
                                  data_annotation_.gene_id)
            ])

            w = pandas.read_table(_weights(args.intermediate_folder,
                                           data_annotation_.gene_id),
                                  sep=r"\s+")
            s = pandas.read_table(_summary(args.intermediate_folder,
                                           data_annotation_.gene_id),
                                  sep=r"\s+")

            for e_, entry in enumerate(s.itertuples()):
                entry_weights = w[["SNP", "REF.0.", "ALT.1.",
                                   entry.tissue]].rename(
                                       columns={
                                           "SNP": "varID",
                                           "REF.0.": "ref_allele",
                                           "ALT.1.": "eff_allele",
                                           entry.tissue: "weight"
                                       })
                entry_weights = entry_weights[entry_weights.weight != 0]
                entry_weights = entry_weights.assign(
                    gene=data_annotation_.gene_id)
                entry_weights = entry_weights.merge(features_,
                                                    left_on="varID",
                                                    right_on="id",
                                                    how="left")
                entry_weights = entry_weights[WEIGHTS_FIELDS]
                if args.output_rsids:
                    entry_weights.loc[entry_weights.rsid == "NA",
                                      "rsid"] = entry_weights.loc[
                                          entry_weights.rsid == "NA", "varID"]
                weights[entry.tissue].write(
                    entry_weights.to_csv(sep="\t",
                                         index=False,
                                         header=False,
                                         na_rep="NA").encode())

                entry_summary = s[s.tissue == entry.tissue].rename(
                    columns={
                        "zscore_pval": "pred.perf.pval",
                        "rho_avg_squared": "pred.perf.R2"
                    })
                entry_summary = entry_summary.assign(
                    gene=data_annotation_.gene_id,
                    alpha=0.5,
                    genename=data_annotation_.gene_name,
                    gene_type=data_annotation_.gene_type,
                    n_snps_in_window=features_.shape[0])
                entry_summary["n.snps.in.model"] = entry_weights.shape[0]
                # must repeat the column list because of a weird pandas indexing issue
                entry_summary = entry_summary.drop(
                    ["R2", "n", "tissue"], axis=1)[[
                        "gene", "genename", "gene_type", "alpha",
                        "n_snps_in_window", "n.snps.in.model", "rho_avg",
                        "pred.perf.R2", "pred.perf.pval"
                    ]]
                summaries[entry.tissue].write(
                    entry_summary.to_csv(sep="\t",
                                         index=False,
                                         header=False,
                                         na_rep="NA").encode())

                features_data_ = Parquet._read(
                    features, [x for x in entry_weights.varID.values],
                    to_pandas=True)
                var_ids = [x for x in entry_weights.varID.values]
                cov = numpy.cov([features_data_[k] for k in var_ids], ddof=1)
                ids = [x for x in entry_weights.rsid.values
                       ] if args.output_rsids else var_ids
                cov = matrices._flatten_matrix_data([(data_annotation_.gene_id,
                                                      ids, cov)])
                for cov_ in cov:
                    l = "{} {} {} {}\n".format(cov_[0], cov_[1], cov_[2],
                                               cov_[3]).encode()
                    covariances[entry.tissue].write(l)

            if not args.keep_intermediate_folder:
                logging.info("Cleaning up")
                shutil.rmtree(
                    _intermediate_folder(args.intermediate_folder,
                                         data_annotation_.gene_id))

            if args.MAX_M and i >= args.MAX_M:
                logging.info("Early abort")
                break

    except Exception as e:
        logging.info("Exception running model training:\n%s",
                     traceback.format_exc())
        failed_run = True
    finally:
        pass
        # if not args.keep_intermediate_folder:
        #     shutil.rmtree(args.intermediate_folder)

    if not args.skip_regression:
        set_down(weights, summaries, covariances, tissue_names, failed_run)

    logging.info("Finished")
def _data_sink(args):
    return Parquet.ParquetDataFrameSink(args.parquet_output, GTEx.pyarrow_schema)
def run(args):
    if os.path.exists(args.output):
        logging.info("Output already exists, either delete it or move it")
        return

    logging.info("Getting parquet genotypes")
    file_map = get_file_map(args)

    logging.info("Getting genes")
    with sqlite3.connect(args.model_db) as connection:
        # Pay heed to the order. This avoids arbitrariness in sqlite3 loading of results.
        extra = pandas.read_sql("SELECT * FROM EXTRA order by gene",
                                connection)
        extra = extra[extra["n.snps.in.model"] > 0]

    individuals = TextFileTools.load_list(
        args.individuals) if args.individuals else None

    logging.info("Processing")
    Utilities.ensure_requisite_folders(args.output)

    with gzip.open(args.output, "w") as f:
        f.write("GENE RSID1 RSID2 VALUE\n".encode())
        with sqlite3.connect(args.model_db) as connection:
            for i, t in enumerate(extra.itertuples()):
                g_ = t.gene
                logging.log(9, "Proccessing %i/%i:%s", i + 1, extra.shape[0],
                            g_)
                w = pandas.read_sql(
                    "select * from weights where gene = '{}';".format(g_),
                    connection)
                chr_ = w.varID.values[0].split("_")[0].split("chr")[1]
                if not n_.search(chr_):
                    logging.log(9, "Unsupported chromosome: %s", chr_)
                    continue
                dosage = file_map[int(chr_)]

                if individuals:
                    d = Parquet._read(dosage,
                                      columns=w.varID.values,
                                      specific_individuals=individuals)
                    del d["individual"]
                else:
                    d = Parquet._read(dosage,
                                      columns=w.varID.values,
                                      skip_individuals=True)

                var_ids = list(d.keys())
                if len(var_ids) == 0:
                    if len(w.varID.values) == 1:
                        logging.log(9, "workaround for single missing genotype at %s", g_)
                        d = {w.varID.values[0]: [0, 1]}
                        var_ids = list(d.keys())  # refresh so the workaround entry is used
                    else:
                        logging.log(9, "No genotype available for %s, skipping", g_)
                        continue  # a bare `next` is a no-op in Python; `continue` actually skips

                if args.output_rsids:
                    ids = [
                        x for x in pandas.DataFrame({
                            "varID": var_ids
                        }).merge(w[["varID", "rsid"]], on="varID").rsid.values
                    ]
                else:
                    ids = var_ids

                c = numpy.cov([d[x] for x in var_ids])
                c = matrices._flatten_matrix_data([(w.gene.values[0], ids, c)])
                for entry in c:
                    l = "{} {} {} {}\n".format(entry[0], entry[1], entry[2],
                                               entry[3])
                    f.write(l.encode())
    logging.info("Finished building covariance.")
def process_phenotype(path, name, output_prefix):
    pheno = ModelTraining.load_variable_file(path)
    pheno_path = output_prefix + ".expression." + name + ".parquet"
    Parquet.save_variable(pheno_path, pheno)