Example #1
def _filter(dosage, metadata, individual_ids, biallelic_only, maf_threshold):
    if biallelic_only and GenotypeUtilities._biallelic_filter(dosage, metadata, individual_ids):
        return True

    if maf_threshold and GenotypeUtilities._maf_filter_min_threshold(dosage, metadata, individual_ids, maf_threshold):
        return True

    return False
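
A note on Example #1: _filter only dispatches to two predicate helpers on GenotypeUtilities, whose bodies are not shown here. Below is a minimal, hypothetical sketch of what the MAF predicate could look like, assuming dosage is a numpy array of 0/1/2 alternate-allele counts; the helper name and signature are illustrative, not the library's actual API.

import numpy

def maf_filter_min_threshold(dosage, maf_threshold):
    # Hypothetical sketch, not GenotypeUtilities._maf_filter_min_threshold itself.
    # Assumes dosage holds 0/1/2 alternate-allele counts per individual.
    f = numpy.mean(dosage) / 2.0   # alternate-allele frequency
    maf = min(f, 1.0 - f)          # fold to the minor allele
    return maf < maf_threshold     # True means "filter this variant out"
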
Example #2
def run(args):
    if os.path.exists(args.output):
        logging.info("output exists already, delete it or move it")
        return

    logging.info("Starting")
    Utilities.ensure_requisite_folders(args.output)

    logging.info("Loading data annotation")
    gene_annotation = StudyUtilities.load_gene_annotation(args.gene_annotation)
    gene_annotation = gene_annotation.rename(
        {"gene_name": "genename"},
        axis=1)[["gene_id", "genename", "gene_type"]]

    logging.info("Loading variant annotation")
    features_metadata = pq.read_table(args.features_annotation).to_pandas()

    logging.info("Loading spec")
    weights = get_weights(args.spec)

    w = weights.merge(features_metadata[["id", "allele_0", "allele_1",
                                         "rsid"]],
                      on="id",
                      how="left")
    w = w.rename(
        {
            "allele_0": "ref_allele",
            "allele_1": "eff_allele",
            "id": "varID"
        },
        axis=1)
    w["gene"] = w.gene_id.str.cat(w.cluster_id.astype(str), sep="_")
    w = w.drop(["w", "cluster_id"], axis=1)
    w = w.sort_values(by="gene").assign(weight=1)

    logging.info("Building models")
    with sqlite3.connect(args.output) as conn:
        w.drop("gene_id", axis=1).fillna("NA")[[
            "gene", "rsid", "varID", "ref_allele", "eff_allele", "weight"
        ]].to_sql("weights", conn, index=False)

        e = w[["gene_id", "gene"]].merge(gene_annotation,
                                         on="gene_id").drop("gene_id", axis=1)
        e["n_snps_in_window"] = None
        e["n.snps.in.model"] = 1
        e["pred.perf.pval"] = None
        e["pred.perf.qval"] = None
        e["pred.perf.R2"] = None
        e = e[[
            "gene", "genename", "gene_type", "n_snps_in_window",
            "n.snps.in.model", "pred.perf.R2", "pred.perf.pval",
            "pred.perf.qval"
        ]]

        e.to_sql("extra", conn, index=False)

        Models.model_indexes(conn)

    logging.info("Finished")
Example #3
def run(args):
    wp = args.output_prefix + "_weights.txt.gz"
    if os.path.exists(wp):
        logging.info("Weights output exists already, delete it or move it")
        return

    sp = args.output_prefix + "_summary.txt.gz"
    if os.path.exists(sp):
        logging.info("Summary output exists already, delete it or move it")
        return

    cp = args.output_prefix + "_covariance.txt.gz"
    if os.path.exists(cp):
        logging.info("Covariance output exists already, delete it or move it")
        return

    r = args.output_prefix + "_run.txt.gz"
    if os.path.exists(r):
        logging.info("Run output exists already, delete it or move it")
        return

    logging.info("Starting")
    Utilities.ensure_requisite_folders(args.output_prefix)

    logging.info("Opening data")
    data = pq.ParquetFile(args.data)
    available_data = {x for x in data.metadata.schema.names}

    logging.info("Loading data annotation")
    data_annotation = StudyUtilities.load_gene_annotation(
        args.data_annotation, args.chromosome, args.sub_batches,
        args.sub_batch)
    data_annotation = data_annotation[data_annotation.gene_id.isin(
        available_data)]
    if args.gene_whitelist:
        logging.info("Applying gene whitelist")
        data_annotation = data_annotation[data_annotation.gene_id.isin(
            set(args.gene_whitelist))]
    logging.info("Kept %i entries", data_annotation.shape[0])

    logging.info("Opening features annotation")
    if not args.chromosome:
        features_metadata = pq.read_table(args.features_annotation).to_pandas()
    else:
        features_metadata = pq.ParquetFile(
            args.features_annotation).read_row_group(args.chromosome -
                                                     1).to_pandas()

    if args.chromosome and args.sub_batches:
        logging.info("Trimming variants")
        features_metadata = StudyUtilities.trim_variant_metadata_on_gene_annotation(
            features_metadata, data_annotation, args.window)

    if args.rsid_whitelist:
        logging.info("Filtering features annotation")
        whitelist = TextFileTools.load_list(args.rsid_whitelist)
        whitelist = set(whitelist)
        features_metadata = features_metadata[features_metadata.rsid.isin(
            whitelist)]

    if args.features_weights:
        logging.info("Loading weights")
        x_weights = get_weights(args.features_weights,
                                {x
                                 for x in features_metadata.id})
        logging.info(
            "Filtering features metadata to those available in weights")
        features_metadata = features_metadata[features_metadata.id.isin(
            x_weights.id)]
        logging.info("Kept %d entries", features_metadata.shape[0])
    else:
        x_weights = None

    logging.info("Opening features")
    features = pq.ParquetFile(args.features)

    logging.info("Setting R seed")
    s = numpy.random.randint(1e8)
    set_seed(s)
    if args.run_tag:
        d = pandas.DataFrame({
            "run": [args.run_tag],
            "cv_seed": [s]
        })[["run", "cv_seed"]]
        Utilities.save_dataframe(d, r)

    WEIGHTS_FIELDS = [
        "gene", "rsid", "varID", "ref_allele", "eff_allele", "weight"
    ]
    SUMMARY_FIELDS = [
        "gene", "genename", "gene_type", "alpha", "n_snps_in_window",
        "n.snps.in.model", "test_R2_avg", "test_R2_sd", "cv_R2_avg",
        "cv_R2_sd", "in_sample_R2", "nested_cv_fisher_pval",
        "nested_cv_converged", "rho_avg", "rho_se", "rho_zscore",
        "pred.perf.R2", "pred.perf.pval", "pred.perf.qval"
    ]

    train = train_elastic_net_wrapper if args.mode == "elastic_net" else train_ols

    with gzip.open(wp, "w") as w:
        w.write(("\t".join(WEIGHTS_FIELDS) + "\n").encode())
        with gzip.open(sp, "w") as s:
            s.write(("\t".join(SUMMARY_FIELDS) + "\n").encode())
            with gzip.open(cp, "w") as c:
                c.write("GENE RSID1 RSID2 VALUE\n".encode())
                for i, data_annotation_ in enumerate(
                        data_annotation.itertuples()):
                    if args.MAX_M and i >= args.MAX_M:
                        logging.info("Early abort")
                        break
                    logging.log(9, "processing %i/%i:%s", i + 1,
                                data_annotation.shape[0],
                                data_annotation_.gene_id)
                    if args.repeat:
                        for j in range(0, args.repeat):
                            logging.log(9, "%i-th reiteration", j)
                            process(w, s, c, data, data_annotation_, features,
                                    features_metadata, x_weights,
                                    SUMMARY_FIELDS, train, j,
                                    args.nested_cv_folds)
                    else:
                        process(w,
                                s,
                                c,
                                data,
                                data_annotation_,
                                features,
                                features_metadata,
                                x_weights,
                                SUMMARY_FIELDS,
                                train,
                                nested_folds=args.nested_cv_folds)

    logging.info("Finished")
Example #4
def run(args):
    Utilities.maybe_create_folder(args.intermediate_folder)
    Utilities.ensure_requisite_folders(args.output_prefix)

    logging.info("Opening data")
    p_ = re.compile(args.data_name_pattern)
    f = [x for x in sorted(os.listdir(args.data_folder)) if p_.search(x)]
    tissue_names = [p_.search(x).group(1) for x in f]
    data = []
    for i in range(0, len(tissue_names)):
        logging.info("Loading %s", tissue_names[i])
        data.append((tissue_names[i],
                     pq.ParquetFile(os.path.join(args.data_folder, f[i]))))
    data = collections.OrderedDict(data)
    available_data = {
        x
        for p in data.values() for x in p.metadata.schema.names
    }

    logging.info("Preparing output")
    WEIGHTS_FIELDS = [
        "gene", "rsid", "varID", "ref_allele", "eff_allele", "weight"
    ]
    SUMMARY_FIELDS = [
        "gene", "genename", "gene_type", "alpha", "n_snps_in_window",
        "n.snps.in.model", "rho_avg", "pred.perf.R2", "pred.perf.pval"
    ]

    Utilities.ensure_requisite_folders(args.output_prefix)

    if args.skip_regression:
        weights, summaries, covariances = None, None, None
    else:
        weights, summaries, covariances = setup_output(args.output_prefix,
                                                       tissue_names,
                                                       WEIGHTS_FIELDS,
                                                       SUMMARY_FIELDS)

    logging.info("Loading data annotation")
    data_annotation = StudyUtilities._load_gene_annotation(
        args.data_annotation)
    data_annotation = data_annotation[data_annotation.gene_id.isin(
        available_data)]
    if args.chromosome or (args.sub_batches and args.sub_batch):
        data_annotation = StudyUtilities._filter_gene_annotation(
            data_annotation, args.chromosome, args.sub_batches, args.sub_batch)
    logging.info("Kept %i entries", data_annotation.shape[0])

    logging.info("Opening features annotation")
    if not args.chromosome:
        features_metadata = pq.read_table(args.features_annotation).to_pandas()
    else:
        features_metadata = pq.ParquetFile(
            args.features_annotation).read_row_group(args.chromosome -
                                                     1).to_pandas()

    if args.chromosome and args.sub_batches:
        logging.info("Trimming variants")
        features_metadata = StudyUtilities.trim_variant_metadata_on_gene_annotation(
            features_metadata, data_annotation, args.window)

    if args.rsid_whitelist:
        logging.info("Filtering features annotation")
        whitelist = TextFileTools.load_list(args.rsid_whitelist)
        whitelist = set(whitelist)
        features_metadata = features_metadata[features_metadata.rsid.isin(
            whitelist)]

    logging.info("Opening features")
    features = pq.ParquetFile(args.features)

    logging.info("Setting R seed")
    seed = numpy.random.randint(1e8)

    if args.run_tag:
        d = pandas.DataFrame({
            "run": [args.run_tag],
            "cv_seed": [seed]
        })[["run", "cv_seed"]]
        for t in tissue_names:
            Utilities.save_dataframe(
                d, "{}_{}_runs.txt.gz".format(args.output_prefix, t))

    failed_run = False
    try:
        for i, data_annotation_ in enumerate(data_annotation.itertuples()):
            logging.log(9, "processing %i/%i:%s", i + 1,
                        data_annotation.shape[0], data_annotation_.gene_id)
            logging.log(8, "loading data")
            d_ = {}
            for k, v in data.items():
                d_[k] = Parquet._read(v, [data_annotation_.gene_id],
                                      to_pandas=True)
            features_ = Genomics.entries_for_gene_annotation(
                data_annotation_, args.window, features_metadata)

            if features_.shape[0] == 0:
                logging.log(9, "No features available")
                continue

            features_data_ = Parquet._read(features,
                                           [x for x in features_.id.values],
                                           to_pandas=True)
            features_data_["id"] = range(1, features_data_.shape[0] + 1)
            features_data_ = features_data_[["individual", "id"] +
                                            [x for x in features_.id.values]]

            logging.log(8, "training")
            prepare_ctimp(args.script_path, seed, args.intermediate_folder,
                          data_annotation_, features_, features_data_, d_)
            del features_data_
            del d_
            if args.skip_regression:
                continue

            subprocess.call([
                "bash",
                _execution_script(args.intermediate_folder,
                                  data_annotation_.gene_id)
            ])

            w = pandas.read_table(_weights(args.intermediate_folder,
                                           data_annotation_.gene_id),
                                  sep=r"\s+")
            s = pandas.read_table(_summary(args.intermediate_folder,
                                           data_annotation_.gene_id),
                                  sep=r"\s+")

            for e_, entry in enumerate(s.itertuples()):
                entry_weights = w[["SNP", "REF.0.", "ALT.1.",
                                   entry.tissue]].rename(
                                       columns={
                                           "SNP": "varID",
                                           "REF.0.": "ref_allele",
                                           "ALT.1.": "eff_allele",
                                           entry.tissue: "weight"
                                       })
                entry_weights = entry_weights[entry_weights.weight != 0]
                entry_weights = entry_weights.assign(
                    gene=data_annotation_.gene_id)
                entry_weights = entry_weights.merge(features_,
                                                    left_on="varID",
                                                    right_on="id",
                                                    how="left")
                entry_weights = entry_weights[WEIGHTS_FIELDS]
                if args.output_rsids:
                    entry_weights.loc[entry_weights.rsid == "NA",
                                      "rsid"] = entry_weights.loc[
                                          entry_weights.rsid == "NA", "varID"]
                weights[entry.tissue].write(
                    entry_weights.to_csv(sep="\t",
                                         index=False,
                                         header=False,
                                         na_rep="NA").encode())

                entry_summary = s[s.tissue == entry.tissue].rename(
                    columns={
                        "zscore_pval": "pred.perf.pval",
                        "rho_avg_squared": "pred.perf.R2"
                    })
                entry_summary = entry_summary.assign(
                    gene=data_annotation_.gene_id,
                    alpha=0.5,
                    genename=data_annotation_.gene_name,
                    gene_type=data_annotation_.gene_type,
                    n_snps_in_window=features_.shape[0])
                entry_summary["n.snps.in.model"] = entry_weights.shape[0]
                # must repeat strings because of a weird pandas indexing issue
                entry_summary = entry_summary.drop(
                    ["R2", "n", "tissue"], axis=1)[[
                        "gene", "genename", "gene_type", "alpha",
                        "n_snps_in_window", "n.snps.in.model", "rho_avg",
                        "pred.perf.R2", "pred.perf.pval"
                    ]]
                summaries[entry.tissue].write(
                    entry_summary.to_csv(sep="\t",
                                         index=False,
                                         header=False,
                                         na_rep="NA").encode())

                features_data_ = Parquet._read(
                    features, [x for x in entry_weights.varID.values],
                    to_pandas=True)
                var_ids = [x for x in entry_weights.varID.values]
                cov = numpy.cov([features_data_[k] for k in var_ids], ddof=1)
                ids = [x for x in entry_weights.rsid.values
                       ] if args.output_rsids else var_ids
                cov = matrices._flatten_matrix_data([(data_annotation_.gene_id,
                                                      ids, cov)])
                for cov_ in cov:
                    l = "{} {} {} {}\n".format(cov_[0], cov_[1], cov_[2],
                                               cov_[3]).encode()
                    covariances[entry.tissue].write(l)

            if not args.keep_intermediate_folder:
                logging.info("Cleaning up")
                shutil.rmtree(
                    _intermediate_folder(args.intermediate_folder,
                                         data_annotation_.gene_id))

            if args.MAX_M and i >= args.MAX_M:
                logging.info("Early abort")
                break

    except Exception as e:
        logging.info("Exception running model training:\n%s",
                     traceback.format_exc())
        failed_run = True
    finally:
        pass
        # if not args.keep_intermediate_folder:
        #     shutil.rmtree(args.intermediate_folder)

    if not args.skip_regression:
        set_down(weights, summaries, covariances, tissue_names, failed_run)

    logging.info("Finished")
Example #5
def run(args):
    wp = args.output_prefix + "_weights.txt.gz"
    if os.path.exists(wp):
        logging.info("Weights output exists already, delete it or move it")
        return

    sp = args.output_prefix + "_summary.txt.gz"
    if os.path.exists(sp):
        logging.info("Summary output exists already, delete it or move it")
        return

    cp = args.output_prefix + "_covariance.txt.gz"
    if os.path.exists(cp):
        logging.info("Covariance output exists already, delete it or move it")
        return

    r = args.output_prefix + "_run.txt.gz"
    if os.path.exists(r):
        logging.info("Run output exists already, delete it or move it")
        return

    logging.info("Starting")
    Utilities.ensure_requisite_folders(args.output_prefix)

    logging.info("Opening data")
    data = pq.ParquetFile(args.data)
    available_data = {x for x in data.metadata.schema.names}

    logging.info("Loading data annotation")
    data_annotation = StudyUtilities.load_gene_annotation(args.data_annotation, args.chromosome, args.sub_batches, args.sub_batch, args.simplify_data_annotation)
    data_annotation = data_annotation[data_annotation.gene_id.isin(available_data)]
    if args.gene_whitelist:
        logging.info("Applying gene whitelist")
        data_annotation = data_annotation[data_annotation.gene_id.isin(set(args.gene_whitelist))]
    logging.info("Kept %i entries", data_annotation.shape[0])

    logging.info("Opening features annotation")
    if not args.chromosome:
        features_metadata = pq.read_table(args.features_annotation).to_pandas()
    else:
        features_metadata = pq.ParquetFile(args.features_annotation).read_row_group(args.chromosome-1).to_pandas()

    if args.output_rsids:
        if not args.keep_highest_frequency_rsid_entry and features_metadata[(features_metadata.rsid != "NA") & features_metadata.rsid.duplicated()].shape[0]:
            logging.warning("Several variants map to a same rsid (hint: multiple INDELS?).\n"
                            "Can't proceed. Consider the using the --keep_highest_frequency_rsid flag, or models will be ill defined.")
            return

    if args.chromosome and args.sub_batches:
        logging.info("Trimming variants")
        features_metadata = StudyUtilities.trim_variant_metadata_on_gene_annotation(features_metadata, data_annotation, args.window)
        logging.info("Kept %d", features_metadata.shape[0])

    if args.variant_call_filter:
        logging.info("Filtering variants by average call rate")
        features_metadata = features_metadata[features_metadata.avg_call > args.variant_call_filter]
        logging.info("Kept %d", features_metadata.shape[0])

    if args.variant_r2_filter:
        logging.info("Filtering variants by imputation R2")
        features_metadata = features_metadata[features_metadata.r2 > args.variant_r2_filter]
        logging.info("Kept %d", features_metadata.shape[0])

    if args.variant_variance_filter:
        logging.info("Filtering variants by (dosage/2)'s variance")
        features_metadata = features_metadata[features_metadata["std"]/2 > numpy.sqrt(args.variant_variance_filter)]
        logging.info("Kept %d", features_metadata.shape[0])

    if args.discard_palindromic_snps:
        logging.info("Discarding palindromic snps")
        features_metadata = Genomics.discard_gtex_palindromic_variants(features_metadata)
        logging.info("Kept %d", features_metadata.shape[0])

    if args.rsid_whitelist:
        logging.info("Filtering features annotation for whitelist")
        whitelist = TextFileTools.load_list(args.rsid_whitelist)
        whitelist = set(whitelist)
        features_metadata = features_metadata[features_metadata.rsid.isin(whitelist)]
        logging.info("Kept %d", features_metadata.shape[0])

    if args.only_rsids:
        logging.info("discarding non-rsids")
        features_metadata = StudyUtilities.trim_variant_metadata_to_rsids_only(features_metadata)
        logging.info("Kept %d", features_metadata.shape[0])

        if args.keep_highest_frequency_rsid_entry and features_metadata[(features_metadata.rsid != "NA") & features_metadata.rsid.duplicated()].shape[0]:
            logging.info("Keeping only the highest frequency entry for every rsid")
            k = features_metadata[["rsid", "allele_1_frequency", "id"]]
            k.loc[k.allele_1_frequency > 0.5, "allele_1_frequency"] = 1 - k.loc[k.allele_1_frequency > 0.5, "allele_1_frequency"]
            k = k.sort_values(by=["rsid", "allele_1_frequency"], ascending=False)
            k = k.groupby("rsid").first().reset_index()
            features_metadata = features_metadata[features_metadata.id.isin(k.id)]
            logging.info("Kept %d", features_metadata.shape[0])
        else:
            logging.info("rsids are unique, no need to restrict to highest frequency entry")

    if args.features_weights:
        logging.info("Loading weights")
        x_weights = get_weights(args.features_weights, {x for x in features_metadata.id})
        logging.info("Filtering features metadata to those available in weights")
        features_metadata = features_metadata[features_metadata.id.isin(x_weights.id)]
        logging.info("Kept %d entries", features_metadata.shape[0])
    else:
        x_weights = None

    logging.info("Opening features")
    features = pq.ParquetFile(args.features)

    logging.info("Setting R seed")
    s = numpy.random.randint(1e8)
    set_seed(s)
    if args.run_tag:
        d = pandas.DataFrame({"run":[args.run_tag], "cv_seed":[s]})[["run", "cv_seed"]]
        Utilities.save_dataframe(d, r)

    WEIGHTS_FIELDS=["gene", "rsid", "varID", "ref_allele", "eff_allele", "weight"]
    SUMMARY_FIELDS=["gene", "genename", "gene_type", "alpha", "n_snps_in_window", "n.snps.in.model",
                    "test_R2_avg", "test_R2_sd", "cv_R2_avg", "cv_R2_sd", "in_sample_R2", "nested_cv_fisher_pval",
                    "nested_cv_converged", "rho_avg", "rho_se", "rho_zscore", "pred.perf.R2", "pred.perf.pval", "pred.perf.qval"]

    train = train_elastic_net_wrapper if args.mode == "elastic_net" else train_ols

    available_individuals = check_missing(args, data, features)

    with gzip.open(wp, "w") as w:
        w.write(("\t".join(WEIGHTS_FIELDS) + "\n").encode())
        with gzip.open(sp, "w") as s:
            s.write(("\t".join(SUMMARY_FIELDS) + "\n").encode())
            with gzip.open(cp, "w") as c:
                c.write("GENE RSID1 RSID2 VALUE\n".encode())
                for i, data_annotation_ in enumerate(data_annotation.itertuples()):
                    if args.MAX_M and i >= args.MAX_M:
                        logging.info("Early abort")
                        break
                    logging.log(9, "processing %i/%i:%s", i + 1, data_annotation.shape[0], data_annotation_.gene_id)

                    if args.repeat:
                        for j in range(0, args.repeat):
                            logging.log(9, "%i-th reiteration", j)
                            process(w, s, c, data, data_annotation_, features, features_metadata, x_weights, SUMMARY_FIELDS, train, j, nested_folds=args.nested_cv_folds, use_individuals=available_individuals)
                    else:
                        process(w, s, c, data, data_annotation_, features, features_metadata, x_weights, SUMMARY_FIELDS, train, nested_folds=args.nested_cv_folds, use_individuals=available_individuals)

    logging.info("Finished")