Example #1
def bpnet_export_bw(
        model_dir,
        output_prefix,
        fasta_file=None,
        regions=None,
        contrib_method='grad',
        contrib_wildcard='*/profile/wn,*/counts/pre-act',  # specifies which contrib. scores to compute
        batch_size=256,
        scale_contribution=False,
        flip_negative_strand=False,
        gpu=0,
        memfrac_gpu=0.45):
    """Export model predictions and contribution scores to big-wig files
    """
    from pybedtools import BedTool
    from bpnet.modisco.core import Seqlet
    output_dir = os.path.dirname(output_prefix)
    add_file_logging(output_dir, logger, 'bpnet-export-bw')
    os.makedirs(output_dir, exist_ok=True)
    if gpu is not None:
        create_tf_session(gpu, per_process_gpu_memory_fraction=memfrac_gpu)

    logger.info("Load model")

    bp = BPNetSeqModel.from_mdir(model_dir)

    if regions is not None:
        logger.info(
            f"Computing predictions and contribution scores for provided regions: {regions}"
        )
        regions = list(BedTool(regions))
    else:
        logger.info("--regions not provided. Using regions from dataspec.yml")
        ds = DataSpec.load(os.path.join(model_dir, 'dataspec.yml'))
        regions = ds.get_all_regions()

    seqlen = bp.input_seqlen()
    logger.info(
        f"Resizing regions (fix=center) to model's input width of: {seqlen}")
    regions = [resize_interval(interval, seqlen) for interval in regions]
    logger.info("Sort the bed file")
    regions = list(BedTool(regions).sort())

    bp.export_bw(regions=regions,
                 output_prefix=output_prefix,
                 contrib_method=contrib_method,
                 fasta_file=fasta_file,
                 pred_summaries=contrib_wildcard.replace("*/", "").split(","),
                 batch_size=batch_size,
                 scale_contribution=scale_contribution,
                 flip_negative_strand=flip_negative_strand,
                 chromosomes=None)  # infer chromosomes from the fasta file
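A minimal usage sketch for `bpnet_export_bw` above. The model directory, FASTA file, and BED file are placeholders for your own paths, not files shipped with BPNet.

# Hypothetical invocation of bpnet_export_bw defined above; all paths are placeholders.
bpnet_export_bw(
    model_dir='models/run-1/',      # directory produced by `bpnet train`
    output_prefix='bigwigs/run-1',  # BigWig files are written under this prefix
    fasta_file='hg38.fa',           # reference genome FASTA
    regions='peaks.bed',            # BED regions; None -> use regions from dataspec.yml
    contrib_method='grad',
    gpu=None)                       # skip the explicit create_tf_session GPU setup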
Example #2
def bpnet_contrib(
        model_dir,
        output_file,
        method="grad",
        dataspec=None,
        regions=None,
        fasta_file=None,  # alternative to dataspec
        shuffle_seq=False,
        shuffle_regions=False,
        max_regions=None,
        # reference='zeroes', # Currently the only option
        # peak_width=1000,  # automatically inferred from 'config.gin.json'
        # seq_width=None,
        contrib_wildcard='*/profile/wn,*/counts/pre-act',  # specifies which contrib. scores to compute
        batch_size=512,
        gpu=0,
        memfrac_gpu=0.45,
        num_workers=10,
        storage_chunk_size=512,
        exclude_chr='',
        include_chr='',
        overwrite=False,
        skip_bias=False):
    """Run contribution scores for a BPNet model
    """
    from bpnet.extractors import _chrom_sizes
    add_file_logging(os.path.dirname(output_file), logger, 'bpnet-contrib')
    if gpu is not None:
        create_tf_session(gpu, per_process_gpu_memory_fraction=memfrac_gpu)
    else:
        # Don't use any GPUs
        os.environ['CUDA_VISIBLE_DEVICES'] = ''

    if os.path.exists(output_file):
        if overwrite:
            os.remove(output_file)
        else:
            raise ValueError(
                f"File exists {output_file}. Use overwrite=True to overwrite it"
            )

    config = read_json(os.path.join(model_dir, 'config.gin.json'))
    seq_width = config['seq_width']
    peak_width = config['seq_width']

    # NOTE - seq_width has to be the same for the input and the target
    #
    # infer from the command line
    # if seq_width is None:
    #     logger.info("Using seq_width = peak_width")
    #     seq_width = peak_width

    # # make sure these are int's
    # seq_width = int(seq_width)
    # peak_width = int(peak_width)

    # Split
    contrib_wildcards = contrib_wildcard.split(",")

    # Allow chr inclusion / exclusion
    if exclude_chr:
        exclude_chr = exclude_chr.split(",")
    else:
        exclude_chr = None
    if include_chr:
        include_chr = include_chr.split(",")
    else:
        include_chr = None

    logger.info("Loading the config files")
    model_dir = Path(model_dir)

    logger.info("Creating the dataset")
    from bpnet.datasets import StrandedProfile, SeqClassification
    if fasta_file is not None:
        if regions is None:
            raise ValueError(
                "fasta_file specified. Expecting regions to be specified as well"
            )
        dl_valid = SeqClassification(
            fasta_file=fasta_file,
            intervals_file=regions,
            incl_chromosomes=include_chr,
            excl_chromosomes=exclude_chr,
            auto_resize_len=seq_width,
        )
        chrom_sizes = _chrom_sizes(fasta_file)
    else:
        if dataspec is None:
            logger.info("Using dataspec used to train the model")
            # Specify dataspec
            dataspec = model_dir / "dataspec.yml"

        ds = DataSpec.load(dataspec)
        dl_valid = StrandedProfile(ds,
                                   incl_chromosomes=include_chr,
                                   excl_chromosomes=exclude_chr,
                                   intervals_file=regions,
                                   peak_width=peak_width,
                                   shuffle=False,
                                   seq_width=seq_width)
        chrom_sizes = _chrom_sizes(ds.fasta_file)

    # Setup contribution score trimming (not required currently)
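    # Worked example (hypothetical numbers): if seq_width were 1000 and peak_width 800,
    # trim_start = (1000 - 800) // 2 = 100 and trim_end = 1000 - 100 = 900, so the kept
    # window trim_end - trim_start equals peak_width (800). With equal widths (the current
    # case, since both are read from config['seq_width']) nothing is trimmed.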
    if seq_width > peak_width:
        # Trim
        # make sure we can nicely trim the peak
        logger.info("Trimming the output")
        assert (seq_width - peak_width) % 2 == 0
        trim_start = (seq_width - peak_width) // 2
        trim_end = seq_width - trim_start
        assert trim_end - trim_start == peak_width
    elif seq_width == peak_width:
        trim_start = 0
        trim_end = peak_width
    else:
        raise ValueError("seq_width < peak_width")

    seqmodel = SeqModel.from_mdir(model_dir)

    # get all possible interpretation names
    # make sure they match the specified glob
    intp_names = [
        name for name, _ in seqmodel.get_intp_tensors(preact_only=False)
        if fnmatch_any(name, contrib_wildcards)
    ]
    logger.info(f"Using the following interpretation targets:")
    for n in intp_names:
        print(n)

    if max_regions is not None:
        if len(dl_valid) > max_regions:
            logger.info(
                f"Using {max_regions} regions instead of the original {len(dl_valid)}"
            )
        else:
            logger.info(
                f"--max-regions={max_regions} is larger than the dataset size: {len(dl_valid)}. "
                "Using the dataset size for max-regions")
            max_regions = len(dl_valid)
    else:
        max_regions = len(dl_valid)

    max_batches = int(np.ceil(max_regions / batch_size))

    writer = HDF5BatchWriter(output_file, chunk_size=storage_chunk_size)
    for i, batch in enumerate(
            tqdm(dl_valid.batch_iter(batch_size=batch_size,
                                     shuffle=shuffle_regions,
                                     num_workers=num_workers),
                 total=max_batches)):
        # store the original batch containing 'inputs' and 'targets'
        if skip_bias:
            batch['inputs'] = {
                'seq': batch['inputs']['seq']
            }  # ignore all other inputs

        # stop once the requested number of batches has been processed
        if i >= max_batches:
            break

        if shuffle_seq:
            # Di-nucleotide shuffle the sequences
            batch['inputs']['seq'] = onehot_dinucl_shuffle(
                batch['inputs']['seq'])

        for name in intp_names:
            hyp_contrib = seqmodel.contrib_score(
                batch['inputs']['seq'],
                name=name,
                method=method,
                batch_size=None)  # don't second-batch

            # put contribution scores to the dictionary
            # also trim the contribution scores appropriately so that
            # the output will always be w.r.t. the peak center
            batch[f"/hyp_contrib/{name}"] = hyp_contrib[:, trim_start:trim_end]

        # trim the sequence to the same window as the contribution scores
        batch['inputs']['seq'] = batch['inputs']['seq'][:, trim_start:trim_end]

        # TODO: maybe it would be better to have an explicit ContribFileWriter;
        # that way the written schema would be fixed
        writer.batch_write(batch)

    # add chromosome sizes
    writer.f.attrs['chrom_sizes'] = json.dumps(chrom_sizes)
    writer.close()
    logger.info(f"Done. Contribution score file was saved to: {output_file}")
Example #3
def bpnet_train(dataspec,
                output_dir,
                premade='bpnet9',
                config=None,
                override='',
                gpu=0,
                memfrac_gpu=0.45,
                num_workers=8,
                vmtouch=False,
                in_memory=False,
                wandb_project="",
                cometml_project="",
                run_id=None,
                note_params="",
                overwrite=False):
    """Train a model using gin-config

    Output files:
      train.log - log file
      model.h5 - Keras model HDF5 file
      seqmodel.pkl - Serialized SeqModel. This is the main trained model.
      eval-report.ipynb/.html - evaluation report containing training loss curves and some example model predictions.
        You can specify your own ipynb using `--override='report_template.name="my-template.ipynb"'`.
      model.gin -> copied from the input
      dataspec.yaml -> copied from the input
    """
    cometml_experiment, wandb_run, output_dir = start_experiment(
        output_dir=output_dir,
        cometml_project=cometml_project,
        wandb_project=wandb_project,
        run_id=run_id,
        note_params=note_params,
        overwrite=overwrite)
    # remember the executed command
    write_json(
        {
            "dataspec": dataspec,
            "output_dir": output_dir,
            "premade": premade,
            "config": config,
            "override": override,
            "gpu": gpu,
            "memfrac_gpu": memfrac_gpu,
            "num_workers": num_workers,
            "vmtouch": vmtouch,
            "in_memory": in_memory,
            "wandb_project": wandb_project,
            "cometml_project": cometml_project,
            "run_id": run_id,
            "note_params": note_params,
            "overwrite": overwrite
        },
        os.path.join(output_dir, 'bpnet-train.kwargs.json'),
        indent=2)

    # copy dataspec.yml and input config file over
    if config is not None:
        shutil.copyfile(config, os.path.join(output_dir, 'input-config.gin'))

    # parse and validate the dataspec
    ds = DataSpec.load(dataspec)
    related_dump_yaml(ds.abspath(), os.path.join(output_dir, 'dataspec.yml'))
    if vmtouch:
        if shutil.which('vmtouch') is None:
            logger.warning(
                "vmtouch is currently not installed. "
                "--vmtouch disabled. Please install vmtouch to enable it")
        else:
            # use vmtouch to load all file to memory
            ds.touch_all_files()

    # --------------------------------------------
    # Parse the config file
    # import gin.tf
    if gpu is not None:
        logger.info(f"Using gpu: {gpu}, memory fraction: {memfrac_gpu}")
        create_tf_session(gpu, per_process_gpu_memory_fraction=memfrac_gpu)

    gin_files = _get_gin_files(premade, config)

    # infer different hyper-parameters from the dataspec file
    if len(ds.bias_specs) > 0:
        use_bias = True
        if len(ds.bias_specs) > 1:
            # TODO - allow multiple bias tracks
            # - split the heads separately
            raise ValueError("Only a single bias track is currently supported")

        bias = [v for k, v in ds.bias_specs.items()][0]
        n_bias_tracks = len(bias.tracks)
    else:
        use_bias = False
        n_bias_tracks = 0
    tasks = list(ds.task_specs)
    # TODO - handle multiple track widths?
    tracks_per_task = [len(v.tracks) for k, v in ds.task_specs.items()][0]
    # figure out the right hyper-parameters
    dataspec_bindings = [
        f'dataspec="{dataspec}"', f'use_bias={use_bias}',
        f'n_bias_tracks={n_bias_tracks}', f'tracks_per_task={tracks_per_task}',
        f'tasks={tasks}'
    ]
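    # For a hypothetical dataspec with tasks ['Oct4', 'Sox2'], two tracks per task and a
    # single two-track bias set, these bindings would read:
    #   ['dataspec="dataspec.yml"', 'use_bias=True', 'n_bias_tracks=2',
    #    'tracks_per_task=2', "tasks=['Oct4', 'Sox2']"]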

    gin.parse_config_files_and_bindings(
        gin_files,
        bindings=dataspec_bindings + override.split(";"),
        # NOTE: custom files were inserted right after
        # the user's config file and before the `override`
        # parameters specified at the command line.
        # This allows the user to disable the bias correction
        # even if it is enabled in the config file.
        skip_unknown=False)

    # --------------------------------------------
    # Remember the parsed configs

    # comet - log environment
    if cometml_experiment is not None:
        # log other parameters
        cometml_experiment.log_parameters(dict(premade=premade,
                                               config=config,
                                               override=override,
                                               gin_files=gin_files,
                                               gpu=gpu),
                                          prefix='cli/')

    # wandb - log environment
    if wandb_run is not None:

        # store general configs
        wandb_run.config.update(
            dict_prefix_key(dict(premade=premade,
                                 config=config,
                                 override=override,
                                 gin_files=gin_files,
                                 gpu=gpu),
                            prefix='cli/'))

    return train(
        output_dir=output_dir,
        cometml_experiment=cometml_experiment,
        wandb_run=wandb_run,
        num_workers=num_workers,
        in_memory=in_memory,
        # to execute the sub-notebook
        memfrac_gpu=memfrac_gpu,
        gpu=gpu)
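A hedged usage sketch for `bpnet_train` above. The dataspec and output paths are placeholders; the `override` value reuses the `report_template.name` binding mentioned in the docstring and must be a ';'-separated list of gin bindings.

# Hypothetical call to bpnet_train defined above; paths are placeholders.
bpnet_train(
    dataspec='dataspec.yml',                               # DataSpec describing tracks and peak files
    output_dir='output/run-1',
    premade='bpnet9',                                      # premade gin config shipped with bpnet
    override='report_template.name="my-template.ipynb"',  # ';'-separated gin bindings
    num_workers=4,
    gpu=None)                                              # skip the explicit create_tf_session GPU setup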
Example #4
def bpnet_modisco_run(
    contrib_file,
    output_dir,
    null_contrib_file=None,
    premade='modisco-50k',
    config=None,
    override='',
    contrib_wildcard="*/profile/wn",  # on which contribution scores to run modisco
    only_task_regions=False,
    filter_npy=None,
    exclude_chr="",
    num_workers=10,
    gpu=None,  # no need to use a gpu by default
    memfrac_gpu=0.45,
    overwrite=False,
):
    """Run TF-MoDISco on the contribution scores stored in the contribution score file
    generated by `bpnet contrib`.
    """
    add_file_logging(output_dir, logger, 'modisco-run')
    if gpu is not None:
        logger.info(f"Using gpu: {gpu}, memory fraction: {memfrac_gpu}")
        create_tf_session(gpu, per_process_gpu_memory_fraction=memfrac_gpu)
    else:
        # Don't use any GPUs
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
        os.environ['MKL_THREADING_LAYER'] = 'GNU'

    import modisco
    assert '/' in contrib_wildcard

    if filter_npy is not None:
        filter_npy = os.path.abspath(str(filter_npy))
    if config is not None:
        config = os.path.abspath(str(config))

    # setup output file paths
    output_path = os.path.abspath(os.path.join(output_dir, "modisco.h5"))
    remove_exists(output_path, overwrite=overwrite)
    output_filter_npy = os.path.abspath(
        os.path.join(output_dir, 'modisco-run.subset-contrib-file.npy'))
    remove_exists(output_filter_npy, overwrite=overwrite)
    kwargs_json_file = os.path.join(output_dir, "modisco-run.kwargs.json")
    remove_exists(kwargs_json_file, overwrite=overwrite)
    if config is not None:
        config_output_file = os.path.join(output_dir,
                                          'modisco-run.input-config.gin')
        remove_exists(config_output_file, overwrite=overwrite)
        shutil.copyfile(config, config_output_file)

    # save the hyper-parameters
    write_json(
        dict(contrib_file=os.path.abspath(contrib_file),
             output_dir=str(output_dir),
             null_contrib_file=null_contrib_file,
             config=str(config),
             override=override,
             contrib_wildcard=contrib_wildcard,
             only_task_regions=only_task_regions,
             filter_npy=str(filter_npy),
             exclude_chr=exclude_chr,
             num_workers=num_workers,
             overwrite=overwrite,
             output_filter_npy=output_filter_npy,
             gpu=gpu,
             memfrac_gpu=memfrac_gpu), kwargs_json_file)

    # setup the gin config using premade, config and override
    cli_bindings = [f'num_workers={num_workers}']
    gin.parse_config_files_and_bindings(
        _get_gin_files(premade, config),
        bindings=cli_bindings + override.split(";"),
        # NOTE: custom files were inserted right after
        # the user's config file and before the `override`
        # parameters specified at the command line
        skip_unknown=False)
    log_gin_config(output_dir, prefix='modisco-run.')
    # --------------------------------------------

    # load the contribution file
    logger.info(f"Loading the contribution file: {contrib_file}")
    cf = ContribFile(contrib_file)
    tasks = cf.get_tasks()

    # figure out subset_tasks
    subset_tasks = set()
    for w in contrib_wildcard.split(","):
        task, head, head_summary = w.split("/")
        if task == '*':
            subset_tasks = None
        else:
            if task not in tasks:
                raise ValueError(f"task {task} not found in tasks: {tasks}")
            subset_tasks.add(task)
    if subset_tasks is not None:
        subset_tasks = list(subset_tasks)

    # --------------------------------------------
    # subset the intervals
    logger.info(f"Loading ranges")
    ranges = cf.get_ranges()
    # include all samples at the beginning
    include_samples = np.ones(len(cf)).astype(bool)

    # --only-task-regions
    if only_task_regions:
        if subset_tasks is None:
            logger.warning(
                "contrib_wildcard contains all tasks (specified by */<head>/<summary>). "
                "Not using --only-task-regions"
            )
        elif np.all(ranges['interval_from_task'] == ''):
            raise ValueError(
                "Contribution file wasn't created from multiple sets of peaks "
                "(interval_from_task='' for all ranges). Please disable --only-task-regions"
            )
        else:
            logger.info(f"Subsetting ranges according to `interval_from_task`")
            include_samples = include_samples & ranges[
                'interval_from_task'].isin(subset_tasks).values
            logger.info(
                f"Using {include_samples.sum()} / {len(include_samples)} regions after --only-task-regions subset"
            )

    # --exclude-chr
    if exclude_chr:
        logger.info(f"Excluding chromosomes: {exclude_chr}")
        chromosomes = ranges['chr']
        # exclude_chr is a comma-separated string; split it into a list before isin()
        include_samples = include_samples & (
            ~pd.Series(chromosomes).isin(exclude_chr.split(","))).values
        logger.info(
            f"Using {include_samples.sum()} / {len(include_samples)} regions after --exclude-chr subset"
        )

    # -- filter-npy
    if filter_npy is not None:
        print(f"Loading a filter file from {filter_npy}")
        include_samples = include_samples & np.load(filter_npy)
        logger.info(
            f"Using {include_samples.sum()} / {len(include_samples)} regions after --filter-npy subset"
        )

    # store the subset-contrib-file.npy
    logger.info(
        f"Saving the included samples from ContribFile to {output_filter_npy}")
    np.save(output_filter_npy, include_samples)
    # --------------------------------------------

    # convert to indices
    idx = np.arange(len(include_samples))[include_samples]
    seqs = cf.get_seq(idx=idx)

    # fetch the contribution scores from the importance score file
    # expand * to use all possible values
    # TODO - allow this to be done also for all the heads?
    hyp_contrib = {}
    task_names = []
    for w in contrib_wildcard.split(","):
        wc_task, head, head_summary = w.split("/")
        if wc_task == '*':
            use_tasks = tasks
        else:
            use_tasks = [wc_task]
        for task in use_tasks:
            key = f"{task}/{head}/{head_summary}"
            task_names.append(key)
            hyp_contrib[key] = cf._subset(cf.data[f'/hyp_contrib/{key}'],
                                          idx=idx)
    contrib = {k: v * seqs for k, v in hyp_contrib.items()}

    if null_contrib_file is not None:
        logger.info(f"Using null-contrib-file: {null_contrib_file}")
        null_cf = ContribFile(null_contrib_file)
        null_seqs = null_cf.get_seq()
        null_per_pos_scores = {
            key: null_seqs * null_cf.data[f'/hyp_contrib/{key}'][:]
            for key in task_names
        }
    else:
        # default Null distribution. Requires modisco >= 0.5
        logger.info(f"Using default null_contrib_scores")
        null_per_pos_scores = modisco.coordproducers.LaplaceNullDist(
            num_to_samp=10000)

    # run modisco.
    # NOTE: `workflow` and `report` parameters are provided by gin config files
    modisco_run(task_names=task_names,
                output_path=output_path,
                contrib_scores=contrib,
                hypothetical_contribs=hyp_contrib,
                one_hot=seqs,
                null_per_pos_scores=null_per_pos_scores)

    logger.info(
        f"bpnet modisco-run finished. modisco.h5 and other files can be found in: {output_dir}"
    )
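A hedged usage sketch for `bpnet_modisco_run` above. The contribution file is the output of `bpnet_contrib`; the `Oct4` task name is illustrative and must exist in your own contribution file.

# Hypothetical invocation of bpnet_modisco_run defined above; paths and task names are placeholders.
bpnet_modisco_run(
    contrib_file='contrib.h5',           # HDF5 file written by bpnet_contrib above
    output_dir='modisco/Oct4-profile/',
    contrib_wildcard='Oct4/profile/wn',  # single <task>/<head>/<summary> pattern
    only_task_regions=True,              # keep only regions that came from the Oct4 peak set
    exclude_chr='chrX,chrY',             # comma-separated chromosomes to drop
    gpu=None)                            # CPU only (CUDA_VISIBLE_DEVICES='')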