def __init__(self, output_names=None, use_dna=True, dna_wlen=None,
             replicate_names=None, cpg_wlen=None, cpg_max_dist=25000,
             encode_replicates=False):
    self.output_names = to_list(output_names)
    self.use_dna = use_dna
    self.dna_wlen = dna_wlen
    self.replicate_names = to_list(replicate_names)
    self.cpg_wlen = cpg_wlen
    self.cpg_max_dist = cpg_max_dist
    self.encode_replicates = encode_replicates
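# Illustrative usage sketch (not part of the original module): constructing a
# reader for a joint DNA + CpG model. The output and replicate names below are
# hypothetical placeholders; window lengths would normally be derived from the
# trained model's input shapes (see `data_reader_config_from_model` below).
def _example_data_reader():
    reader = DataReader(output_names=['cpg/cell0', 'cpg/cell1'],
                        use_dna=True, dna_wlen=1001,
                        replicate_names=['cell0', 'cell1'],
                        cpg_wlen=50, cpg_max_dist=25000)
    return reader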
def read_anno_file(anno_file, chromos=None, nb_sample=None):
    """Read annotations from BED file.

    Reads annotations from a BED file and merges overlapping annotations.

    Parameters
    ----------
    anno_file: str
        File name.
    chromos: list
        List of chromosomes for filtering annotations.
    nb_sample: int
        Maximum number of annotated regions.

    Returns
    -------
    :class:`pandas.DataFrame`
        :class:`pandas.DataFrame` with columns `chromo`, `start`, `end`.
    """
    anno = pd.read_table(anno_file, header=None, usecols=[0, 1, 2],
                         dtype={0: 'str', 1: 'int32', 2: 'int32'},
                         nrows=nb_sample)
    anno.columns = ['chromo', 'start', 'end']
    # Normalize chromosome names, e.g. 'chr1' -> '1'.
    anno.chromo = anno.chromo.str.upper().str.replace('chr', '', case=False)
    if chromos is not None:
        chromos = to_list(chromos)
        anno = anno.loc[anno.chromo.isin(chromos)]
    anno = join_overlapping_frame(anno)
    return anno
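# Illustrative usage sketch (not part of the original module): read and merge
# annotated regions for two chromosomes. The file name 'anno.bed' is a
# hypothetical placeholder; chromosome names are matched without the 'chr'
# prefix, which `read_anno_file` strips.
def _example_read_anno():
    anno = read_anno_file('anno.bed', chromos=['1', '5'], nb_sample=10000)
    print(anno.head())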
def data_reader_config_from_model(model, config_out_fpath=None,
                                  replicate_names=None):
    """Return a :class:`DataReader` configuration derived from `model`.

    Builds the configuration dictionary needed to construct a
    :class:`DataReader` for reading data for `model`.

    Parameters
    ----------
    model: :class:`Model`
        :class:`Model`.
    config_out_fpath: str
        If not `None`, path of a JSON file to which the configuration is
        written.
    replicate_names: list
        Name of input cells of `model`.

    Returns
    -------
    dict
        Configuration dictionary with the arguments of
        :class:`DataReader`.
    """
    use_dna = False
    dna_wlen = None
    cpg_wlen = None
    output_names = None
    encode_replicates = False
    # Derive the reader configuration from the model's input layers.
    input_shapes = to_list(model.input_shape)
    for input_name, input_shape in zip(model.input_names, input_shapes):
        if input_name == 'dna':
            # Read DNA sequences.
            use_dna = True
            dna_wlen = input_shape[1]
        elif input_name.startswith('cpg/state/'):
            # DEPRECATED: legacy model. Decode replicate names from input name.
            replicate_names = decode_replicate_names(
                input_name.replace('cpg/state/', ''))
            assert len(replicate_names) == input_shape[1]
            cpg_wlen = input_shape[2]
            encode_replicates = True
        elif input_name == 'cpg/state':
            # Read neighboring CpG sites.
            if not replicate_names:
                raise ValueError('Replicate names required!')
            if len(replicate_names) != input_shape[1]:
                tmp = '{r} replicates found but CpG model was trained with' \
                    ' {s} replicates. Use `--nb_replicate {s}` or ' \
                    ' `--replicate_names` option to select {s} replicates!'
                tmp = tmp.format(r=len(replicate_names), s=input_shape[1])
                raise ValueError(tmp)
            cpg_wlen = input_shape[2]
    output_names = model.output_names
    config = {'output_names': output_names,
              'use_dna': use_dna,
              'dna_wlen': dna_wlen,
              'cpg_wlen': cpg_wlen,
              'replicate_names': replicate_names,
              'encode_replicates': encode_replicates}
    if config_out_fpath is not None:
        with open(config_out_fpath, 'w') as ofh:
            json.dump(config, ofh)
    return config
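# Illustrative usage sketch (not part of the original module): derive a reader
# configuration from a loaded model and instantiate a `DataReader` from it.
# `model` is assumed to be a Keras model loaded elsewhere (e.g. via
# `mod.load_model`); the replicate names are hypothetical placeholders.
def _example_reader_from_model(model):
    config = data_reader_config_from_model(
        model, config_out_fpath='reader_config.json',
        replicate_names=['cell0', 'cell1'])
    return DataReader(**config)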
def main(self, name, opts):
    logging.basicConfig(filename=opts.log_file,
                        format='%(levelname)s (%(asctime)s): %(message)s')
    log = logging.getLogger(name)
    if opts.verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)
    log.debug(opts)

    if opts.seed is not None:
        np.random.seed(opts.seed)

    if not opts.model_files:
        raise ValueError('No model files provided!')

    log.info('Loading model ...')
    K.set_learning_phase(0)
    model = mod.load_model(opts.model_files, log=log.info)

    weight_layer, act_layer = mod.get_first_conv_layer(model.layers, True)
    log.info('Using activation layer "%s"' % act_layer.name)
    log.info('Using weight layer "%s"' % weight_layer.name)

    try:
        dna_idx = model.input_names.index('dna')
    except ValueError:
        # `list.index` raises ValueError if there is no 'dna' input.
        raise IOError('Model is not a valid DNA model!')

    fun_outputs = to_list(act_layer.output)
    if opts.store_preds:
        fun_outputs += to_list(model.output)
    fun = K.function([to_list(model.input)[dna_idx]], fun_outputs)

    log.info('Reading data ...')
    if opts.store_outputs or opts.store_preds:
        output_names = model.output_names
    else:
        output_names = None
    data_reader = mod.DataReader(
        output_names=output_names,
        use_dna=True,
        dna_wlen=to_list(model.input_shape)[dna_idx][1])
    nb_sample = dat.get_nb_sample(opts.data_files, opts.nb_sample)
    data_reader = data_reader(opts.data_files,
                              nb_sample=nb_sample,
                              batch_size=opts.batch_size,
                              loop=False,
                              shuffle=opts.shuffle)
    meta_reader = hdf.reader(opts.data_files, ['chromo', 'pos'],
                             nb_sample=nb_sample,
                             batch_size=opts.batch_size,
                             loop=False,
                             shuffle=False)

    out_file = h5.File(opts.out_file, 'w')
    out_group = out_file

    weights = weight_layer.get_weights()
    out_group['weights/weights'] = weights[0]
    out_group['weights/bias'] = weights[1]

    def h5_dump(path, data, idx, dtype=None, compression='gzip'):
        # Create the dataset lazily on first write, then fill it batch-wise.
        if path not in out_group:
            if dtype is None:
                dtype = data.dtype
            out_group.create_dataset(
                name=path,
                shape=[nb_sample] + list(data.shape[1:]),
                dtype=dtype,
                compression=compression)
        out_group[path][idx:idx + len(data)] = data

    log.info('Computing activations')
    progbar = ProgressBar(nb_sample, log.info)
    idx = 0
    for data in data_reader:
        if isinstance(data, tuple):
            inputs, outputs, weights = data
        else:
            inputs = data
        if isinstance(inputs, dict):
            inputs = list(inputs.values())
        batch_size = len(inputs[0])
        progbar.update(batch_size)

        if opts.store_inputs:
            for i, name in enumerate(model.input_names):
                h5_dump('inputs/%s' % name,
                        dna.onehot_to_int(inputs[i]), idx)

        if opts.store_outputs:
            for name, output in six.iteritems(outputs):
                h5_dump('outputs/%s' % name, output, idx)

        fun_eval = fun(inputs)
        act = fun_eval[0]

        if opts.act_wlen:
            # Restrict activations to a window of length `act_wlen` centered
            # on the sequence.
            delta = opts.act_wlen // 2
            ctr = act.shape[1] // 2
            act = act[:, (ctr - delta):(ctr + delta + 1)]

        if opts.act_fun:
            if opts.act_fun == 'mean':
                act = act.mean(axis=1)
            elif opts.act_fun == 'wmean':
                weights = linear_weights(act.shape[1])
                act = np.average(act, axis=1, weights=weights)
            elif opts.act_fun == 'max':
                act = act.max(axis=1)
            else:
                raise ValueError('Invalid function "%s"!' % (opts.act_fun))

        h5_dump('act', act, idx)

        if opts.store_preds:
            preds = fun_eval[1:]
            for i, name in enumerate(model.output_names):
                h5_dump('preds/%s' % name, preds[i].squeeze(), idx)

        for name, value in six.iteritems(next(meta_reader)):
            h5_dump(name, value, idx)

        idx += batch_size
    progbar.close()

    out_file.close()
    log.info('Done!')
    return 0
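# The 'wmean' branch above averages filter activations with position-dependent
# weights from `linear_weights`. A minimal sketch of a triangular weighting
# that decays linearly from the sequence center (an assumption made for
# illustration, not the actual definition of `linear_weights`):
def _linear_weights_sketch(length):
    import numpy as np
    # Distance of each position from the center position.
    center = (length - 1) / 2.0
    dist = np.abs(np.arange(length) - center)
    # Linearly decreasing weights, normalized to sum to one.
    weights = 1.0 - dist / (center + 1.0)
    return weights / weights.sum()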
def main(self, name, opts):
    logging.basicConfig(filename=opts.log_file,
                        format='%(levelname)s (%(asctime)s): %(message)s')
    log = logging.getLogger(name)
    if opts.verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)

    if not opts.model_files:
        raise ValueError('No model files provided!')

    log.info('Loading model ...')
    model = mod.load_model(opts.model_files)

    log.info('Loading data ...')
    nb_sample = dat.get_nb_sample(opts.data_files, opts.nb_sample)
    replicate_names = dat.get_replicate_names(
        opts.data_files[0],
        regex=opts.replicate_names,
        nb_key=opts.nb_replicate)
    data_reader = mod.data_reader_from_model(
        model, replicate_names, replicate_names=replicate_names)

    # Seed used since unobserved input CpG states are randomly sampled
    if opts.seed is not None:
        np.random.seed(opts.seed)
        random.seed(opts.seed)

    data_reader = data_reader(opts.data_files,
                              nb_sample=nb_sample,
                              batch_size=opts.batch_size,
                              loop=False,
                              shuffle=False)

    meta_reader = hdf.reader(opts.data_files, ['chromo', 'pos'],
                             nb_sample=nb_sample,
                             batch_size=opts.batch_size,
                             loop=False,
                             shuffle=False)

    writer = None
    if opts.out_data:
        writer = H5Writer(opts.out_data, nb_sample)

    log.info('Predicting ...')
    nb_tot = 0
    nb_eval = 0
    data_eval = dict()
    perf_eval = []
    progbar = ProgressBar(nb_sample, log.info)
    for inputs, outputs, weights in data_reader:
        batch_size = len(list(inputs.values())[0])
        nb_tot += batch_size
        progbar.update(batch_size)

        preds = to_list(model.predict(inputs))

        data_batch = dict()
        data_batch['preds'] = dict()
        data_batch['outputs'] = dict()
        for i, name in enumerate(model.output_names):
            data_batch['preds'][name] = preds[i].squeeze()
            data_batch['outputs'][name] = outputs[name].squeeze()

        for name, value in six.iteritems(next(meta_reader)):
            data_batch[name] = value

        if writer:
            writer.write_dict(data_batch)

        nb_eval += batch_size
        dat.add_to_dict(data_batch, data_eval)

        if nb_tot >= nb_sample or \
                (opts.eval_size and nb_eval >= opts.eval_size):
            data_eval = dat.stack_dict(data_eval)
            perf_eval.append(ev.evaluate_outputs(data_eval['outputs'],
                                                 data_eval['preds']))
            data_eval = dict()
            nb_eval = 0
    progbar.close()

    if writer:
        writer.close()

    report = pd.concat(perf_eval)
    report = report.groupby(['metric', 'output']).mean().reset_index()

    if opts.out_report:
        report.to_csv(opts.out_report, sep='\t', index=False)

    report = ev.unstack_report(report)
    print(report.to_string())

    log.info('Done!')
    return 0
def main(self, name, opts):
    logging.basicConfig(filename=opts.log_file,
                        format='%(levelname)s (%(asctime)s): %(message)s')
    log = logging.getLogger(name)
    if opts.verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)

    if not opts.model_files:
        raise ValueError('No model files provided!')

    log.info('Loading model ...')
    model = mod.load_model(opts.model_files)

    log.info('Loading data ...')
    nb_sample = dat.get_nb_sample(opts.data_files, opts.nb_sample)
    replicate_names = dat.get_replicate_names(
        opts.data_files[0],
        regex=opts.replicate_names,
        nb_key=opts.nb_replicate)
    data_reader = mod.data_reader_from_model(
        model, replicate_names, replicate_names=replicate_names)

    data_reader = data_reader(opts.data_files,
                              nb_sample=nb_sample,
                              batch_size=opts.batch_size,
                              loop=False,
                              shuffle=False)

    meta_reader = hdf.reader(opts.data_files, ['chromo', 'pos'],
                             nb_sample=nb_sample,
                             batch_size=opts.batch_size,
                             loop=False,
                             shuffle=False)

    log.info('Predicting ...')
    data = dict()
    progbar = ProgressBar(nb_sample, log.info)
    for inputs, outputs, weights in data_reader:
        batch_size = len(list(inputs.values())[0])
        progbar.update(batch_size)

        preds = to_list(model.predict(inputs))

        data_batch = dict()
        data_batch['preds'] = dict()
        data_batch['outputs'] = dict()
        for i, name in enumerate(model.output_names):
            data_batch['preds'][name] = preds[i].squeeze()
            data_batch['outputs'][name] = outputs[name].squeeze()

        for name, value in next(meta_reader).items():
            data_batch[name] = value

        dat.add_to_dict(data_batch, data)
    progbar.close()

    data = dat.stack_dict(data)
    report = ev.evaluate_outputs(data['outputs'], data['preds'])

    if opts.out_report:
        report.to_csv(opts.out_report, sep='\t', index=False)

    report = ev.unstack_report(report)
    print(report.to_string())

    if opts.out_data:
        hdf.write_data(data, opts.out_data)

    log.info('Done!')
    return 0