def load_hic_data_from_reads(fnam, resolution, **kwargs):
    """
    Builds a HiC_data object from a TSV file of mapped read pairs.

    :param fnam: tsv file with reads1 and reads2
    :param resolution: the resolution of the experiment (size of a bin in
       bases). Chromosome sizes are read from the '# CRM' header lines of the
       input file
    :param True get_sections: build the (chromosome, bin) column index; set it
       to False for very high resolutions, when this column index does not fit
       in memory
    """
    sections = []
    genome_seq = OrderedDict()
    fhandler = open(fnam)
    line = fhandler.next()
    size = 0
    # parse the commented header to get chromosome lengths (in bins)
    while line.startswith('#'):
        if line.startswith('# CRM '):
            crm, clen = line[6:].split()
            genome_seq[crm] = int(clen) / resolution + 1
            size += genome_seq[crm]
        line = fhandler.next()
    section_sizes = {}
    if kwargs.get('get_sections', True):
        for crm in genome_seq:
            len_crm = genome_seq[crm]
            section_sizes[(crm, )] = len_crm
            sections.extend([(crm, i) for i in xrange(len_crm)])
    dict_sec = dict([(j, i) for i, j in enumerate(sections)])
    imx = HiC_data((), size, genome_seq, dict_sec, resolution=resolution)
    # count read pairs per pair of bins
    try:
        while True:
            _, cr1, ps1, _, _, _, _, cr2, ps2, _ = line.split('\t', 9)
            try:
                ps1 = dict_sec[(cr1, int(ps1) / resolution)]
                ps2 = dict_sec[(cr2, int(ps2) / resolution)]
            except KeyError:
                ps1 = int(ps1) / resolution
                ps2 = int(ps2) / resolution
            imx[ps1, ps2] += 1
            imx[ps2, ps1] += 1
            line = fhandler.next()
    except StopIteration:
        pass
    fhandler.close()
    imx.symmetricized = True
    return imx
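
# Usage sketch for load_hic_data_from_reads (illustrative only; the file name
# and resolution below are assumptions, not part of the library). The input
# must be a TADbit-style TSV of read pairs with '# CRM <name> <length>' header
# lines:
#
#     hic = load_hic_data_from_reads('valid_pairs.tsv', resolution=100000)
#     # raw, symmetric counts indexed by genomic bin
#     count = hic[10, 25]
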
def optimal_reader(f, normalized=False, resolution=1):
    """
    Reads a matrix generated by TADbit. Can be slower than autoreader, but
    uses almost a third of the memory.

    :param f: an iterable (typically an open file).
    :param False normalized: if the matrix is normalized
    :param 1 resolution: resolution of the matrix
    """
    # get masked bins from the commented header
    masked = {}
    pos = 0
    for line in f:
        if line[0] != '#':
            break
        pos += len(line)
        if line.startswith('# MASKED'):
            masked = dict([(int(n), True) for n in line.split()[2:]])
    f.seek(pos)
    # super fast: only the first two fields of each row are needed here
    header = [tuple(line.split(None, 2)[:2]) for line in f]
    f.seek(pos)
    ncol = len(header)
    # Get the numeric values and remove extra columns
    num = float if normalized else int
    chromosomes, sections, resolution = _header_to_section(header, resolution)

    #############################################################
    # monkey patch HiC_data to make it faster
    def fast_setitem(self, key, val):
        "Use directly dict setitem"
        super(HiC_data, self).__setitem__(key, val)

    def fast_getitem(self, key):
        "Use directly dict getitem"
        try:
            return super(HiC_data, self).__getitem__(key)
        except KeyError:
            return 0

    original_setitem = HiC_data.__setitem__
    original_getitem = HiC_data.__getitem__
    # apply the patch
    HiC_data.__setitem__ = fast_setitem
    HiC_data.__getitem__ = fast_getitem

    hic = HiC_data(
        ((j, num(v)) for i, line in enumerate(f)
         for j, v in enumerate(line.split()[2:], i * ncol) if num(v)),
        size=ncol, masked=masked, dict_sec=sections,
        chromosomes=chromosomes, resolution=resolution,
        symmetricized=False)

    # make it symmetric
    if is_asymmetric_dico(hic):
        hic.symmetricized = True
        symmetrize_dico(hic)

    # undo patching
    HiC_data.__setitem__ = original_setitem
    HiC_data.__getitem__ = original_getitem
    hic.__setitem__ = original_setitem
    hic.__getitem__ = original_getitem
    #############################################################

    return hic
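
# Usage sketch for optimal_reader (illustrative only; the file name is an
# assumption). The handler must be a plain, seekable text file in the TADbit
# matrix format, optionally carrying a '# MASKED' header line:
#
#     with open('matrix_100kb.tsv') as handler:
#         hic = optimal_reader(handler, normalized=False, resolution=100000)
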
def load_hic_data_from_bam(fnam, resolution, biases=None, tmpdir='.', ncpus=8,
                           filter_exclude=(1, 2, 3, 4, 6, 7, 8, 9, 10),
                           region=None, verbose=True, clean=True):
    """
    :param fnam: TADbit-generated BAM file with read-ends1 and read-ends2
    :param resolution: the resolution of the experiment (size of a bin in
       bases)
    :param None biases: path to pickle file where the biases are stored. Keys
       in this file should be: 'biases', 'badcol', 'decay' and 'resolution'
    :param '.' tmpdir: path to folder where to create temporary files
    :param 8 ncpus: number of CPUs to use to read the BAM file
    :param (1, 2, 3, 4, 6, 7, 8, 9, 10) filter_exclude: filters to define the
       set of valid pairs of reads
    :param None region: chromosome name; if None, the whole genome will be
       loaded

    :returns: HiC_data object
    """
    bam = AlignmentFile(fnam)
    genome_seq = OrderedDict((c, l) for c, l in zip(
        bam.references, [x / resolution + 1 for x in bam.lengths]))
    bam.close()

    sections = []
    for crm in genome_seq:
        len_crm = genome_seq[crm]
        sections.extend([(crm, i) for i in xrange(len_crm)])

    size = sum(genome_seq.values())
    chromosomes = {region: genome_seq[region]} if region else genome_seq
    dict_sec = dict([(j, i) for i, j in enumerate(sections)])
    imx = HiC_data((), size, chromosomes=chromosomes, dict_sec=dict_sec,
                   resolution=resolution)

    if biases:
        if isinstance(biases, basestring):
            biases = load(open(biases))
        if biases['resolution'] != resolution:
            raise Exception('ERROR: resolution of biases does not match '
                            'the one wanted (%d vs %d)' % (
                                biases['resolution'], resolution))
        if region:
            # shift bias indices to the start of the requested chromosome
            chrom_start = 0
            for crm in genome_seq:
                if crm == region:
                    break
                len_crm = genome_seq[crm]
                chrom_start += len_crm
            imx.bads = dict((b - chrom_start, biases['badcol'][b])
                            for b in biases['badcol'])
            imx.bias = dict((b - chrom_start, biases['biases'][b])
                            for b in biases['biases'])
        else:
            imx.bads = biases['badcol']
            imx.bias = biases['biases']
        imx.expected = biases['decay']

    get_matrix(fnam, resolution, biases=None, filter_exclude=filter_exclude,
               normalization='raw', tmpdir=tmpdir, clean=clean, ncpus=ncpus,
               dico=imx, region1=region, verbose=verbose)
    imx._symmetricize()
    imx.symmetricized = True

    return imx
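
# Usage sketch for load_hic_data_from_bam (illustrative only; the BAM path,
# biases pickle and region name are assumptions):
#
#     hic = load_hic_data_from_bam('intersection.bam', resolution=100000,
#                                  biases='biases_100kb.pickle',
#                                  region='chr1', ncpus=4)
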
def read_matrix(things, parser=None, hic=True, resolution=1, **kwargs):
    """
    Reads and checks a matrix from a file (using
    :func:`pytadbit.parser.hic_parser.autoreader`) or a list.

    :param things: might be either a file name, a file handler or a list of
       lists (all with same length)
    :param None parser: a parser function that returns a tuple of lists
       representing the data matrix; with this file example.tsv:

       ::

                    chrT_001  chrT_002  chrT_003  chrT_004
          chrT_001       629       164        88       105
          chrT_002        86       612       175       110
          chrT_003       159       216       437       105
          chrT_004       100       111       146       278

       the output of parser('example.tsv') might be:
       ``([629, 86, 159, 100, 164, 612, 216, 111, 88, 175, 437, 146, 105,
       110, 105, 278])``

    :param 1 resolution: resolution of the matrix
    :param True hic: if False, TADbit assumes that the files contain
       normalized data

    :returns: the corresponding matrix concatenated into a huge list, also
       returns the number of rows
    """
    one = kwargs.get('one', True)
    global HIC_DATA
    HIC_DATA = hic
    if not isinstance(things, list):
        things = [things]
    matrices = []
    for thing in things:
        if isinstance(thing, HiC_data):
            matrices.append(thing)
        elif isinstance(thing, file):
            parser = parser or (abc_reader if __is_abc(thing) else autoreader)
            matrix, size, header, masked, sym = parser(thing)
            thing.close()
            chromosomes, sections, resolution = _header_to_section(
                header, resolution)
            matrices.append(HiC_data(
                matrix, size, dict_sec=sections, chromosomes=chromosomes,
                resolution=resolution, symmetricized=sym, masked=masked))
        elif isinstance(thing, str):
            if is_cooler(thing, resolution if resolution > 1 else None):
                matrix, size, header, masked, sym = parse_cooler(
                    thing, resolution if resolution > 1 else None, not hic)
            else:
                try:
                    parser = parser or (abc_reader if __is_abc(gzopen(thing))
                                        else autoreader)
                    matrix, size, header, masked, sym = parser(gzopen(thing))
                except IOError:
                    if len(thing.split('\n')) > 1:
                        parser = parser or (abc_reader if __is_abc(
                            thing.split('\n')) else autoreader)
                        matrix, size, header, masked, sym = parser(
                            thing.split('\n'))
                    else:
                        raise IOError('\n ERROR: file %s not found\n' % thing)
            sections = dict([(h, i) for i, h in enumerate(header)])
            chromosomes, sections, resolution = _header_to_section(
                header, resolution)
            matrices.append(HiC_data(
                matrix, size, dict_sec=sections, chromosomes=chromosomes,
                masked=masked, resolution=resolution, symmetricized=sym))
        elif isinstance(thing, list):
            if all([len(thing) == len(l) for l in thing]):
                size = len(thing)
                matrix = [(i + j * size, v) for i, l in enumerate(thing)
                          for j, v in enumerate(l) if v]
            else:
                raise Exception('must be list of lists, all with same length.')
            matrices.append(HiC_data(matrix, size))
        elif isinstance(thing, tuple):
            # case we know what we are doing and passing directly list of tuples
            matrix = thing
            siz = sqrt(len(thing))
            if int(siz) != siz:
                raise AttributeError('ERROR: matrix should be square.\n')
            size = int(siz)
            matrices.append(HiC_data(matrix, size))
        elif 'matrix' in str(type(thing)):
            try:
                row, col = thing.shape
                if row != col:
                    raise Exception('matrix needs to be square.')
                matrix = thing.reshape(-1).tolist()[0]
                size = row
            except Exception as exc:
                print 'Error found:', exc
            matrices.append(HiC_data(matrix, size))
        else:
            raise Exception('Unable to read this file or whatever it is :)')
    if one:
        return matrices[0]
    else:
        return matrices
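
# Usage sketch for read_matrix with an in-memory square list of lists (the
# values are the ones shown in the docstring example above; any square matrix
# of the same shape would do):
#
#     mat = read_matrix([[629, 164,  88, 105],
#                        [ 86, 612, 175, 110],
#                        [159, 216, 437, 105],
#                        [100, 111, 146, 278]])
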
def run(opts):
    check_options(opts)
    launch_time = time.localtime()

    param_hash = digest_parameters(opts, extra=['quiet'])

    coord1 = opts.coord1

    if not coord1:
        region1 = None
        start1 = None
        end1 = None
    else:
        try:
            crm1, pos1 = coord1.split(':')
            start1, end1 = pos1.split('-')
            region1 = crm1
            start1 = int(start1)
            end1 = int(end1)
        except ValueError:
            region1 = coord1
            start1 = None
            end1 = None

    printime('Importing hic in %s format' % opts.format)
    if opts.format == 'matrix' or opts.format == 'text':
        with gzopen(opts.input) as f_thing:
            masked, chroms_gen, crm, beg, _, _ = read_file_header(f_thing)
            if not chroms_gen or (region1 and region1 not in chroms_gen):
                raise Exception('''ERROR: Chromosome size not included in import file.
                Please include the chromosome sizes of the data that
                you want to import in the header of the file. Example:
                # CRM chr1    249250621''')
    elif opts.format == 'cooler':
        if is_cooler(opts.input, opts.reso if opts.reso > 1 else None):
            chroms_gen = parse_header(opts.input,
                                      opts.reso if opts.reso > 1 else None)
            if not chroms_gen or (region1 and region1 not in chroms_gen):
                raise Exception('''ERROR: Chromosome size not included in import file.''')
        else:
            raise Exception('''ERROR: The input file is not a cooler''')

    chroms = OrderedDict((crm, int(chroms_gen[crm] // opts.reso) + 1)
                         for crm in chroms_gen)
    sections = []
    if not region1:
        size = 0
        for crm in chroms:
            size += chroms[crm]
            sections.extend([(crm, i) for i in range(chroms[crm])])
    elif not start1:
        size = chroms[region1]
        sections.extend([(region1, i) for i in range(size)])
    else:
        #size = (end1 - start1)//opts.reso
        size = chroms[region1]
        sections.extend([(region1, i)
                         for i in range(start1 // opts.reso,
                                        end1 // opts.reso)])
    dict_sec = dict([(j, i) for i, j in enumerate(sections)])

    bias_file = None
    badcol = {}
    if opts.format == 'text':
        with gzopen(opts.input) as f_thing:
            matrix = abc_reader(f_thing, size,
                                start1 // opts.reso if start1 else None)
        size_mat = size
    elif opts.format == 'matrix':
        with gzopen(opts.input) as in_f:
            matrix, size_mat, _, masked, _ = autoreader(in_f)
        if size != size_mat:
            raise Exception('''ERROR: The size of the specified region is
            different from the data in the matrix''')
    elif opts.format == 'cooler':
        matrix, weights, size, header = parse_cooler(
            opts.input, opts.reso if opts.reso > 1 else None,
            normalized=True, raw_values=True)
        masked = {}
        size_mat = size
        if len(set(weights)) > 1:
            printime('Transforming cooler weights to biases')
            outdir_norm = path.join(opts.workdir, '04_normalization')
            mkdir(outdir_norm)
            bias_file = path.join(outdir_norm, 'biases_%s_%s.pickle' % (
                nicer(opts.reso).replace(' ', ''), param_hash))
            out = open(bias_file, 'wb')
            badcol.update((i, True) for i, m in enumerate(weights) if m == 0)
            dump({'biases': dict((k, b if b > 0 else float('nan'))
                                 for k, b in enumerate(weights)),
                  'decay': {},
                  'badcol': badcol,
                  'resolution': opts.reso}, out, HIGHEST_PROTOCOL)
            out.close()

    hic = HiC_data(matrix, size_mat, dict_sec=dict_sec, chromosomes=chroms,
                   masked=masked, resolution=opts.reso)
    #from pytadbit.mapping.analyze import hic_map
    #hic_map(hic, normalized=False, focus='chr1', show=True, cmap='viridis')

    printime('Creating BAM file')
    outbam = path.join(opts.workdir, '03_filtered_reads',
                       'intersection_%s' % param_hash)
    total_counts = create_BAMhic(hic, opts.cpus, outbam, chroms_gen,
                                 opts.reso, samtools=opts.samtools)

    finish_time = time.localtime()

    # save all job information to sqlite DB
    save_to_db(opts, total_counts, size_mat, bias_file, len(badcol),
               outbam + '.bam', launch_time, finish_time)
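
# run() expects an argparse-like 'opts' namespace. The attributes read in the
# function above are listed here for reference; the example values are
# illustrative assumptions only:
#
#     opts.input     # path to the matrix / text / cooler file to import
#     opts.format    # one of 'matrix', 'text' or 'cooler'
#     opts.reso      # resolution in bases, e.g. 100000
#     opts.coord1    # optional region, e.g. 'chr1' or 'chr1:1000000-2000000'
#     opts.workdir   # TADbit working directory where the BAM will be written
#     opts.cpus      # number of CPUs used to write the BAM
#     opts.samtools  # path to the samtools binary
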