Example 1
import negspy.coordinates as nc


def load_chromsizes(chromsizes_filename, assembly=None):
    """
    Load a set of chromosomes from a file or using an assembly
    identifier. If using just an assembly identifier the chromsizes
    will be loaded from the negspy repository.

    Parameters
    ----------
    chromsizes_filename: string
        The file containing the tab-delimited chromosome sizes
    assembly: string
        Assembly name (e.g. 'hg19'). Not necessary if a chromsizes_filename is passed in
    """
    if chromsizes_filename is not None:
        chrom_info = nc.get_chrominfo_from_file(chromsizes_filename)
        chrom_names = chrom_info.chrom_order
        chrom_sizes = [
            chrom_info.chrom_lengths[c] for c in chrom_info.chrom_order
        ]
    else:
        if assembly is None:
            raise ValueError("No assembly or chromsizes specified")

        chrom_info = nc.get_chrominfo(assembly)
        chrom_names = nc.get_chromorder(assembly)
        chrom_sizes = nc.get_chromsizes(assembly)

    return (chrom_info, chrom_names, chrom_sizes)
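
A minimal usage sketch (assuming negspy is installed with its bundled hg19 data; the chromsizes path in the comment is illustrative):

# Load chromosome info from a bundled assembly identifier.
chrom_info, chrom_names, chrom_sizes = load_chromsizes(None, assembly='hg19')
print(chrom_names[0], chrom_sizes[0])  # e.g. 'chr1' and its length

# Or from a tab-delimited chromsizes file (illustrative path):
# chrom_info, chrom_names, chrom_sizes = load_chromsizes('hg19.chrom.sizes')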
Example 2
import math
import os
import os.path as op
import time

import h5py
import numpy as np
import pyBigWig as pbw

import negspy.coordinates as nc
# `ct` is assumed to be a module-level alias for the project's tiling
# helpers (it must provide ct.aggregate(array, num_to_agg)); its exact
# import path depends on the surrounding package.


def _bigwig(filepath,
            chunk_size=14,
            zoom_step=8,
            tile_size=1024,
            output_file=None,
            assembly='hg19',
            chromsizes_filename=None,
            chromosome=None):

    if output_file is None:
        if chromosome is None:
            output_file = op.splitext(filepath)[0] + '.hitile'
        else:
            output_file = op.splitext(
                filepath)[0] + '.' + chromosome + '.hitile'

    # Overwrite the output file if it exists
    if op.exists(output_file):
        os.remove(output_file)
    f = h5py.File(output_file, 'w')

    if chromsizes_filename is not None:
        chrom_info = nc.get_chrominfo_from_file(chromsizes_filename)
        chrom_order = list(nc.get_chromorder_from_file(chromsizes_filename))
        chrom_sizes = nc.get_chromsizes_from_file(chromsizes_filename)
    else:
        chrom_info = nc.get_chrominfo(assembly)
        chrom_order = list(nc.get_chromorder(assembly))
        chrom_sizes = nc.get_chromsizes(assembly)

    print("chrom_order:", chrom_order)
    assembly_size = chrom_info.total_length

    # how many values to read in at once while tiling
    chunk_size = tile_size * 2**chunk_size

    dsets = []  # data sets at each zoom level
    nan_dsets = []

    # initialize the arrays which will store the values at each stored zoom level
    z = 0
    positions = []  # store where we are at the current dataset
    data_buffers = [[]]
    nan_data_buffers = [[]]

    while assembly_size / 2**z > tile_size:
        dset_length = math.ceil(assembly_size / 2**z)
        dsets += [
            f.create_dataset('values_' + str(z), (dset_length, ),
                             dtype='f',
                             compression='gzip')
        ]
        nan_dsets += [
            f.create_dataset('nan_values_' + str(z), (dset_length, ),
                             dtype='f',
                             compression='gzip')
        ]

        data_buffers += [[]]
        nan_data_buffers += [[]]

        positions += [0]
        z += zoom_step

    # load the bigWig file
    bwf = pbw.open(filepath)

    # store some meta data
    d = f.create_dataset('meta', (1, ), dtype='f')

    if chromosome is not None:
        d.attrs['min-pos'] = chrom_info.cum_chrom_lengths[chromosome]
        d.attrs['max-pos'] = chrom_info.cum_chrom_lengths[
            chromosome] + bwf.chroms()[chromosome]
    else:
        d.attrs['min-pos'] = 0
        d.attrs['max-pos'] = assembly_size

    d.attrs['zoom-step'] = zoom_step
    d.attrs['max-length'] = assembly_size
    d.attrs['assembly'] = assembly
    d.attrs['chrom-names'] = [a.encode('utf-8') for a in chrom_order]
    d.attrs['chrom-sizes'] = chrom_sizes
    d.attrs['chrom-order'] = [a.encode('utf-8') for a in chrom_order]
    d.attrs['tile-size'] = tile_size
    d.attrs['max-zoom'] = max_zoom = math.ceil(
        math.log(d.attrs['max-length'] / tile_size) / math.log(2))
    d.attrs['max-width'] = tile_size * 2**max_zoom
    d.attrs['max-position'] = 0

    print("assembly size (max-length)", d.attrs['max-length'])
    print("max-width", d.attrs['max-width'])
    print("max_zoom:", d.attrs['max-zoom'])
    print("chunk-size:", chunk_size)
    print("chrom-order", d.attrs['chrom-order'])

    t1 = time.time()

    curr_zoom = 0

    def add_values_to_data_buffers(buffers_to_add, nan_buffers_to_add):
        curr_zoom = 0

        data_buffers[0] += buffers_to_add
        nan_data_buffers[0] += nan_buffers_to_add

        curr_time = time.time() - t1
        percent_progress = (positions[curr_zoom] + 1) / float(assembly_size)
        print(
            "position: {} progress: {:.2f} elapsed: {:.2f} remaining: {:.2f}".
            format(positions[curr_zoom] + 1, percent_progress, curr_time,
                   curr_time / (percent_progress) - curr_time))

        while len(data_buffers[curr_zoom]) >= chunk_size:
            # get the current chunk and store it, converting nans to 0
            print("len(data_buffers[curr_zoom])", len(data_buffers[curr_zoom]))
            curr_chunk = np.array(data_buffers[curr_zoom][:chunk_size])
            nan_curr_chunk = np.array(nan_data_buffers[curr_zoom][:chunk_size])
            print("positions[curr_zoom]:", positions[curr_zoom])

            dsets[curr_zoom][positions[curr_zoom]:positions[curr_zoom] +
                             chunk_size] = curr_chunk
            nan_dsets[curr_zoom][positions[curr_zoom]:positions[curr_zoom] +
                                 chunk_size] = nan_curr_chunk

            # aggregate and store aggregated values in the next zoom_level's data
            data_buffers[curr_zoom + 1] += list(
                ct.aggregate(curr_chunk, 2**zoom_step))
            nan_data_buffers[curr_zoom + 1] += list(
                ct.aggregate(nan_curr_chunk, 2**zoom_step))

            data_buffers[curr_zoom] = data_buffers[curr_zoom][chunk_size:]
            nan_data_buffers[curr_zoom] = nan_data_buffers[curr_zoom][
                chunk_size:]

            positions[curr_zoom] += chunk_size
            curr_zoom += 1

            if curr_zoom * zoom_step >= max_zoom:
                break

    # Do we only want values from a single chromosome?
    if chromosome is not None:
        chroms_to_use = [chromosome]
    else:
        chroms_to_use = chrom_order

    for chrom in chroms_to_use:
        print("chrom:", chrom)

        counter = 0
        chrom_size = chrom_info.chrom_lengths[chrom]

        d.attrs['max-position'] += chrom_size

        while counter < chrom_size:
            remaining = min(chunk_size, chrom_size - counter)

            if chrom not in bwf.chroms():
                values = [np.nan] * remaining
                nan_values = [1] * remaining
            else:
                values = bwf.values(chrom, counter, counter + remaining)
                nan_values = np.isnan(values).astype('i4')

            # print("counter:", counter, "remaining:", remaining,
            # "counter + remaining:", counter + remaining)
            counter += remaining
            curr_zoom = 0

            add_values_to_data_buffers(list(values), list(nan_values))

    while True:
        # get the current chunk and store it
        chunk_size = len(data_buffers[curr_zoom])
        curr_chunk = np.array(data_buffers[curr_zoom][:chunk_size])
        nan_curr_chunk = np.array(nan_data_buffers[curr_zoom][:chunk_size])

        dsets[curr_zoom][positions[curr_zoom]:positions[curr_zoom] +
                         chunk_size] = curr_chunk
        nan_dsets[curr_zoom][positions[curr_zoom]:positions[curr_zoom] +
                             chunk_size] = nan_curr_chunk

        # aggregate and store aggregated values in the next zoom_level's data
        data_buffers[curr_zoom + 1] += list(
            ct.aggregate(curr_chunk, 2**zoom_step))
        nan_data_buffers[curr_zoom + 1] += list(
            ct.aggregate(nan_curr_chunk, 2**zoom_step))

        data_buffers[curr_zoom] = data_buffers[curr_zoom][chunk_size:]
        nan_data_buffers[curr_zoom] = nan_data_buffers[curr_zoom][chunk_size:]

        positions[curr_zoom] += chunk_size
        curr_zoom += 1

        # we've created enough tile levels to cover the entire maximum width
        if curr_zoom * zoom_step >= max_zoom:
            break

    f.close()
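
A usage sketch for the function above; the input path is hypothetical, and pyBigWig plus negspy's bundled hg19 data are assumed to be available:

# Tile a hypothetical bigWig into 'signal.hitile' next to the input.
_bigwig('signal.bw', chunk_size=14, zoom_step=8, tile_size=1024,
        assembly='hg19')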
Example 3
import math
import os
import os.path as op
import sys
import time

import h5py
import numpy as np

import negspy.coordinates as nc
# As in the previous example, `ct` is assumed to provide
# ct.aggregate(array, num_to_agg).


def _bedgraph(filepath, output_file, assembly, chrom_col, from_pos_col,
              to_pos_col, value_col, has_header, chromosome, tile_size,
              chunk_size, method, nan_value, transform, count_nan,
              closed_interval, chromsizes_filename, zoom_step):

    if output_file is None:
        output_file = op.splitext(filepath)[0] + '.hitile'

    print("output file:", output_file)

    # Overwrite the output file if it exists
    if op.exists(output_file):
        os.remove(output_file)
    f = h5py.File(output_file, 'w')

    # get the information about the chromosomes in this assembly
    if chromsizes_filename is not None:
        chrom_info = nc.get_chrominfo_from_file(chromsizes_filename)
        chrom_order = [
            a.encode('utf-8')
            for a in nc.get_chromorder_from_file(chromsizes_filename)
        ]
        chrom_sizes = nc.get_chromsizes_from_file(chromsizes_filename)
    else:
        chrom_info = nc.get_chrominfo(assembly)
        chrom_order = [a.encode('utf-8') for a in nc.get_chromorder(assembly)]
        chrom_sizes = nc.get_chromsizes(assembly)

    assembly_size = chrom_info.total_length
    print('assembly_size:', assembly_size)

    # how many values to read in at once while tiling
    chunk_size = tile_size * 2**chunk_size

    dsets = []  # data sets at each zoom level
    nan_dsets = []  # store nan values

    # initialize the arrays which will store the values at each stored zoom level
    z = 0
    positions = []  # store where we are at the current dataset
    data_buffers = [[]]
    nan_data_buffers = [[]]

    while assembly_size / 2**z > tile_size:
        dset_length = math.ceil(assembly_size / 2**z)
        dsets += [
            f.create_dataset('values_' + str(z), (dset_length, ),
                             dtype='f',
                             compression='gzip')
        ]
        nan_dsets += [
            f.create_dataset('nan_values_' + str(z), (dset_length, ),
                             dtype='f',
                             compression='gzip')
        ]

        data_buffers += [[]]
        nan_data_buffers += [[]]

        positions += [0]
        z += zoom_step

    #print("dsets[0][-10:]", dsets[0][-10:])

    # load the bigWig file
    #print("filepath:", filepath)

    # store some meta data
    d = f.create_dataset('meta', (1, ), dtype='f')

    print("assembly:", assembly)
    #print("chrom_info:", nc.get_chromorder(assembly))

    d.attrs['zoom-step'] = zoom_step
    d.attrs['max-length'] = assembly_size
    d.attrs['assembly'] = assembly
    d.attrs['chrom-names'] = chrom_order
    d.attrs['chrom-sizes'] = chrom_sizes
    d.attrs['chrom-order'] = chrom_order
    d.attrs['tile-size'] = tile_size
    d.attrs['max-zoom'] = max_zoom = math.ceil(
        math.log(d.attrs['max-length'] / tile_size) / math.log(2))
    d.attrs['max-width'] = tile_size * 2**max_zoom
    d.attrs['max-position'] = 0

    print("assembly size (max-length)", d.attrs['max-length'])
    print("max-width", d.attrs['max-width'])
    print("max_zoom:", d.attrs['max-zoom'])
    print("chunk-size:", chunk_size)
    print("chrom-order", d.attrs['chrom-order'])

    t1 = time.time()

    # are we reading the input from stdin or from a file?
    # use a separate handle so we don't clobber the h5py file `f`
    if filepath == '-':
        input_file = sys.stdin
    else:
        if filepath.endswith('.gz'):
            import gzip
            input_file = gzip.open(filepath, 'rt')
        else:
            input_file = open(filepath, 'r')

    curr_zoom = 0

    def add_values_to_data_buffers(buffers_to_add, nan_buffers_to_add):
        curr_zoom = 0

        data_buffers[0] += buffers_to_add
        nan_data_buffers[0] += nan_buffers_to_add

        curr_time = time.time() - t1
        percent_progress = (positions[curr_zoom] + 1) / float(assembly_size)
        print(
            "position: {} progress: {:.2f} elapsed: {:.2f} remaining: {:.2f}".
            format(positions[curr_zoom] + 1, percent_progress, curr_time,
                   curr_time / (percent_progress) - curr_time))

        while len(data_buffers[curr_zoom]) >= chunk_size:
            # get the current chunk and store it, converting nans to 0
            print("len(data_buffers[curr_zoom])", len(data_buffers[curr_zoom]))
            curr_chunk = np.array(data_buffers[curr_zoom][:chunk_size])
            nan_curr_chunk = np.array(nan_data_buffers[curr_zoom][:chunk_size])
            print("positions[curr_zoom]:", positions[curr_zoom])

            dsets[curr_zoom][positions[curr_zoom]:positions[curr_zoom] +
                             chunk_size] = curr_chunk
            nan_dsets[curr_zoom][positions[curr_zoom]:positions[curr_zoom] +
                                 chunk_size] = nan_curr_chunk

            # aggregate and store aggregated values in the next zoom_level's data
            data_buffers[curr_zoom + 1] += list(
                ct.aggregate(curr_chunk, 2**zoom_step))
            nan_data_buffers[curr_zoom + 1] += list(
                ct.aggregate(nan_curr_chunk, 2**zoom_step))

            data_buffers[curr_zoom] = data_buffers[curr_zoom][chunk_size:]
            nan_data_buffers[curr_zoom] = nan_data_buffers[curr_zoom][
                chunk_size:]

            positions[curr_zoom] += chunk_size
            curr_zoom += 1

            if curr_zoom * zoom_step >= max_zoom:
                break

    values = []
    nan_values = []

    if has_header:
        input_file.readline()

    # the genome position up to which we've filled in values
    curr_genome_pos = 0

    for line in f:
        # each line should indicate a chromosome, start position and end position
        parts = line.strip().split()

        start_genome_pos = chrom_info.cum_chrom_lengths[parts[
            chrom_col - 1]] + int(parts[from_pos_col - 1])
        #print("len(values):", len(values), curr_genome_pos, start_genome_pos)
        #print("line:", line)

        if start_genome_pos - curr_genome_pos > 1:
            values += [np.nan] * (start_genome_pos - curr_genome_pos - 1)
            nan_values += [1] * (start_genome_pos - curr_genome_pos - 1)

            curr_genome_pos += (start_genome_pos - curr_genome_pos - 1)

        # count how many nan values there are in the dataset
        nan_count = 1 if parts[value_col - 1] == nan_value else 0

        # if the provided values are log2 transformed, we have to un-transform them
        if parts[value_col - 1] == nan_value:
            value = np.nan
        elif transform == 'exp2':
            value = 2**float(parts[value_col - 1])
        else:
            value = float(parts[value_col - 1])

        # print("pos:", int(parts[to_pos_col-1]) - int(parts[from_pos_col-1]))
        # we're going to add as many values are as specified in the bedfile line
        values_to_add = [value] * (int(parts[to_pos_col - 1]) -
                                   int(parts[from_pos_col - 1]))
        nan_counts_to_add = [nan_count] * (int(parts[to_pos_col - 1]) -
                                           int(parts[from_pos_col - 1]))

        if closed_interval:
            values_to_add += [value]
            nan_counts_to_add += [nan_count]

        # print("values_to_add", values_to_add)

        values += values_to_add
        nan_values += nan_counts_to_add

        d.attrs['max-position'] = start_genome_pos + len(values_to_add)

        #print("values:", values[:30])

        curr_genome_pos += len(values_to_add)

        while len(values) > chunk_size:
            print("len(values):", len(values), chunk_size)
            print("line:", line)
            add_values_to_data_buffers(values[:chunk_size],
                                       nan_values[:chunk_size])
            values = values[chunk_size:]
            nan_values = nan_values[chunk_size:]

    add_values_to_data_buffers(values, nan_values)

    # store the remaining data
    while True:
        # get the current chunk and store it
        chunk_size = len(data_buffers[curr_zoom])
        curr_chunk = np.array(data_buffers[curr_zoom][:chunk_size])
        nan_curr_chunk = np.array(nan_data_buffers[curr_zoom][:chunk_size])

        dsets[curr_zoom][positions[curr_zoom]:positions[curr_zoom] +
                         chunk_size] = curr_chunk
        nan_dsets[curr_zoom][positions[curr_zoom]:positions[curr_zoom] +
                             chunk_size] = nan_curr_chunk

        #print("chunk_size:", chunk_size, "len(curr_chunk):", len(curr_chunk), "len(nan_curr_chunk)", len(nan_curr_chunk))

        # aggregate and store aggregated values in the next zoom_level's data
        data_buffers[curr_zoom + 1] += list(
            ct.aggregate(curr_chunk, 2**zoom_step))
        nan_data_buffers[curr_zoom + 1] += list(
            ct.aggregate(nan_curr_chunk, 2**zoom_step))

        data_buffers[curr_zoom] = data_buffers[curr_zoom][chunk_size:]
        nan_data_buffers[curr_zoom] = nan_data_buffers[curr_zoom][chunk_size:]

        positions[curr_zoom] += chunk_size
        curr_zoom += 1

        # we've created enough tile levels to cover the entire maximum width
        if curr_zoom * zoom_step >= max_zoom:
            break

    if input_file is not sys.stdin:
        input_file.close()
    f.close()
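
A usage sketch with illustrative arguments (the path is hypothetical; column indices are 1-based, matching the parser above, and the method/nan_value settings are placeholders):

# Aggregate a hypothetical bedGraph (chrom, start, end, value) to .hitile.
_bedgraph('signal.bedgraph', output_file=None, assembly='hg19',
          chrom_col=1, from_pos_col=2, to_pos_col=3, value_col=4,
          has_header=False, chromosome=None, tile_size=1024,
          chunk_size=14, method='sum', nan_value='NA', transform=None,
          count_nan=False, closed_interval=False,
          chromsizes_filename=None, zoom_step=8)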
Example 4
import math
import os
import os.path as op
import random

import numpy as np
import slugid

import negspy.coordinates as nc
# `store_meta_data` is assumed to be a project-local helper that writes
# the tileset's metadata to the sqlite connection.


def _bedfile(filepath, output_file, assembly, importance_column, has_header,
             chromosome, max_per_tile, tile_size, delimiter,
             chromsizes_filename, offset):
    if output_file is None:
        output_file = filepath + ".multires"

    if op.exists(output_file):
        os.remove(output_file)

    bed_file = open(filepath, 'r')

    if chromsizes_filename is not None:
        chrom_info = nc.get_chrominfo_from_file(chromsizes_filename)
        chrom_names = chrom_info.chrom_order
        chrom_sizes = [
            chrom_info.chrom_lengths[c] for c in chrom_info.chrom_order
        ]
    else:
        chrom_info = nc.get_chrominfo(assembly)
        chrom_names = nc.get_chromorder(assembly)
        chrom_sizes = nc.get_chromsizes(assembly)

    print("chrom_names:", chrom_info.chrom_order)
    print("chrom_sizes:", chrom_sizes)

    def line_to_entry(line):
        '''
        Convert a BED file line to a dict which can later
        be stored as an entry in the sqlite file.
        '''
        try:
            start = int(line[1])
            stop = int(line[2])
        except ValueError:
            raise ValueError(
                "Error parsing the position, line: {}".format(line))

        chrom = line[0]

        if importance_column is None:
            importance = stop - start
        elif importance_column == 'random':
            importance = random.random()
        else:
            importance = int(line[int(importance_column) - 1])

        # convert chromosome coordinates to genome coordinates
        genome_start = chrom_info.cum_chrom_lengths[chrom] + start + offset
        genome_end = chrom_info.cum_chrom_lengths[chrom] + stop + offset

        pos_offset = genome_start - start
        parts = {
            'startPos': genome_start,
            'endPos': genome_end,
            'uid': slugid.nice().decode('utf-8'),
            'chrOffset': pos_offset,
            'fields': '\t'.join(line),
            'importance': importance,
            'chromosome': str(chrom)
        }

        return parts

    dset = []

    if has_header:
        line = bed_file.readline()
        header = line.strip().split(delimiter)
    else:
        line = bed_file.readline().strip()
        dset += [line_to_entry(line.strip().split(delimiter))]
        header = [str(i) for i in range(1, len(line.split(delimiter)) + 1)]
    print("header:", header)

    for line in bed_file:
        dset += [line_to_entry(line.strip().split(delimiter))]

    if chromosome is not None:
        dset = [d for d in dset if d['chromosome'] == chromosome]

    # We need chromosome information as well as the assembly size to properly
    # tile this data

    assembly_size = chrom_info.total_length + 1

    max_zoom = int(math.ceil(
        math.log(assembly_size / tile_size) / math.log(2)))

    # this script stores data in a sqlite database
    import sqlite3
    sqlite3.register_adapter(np.int64, lambda val: int(val))
    print("output_file:", output_file)
    conn = sqlite3.connect(output_file)

    # store some meta data
    store_meta_data(conn,
                    1,
                    max_length=assembly_size,
                    assembly=assembly,
                    chrom_names=chrom_names,
                    chrom_sizes=chrom_sizes,
                    tile_size=tile_size,
                    max_zoom=max_zoom,
                    max_width=tile_size * 2**max_zoom,
                    header=header)

    max_width = tile_size * 2**max_zoom
    uid_to_entry = {}

    intervals = []

    # store each bed file entry as an interval
    for d in dset:
        uid = d['uid']
        uid_to_entry[uid] = d
        intervals += [(d['startPos'], d['endPos'], uid)]

    tile_width = tile_size

    removed = set()

    c = conn.cursor()
    c.execute('''
    CREATE TABLE intervals
    (
        id int PRIMARY KEY,
        zoomLevel int,
        importance real,
        startPos int,
        endPos int,
        chrOffset int,
        uid text,
        fields text
    )
    ''')

    c.execute('''
        CREATE VIRTUAL TABLE position_index USING rtree(
            id,
            rStartPos, rEndPos
        )
        ''')

    curr_zoom = 0
    counter = 0

    max_viewable_zoom = max_zoom

    while curr_zoom <= max_viewable_zoom and len(intervals) > 0:
        # at each zoom level, add the top genes
        tile_width = tile_size * 2**(max_zoom - curr_zoom)

        for tile_num in range(max_width // tile_width):
            # go over each tile and distribute the remaining values
            from_value = tile_num * tile_width
            to_value = (tile_num + 1) * tile_width
            entries = [
                i for i in intervals if (i[0] < to_value and i[1] > from_value)
            ]
            # keep the most important entries; importance is looked up by
            # uid, the last element of each interval tuple (negate to sort
            # in descending order)
            values_in_tile = sorted(
                entries, key=lambda x: -uid_to_entry[x[-1]]['importance']
            )[:max_per_tile]

            if len(values_in_tile) > 0:
                for v in values_in_tile:
                    counter += 1

                    value = uid_to_entry[v[-1]]

                    # one extra question mark for the primary key
                    exec_statement = 'INSERT INTO intervals VALUES (?,?,?,?,?,?,?,?)'
                    #print("value:", value['startPos'])

                    ret = c.execute(
                        exec_statement,
                        # primary key, zoomLevel, startPos, endPos, chrOffset, line
                        (counter, curr_zoom, value['importance'],
                         value['startPos'], value['endPos'],
                         value['chrOffset'], value['uid'], value['fields']))
                    conn.commit()

                    exec_statement = 'INSERT INTO position_index VALUES (?,?,?)'
                    ret = c.execute(
                        exec_statement,
                        # counter serves as the row's primary key
                        (counter, value['startPos'], value['endPos']))
                    conn.commit()
                    intervals.remove(v)
        #print ("curr_zoom:", curr_zoom, file=sys.stderr)
        curr_zoom += 1

    conn.commit()
    conn.close()

    return
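
A usage sketch with illustrative arguments (hypothetical path; with output_file left as None this writes 'regions.bed.multires'):

_bedfile('regions.bed', output_file=None, assembly='hg19',
         importance_column=None, has_header=False, chromosome=None,
         max_per_tile=100, tile_size=1024, delimiter='\t',
         chromsizes_filename=None, offset=0)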
Example 5
import argparse
import sys

import negspy.coordinates as nc


def main():
    parser = argparse.ArgumentParser(description="""
    
    python chr_pos_to_genome_pos.py -c 1:2,3:4

    Convert chromosome,position pairs to genome positions. Assumes that the
    coordinates refer to the hg19 assembly (unless otherwise specified).

    Example:

    2       NM_000014       chr12   -       9220303 9268825

    -> python scripts/chr_pos_to_genome_pos.py -c 3:5,3:6

    2       NM_000014       genome  -       2115405269      2115453791

    --------------------------------

    This also works with space-delimited fields:

    chr5    56765,56766

    ->python scripts/chr_pos_to_genome_pos.py -c 1:2

    genome  881683465,881683466

""")

    parser.add_argument('-a', '--assembly', default='hg19')
    parser.add_argument('-s', '--chromsizes-file', default=None)
    parser.add_argument('-n', '--new-chrom', default=None)
    parser.add_argument(
        '-c',
        '--columns',
        default='1,2',
        help="Which columns to translate to genome positions. "
        "Column pairs should be 1-based and separated by colons")

    args = parser.parse_args()

    if args.chromsizes_file is not None:
        chrom_info = nc.get_chrominfo_from_file(args.chromsizes_file)
    else:
        chrom_info = nc.get_chrominfo(args.assembly)

    for line in sys.stdin:
        try:
            line_output = []
            line_parts = line.strip().split()
            translated_positions = {}
            translated_chroms = {}

            for translate_pair in [[int(y) for y in x.split(':')]
                                   for x in args.columns.split(',')]:
                # go through the pairs of columns that need to be translated
                # to genome position; the position column is assumed to be a
                # comma-separated list of values (although it doesn't
                # actually need to be)
                chrom, poss = line_parts[translate_pair[0] - 1], line_parts[
                    translate_pair[1] - 1].strip(",").split(',')
                genome_pos = ",".join(
                    map(str, [
                        nc.chr_pos_to_genome_pos(chrom, int(pos), chrom_info)
                        for pos in poss
                    ]))

                # note that we've translated these columns and shouldn't include them in the output
                translated_positions[translate_pair[1] - 1] = genome_pos
                translated_chroms[translate_pair[0] - 1] = chrom

            for i, part in enumerate(line_parts):
                if i in translated_chroms:
                    # replace chromosome identifiers (e.g. 'chr1') with 'genome' to indicate the positions
                    if args.new_chrom is None:
                        line_output += ['genome({})'.format(translated_chroms[i])]
                    else:
                        line_output += [args.new_chrom]
                elif i in translated_positions:
                    # this column used to contain a position so we need to replace it with a translated
                    # position
                    line_output += [translated_positions[i]]
                else:
                    # if this column didn't contain a translated position output it as is
                    line_output += [part]

            try:
                print("\t".join(map(str, line_output)))
            except BrokenPipeError:
                # Output is probably being run through "head" or something similar
                break
        except KeyError as ke:
            print("KeyError:", ke, line.strip(), file=sys.stderr)
Example 6
import negspy.coordinates as nc


def test_chrom_info_from_chromsizes():
    chromsizes_file = 'negspy/data/hg19/chromInfo.txt'

    chrom_info = nc.get_chrominfo_from_file(chromsizes_file)
    assert chrom_info.chrom_order[0] == 'chr1'
    assert chrom_info.chrom_order[-1] == 'chr18_gl000207_random'
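
A quick interactive check of the same chromInfo fields used throughout these examples (run from a negspy checkout so the relative path resolves):

import negspy.coordinates as nc

chrom_info = nc.get_chrominfo_from_file('negspy/data/hg19/chromInfo.txt')
print(chrom_info.chrom_order[:3])  # chromosomes in file order
print(chrom_info.total_length)     # summed assembly length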