Example #1
def get_frag(c: cooler.api.Cooler,
             resolution: int,
             offsets: pd.Series,
             chrom1: str,
             start1: int,
             end1: int,
             chrom2: str,
             start2: int,
             end2: int,
             width: int = 22,
             height: int = -1,
             padding: int = 10,
             normalize: bool = True,
             balanced: bool = True,
             percentile: float = 100.0,
             ignore_diags: int = 0,
             no_normalize: bool = False) -> np.ndarray:
    """
    Retrieves a matrix fragment.

    Args:
        c:
            Cooler object.
        chrom1:
            Chromosome 1. E.g.: `1` or `chr1`.
        start1:
            First start position in base pairs relative to `chrom1`.
        end1:
            First end position in base pairs relative to `chrom1`.
        chrom2:
            Chromosome 2. E.g.: `1` or `chr1`.
        start2:
            Second start position in base pairs relative to `chrom2`.
        end2:
            Second end position in base pairs relative to `chrom2`.
        offsets:
            Pandas Series of chromosome offsets in bins.
        width:
            Width of the fragment in pixels.
        height:
            Height of the fragments in pixels. If `-1` `height` will equal
            `width`. Defaults to `-1`.
        padding: Percental padding related to the dimension of the fragment.
            E.g., 10 = 10% padding (5% per side). Defaults to `10`.
        normalize:
            If `True` the fragment will be normalized to [0, 1].
            Defaults to `True`.
        balanced:
            If `True` the fragment will be balanced using Cooler.
            Defaults to `True`.
        percentile:
            Percentile clip. E.g., For 99 the maximum will be
            capped at the 99-percentile. Defaults to `100.0`.
        ignore_diags:
            Number of diagonals to be ignored, i.e., set to 0.
            Defaults to `0`.
        no_normalize:
            If `true` the returned matrix is not normalized.
            Defaults to `False`.

    Returns:

    """

    if height == -1:
        height = width

    # Restrict padding to be [0, 100]%
    padding = min(100, max(0, padding)) / 100

    try:
        offset1 = offsets[chrom1]
        offset2 = offsets[chrom2]
    except KeyError:
        # Retry with a `chr` prefix before giving up
        offset1 = offsets['chr{}'.format(chrom1)]
        offset2 = offsets['chr{}'.format(chrom2)]

    start_bin1 = offset1 + int(round(float(start1) / resolution))
    end_bin1 = offset1 + int(round(float(end1) / resolution)) + 1

    start_bin2 = offset2 + int(round(float(start2) / resolution))
    end_bin2 = offset2 + int(round(float(end2) / resolution)) + 1

    # Apply percentile padding
    padding1 = int(round(((end_bin1 - start_bin1) / 2) * padding))
    padding2 = int(round(((end_bin2 - start_bin2) / 2) * padding))
    start_bin1 -= padding1
    start_bin2 -= padding2
    end_bin1 += padding1
    end_bin2 += padding2

    # Get the size of the region
    dim1 = end_bin1 - start_bin1
    dim2 = end_bin2 - start_bin2

    # Get additional absolute padding if needed
    padding1 = 0
    if dim1 < width:
        padding1 = int((width - dim1) / 2)
        start_bin1 -= padding1
        end_bin1 += padding1

    padding2 = 0
    if dim2 < height:
        padding2 = int((height - dim2) / 2)
        start_bin2 -= padding2
        end_bin2 += padding2

    # In case the final dimension does not match the desired dimension we
    # increase the end bin. This can happen when the padding is not
    # divisible by 2, since the padding is rounded to the nearest integer.
    abs_dim1 = abs(start_bin1 - end_bin1)
    if abs_dim1 < width:
        end_bin1 += width - abs_dim1
        abs_dim1 = width

    abs_dim2 = abs(start_bin2 - end_bin2)
    if abs_dim2 < height:
        end_bin2 += height - abs_dim2
        abs_dim2 = height

    # Maximum width / height is 512
    if abs_dim1 > hss.SNIPPET_MAT_MAX_DATA_DIM: raise SnippetTooLarge()
    if abs_dim2 > hss.SNIPPET_MAT_MAX_DATA_DIM: raise SnippetTooLarge()

    # Finally, adjust for negative start bins. Since relative bin IDs are
    # offset by the start, out-of-bounds regions will appear as a white
    # (empty) margin.
    real_start_bin1 = start_bin1 if start_bin1 >= 0 else 0
    real_start_bin2 = start_bin2 if start_bin2 >= 0 else 0

    # Get the data
    data = c.matrix(as_pixels=True, balance=False,
                    max_chunk=np.inf)[real_start_bin1:end_bin1,
                                      real_start_bin2:end_bin2]

    # Annotate pixels for balancing
    bins = c.bins(convert_enum=False)[['weight']]
    data = cooler.annotate(data, bins, replace=False)

    # Calculate relative bin IDs
    rel_bin1 = np.add(data['bin1_id'].values, -start_bin1)
    rel_bin2 = np.add(data['bin2_id'].values, -start_bin2)

    # Balance counts
    if balanced:
        values = data['count'].values.astype(np.float32)
        values *= data['weight1'].values * data['weight2'].values
    else:
        values = data['count'].values

    # Get pixel IDs for the upper triangle
    idx1 = np.add(np.multiply(rel_bin1, abs_dim1), rel_bin2)

    # Mirror matrix
    idx2_1 = np.add(data['bin2_id'].values, -start_bin1)
    idx2_2 = np.add(data['bin1_id'].values, -start_bin2)
    idx2 = np.add(np.multiply(idx2_1, abs_dim1), idx2_2)
    valid_bins = np.where((idx2_1 < abs_dim1) & (idx2_2 >= 0))

    # Ignore diagonals
    diags_start_row = None
    if ignore_diags > 0:
        try:
            diags_start_idx = np.min(
                np.where(data['bin1_id'].values == data['bin2_id'].values))
            diags_start_row = (rel_bin1[diags_start_idx] -
                               rel_bin2[diags_start_idx])
        except ValueError:
            pass

    # Copy pixel values onto the final array
    frag_len = abs_dim1 * abs_dim2
    frag = np.zeros(frag_len, dtype=np.float32)
    # Make sure we're within the bounds
    idx1_f = np.where(idx1 < frag_len)
    frag[idx1[idx1_f]] = values[idx1_f]
    frag[idx2[valid_bins]] = values[valid_bins]
    frag = frag.reshape((abs_dim1, abs_dim2))

    # Store low quality bins
    low_quality_bins = np.where(np.isnan(frag))

    # Assign 0 for now to avoid influencing the max values
    frag[low_quality_bins] = 0

    # Scale fragment down if needed
    scaled = False
    scale_x = width / frag.shape[0]
    if frag.shape[0] > width or frag.shape[1] > height:
        scaled_frag = np.zeros((width, height), float)
        frag = scaled_frag + zoomArray(frag, scaled_frag.shape, order=1)
        scaled = True

    # Normalize by minimum
    if not no_normalize:
        min_val = np.min(frag)
        frag -= min_val

    ignored_idx = None

    # Remove diagonals
    if ignore_diags > 0 and diags_start_row is not None:
        if width == height:
            scaled_row = int(np.rint(diags_start_row / scale_x))

            idx = np.diag_indices(width)
            scaled_idx = (idx if scaled_row == 0 else
                          [idx[0][scaled_row:], idx[0][:-scaled_row]])

            for i in range(ignore_diags):

                # First set all cells to be ignored to `-1` so that we can
                # easily query for them later.
                if i == 0:
                    frag[scaled_idx] = -1
                else:
                    dist_to_diag = scaled_row - i
                    dist_neg = min(0, dist_to_diag)
                    off = 0 if dist_to_diag >= 0 else i - scaled_row

                    # Above diagonal
                    frag[((scaled_idx[0] - i)[off:],
                          (scaled_idx[1])[off:])] = -1

                    # Extra cutoff at the bottom right
                    frag[(
                        range(scaled_idx[0][-1] - i,
                              scaled_idx[0][-1] + 1 + dist_neg),
                        range(scaled_idx[1][-1],
                              scaled_idx[1][-1] + i + 1 + dist_neg),
                    )] = -1

                    # Below diagonal
                    frag[((scaled_idx[0] + i)[:-i], (scaled_idx[1])[:-i])] = -1

            # Save the final selection of ignored cells for fast access
            # later and set those values to `0` now.
            ignored_idx = np.where(frag == -1)
            frag[ignored_idx] = 0

        else:
            logger.warning(
                'Ignoring the diagonal is only supported for square fragments')

    # Cap at the given percentile
    max_val = np.percentile(frag, percentile)
    frag = np.clip(frag, 0, max_val)

    # Normalize by maximum
    if not no_normalize and max_val > 0:
        frag /= max_val

    # Set the ignored diagonal to the maximum
    if ignored_idx is not None:
        frag[ignored_idx] = 1.0

    if not scaled:
        # Recover low quality bins
        frag[low_quality_bins] = -1

    return frag
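
Note that `hss`, `SnippetTooLarge`, and `zoomArray` are helpers from the surrounding codebase and are not shown here. A minimal sketch of how `get_frag` might be called, assuming a cooler file at the hypothetical path `data/example.cool` and deriving the bin offsets from the cooler's chromosome table:

import cooler
import numpy as np
import pandas as pd

# Open a cooler file; the path is an assumption for this sketch.
c = cooler.Cooler('data/example.cool')
resolution = c.binsize

# Cumulative chromosome offsets in bins: each chromosome's offset is the
# total number of bins of all preceding chromosomes.
bins_per_chrom = np.ceil(c.chromsizes.values / resolution).astype(int)
offsets = pd.Series(
    np.concatenate(([0], np.cumsum(bins_per_chrom)[:-1])),
    index=c.chromsizes.index)

# Retrieve a balanced, normalized 64x64 snippet of
# chr1:1,000,000-2,000,000 against itself.
frag = get_frag(c, resolution, offsets,
                'chr1', 1000000, 2000000,
                'chr1', 1000000, 2000000,
                width=64)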
Example #2
def get_frag_by_loc_from_imtiles(imtiles_file,
                                 loci,
                                 zoom_level=0,
                                 padding=0,
                                 tile_size=256,
                                 no_cache=False):
    db = None
    div = 1
    width = 0
    height = 0

    ims = []

    got_info = False

    for locus in loci:
        locus_id = locus[-1]

        if not no_cache:
            im_snip = None
            try:
                im_snip = np.load(BytesIO(rdb.get('im_snip_%s' % locus_id)))
                if im_snip is not None:
                    ims.append(im_snip)
                    continue
            except Exception:
                pass

        if not got_info:
            db = sqlite3.connect(imtiles_file)
            info = db.execute('SELECT * FROM tileset_info').fetchone()

            max_zoom = info[6]
            max_width = info[8]
            max_height = info[9]

            div = 2**(max_zoom - zoom_level)
            width = max_width / div
            height = max_height / div

            got_info = True

        start1 = round(locus[0] / div)
        end1 = round(locus[1] / div)
        start2 = round(locus[2] / div)
        end2 = round(locus[3] / div)

        if not is_within(start1, end1, start2, end2, width, height):
            ims.append(None)
            continue

        # Get tile ids
        tile_start1_id = start1 // tile_size
        tile_end1_id = end1 // tile_size
        tile_start2_id = start2 // tile_size
        tile_end2_id = end2 // tile_size

        tiles_x_range = range(tile_start1_id, tile_end1_id + 1)
        tiles_y_range = range(tile_start2_id, tile_end2_id + 1)

        # Make sure that no more than 6 standard tiles (256px) are loaded.
        if tile_size * len(tiles_x_range) > hss.SNIPPET_IMT_MAX_DATA_DIM:
            raise SnippetTooLarge()
        if tile_size * len(tiles_y_range) > hss.SNIPPET_IMT_MAX_DATA_DIM:
            raise SnippetTooLarge()

        # Extract image tiles
        tiles = []
        for y in tiles_y_range:
            for x in tiles_x_range:
                tiles.append(
                    Image.open(
                        BytesIO(
                            db.execute(
                                'SELECT image FROM tiles WHERE z=? AND y=? AND x=?',
                                (zoom_level, y, x)).fetchone()[0])))

        im_snip = get_frag_from_image_tiles(tiles, tile_size, tiles_x_range,
                                            tiles_y_range, tile_start1_id,
                                            tile_start2_id, start1, end1,
                                            start2, end2)

        # Cache for 30 min
        if not no_cache:
            with BytesIO() as b:
                np.save(b, im_snip)
                rdb.set('im_snip_%s' % locus_id, b.getvalue(), 60 * 30)

        ims.append(im_snip)

    if db:
        db.close()

    return ims
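
`rdb` (a Redis client), `is_within`, and `get_frag_from_image_tiles` are assumed to come from the surrounding module. A minimal sketch of a call, with hypothetical pixel coordinates given at the tileset's maximum zoom level:

# Each locus is [x_start, x_end, y_start, y_end, ..., unique_id], with
# coordinates in pixels at the tileset's maximum zoom level.
loci = [
    [0, 512, 0, 512, 'locus-0'],
    [1024, 1536, 256, 768, 'locus-1'],
]

snips = get_frag_by_loc_from_imtiles(
    'data/example.imtiles',  # assumed path to an SQLite imtiles file
    loci,
    zoom_level=2,
    no_cache=True,  # bypass the Redis cache for this sketch
)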
Example #3
def get_frag_by_loc_from_osm(imtiles_file,
                             loci,
                             zoom_level=0,
                             padding=0,
                             tile_size=256,
                             no_cache=False):
    width = 360
    height = 180

    ims = []

    prefixes = ['a', 'b', 'c']
    prefix_idx = math.floor(random() * len(prefixes))
    osm_src = 'http://{}.tile.openstreetmap.org'.format(prefixes[prefix_idx])

    s = CacheControl(requests.Session())

    for locus in loci:
        locus_id = locus[-1]

        if not no_cache:
            osm_snip = None
            try:
                osm_snip = np.load(BytesIO(rdb.get('osm_snip_%s' % locus_id)))
                if osm_snip is not None:
                    ims.append(osm_snip)
                    continue
            except Exception:
                pass

        start_lng = locus[0]
        end_lng = locus[1]
        start_lat = locus[2]
        end_lat = locus[3]

        if not is_within(start_lng + 180, end_lng + 180, end_lat + 90,
                         start_lat + 90, width, height):
            ims.append(None)
            continue

        # Get tile ids
        start1, start2 = get_tile_pos_from_lng_lat(start_lng, start_lat,
                                                   zoom_level)
        end1, end2 = get_tile_pos_from_lng_lat(end_lng, end_lat, zoom_level)

        x_pad = padding * (end1 - start1)
        y_pad = padding * (start2 - end2)

        start1 -= x_pad
        end1 += x_pad
        start2 += y_pad
        end2 -= y_pad

        tile_start1_id = math.floor(start1)
        tile_start2_id = math.floor(start2)
        tile_end1_id = math.floor(end1)
        tile_end2_id = math.floor(end2)

        start1 = math.floor(start1 * tile_size)
        start2 = math.floor(start2 * tile_size)
        end1 = math.ceil(end1 * tile_size)
        end2 = math.ceil(end2 * tile_size)

        tiles_x_range = range(tile_start1_id, tile_end1_id + 1)
        tiles_y_range = range(tile_start2_id, tile_end2_id + 1)

        # Make sure that no more than 6 standard tiles (256px) are loaded.
        if tile_size * len(tiles_x_range) > hss.SNIPPET_OSM_MAX_DATA_DIM:
            raise SnippetTooLarge()
        if tile_size * len(tiles_y_range) > hss.SNIPPET_OSM_MAX_DATA_DIM:
            raise SnippetTooLarge()

        # Extract image tiles
        tiles = []
        for y in tiles_y_range:
            for x in tiles_x_range:
                src = '{}/{}/{}/{}.png'.format(osm_src, zoom_level, x, y)

                r = s.get(src)

                if r.status_code == 200:
                    tiles.append(Image.open(BytesIO(r.content)).convert('RGB'))
                else:
                    tiles.append(None)

        osm_snip = get_frag_from_image_tiles(tiles, tile_size, tiles_x_range,
                                             tiles_y_range, tile_start1_id,
                                             tile_start2_id, start1, end1,
                                             start2, end2)

        if not no_cache:
            with BytesIO() as b:
                np.save(b, osm_snip)
                rdb.set('osm_snip_%s' % locus_id, b.getvalue(), 60 * 30)

        ims.append(osm_snip)

    return ims
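
`get_tile_pos_from_lng_lat` is not shown above; a minimal sketch, assuming the standard OSM slippy-map (Web Mercator) convention for fractional tile coordinates, could look like this:

import math

def get_tile_pos_from_lng_lat(lng, lat, zoom):
    # Fractional tile coordinates under the slippy-map convention
    n = 2 ** zoom
    x = (lng + 180.0) / 360.0 * n
    lat_rad = math.radians(lat)
    y = (1.0 - math.log(math.tan(lat_rad) + 1.0 / math.cos(lat_rad)) /
         math.pi) / 2.0 * n
    return x, y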
Example #4
def get_fragments_by_loci(request):
    '''
    Retrieve a list of loci and return the corresponding matrix fragments.

    Args:
        request (django.http.HttpRequest): The request object containing the
            list of loci.

    Returns:
        A JsonResponse (or HttpResponse) containing the requested fragments.
    '''

    if isinstance(request.data, str):
        return JsonResponse(
            {
                'error': 'Request body needs to be an array or object.',
                'error_message': 'Request body needs to be an array or object.'
            },
            status=400)

    try:
        loci = request.data.get('loci', [])
    except AttributeError:
        loci = request.data
    except Exception as e:
        return JsonResponse(
            {
                'error': 'Could not read request body.',
                'error_message': str(e)
            },
            status=400)

    try:
        forced_rep_idx = request.data.get('representativeIndices', None)
    except Exception:
        forced_rep_idx = None
    '''
    Loci list must be of type:
    [cooler]          [imtiles]
    0: chrom1         start1
    1: start1         end1
    2: end1           start2
    3: chrom2         end2
    4: start2         dataset
    5: end2           zoomLevel
    6: dataset        dim*
    7: zoomOutLevel
    8: dim*

    *) Optional
    '''
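    # For illustration only (hypothetical values), a cooler locus might look
    # like
    #   ['chr1', 0, 1000000, 'chr1', 0, 1000000, 'cooler-uuid', -1, 64]
    # and an imtiles locus like
    #   [0, 1024, 0, 1024, 'imtiles-uuid', 0, 64]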

    params = get_params(request, GET_FRAG_PARAMS)

    dims = params['dims']
    padding = params['padding']
    no_balance = params['no-balance']
    percentile = params['percentile']
    precision = params['precision']
    no_cache = params['no-cache']
    ignore_diags = params['ignore-diags']
    no_normalize = params['no-normalize']
    aggregate = params['aggregate']
    aggregation_method = params['aggregation-method']
    max_previews = params['max-previews']
    encoding = params['encoding']
    representatives = params['representatives']

    # Check if requesting a snippet from a `.cool` cooler file
    is_cool = len(loci) and len(loci[0]) > 7
    tileset_idx = 6 if is_cool else 4
    zoom_level_idx = tileset_idx + 1

    filetype = None
    new_filetype = None
    previews = []
    previews_2d = []
    ts_cache = {}
    mat_idx = None

    total_valid_loci = 0
    loci_lists = {}
    loci_ids = []
    try:
        for locus in loci:
            tileset = None
            tileset_file = ''

            if locus[tileset_idx]:
                if locus[tileset_idx] in ts_cache:
                    tileset = ts_cache[locus[tileset_idx]]['obj']
                    tileset_file = ts_cache[locus[tileset_idx]]['path']
                elif locus[tileset_idx].endswith('.cool'):
                    tileset_file = path.join('data', locus[tileset_idx])
                else:
                    try:
                        tileset = Tileset.objects.get(uuid=locus[tileset_idx])
                        tileset_file = tileset.datafile.path
                        ts_cache[locus[tileset_idx]] = {
                            "obj": tileset,
                            "path": tileset_file
                        }

                    except AttributeError:
                        return JsonResponse(
                            {
                                'error':
                                'Tileset ({}) does not exist'.format(
                                    locus[tileset_idx]),
                            },
                            status=400)
                    except Tileset.DoesNotExist:
                        if locus[tileset_idx].startswith('osm'):
                            new_filetype = locus[tileset_idx]
                        else:
                            return JsonResponse(
                                {
                                    'error':
                                    'Tileset ({}) does not exist'.format(
                                        locus[tileset_idx]),
                                },
                                status=400)
            else:
                return JsonResponse({
                    'error': 'Tileset not specified',
                },
                                    status=400)

            # Get the dimensions of the snippets (i.e., width and height in px)
            inset_dim = (locus[zoom_level_idx + 1] if
                         (len(locus) >= zoom_level_idx + 2
                          and locus[zoom_level_idx + 1]) else None)
            out_dim = dims if inset_dim is None else inset_dim

            # Make sure out dim (in pixel) is not too large
            if ((is_cool and out_dim > hss.SNIPPET_MAT_MAX_OUT_DIM) or
                (not is_cool and out_dim > hss.SNIPPET_IMG_MAX_OUT_DIM)):
                return JsonResponse(
                    {
                        'error': 'Snippet too large',
                        'error_message': str(SnippetTooLarge())
                    },
                    status=400)

            if tileset_file not in loci_lists:
                loci_lists[tileset_file] = {}

            if is_cool:
                # Get max abs dim in base pairs
                max_abs_dim = max(locus[2] - locus[1], locus[5] - locus[4])

                with h5py.File(tileset_file, 'r') as f:
                    # get base resolution (bin size) of cooler file
                    if 'resolutions' in f:
                        # v2
                        resolutions = sorted(
                            [int(key) for key in f['resolutions'].keys()])
                        closest_res = 0
                        for i, res in enumerate(resolutions):
                            if (max_abs_dim / out_dim) - res < 0:
                                closest_res = resolutions[max(0, i - 1)]
                                break
                        zoomout_level = (locus[zoom_level_idx]
                                         if locus[zoom_level_idx] >= 0 else
                                         closest_res)
                    else:
                        # v1
                        max_zoom = f.attrs['max-zoom']
                        bin_size = int(f[str(max_zoom)].attrs['bin-size'])

                        # Find closest zoom level if `zoomout_level < 0`
                        # Assuming resolutions of powers of 2
                        zoomout_level = (locus[zoom_level_idx] if
                                         locus[zoom_level_idx] >= 0 else floor(
                                             log((max_abs_dim / bin_size) /
                                                 out_dim, 2)))

            else:
                # Get max abs dim in base pairs
                max_abs_dim = max(locus[1] - locus[0], locus[3] - locus[2])

                bin_size = 1

                # Find closest zoom level if `zoomout_level < 0`
                # Assuming resolutions of powers of 2
                zoomout_level = (locus[zoom_level_idx]
                                 if locus[zoom_level_idx] >= 0 else floor(
                                     log((max_abs_dim / bin_size) /
                                         out_dim, 2)))

            if zoomout_level not in loci_lists[tileset_file]:
                loci_lists[tileset_file][zoomout_level] = []

            locus_id = '.'.join(map(str, locus))

            loci_lists[tileset_file][zoomout_level].append(
                locus[0:tileset_idx] + [total_valid_loci, inset_dim, locus_id])
            loci_ids.append(locus_id)

            if new_filetype is None:
                new_filetype = (tileset.filetype if tileset else
                                tileset_file[tileset_file.rfind('.') + 1:])

            if filetype is None:
                filetype = new_filetype

            if filetype != new_filetype:
                return JsonResponse(
                    {
                        'error':
                        ('Multiple file types per query are not supported yet.'
                         )
                    },
                    status=400)

            total_valid_loci += 1

    except Exception as e:
        return JsonResponse(
            {
                'error': 'Could not convert loci.',
                'error_message': str(e)
            },
            status=500)

    mat_idx = list(range(len(loci_ids)))

    # Get a unique string for caching
    dump = (json.dumps(loci, sort_keys=True) + str(forced_rep_idx) +
            str(dims) + str(padding) + str(no_balance) + str(percentile) +
            str(precision) + str(ignore_diags) + str(no_normalize) +
            str(aggregate) + str(aggregation_method) + str(max_previews) +
            str(encoding) + str(representatives))
    uuid = hashlib.md5(dump.encode('utf-8')).hexdigest()

    # Check if something is cached
    if not no_cache:
        try:
            results = rdb.get('frag_by_loci_%s' % uuid)
            if results:
                return JsonResponse(pickle.loads(results))
        except Exception:
            pass

    matrices = [None] * total_valid_loci
    data_types = [None] * total_valid_loci
    try:
        for dataset in loci_lists:
            for zoomout_level in loci_lists[dataset]:
                if filetype == 'cooler' or filetype == 'cool':
                    raw_matrices = get_frag_by_loc_from_cool(
                        dataset,
                        loci_lists[dataset][zoomout_level],
                        dims,
                        zoomout_level=zoomout_level,
                        balanced=not no_balance,
                        padding=int(padding),
                        percentile=percentile,
                        ignore_diags=ignore_diags,
                        no_normalize=no_normalize,
                        aggregate=aggregate,
                    )

                    for i, matrix in enumerate(raw_matrices):
                        idx = loci_lists[dataset][zoomout_level][i][6]
                        matrices[idx] = matrix
                        data_types[idx] = 'matrix'

                if filetype == 'imtiles' or filetype == 'osm-image':
                    extractor = (get_frag_by_loc_from_imtiles
                                 if filetype == 'imtiles'
                                 else get_frag_by_loc_from_osm)

                    sub_ims = extractor(
                        imtiles_file=dataset,
                        loci=loci_lists[dataset][zoomout_level],
                        zoom_level=zoomout_level,
                        padding=float(padding),
                        no_cache=no_cache,
                    )

                    for i, im in enumerate(sub_ims):
                        idx = loci_lists[dataset][zoomout_level][i][4]
                        matrices[idx] = im
                        data_types[idx] = 'matrix'

    except Exception as ex:
        return JsonResponse(
            {
                'error': 'Could not retrieve fragments.',
                'error_message': str(ex)
            },
            status=500)

    if aggregate and len(matrices) > 1:
        try:
            cover, previews_1d, previews_2d = aggregate_frags(
                matrices,
                loci_ids,
                aggregation_method,
                max_previews,
            )
            matrices = [cover]
            mat_idx = []
            if previews_1d is not None:
                previews = np.split(
                    previews_1d, range(1, previews_1d.shape[0]))
            data_types = [data_types[0]]
        except Exception as ex:
            return JsonResponse(
                {
                    'error': 'Could not aggregate fragments.',
                    'error_message': str(ex)
                },
                status=500)

    if representatives and len(matrices) > 1:
        if forced_rep_idx and len(forced_rep_idx) <= len(matrices):
            matrices = [matrices[i] for i in forced_rep_idx]
            mat_idx = forced_rep_idx
            data_types = [data_types[0]] * len(forced_rep_idx)
        else:
            try:
                rep_frags, rep_idx = get_rep_frags(matrices, loci, loci_ids,
                                                   representatives, no_cache)
                matrices = rep_frags
                mat_idx = rep_idx
                data_types = [data_types[0]] * len(rep_frags)
            except Exception as ex:
                return JsonResponse(
                    {
                        'error': 'Could not get representative fragments.',
                        'error_message': str(ex)
                    },
                    status=500)

    if encoding != 'b64' and encoding != 'image':
        # Adjust precision and convert to list
        for i, matrix in enumerate(matrices):
            if precision > 0:
                matrix = np.round(matrix, decimals=precision)
            matrices[i] = matrix.tolist()

        if max_previews > 0:
            for i, preview in enumerate(previews):
                previews[i] = preview.tolist()
            for i, preview_2d in enumerate(previews_2d):
                previews_2d[i] = preview_2d.tolist()

    # Encode matrices as base64 PNGs if required
    if encoding == 'b64':
        for i, matrix in enumerate(matrices):
            mat_id = loci_ids[mat_idx[i]]
            data_types[i] = 'dataUrl'
            if not no_cache and mat_id:
                mat_b64 = None
                try:
                    mat_b64 = rdb.get('im_b64_%s' % mat_id)
                    if mat_b64 is not None:
                        matrices[i] = mat_b64.decode('ascii')
                        continue
                except Exception:
                    pass

            mat_b64 = pybase64.b64encode(np_to_png(matrix)).decode('ascii')

            if not no_cache:
                try:
                    rdb.set('im_b64_%s' % mat_id, mat_b64, 60 * 30)
                except Exception as ex:
                    # Caching errors are not critical; log and carry on
                    logger.warning(ex)

            matrices[i] = mat_b64

        if max_previews > 0:
            for i, preview in enumerate(previews):
                previews[i] = pybase64.b64encode(
                    np_to_png(preview)).decode('ascii')
            for i, preview_2d in enumerate(previews_2d):
                previews_2d[i] = pybase64.b64encode(
                    np_to_png(preview_2d)).decode('ascii')

    # Create results
    results = {
        'fragments': matrices,
        'indices': [int(i) for i in mat_idx],
        'dataTypes': data_types,
    }

    # Return Y aggregates as 1D previews on demand
    if max_previews > 0:
        results['previews'] = previews
        results['previews2d'] = previews_2d

    # Cache results for 30 minutes
    try:
        rdb.set('frag_by_loci_%s' % uuid, pickle.dumps(results), 60 * 30)
    except Exception as ex:
        # Caching errors are not critical; log and carry on
        logger.warning(ex)

    if encoding == 'image':
        if len(matrices) == 1:
            return HttpResponse(
                np_to_png(grey_to_rgb(matrices[0], to_rgba=True)),
                content_type='image/png')
        else:
            ims = []
            for i, matrix in enumerate(matrices):
                ims.append({
                    'name': '{}.png'.format(i),
                    'bytes': np_to_png(grey_to_rgb(matrix, to_rgba=True))
                })
            return blob_to_zip(ims, to_resp=True)

    return JsonResponse(results)
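
Assuming the view is wired up to a route such as `/fragments_by_loci/` (the exact URL depends on the project's `urls.py`, so treat the path and the tileset UUID below as placeholders), a client request might look like:

import requests

url = 'http://localhost:8000/fragments_by_loci/'

loci = [
    # chrom1, start1, end1, chrom2, start2, end2, dataset, zoomOutLevel, dim
    ['chr1', 0, 1000000, 'chr1', 0, 1000000, 'my-cooler-uuid', -1, 64],
]

r = requests.post(url, json={'loci': loci}, params={'precision': 2})
r.raise_for_status()
fragments = r.json()['fragments']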