def setup(self, stage=None):
        matFile = loadmat(self.data_path)
        data = dict()

        data['landsat'] = matFile['t1_L8_clipped']
        # data['landsat'] = data['landsat'].reshape((-1, self.patch_size, self.patch_size, 11))
        data['landsat'] = view_as_blocks(
            data['landsat'], (self.patch_size, self.patch_size, 11)).reshape(
                (-1, self.patch_size, self.patch_size, 11))
        data['sentinel'] = matFile['logt2_clipped']
        # data['sentinel'] = data['sentinel'].view().reshape((-1, self.patch_size, self.patch_size, 3))
        data['sentinel'] = view_as_blocks(
            data['sentinel'], (self.patch_size, self.patch_size, 3)).reshape(
                (-1, self.patch_size, self.patch_size, 3))

        prior_information_image = loadmat(
            r'resources/alpha_prior_test.mat')['prior']
        prior_information_image = prior_information_image / prior_information_image.max(
        )

        data['prior_info'] = prior_information_image
        # data['prior_info'] = data['prior_info'].view().reshape((-1, self.patch_size, self.patch_size))
        data['prior_info'] = view_as_blocks(
            data['prior_info'], (self.patch_size, self.patch_size)).reshape(
                (-1, self.patch_size, self.patch_size))
        self.trainDataset = CaliforniaFloodDataset(data)

        data['roi'] = matFile['ROI']
        # data['roi'] = data['roi'].view().reshape((-1, self.patch_size, self.patch_size))
        data['roi'] = view_as_blocks(
            data['roi'], (self.patch_size, self.patch_size)).reshape(
                (-1, self.patch_size, self.patch_size))

        self.testDataset = CaliforniaTestDataset(data)
Example #2
 def _wrapper(dim):
     h, w = dim.shape
     sobel = skimage_sobel(dim)
     sobel += 1e-8 # avoid division by zero
     sobel_norm = view_as_blocks(sobel, (self.sobel, self.sobel)).sum((2,3))
     sum_prod = view_as_blocks((sobel * dim), (self.sobel, self.sobel)).sum((2,3))
     return sum_prod / sobel_norm
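
A self-contained sketch of the same Sobel-weighted block pooling, assuming a square block size (the original reads it from self.sobel); not part of the original example:

import numpy as np
from skimage.filters import sobel as skimage_sobel
from skimage.util import view_as_blocks

def sobel_weighted_pool(dim, block=4):
    # Sobel gradient magnitude acts as a per-pixel weight
    sobel = skimage_sobel(dim) + 1e-8  # avoid division by zero
    sobel_norm = view_as_blocks(sobel, (block, block)).sum((2, 3))
    sum_prod = view_as_blocks(sobel * dim, (block, block)).sum((2, 3))
    return sum_prod / sobel_norm  # gradient-weighted mean per block

pooled = sobel_weighted_pool(np.random.rand(64, 64), block=4)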
Example #3
def get_energy_sums(event_data):
    """
    Get the energy sums of 2x2 sub regions along with their indices.
    
    Args:
        event_data: Dataframe containing event data
        
    Returns:
        Dictionary of values 'index': sum
    """
    # Empty dictionary
    sum_energy_index = {}

    # Configuring the size of sub regions
    s = 2

    # We will now get 2x2 sub matrices as blocks from the electron matrix.
    # Reshape each matrix into a list format.
    blocks_energy = view_as_blocks(get_matrix(event_data, 'et'),
                                   (s, s)).reshape(-1, s**2)

    # We will now get 2x2 sub matrices as blocks from the index matrix.
    # Reshape each matrix into a list format.
    blocks_idx = view_as_blocks(get_matrix(event_data, 'index'),
                                (s, s)).reshape(-1, s**2)

    for row, (sub, idx) in enumerate(zip(blocks_energy, blocks_idx)):
        # Add all energies in 2x2 region
        sum_energy = np.sum(sub)
        # get the actual index of the highest position of energy in the submatrix (2x2)
        actual_idx = idx[np.argmax(sub)]

        sum_energy_index[actual_idx] = sum_energy

    return sum_energy_index
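
A minimal stand-alone illustration of the same 2x2 block-sum bookkeeping; `get_matrix` and the event dataframe are not reproduced here, so plain arrays stand in for the energy and index matrices:

import numpy as np
from skimage.util import view_as_blocks

et = np.arange(16, dtype=float).reshape(4, 4)   # stand-in energy matrix
idx = np.arange(16).reshape(4, 4)               # stand-in index matrix
blocks_energy = view_as_blocks(et, (2, 2)).reshape(-1, 4)
blocks_idx = view_as_blocks(idx, (2, 2)).reshape(-1, 4)
# key = index at the block's maximum, value = total energy in the block
sums = {i[np.argmax(e)]: e.sum() for e, i in zip(blocks_energy, blocks_idx)}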
Example #4
def iris_encode(irisImage, dr=15, dtheta=15, alpha=0.4):
    """Encodes the straightened representation of an iris with gabor wavelets.
    :param irisImage: Image of an iris
    :param dr: Width of image patches producing one feature
    :param dtheta: Length of image patches producing one feature
    :param alpha: Gabor wavelets modifier (beta parameter of Gabor wavelets becomes inverse of this number)
    :return: Iris code and its mask
    :rtype: tuple (ndarray, ndarray)
    """
    # mean = np.mean(img)
    # std = img.std()
    mask = view_as_blocks(np.logical_and(100 < irisImage, irisImage < 230), (dr, dtheta))
    norm_iris = (irisImage - irisImage.mean()) / irisImage.std()
    patches = view_as_blocks(norm_iris, (dr, dtheta))
    code = np.zeros((patches.shape[0] * 3, patches.shape[1] * 2))
    code_mask = np.zeros((patches.shape[0] * 3, patches.shape[1] * 2))
    for i, row in enumerate(patches):
        for j, p in enumerate(row):
            for k, w in enumerate([8, 16, 32]):
                wavelet = gabor_convolve(p, w, alpha, 1 / alpha)
                code[3 * i + k, 2 * j] = np.sum(wavelet[0])
                code[3 * i + k, 2 * j + 1] = np.sum(wavelet[1])
                code_mask[3 * i + k, 2 * j] = code_mask[3 * i + k, 2 * j + 1] = \
                    1 if mask[i, j].sum() > dr * dtheta * 3 / 4 else 0
    code[code >= 0] = 1
    code[code < 0] = 0
    return code, code_mask
            def post_changed_blocks(old_seg, new_seg):
                # If we post the whole volume, we'll be overwriting blocks that haven't changed,
                # wasting space in DVID (for duplicate blocks stored in the child uuid).
                # Instead, we need to only post the blocks that have changed.

                # So, can't just do this:
                # output_service.write_subvolume(new_seg, box[0], scale)

                seg_diff = (old_seg != new_seg)
                block_diff = view_as_blocks(seg_diff, 3 * (block_width, ))

                changed_block_map = block_diff.any(axis=(3, 4, 5)).nonzero()
                changed_block_corners = box[0] + np.transpose(
                    changed_block_map) * block_width

                changed_blocks = view_as_blocks(
                    new_seg, 3 * (block_width, ))[changed_block_map]
                encoded_blocks = encode_labelarray_blocks(
                    changed_block_corners, changed_blocks)

                mgr = output_service.resource_manager_client
                with mgr.access_context(output_service.server, True, 1,
                                        changed_blocks.nbytes):
                    post_labelmap_blocks(*output_service.instance_triple,
                                         None,
                                         encoded_blocks,
                                         scale,
                                         downres=False,
                                         noindexing=True,
                                         throttle=False,
                                         is_raw=True)
def get_feature_vector(filtered_1, filtered_2):
    """As the paper denotes, this method generate the feature vector
    based on the two filtered image.
    :param filtered_1: the filtered image 1
    :param filtered_2: the filtered image 2
    :return: the feature vector
    :rtype: ndarray
    """

    blocks_1 = view_as_blocks(filtered_1, block_shape=(8, 8)).reshape([-1, 64])
    blocks_2 = view_as_blocks(filtered_2, block_shape=(8, 8)).reshape([-1, 64])

    def mad(array, axis):
        return np.mean(np.abs(array - np.mean(array, axis, keepdims=True)), axis)

    m_1 = blocks_1.mean(axis=-1)
    m_2 = blocks_2.mean(axis=-1)
    mad_1 = mad(blocks_1, axis=-1)
    mad_2 = mad(blocks_2, axis=-1)

    #     feature_vector = np.concatenate([np.stack([m_1, mad_1], axis=1).reshape([-1]),
    #                                     np.stack([m_2, mad_2], axis=1).reshape([-1])])
    feature_vector = np.stack([m_1, mad_1, m_2, mad_2], axis=1).reshape([-1])

    return feature_vector
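
A quick usage sketch with random stand-in images (the real inputs come from the filtering stage described in the paper):

import numpy as np

filtered_1 = np.random.rand(64, 64)
filtered_2 = np.random.rand(64, 64)
fv = get_feature_vector(filtered_1, filtered_2)
# 64x64 image -> 64 blocks of 8x8 -> (mean, MAD) per image -> 256 features
assert fv.shape == (256,)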
Example #7
    def test_crops(self, img, msk, msk_true):

        n_channels = 3
        if self.use_dsm:
            n_channels = 4
        if self.overlap:
            w_img = util.view_as_windows(
                img, (self.crop_size[0], self.crop_size[1], n_channels),
                (self.crop_size[0] // 2, self.crop_size[1] // 2,
                 n_channels)).squeeze()
            w_msk = util.view_as_windows(
                msk, (self.crop_size[0], self.crop_size[1]),
                (self.crop_size[0] // 2, self.crop_size[1] // 2))
            w_msk_true = util.view_as_windows(
                msk_true, (self.crop_size[0], self.crop_size[1]),
                (self.crop_size[0] // 2, self.crop_size[1] // 2))
        else:
            w_img = util.view_as_blocks(
                img,
                (self.crop_size[0], self.crop_size[1], n_channels)).squeeze()
            w_msk = util.view_as_blocks(msk,
                                        (self.crop_size[0], self.crop_size[1]))
            w_msk_true = util.view_as_blocks(
                msk_true, (self.crop_size[0], self.crop_size[1]))

        return w_img, w_msk, w_msk_true
Example #8
def main():
    ''' Perform a two-step spectral resampling to 10 nm. First, wavelengths are aggregated
    to approximately 10 nm, then the aggregated spectra are interpolated to exactly 10 nm using a
    cubic piecewise interpolator.
    '''

    parser = argparse.ArgumentParser()
    parser.add_argument('infile', type=str)
    parser.add_argument('outdir', type=str)
    parser.add_argument('--verbose', action='store_true')

    args = parser.parse_args()

    out_image = args.outdir + '/' + os.path.basename(args.infile) + "_10nm"
    hy_obj = htl.HyTools()
    hy_obj.read_file(args.infile, 'envi')

    if hy_obj.wavelengths.max() < 1100:
        new_waves = np.arange(400, 991, 10)
    else:
        new_waves = np.arange(400, 2451, 10)

    bins = int(np.round(10 / np.diff(hy_obj.wavelengths).mean()))
    agg_waves = np.nanmean(view_as_blocks(
        hy_obj.wavelengths[:(hy_obj.bands // bins) * bins], (bins, )),
                           axis=1)
    if args.verbose:
        print("Aggregating every: %s" % bins)

    out_header = hy_obj.get_header()
    out_header['bands'] = len(new_waves)
    out_header['wavelength'] = new_waves.tolist()
    out_header['fwhm'] = [10 for x in new_waves]
    out_header['default bands'] = []

    writer = WriteENVI(out_image, out_header)
    iterator = hy_obj.iterate(by='line')

    while not iterator.complete:
        if (iterator.current_line % 100 == 0) and args.verbose:
            print(iterator.current_line)
        line = iterator.read_next()[:, :(hy_obj.bands // bins) * bins]
        line = np.nanmean(view_as_blocks(line, (
            1,
            bins,
        )), axis=(2, 3))
        interpolator = interp1d(agg_waves,
                                line,
                                fill_value='extrapolate',
                                kind='cubic')
        line = interpolator(new_waves)
        writer.write_line(line, iterator.current_line)
Example #9
    def calculate_bleedthrough(self, control, bins, show_graphs = False) :
        """
        Function that calculates the FRET bleedthrough in control situations
        where no FRET should occur.

        Inputs
        |   * control - which control sample to calculate
        |   * bins - size of kernels for pooling step

        Returns
        |   * (slope, intercept) of the linear regression model fit to the
        |       processed control sample data.
        """

        # assertions
        assert control in ['no_acceptor_control', 'no_donor_control'], 'control parameter is not valid (`no_acceptor_control` or `no_donor_control` are valid options)'

        if control == 'no_acceptor_control' :
            channel = 0
            path_to_control = self.no_acceptor_path
            control_filenames = self.no_acceptor_filenames
        elif control == 'no_donor_control' :
            channel = 1
            path_to_control = self.no_donor_path
            control_filenames = self.no_donor_filenames

        # Calculate bleedthrough per image
        results = []
        for f in control_filenames :
            img = io.imread(os.path.join(path_to_control, f))
            assert (img.shape[0] % bins == 0) & (img.shape[1] % bins == 0), 'Image shape is not divisible by bin number'
            block_dims = [img.shape[0] // bins, img.shape[1] // bins] # // returns integer values

            control_blocks = util.view_as_blocks(img[:,:,channel], block_shape=tuple(block_dims))
            fret_blocks = util.view_as_blocks(img[:,:,2], block_shape = tuple(block_dims))

            for m in range(control_blocks.shape[0]) :
                for n in range(control_blocks.shape[1]) :
                    results.append( [np.mean(control_blocks[m,n]), np.mean(fret_blocks[m,n])])
        if show_graphs :
            plt.scatter([x[0] for x in results],
                        [y[1] for y in results])
            # TODO : label axes
            plt.show()


        LR_clf = LinearRegression()
        LR_clf.fit(X = np.array([x[0] for x in results]).reshape(-1,1),
                   y = np.array([y[1] for y in results]))

        return LR_clf.coef_[0], LR_clf.intercept_
Example #10
File: noise.py  Project: ofersp/wlenet
def correlated_noise_stamps(num_stamps, stamp_sz, noise_kernel, seed,
                            aug_noise_factor, aug_noise_factor_min,
                            aug_noise_factor_max):

    stamp_sz = np.array(stamp_sz)
    kernel_sz = np.array(noise_kernel.shape)
    pad_sz = np.ceil(kernel_sz / 2.0).astype('int32') + 1
    padded_stamp_sz = stamp_sz + 2 * pad_sz
    noise_im_sz = padded_stamp_sz * int(ma.ceil(ma.sqrt(num_stamps)))

    np.random.seed(seed)
    noise_im = np.random.randn(*noise_im_sz)
    noise_im = convolve(noise_im,
                        noise_kernel,
                        normalize_kernel=False,
                        boundary=None)

    noise_stamps = view_as_blocks(noise_im, tuple(padded_stamp_sz)).reshape(
        -1, *padded_stamp_sz).copy()
    noise_stamps = noise_stamps[:num_stamps, pad_sz[0]:-pad_sz[0],
                                pad_sz[1]:-pad_sz[1]]
    noise_stamps = noise_stamps.reshape(-1, stamp_sz[0], stamp_sz[1], 1)

    if aug_noise_factor:
        aug_noise_range = aug_noise_factor_max - aug_noise_factor_min
        noise_factors = np.random.rand(
            num_stamps, 1, 1, 1) * aug_noise_range + aug_noise_factor_min
        noise_stamps = noise_stamps * noise_factors

    return noise_stamps
Example #11
    def windolf_sampling(self, params, layer):
        a = np.abs(params)
        alpha = np.angle(params)

        if layer == 'visible':
            rates = np.abs(self.clamp)
            phases = vm(alpha, a * rates / self.sigma_sq)
            return rates * np.exp(1j * phases)
        else:
            bessels = bessel(a - self.biases) / self.sigma_sq
            custom_kernel = np.ones((self.pool_size, self.pool_size))
            sum_bessels = conv(bessels, custom_kernel, mode='valid')
            # Downsample
            sum_bessels = sum_bessels[0::self.pool_stride, 0::self.pool_stride]

            bessel_sftmx_denom = 1.0 + sum_bessels
            upsampled_denom = bessel_sftmx_denom.repeat(self.pool_stride, axis=0).repeat(self.pool_stride, axis=1)
            hid_cat_P = bessels / upsampled_denom
            pool_P = 1.0 - 1.0 / bessel_sftmx_denom

            hid_rates, pool_rates = self._dbn_maxpool_sample_helper(hid_cat_P, pool_P)
            hid_phases = vm(alpha, a * hid_rates / self.sigma_sq)
            hid_samples = hid_rates * np.exp(1j * hid_phases)
            pool_phases = np.sum(imx.view_as_blocks(hid_phases, (self.pool_size, self.pool_size)), axis=(2, 3))
            pool_samples = pool_rates * np.exp(1j * pool_phases)
            return hid_samples, pool_samples
Example #12
    def create_bags(self, dir_list):
        bag_list = []
        labels_list = []
        img_list = []
        for dir in dir_list:
            img = io.imread(dir)
            if img.shape[2] == 4:
                img = color.rgba2rgb(img)

            bag = view_as_blocks(img, block_shape=(32, 32,
                                                   3)).reshape(-1, 32, 32, 3)

            # store single cell labels
            label = 1 if 'malignant' in dir else 0

            # shuffle
            if self.shuffle_bag:
                random.shuffle(bag)

            bag_list.append(bag)
            labels_list.append(label)
            img_list.append(img)
        if self.train:
            return bag_list, labels_list
        else:
            return bag_list, labels_list, img_list
Example #13
def standard(segments, conv_map_shape, N_segments):
    subsample = tuple(np.array(segments.shape) // np.array(conv_map_shape))
    from skimage.util import view_as_blocks
    W = segments[np.newaxis, ...] == np.arange(N_segments)[..., np.newaxis,
                                                           np.newaxis]
    W = np.sum(view_as_blocks(W, (1, ) + subsample), axis=(3, 4, 5))
    return W.astype(float)
def write_brick(output_service, scale, brick):
    shape = np.array(brick.volume.shape)
    assert (shape[0:2] == output_service.block_width).all()
    assert shape[2] % output_service.block_width == 0
    
    # Omit leading/trailing empty blocks
    block_width = output_service.block_width
    assert ((np.array(brick.volume.shape) % block_width) == 0).all()
    blockwise_view = view_as_blocks( brick.volume, brick.volume.shape[0:2] + (block_width,) )
    
    # blockwise view has shape (1,1,X/bx, bz, by, bx)
    assert blockwise_view.shape[0:2] == (1,1)
    blockwise_view = blockwise_view[0,0] # drop singleton axes
    
    block_maxes = blockwise_view.max( axis=(1,2,3) )
    assert block_maxes.ndim == 1
    
    nonzero_block_indexes = np.nonzero(block_maxes)[0]
    if len(nonzero_block_indexes) == 0:
        return # brick is completely empty
    
    first_nonzero_block = nonzero_block_indexes[0]
    last_nonzero_block = nonzero_block_indexes[-1]
    
    nonzero_start = (0, 0, block_width*first_nonzero_block)
    nonzero_stop = ( brick.volume.shape[0:2] + (block_width*(last_nonzero_block+1),) )
    nonzero_subvol = brick.volume[box_to_slicing(nonzero_start, nonzero_stop)]
    nonzero_subvol = np.asarray(nonzero_subvol, order='C')

    output_service.write_subvolume(nonzero_subvol, brick.physical_box[0] + nonzero_start, scale)
Example #15
def digitized_array(img_array, new_dims, colour_depth):
    """[summary]

    Args:
        img_array (np array): hxw array of greyscale values in [0,255] 
        new_dims (tuple): 2-tuple (nx, ny) describing the "pixellated" dimensions
                        to reshape the image into.

    Returns:
        array: new array of greyscale values in original image resolution 
    """
    original_dims = img_array.shape
    nx, ny = new_dims
    dh = int(img_array.shape[1] / nx)  # new number of rows
    dw = int(img_array.shape[0] / ny)  # new number of cols
    print(f'Original image array shape {original_dims}')
    print(f'window shape = {dw} wide, {dh} high')

    blocks = view_as_blocks(img_array, (dw, dh))

    colours = np.array(set_color_array(colour_depth))

    for i in range(blocks.shape[0]):
        for j in range(blocks.shape[1]):
            mean_gs_value = int(np.mean(blocks[i, j]))
            nearest_value_idx = (np.abs(colours - mean_gs_value)).argmin()
            blocks[i, j] = nearest_value_idx

    return np.swapaxes(blocks, 1, 2).reshape(original_dims)
Example #16
def rando_scrambo_array(img_array, new_dims, colour_depth):
    """[summary]

    Args:
        img_array (np array): hxw array of greyscale values in [0,255] 
        new_dims (tuple): 2-tuple (nx, ny) describing the "pixellated" dimensions
                        to reshape the image into.

    Returns:
        array: new array of greyscale values in original image resolution 
    """
    t0 = time()
    original_dims = img_array.shape
    nx, ny = new_dims
    dh = int(img_array.shape[1] / nx)  # new number of rows
    dw = int(img_array.shape[0] / ny)  # new number of cols

    blocks = view_as_blocks(img_array, (dw, dh))

    colours = set_color_array(colour_depth)

    blocks_updated = update_block_vals(blocks, colours)

    out = np.swapaxes(blocks_updated, 1, 2).reshape(original_dims)
    t1 = time()
    tt = t1 - t0
    # print(f'rando time = {tt:.1f}')
    return out
Example #17
def convert_image_to_stack_of_tiles(image, tile_height, tile_width):
    '''
    Converts an image to a stack of tiles to be fed to the DL models

    Observations:
    - if (image.shape % tile_height != 0) or (image.shape % tile_width != 0)
      the right and bottom remainder will be cropped so that
      (image.shape % tile_height == 0) and (image.shape % tile_width == 0)

    input:
    - image: the image that will be converted as a numpy array
    - tile_height:
    - tile_width:


    Output:
    np.array((n, h, w, c)),
    n: N° of tiles
    h: tile height
    w: tile width
    c: N° of channels (always 3, as rgb and hsv are used)

    '''
    shape = image.shape

    imageCopy = image[:shape[0] - shape[0] % tile_height, :shape[1] -
                      shape[1] % tile_width, :3]
    imageCopy = util.view_as_blocks(imageCopy,
                                    block_shape=(tile_height, tile_width, 3))
    imageCopy = imageCopy.reshape(
        (shape[0] // tile_height) * (shape[1] // tile_width), tile_height,
        tile_width, 3)

    return imageCopy
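
A usage sketch on a synthetic RGB array whose sides divide evenly by the tile size (assumes the function and its skimage/numpy imports are in scope):

import numpy as np

rgb = np.random.randint(0, 255, (300, 500, 3), dtype=np.uint8)
tiles = convert_image_to_stack_of_tiles(rgb, 100, 100)
# 3 rows x 5 columns of 100x100 RGB tiles
assert tiles.shape == (15, 100, 100, 3)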
Example #18
def encodeInfo(img, msg):
    img = img.copy()
    color = img[:, :, color_index]
    blocks = view_as_blocks(color, block_shape=(N, N))

    w = blocks.shape[1]
    h = blocks.shape[0]
    r_max = w * h
    encBlockList = []
    iter = 0
    for bit in msg:
        block_number = random.randrange(r_max - 1)
        while (block_number in encBlockList):
            block_number = random.randrange(r_max - 1)
        iter += 1
        if iter > 0.9 * w * h:
            print("err")
            break
        i = block_number // w
        j = block_number % w
        encBlockList.append(block_number)
        block = blocks[i, j]
        coefs = dct(dct(block, axis=0, norm='ortho'), axis=1, norm='ortho')
        while not is_valid_coeff(coefs, bit, P) or (bit != get_bit(block)):
            coefs = change_coeffs(coefs, bit)
            block = round_to_byte(
                idct(idct(coefs, axis=0, norm='ortho'), axis=1, norm='ortho'))
        color[i * N:(i + 1) * N, j * N:(j + 1) * N] = block
    img[:, :, color_index] = color
    return img, encBlockList
def extract_patches_from_raster():
    count = 0
    for raster_file in Path('./world_map').glob('**/*.tif'):
        data = gr.from_file(str(raster_file))
        raster_blocks = view_as_blocks(data.raster, (225, 225))
        for i in range(raster_blocks.shape[0]):
            for j in range(raster_blocks.shape[1]):
                raster_data = raster_blocks[i, j]

                src = cv2.pyrDown(raster_data,
                                  dstsize=(raster_data.shape[1] // 2,
                                           raster_data.shape[0] // 2))

                data_out_downsampled = gr.GeoRaster(
                    src,
                    data.geot,
                    nodata_value=data.nodata_value,
                    projection=data.projection,
                    datatype=data.datatype,
                )
                data_out_downsampled.to_tiff(
                    './data_downsampled_blurred/data_q' + str(count) + str(i) +
                    str(j))

                data_out = gr.GeoRaster(
                    raster_data,
                    data.geot,
                    nodata_value=data.nodata_value,
                    projection=data.projection,
                    datatype=data.datatype,
                )
                data_out.to_tiff('./data/data_q' + str(count) + str(i) +
                                 str(j))
                count += 1
Example #20
def chop_to_blocks(data, shape):
    """Subdivides the current image and returns an array of images 
    with the dims `shape`

    Args
    ----
    shape (tuple : ints) : the dims of the subdivided images

    Returns
    -------
    matrix of (j, i, 1, shape[0], shape[1])
    """

    # Make sure there are not multiple strides in the color ch direction
    assert shape[-1] == data.shape[-1]

    # Drop parts of the image that can't be captured by an integer number of
    # strides
    _split_factor = np.floor(np.divide(data.shape, shape)).astype(int)
    _img_lims = (_split_factor * shape)

    #print("Can only preserve up to pix: ", _img_lims)

    _data = np.ascontiguousarray(
        data[:_img_lims[0], :_img_lims[1], :_img_lims[2]])

    return view_as_blocks(_data, shape)
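
A small usage sketch showing how chop_to_blocks trims the image to an integer number of blocks:

import numpy as np

img = np.random.rand(130, 250, 3)
blocks = chop_to_blocks(img, (64, 64, 3))
# 130x250 is trimmed to 128x192 -> a 2x3 grid of 64x64x3 blocks
assert blocks.shape == (2, 3, 1, 64, 64, 3)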
def deserialize_uint64_blocks(compressed_blocks, shape):
    """
    Reconstitute a volume that was serialized with serialize_uint64_blocks(), above.
    
    NOTE: If the volume is not 64-px aligned, then the output will NOT be C-contiguous.
    """
    if (np.array(shape) % 64).any():
        padding = 64 - ( np.array(shape) % 64 )
        aligned_shape = shape + padding
    else:
        aligned_shape = shape

    aligned_volume = np.empty( aligned_shape, dtype=np.uint64 )
    block_view = view_as_blocks( aligned_volume, (64,64,64) )
    
    for bi, (zi, yi, xi) in enumerate(np.ndindex(*block_view.shape[:3])):
        compressed_block = compressed_blocks[bi]
        
        # (See note above regarding recompression with LZ4)
        encoded_block = lz4.frame.decompress( compressed_block )
        block = decode_label_block( encoded_block )
        block_view[zi,yi,xi] = block
    
    if shape == tuple(aligned_shape):
        volume = aligned_volume
    else:
        # Trim
        volume = np.asarray(aligned_volume[box_to_slicing((0,0,0), shape)], order='C')

    return volume
Example #22
def Down_Sample(image, block_size, func=np.sum, cval=0):

    if len(block_size) != image.ndim:
        raise ValueError("`block_size` must have the same length "
                         "as `image.shape`.")

    pad_width = []
    for i in range(len(block_size)):
        if block_size[i] < 1:
            raise ValueError("Down-sampling factors must be >= 1. Use "
                             "`skimage.transform.resize` to up-sample an "
                             "image.")
        if image.shape[i] % block_size[i] != 0:
            after_width = block_size[i] - (image.shape[i] % block_size[i])
        else:
            after_width = 0
        pad_width.append((0, after_width))

    image = np.pad(image,
                   pad_width=pad_width,
                   mode='constant',
                   constant_values=cval)

    out = view_as_blocks(image, block_size)

    for i in range(len(out.shape) // 2):
        out = func(out, axis=-1)

    return out
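
A usage sketch of Down_Sample on an array that is not evenly divisible, so the zero padding kicks in:

import numpy as np

img = np.arange(25, dtype=float).reshape(5, 5)
# 5x5 is padded to 6x6 with zeros, then summed over 2x2 blocks
small = Down_Sample(img, (2, 2), func=np.sum, cval=0)
assert small.shape == (3, 3)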
Example #23
    def _StatisticalNaturalness(self, L_ldr, win=11):
        phat1 = 4.4
        phat2 = 10.1
        muhat = 115.94
        sigmahat = 27.99
        u = np.mean(L_ldr)

        # moving window standard deviation using reflected image
        if self.original:
            W, H = L_ldr.shape
            w_extra = (11 - W % 11)
            h_extra = (11 - H % 11)
            # zero padding to simulate matlab's behaviour
            if w_extra > 0 or h_extra > 0:
                test = np.pad(L_ldr,
                              pad_width=((0, w_extra), (0, h_extra)),
                              mode='constant')
            else:
                test = L_ldr
            # block view with fixed block size, like in the original article
            view = view_as_blocks(test, block_shape=(11, 11))
            sig = np.mean(np.std(view, axis=(-1, -2)))
        else:
            # deviation: moving window with reflected borders
            sig = np.mean(generic_filter(L_ldr, np.std, size=win))

        beta_mode = (phat1 - 1.) / (phat1 + phat2 - 2.)
        C_0 = beta.pdf(beta_mode, phat1, phat2)
        C = beta.pdf(sig / 64.29, phat1, phat2)
        pc = C / C_0
        B = norm.pdf(u, muhat, sigmahat)
        B_0 = norm.pdf(muhat, muhat, sigmahat)
        pb = B / B_0
        N = pb * pc
        return N
Example #25
File: Cell.py  Project: tladyman/HOG
    def __init__(self, blockArray, cellSizeX, cellSizeY):
        """Constructor from an input array, this should be a block object.

        Args:
            blockArray: The array of blocks, should be two dimensional.
            cellSizeX: The number of pixels on the x axis of this Cell.
            cellSizeY: The number of pixels on the y axis of this Cell.
        """
        self._blockArray = blockArray

        (block_row, block_col, block_y, block_x) = blockArray.shape

        cell_shape = (cellSizeY, cellSizeX)

        practice = view_as_blocks(blockArray[0,0], cell_shape)
        (cell_row, cell_col, cell_y, cell_x) = practice.shape

        dim1 = block_row * cell_row
        dim2 = block_col * cell_col

        cellArray = np.zeros((dim1, dim2, cellSizeY, cellSizeX))
        # Create a positional array to show which cells belonged to which block.
        # The blockNum is on a grid like below.
        # 1 2 3 4 5
        # 6 7 8 9 10
        blockPosition = np.zeros((dim1,dim2))
        blockNum = 0

        for i, row in enumerate(blockArray):
            for j, col in enumerate(row):
                cell = view_as_blocks(col, (cellSizeY, cellSizeX))
                to_add_y = cell_row * i
                to_add_x = cell_col * j

                for k, cRow in enumerate(cell):
                    for l, cCol in enumerate(cRow):
                        # position to put the cell into
                        y_position = k + to_add_y
                        x_position = l + to_add_x

                        # put cell into cellArray
                        cellArray[y_position, x_position,: ,:] = cCol
                        blockPosition[y_position, x_position] = blockNum
                blockNum += 1

        self.cellArray = cellArray
        self.blockPosition = blockPosition
def PatchMaker(mask_3D, patch_size, window_size, nclasses, pid, datapath):  
     
    patch_labels_training=[]
    patch_labels_testing=[]    
    window_intensities_training=[]
    window_intensities_testing=[]
    test_img_shape = []#np.empty((len(pid_test),2))
    test_img_shape_padded = []#np.empty((len(pid_test),2))
    
    LGE = SimpleITK.ReadImage(datapath + pid + '//' + pid + '-LGE-cropped.mhd')
    scar = SimpleITK.ReadImage(datapath + pid + '//' + pid + '-scar-cropped.mhd')
#    mask = SimpleITK.ReadImage(datapath + pid + '//' + pid + '-myo-cropped.mhd')
    
    #convert a SimpleITK object into an array
    LGE_3D = SimpleITK.GetArrayFromImage(LGE)
    scar_3D = SimpleITK.GetArrayFromImage(scar) 
#    mask_3D = SimpleITK.GetArrayFromImage(myomask 
   #masking LGE with GT myo
    LGE_3D = np.multiply(LGE_3D,mask_3D)

    h_LGE = LGE_3D.shape[1]
    w_LGE = LGE_3D.shape[2] 
    d_LGE = LGE_3D.shape[0]
    test_slice = range(0,d_LGE,skip)

    # make window size and patch size evenly divisible
    if (window_size-patch_size)%2 != 0:
        window_size +=1 
        print('window size has changed to %d '%window_size)
    # calculate the amount of padding for height and width of a slice for patches
    rem_w = w_LGE%patch_size
    w_pad=patch_size-rem_w      
    rem_h = h_LGE%patch_size
    h_pad=patch_size-rem_h    
    pads = (h_pad,w_pad)
    
    for sl in test_slice:
        LGE_padded_slice=numpy.lib.pad(LGE_3D[sl,:,:], ((0,h_pad),(0,w_pad)), 'constant', constant_values=(0,0))
        scar_padded_slice=numpy.lib.pad(scar_3D[sl,:,:], ((0,h_pad),(0,w_pad)), 'constant', constant_values=(0,0))  
        LGE_patches = view_as_blocks(scar_padded_slice, block_shape = (patch_size,patch_size))
        LGE_patches = numpy.reshape(LGE_patches,(LGE_patches.shape[0]*LGE_patches.shape[1],patch_size,patch_size)) 
        #re-pad your padded image before you make your windows
        padding = int((window_size - patch_size)/2)
        LGE_repadded_slice = numpy.lib.pad(LGE_padded_slice, ((padding,padding),(padding,padding)), 'constant', constant_values=(0,0))
        #done with the labels, now we will do our windows, 
        LGE_windows = view_as_windows(LGE_repadded_slice, (window_size,window_size), step=patch_size)
        LGE_windows = numpy.reshape(LGE_windows,(LGE_windows.shape[0]*LGE_windows.shape[1],window_size,window_size))        
        #for each patches: 
        for p in range(0,len(LGE_patches)):            
            #label=int(numpy.divide(numpy.multiply(numpy.divide(numpy.sum(LGE_patches[p]),numpy.square(patch_size)),nclasses),1))
            label = LGE_patches[p]
            label = numpy.reshape(label, (1,1))
 #           if label==nclasses:
   #             label -=1 #mmake sure the range for the classes do not exceed the maximum
            #making your window  intensities a single row
            intensities = numpy.reshape(LGE_windows[p],(window_size*window_size))
            intensities = numpy.reshape(intensities, (1,window_size*window_size))
            patch_labels_testing.append(label)
            window_intensities_testing.append(intensities)
Example #27
def preprocess_image(image_path):
    files = os.listdir(image_path)
    for f in files:
        image = mpimg.imread(image_path + '/' + f)

        img_gray = rgb2gray(image)
        thresh = threshold_otsu(img_gray)
        binary = img_gray > thresh

        # Step 1: Preprocess original images into binary images
        # otsu thresholding with appropriate nbins number
        thresh = threshold_otsu(img_gray, nbins = 60)
        binary = img_gray > thresh
        binary = resize(binary, (1600, 2000))

        # size of blocks
        block_shape = (10, 10)

        # view the binary image as a matrix of blocks (of shape block_shape)
        view = view_as_blocks(binary, block_shape)

        # collapse the last two dimensions in one
        flatten_view = view.reshape(view.shape[0], view.shape[1], -1)

        # resampling the image by taking either the `mean`,
        # the `max` or the `median` value of each blocks.
        mean_view = np.mean(flatten_view, axis=2)

        image = mean_view
        seed = np.copy(mean_view)
        seed[1:-1, 1:-1] = image.max()
        mask = image

        #  fill holes (i.e. isolated, dark spots) in an image using morphological reconstruction by erosion
        # plt.title("Filled dark spots by morphological reconstruction")
        filled = reconstruction(seed, mask, method='erosion')
        plt.figure()
        plt.axis('off')
        plt.imshow(filled, cmap=cm.Greys_r)
        plt.savefig("./enhanced_image/" + f)

        # Step 2: Label Junctions
        denoised = denoise_wavelet(filled, multichannel=True)

        image = filled
        coords1 = corner_peaks(corner_harris(image), min_distance=5)
        coords_subpix1 = corner_subpix(image, coords1, window_size=13)

        image = denoised
        coords2 = corner_peaks(corner_harris(image), min_distance=5)
        coords_subpix2 = corner_subpix(image, coords2, window_size=13)

        fig, ax = plt.subplots()

        ax.imshow(image, interpolation='nearest', cmap=plt.cm.gray)
        ax.plot(coords_subpix1[:, 1], coords_subpix1[:, 0], '+r', markersize=15)
        ax.plot(coords_subpix2[:, 1], coords_subpix2[:, 0], '+r', markersize=15)
        plt.axis('off')
        plt.savefig("./preprocessed_image/" + f)
def neighbourhoodify(image, nbhd=10):
    avgs = np.zeros([s // nbhd for s in image.shape])
    blocks = view_as_blocks(image, (nbhd, nbhd))
    for row in range(0, len(blocks)):
        for col in range(0, len(blocks[row])):
            mode = np.bincount(blocks[row,col].ravel()).argmax()
            avgs[row, col] = mode
    return avgs
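
A usage sketch of the per-block mode pooling on a small random label image:

import numpy as np

labels = np.random.randint(0, 4, (40, 40))
modes = neighbourhoodify(labels, nbhd=10)
# one mode value per 10x10 neighbourhood
assert modes.shape == (4, 4)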
Example #29
File: lab_8.py  Project: nAnna519/IPM
def retrieve_message(img, length):
    img = img[:, :, 2]
    width, height = np.shape(img)
    width -= width % n
    height -= height % n
    img = img[:width, :height]
    blocks = view_as_blocks(img, block_shape=(n, n))
    h = blocks.shape[1]
    return [retrieve_bit(blocks[index // h, index % h]) for index in range(length)]
Example #30
    def updatePixels(self, tlc, shape, props, **pixelBlocks):
        p = pixelBlocks['raster_pixels']
        m = pixelBlocks['raster_mask']

        if self.func is None:
            b = resize(p, shape, order=0, preserve_range=True)
        else:
            blockSizes = tuple(np.divide(p.shape, shape).astype(int))
            b = np.ma.masked_array(view_as_blocks(p, blockSizes),
                                   view_as_blocks(~m.astype('b1'), blockSizes))
            for i in range(len(b.shape) // 2):
                b = self.func(b, axis=-1)
            b = b.data

        d = self.padding
        pixelBlocks['output_pixels'] = b.astype(props['pixelType'], copy=False)
        pixelBlocks['output_mask'] = resize(m, shape, order=0, preserve_range=True).astype('u1', copy=False)
        return pixelBlocks
Example #31
File: basemap_info.py  Project: guziy/RPN
    def get_aggregated(self, nagg_x=2, nagg_y=2):

        if USE_SKIMAGE:
            new_lons = view_as_blocks(self.lons, (nagg_x, nagg_y)).mean(axis=2).mean(axis=2)
            new_lats = view_as_blocks(self.lats, (nagg_x, nagg_y)).mean(axis=2).mean(axis=2)
        else:
            nx, ny = self.lons.shape
            new_lons, new_lats = np.zeros((nx // nagg_x, ny // nagg_y)), np.zeros((nx // nagg_x, ny // nagg_y))

            for i in range(0, nx, nagg_x):
                for j in range(0, ny, nagg_y):
                    i1 = i // nagg_x
                    j1 = j // nagg_y

                    new_lons[i1, j1] = np.mean(self.lons[i:i + nagg_x, j:j + nagg_y])
                    new_lats[i1, j1] = np.mean(self.lats[i:i + nagg_x, j:j + nagg_y])

        return BasemapInfo(lons=new_lons, lats=new_lats, bmp=self.basemap)
Example #32
 def np_extract_patches(img):
     orig = np.array(img.shape[:2])
     new = patch_s[0] * np.ceil(orig / patch_s[0]).astype(int)
     points = new - orig
     img = np.pad(img, [(0, points[0]), (0, points[1]), (0, 0)],
                  mode='constant')
     patches = view_as_blocks(img, tuple(patch_s)).astype(np.float32)
     patches = patches.reshape(-1, *patch_s)
     return patches
Example #33
    def _process(dstl_image):

        # Make sure there are not multiple strides in the color ch direction
        assert block_shape[-1] == dstl_image.data.shape[-1]

        # Drop parts of the image that can't be captured by an integer number of
        # strides
        _split_factor = np.floor(np.divide(dstl_image.data.shape,
                                           block_shape)).astype(int)
        _img_lims = (_split_factor * block_shape)

        print("Can only preserve up to pix: ", _img_lims)

        _data = np.ascontiguousarray(
            dstl_image._data[:_img_lims[0], :_img_lims[1], :_img_lims[2]])

        blocks = view_as_blocks(_data, block_shape)

        ystride, xstride, _ = block_shape
        jmx, imx, *_ = blocks.shape

        # Transform the features of an image into the coordinates of the new
        # blocks
        collection = __transform_and_collect_features(dstl_image, block_shape,
                                                      blocks.shape)

        for j in range(jmx):
            for i in range(imx):
                block_tag = "{}_{}".format(i, j)
                block_id = "{}__{}".format(dstl_image.image_id, block_tag)

                # Save the image
                block_img_path = os.path.join(image_save_dir,
                                              block_id + '.png')
                skio.imsave(block_img_path, blocks[j, i, 0, ...])

                # Save annotations
                # write the image path and x,y coor of bounding boxes to
                # the csv file
                # has format:
                #   path_to_image, x1, y1, x2, y2, label
                try:
                    block_features = collection[block_tag]
                except KeyError:
                    # if no features in an image, just append blank position
                    row = [block_img_path, '', '', '', '', '']
                    annotation_writer.writerow(row)
                else:
                    # for each features, append its locations and label of
                    # the features
                    for label, features in block_features.items():
                        for coor in features:
                            row = [block_img_path, *coor, label]

                            # output to file
                            annotation_writer.writerow(row)
Example #34
def aggregate_array(in_arr, nagg_x=2, nagg_y=2):
    """


    :type in_arr: numpy.ndarray
    :type nagg_y: int
    """
    from skimage.util import view_as_blocks

    return view_as_blocks(in_arr, (nagg_x, nagg_y)).mean(axis=2).mean(axis=2)
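
A usage sketch of aggregate_array with asymmetric block sizes:

import numpy as np

arr = np.arange(36, dtype=float).reshape(6, 6)
coarse = aggregate_array(arr, nagg_x=2, nagg_y=3)
# block means over 2x3 windows -> 3x2 grid
assert coarse.shape == (3, 2)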
Example #35
def embed_message(orig, msg):
    changed = orig.copy()
    blocks = view_as_blocks(changed, block_shape=(n, n))
    h = blocks.shape[1]
    for index, bit in enumerate(msg):
        i = index // h
        j = index % h
        block = blocks[i, j]
        changed[i * n:(i + 1) * n, j * n:(j + 1) * n] = embed_bit(block, bit)
    return changed
Example #36
    def test_random_blocks(self):
        volume = self._gen_test_volume()
        blocks = view_as_blocks(volume, (64, 64, 64)).reshape(-1, 64, 64, 64)

        encoded = encode_mask_blocks(blocks, [(1, 2, 3)] * len(blocks), 17)
        decoded, corners, label = decode_mask_blocks(encoded)

        assert label == 17
        assert (np.array(corners) == (1, 2, 3)).all()
        assert (np.array(decoded) == np.array(blocks)).all()
Example #37
    def build_cells(g_magn, theta):

        phi = max_theta // nbins

        cells_g_magn = view_as_blocks(g_magn, (cellh, cellh)).reshape(
            (imgh // cellh)**2, cellh**2)
        cells_theta = view_as_blocks(theta, (cellh, cellh)).reshape(
            (imgh // cellh)**2, cellh**2)
        cells_bin = np.zeros(((imgh // cellh)**2, nbins))

        idx = (cells_theta // phi).astype(int)
        ratio = (cells_theta - idx * phi) / phi
        rr = np.arange(cells_bin.shape[0]).reshape(-1, 1)
        cells_bin[rr, idx] = cells_g_magn * (1 - ratio)
        idx += 1
        idx[idx == nbins] = 0
        cells_bin[rr, idx] += cells_g_magn * ratio

        return cells_bin.reshape(((imgh // cellh), (imgh // cellh), nbins))
Example #38
 def create_grid(self, height, width):
     """
     Splits the image into blocks of size (height, width)
     """
     self.block_height = height
     self.block_width = width
     # copy the region of the image that is covered by the given blocks
     h, w = self.image.shape
     self.nrows = h // self.block_height
     self.ncols = w // self.block_width
     self.image_clone = self.image[:self.block_height * self.nrows, :self.block_width * self.ncols].copy()
     # split the copied part of the image into blocks
     self.image_blocks = view_as_blocks(self.image_clone, (self.block_height, self.block_width))
Example #39
def extractBlocks(npatches,blocksize=4):
    side_length = np.sqrt(npatches)
    print "side_length,patches",side_length
    if not side_length.is_integer():
      print "ERROR: no integer side length!"
      sys.exit(1)
    else:
      side_length = int(side_length)
    m = np.arange(0,npatches).reshape((side_length,side_length))
    blocks = view_as_blocks(m,(blocksize,blocksize))
    
    blocks = np.reshape(blocks, (blocks.shape[0]*blocks.shape[1],blocksize*blocksize))
    print "Extracting ",blocks.shape[0]," blocks:",blocksize,"X",blocksize

    return blocks
Example #40
def block_mask_to_px_mask(block_mask, block_width):
    """
    Given a mask array with block-resolution (each item represents 1 block),
    upscale it to pixel-resolution.
    """
    px_mask_shape = block_width*np.array(block_mask.shape)
    px_mask = np.zeros( px_mask_shape, dtype=bool )
    
    # In this 6D array, the first 3 axes address the block index,
    # and the last 3 axes address px within the block
    px_mask_blockwise = view_as_blocks(px_mask, (block_width, block_width, block_width))
    assert px_mask_blockwise.shape[0:3] == block_mask.shape
    assert px_mask_blockwise.shape[3:6] == (block_width, block_width, block_width)
    
    # Now we can broadcast into it from the block mask
    px_mask_blockwise[:] = block_mask[:, :, :, None, None, None]
    return px_mask
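
A usage sketch of block_mask_to_px_mask; the block width of 32 is just an example value:

import numpy as np

block_mask = np.zeros((2, 2, 2), dtype=bool)
block_mask[0, 1, 1] = True
px = block_mask_to_px_mask(block_mask, 32)
# one True block expands to a 32x32x32 pixel region
assert px.shape == (64, 64, 64) and px.sum() == 32 ** 3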
Example #41
def threshold_local(frame, shape, llim=0.01, ulim=0.99):
    '''
    threshold_local

    Trim a percentile of values from non-overlapping blocks in 2d array.
    '''
    tmp = frame.copy()
    nullmask = (frame != 0) & ~np.isnan(frame)

    rowblocks = view_as_blocks(tmp, shape).reshape((-1,np.prod(shape)))
    vrange = np.sort(rowblocks, axis=1)
    llimIdx, ulimIdx = round(llim*np.prod(shape)), round(ulim*np.prod(shape))

    thresh_low = vrange[ ..., [llimIdx] ]
    thresh_high = vrange[ ..., [ulimIdx] ]    
    rowblocks[..., (rowblocks < thresh_low) | (rowblocks > thresh_high)] = -1
    
    return nullmask & (tmp != -1)
Example #42
File: agg_blocks.py  Project: guziy/RPN
def agg_blocks_skimage_improved(data, block_shape: tuple, func=np.nanmean, pad_value=np.nan):
    """
    Modified from skimage.measure.block_reduce
    :param data:
    :param block_shape:
    :param func:
    :param pad_value:
    """
    from skimage.util import view_as_blocks

    assert data.ndim == len(block_shape), "Block should have the same number of dimensions as the input array(ndim={}).".format(data.ndim)


    if data.ndim == 1:
        data.shape = data.shape + (1, )
        block_shape = block_shape + (1, )


    pad_width = []
    for i in range(data.ndim):
        if block_shape[i] < 1:
            raise ValueError("Down-sampling factors must be >= 1. Use "
                             "`skimage.transform.resize` to up-sample an "
                             "image.")
        if data.shape[i] % block_shape[i] != 0:
            after_width = block_shape[i] - (data.shape[i] % block_shape[i])
        else:
            after_width = 0
        pad_width.append((0, after_width))

    image = np.pad(data, pad_width=pad_width, mode='constant', constant_values=pad_value)

    out = view_as_blocks(image, block_shape)
    result_shape = out.shape[:-2]
    out = out.reshape((-1, ) + block_shape)

    @jit(nopython=True)
    def wrap(*args):
        return func(*args)


    return np.array([wrap(chunk) for chunk in out]).reshape(result_shape).squeeze()
Example #43
File: array_utils.py  Project: guziy/RPN
def aggregate_array(in_arr, nagg_x=2, nagg_y=2):
    """


    :type in_arr: numpy.ndarray
    :type nagg_y: int
    """
    from skimage.util import view_as_blocks

    if nagg_x == 1 and nagg_y == 1:
        return in_arr

    print(in_arr.shape, nagg_x, nagg_y)

    assert in_arr.shape[0] % nagg_x == 0
    assert in_arr.shape[1] % nagg_y == 0



    return view_as_blocks(in_arr, (nagg_x, nagg_y)).mean(axis=2).mean(axis=2)
Example #44
def Feature_Extractor_Fn(vid,num_frames,frame_no,new_shape=(360,480),step=80, radius=45):
    """Extract Daisy feature for a frame of video """
    if frame_no<num_frames-1: 
        frame = vid.get_data(frame_no)  
        frame_resized=resize(frame, new_shape)
        frame_gray= rgb2gray(frame_resized)
        daisy_desc = daisy(frame_gray,step=step, radius=radius)
        daisy_1D=np.ravel(daisy_desc)
         
        """Extract Daisy feature for a patch from the frame of video """
        N=4
        step_glove=int(step/N)
        radius_glove=int(radius/N)
        patch_shape_x=int(new_shape[0]/N)
        patch_shape_y=int(new_shape[1]/N)

        patchs_arr = view_as_blocks(frame_gray, (patch_shape_x,patch_shape_y))
        patch_num_row=patchs_arr.shape[0]
        patch_num_col=patchs_arr.shape[1]
        final_daisy_length=daisy(patchs_arr[0,0,:,:],step=step_glove, radius=radius_glove).size
        patch_daisy_arr=np.zeros((patch_num_row,patch_num_col,final_daisy_length))
        for i in range(patch_num_row):
            for k in range(patch_num_col):
                patch=patchs_arr[i,k,:,:]
                patch_daisy_desc = daisy(patch,step=step_glove, radius=radius_glove)
                patch_daisy_1D=np.ravel(patch_daisy_desc)
                patch_daisy_arr[i,k,:]=patch_daisy_1D
                
                
        
       #sift = cv2.xfeatures2d.SIFT_create()
       # (sift_kps, sift_descs) = sift.detectAndCompute(frame, None)
       # print("# kps: {}, descriptors: {}".format(len(sift_kps), sift_descs.shape))
       # surf = cv2.xfeatures2d.SURF_create()
      #  (surf_kps, surf_descs) = surf.detectAndCompute(frame, None)
       # print("# kps: {}, descriptors: {}".format(len(surf_kps), surf_descs.shape))
    else:
        print("Frame number is larger than the length of video")
  #  return (daisy_1D,surf_descs,sift_descs)
    return patch_daisy_arr,daisy_1D
Example #45
def get_switches(current,maxpool,maxpool_shape):
    # print("getting switches")
    current=np.array(current)
    size = maxpool_shape[0]
    switches = np.zeros((current.shape[0],current.shape[1],int(current.shape[2]/size)*size,int(current.shape[3]/size)*size))

    # print("before shape",switches.shape)
    for k1idx, k1 in enumerate(current):
        for k2idx, k2 in enumerate(k1):
            # print("before", k2[:4,:4])
            while k2.shape[0]%size!=0:
                k2=k2[:-1,:-1]
            blocks = view_as_blocks(k2,maxpool_shape)
            for x in blocks:
                for y in x:
                    idx = np.unravel_index(y.argmax(), y.shape)
                    y.fill(0)
                    y[idx] = 1
            # print("after",k2[:4,:4])
            switches[k1idx,k2idx,:,:]=k2[:]
    # print("after shape",switches.shape)
    return switches
def serialize_uint64_blocks(volume):
    """
    Compress and serialize a volume of uint64.
    
    Preconditions:
      - volume.dtype == np.uint64
      - volume.ndim == 3
      
    NOTE: If volume.shape is NOT divisible by 64, the input will be copied and padded.
    
    Returns compressed_blocks, where the blocks are a flat list, in scan-order
    """
    assert volume.dtype == np.uint64
    assert volume.ndim == 3

    if (np.array(volume.shape) % 64).any():
        padding = 64 - ( np.array(volume.shape) % 64 )
        aligned_shape = volume.shape + padding
        aligned_volume = np.zeros( aligned_shape, dtype=np.uint64 )
        aligned_volume[box_to_slicing((0,0,0), volume.shape)] = volume
    else:
        aligned_volume = volume
    
    assert (np.array(aligned_volume.shape) % 64 == 0).all()
    
    block_view = view_as_blocks( aligned_volume, (64,64,64) )
    compressed_blocks = []
    for zi, yi, xi in np.ndindex(*block_view.shape[:3]):
        block = block_view[zi,yi,xi].copy('C')
        encoded_block = encode_label_block(block)

        # We compress AGAIN, with LZ4, because this seems to provide
        # an additional 2x size reduction for nearly no slowdown.
        compressed_block = lz4.frame.compress( encoded_block )
        compressed_blocks.append( compressed_block )
        del block
    
    return compressed_blocks
Example #47
def _clahe(image, ntiles_x, ntiles_y, clip_limit, nbins=128):
    
    ntiles_x = min(ntiles_x, MAX_REG_X)
    ntiles_y = min(ntiles_y, MAX_REG_Y)
    ntiles_y = max(ntiles_y, 2)
    ntiles_x = max(ntiles_x, 2)

    if clip_limit == 1.0:
        return image  # is OK, immediately returns original image.

    map_array = np.zeros((ntiles_y, ntiles_x, nbins), dtype=int)

    y_res = image.shape[0] - image.shape[0] % ntiles_y
    x_res = image.shape[1] - image.shape[1] % ntiles_x
    image = image[: y_res, : x_res]

    x_size = image.shape[1] // ntiles_x  # Actual size of contextual regions
    y_size = image.shape[0] // ntiles_y
    n_pixels = x_size * y_size

    if clip_limit > 0.0:  # Calculate actual cliplimit
        clip_limit = int(clip_limit * (x_size * y_size) / nbins)
        if clip_limit < 1:
            clip_limit = 1
    else:
        clip_limit = NR_OF_GREY  # Large value, do not clip (AHE)

    bin_size = 1 + NR_OF_GREY // nbins
    aLUT = np.arange(NR_OF_GREY)
    aLUT //= bin_size
    img_blocks = view_as_blocks(image, (y_size, x_size))

    # Calculate greylevel mappings for each contextual region
    for y in range(ntiles_y):
        for x in range(ntiles_x):
            sub_img = img_blocks[y, x]
            hist = aLUT[sub_img.ravel()]
            hist = np.bincount(hist)
            hist = np.append(hist, np.zeros(nbins - hist.size, dtype=int))
            hist = clip_histogram(hist, clip_limit)
            hist = map_histogram(hist, 0, NR_OF_GREY - 1, n_pixels)
            map_array[y, x] = hist

    # Interpolate greylevel mappings to get CLAHE image
    ystart = 0
    for y in range(ntiles_y + 1):
        xstart = 0
        if y == 0:  # special case: top row
            ystep = y_size / 2.0
            yU = 0
            yB = 0
        elif y == ntiles_y:  # special case: bottom row
            ystep = y_size / 2.0
            yU = ntiles_y - 1
            yB = yU
        else:  # default values
            ystep = y_size
            yU = y - 1
            yB = yB + 1

        for x in range(ntiles_x + 1):
            if x == 0:  # special case: left column
                xstep = x_size / 2.0
                xL = 0
                xR = 0
            elif x == ntiles_x:  # special case: right column
                xstep = x_size / 2.0
                xL = ntiles_x - 1
                xR = xL
            else:  # default values
                xstep = x_size
                xL = x - 1
                xR = xL + 1

            mapLU = map_array[yU, xL]
            mapRU = map_array[yU, xR]
            mapLB = map_array[yB, xL]
            mapRB = map_array[yB, xR]

            xslice = np.arange(xstart, xstart + xstep)
            yslice = np.arange(ystart, ystart + ystep)
            interpolate(image, xslice, yslice,
                        mapLU, mapRU, mapLB, mapRB, aLUT)

            xstart += xstep  # set pointer on next matrix */

        ystart += ystep

    return image
Example #48
def _clahe(image, ntiles_x, ntiles_y, clip_limit, nbins=128):
    """Contrast Limited Adaptive Histogram Equalization.

    Parameters
    ----------
    image : array-like
        Input image.
    ntiles_x : int, optional
        Number of tile regions in the X direction.  Ranges between 2 and 16.
    ntiles_y : int, optional
        Number of tile regions in the Y direction.  Ranges between 2 and 16.
    clip_limit : float, optional
        Normalized clipping limit (higher values give more contrast).
    nbins : int, optional
        Number of gray bins for histogram ("dynamic range").

    Returns
    -------
    out : ndarray
        Equalized image.

    The number of "effective" greylevels in the output image is set by `nbins`;
    selecting a small value (eg. 128) speeds up processing and still produce
    an output image of good quality. The output image will have the same
    minimum and maximum value as the input image. A clip limit smaller than 1
    results in standard (non-contrast limited) AHE.
    """
    ntiles_x = min(ntiles_x, MAX_REG_X)
    ntiles_y = min(ntiles_y, MAX_REG_Y)

    if clip_limit == 1.0:
        return image  # is OK, immediately returns original image.

    h_inner = image.shape[0] - image.shape[0] % ntiles_y
    w_inner = image.shape[1] - image.shape[1] % ntiles_x

    # make the tile size divisible by 2
    h_inner -= h_inner % (2 * ntiles_y)
    w_inner -= w_inner % (2 * ntiles_x)

    orig_shape = image.shape
    width = w_inner // ntiles_x  # Actual size of contextual regions
    height = h_inner // ntiles_y

    if h_inner != image.shape[0]:
        ntiles_y += 1
    if w_inner != image.shape[1]:
        ntiles_x += 1
    if h_inner != image.shape[0] or w_inner != image.shape[1]:
        h_pad = height * ntiles_y - image.shape[0]
        w_pad = width * ntiles_x - image.shape[1]
        image = pad(image, ((0, h_pad), (0, w_pad)), mode='reflect')
        h_inner, w_inner = image.shape

    bin_size = 1 + NR_OF_GREY // nbins
    lut = np.arange(NR_OF_GREY)
    lut //= bin_size
    img_blocks = view_as_blocks(image, (height, width))

    map_array = np.zeros((ntiles_y, ntiles_x, nbins), dtype=int)
    n_pixels = width * height

    if clip_limit > 0.0:  # Calculate actual cliplimit
        clip_limit = int(clip_limit * (width * height) / nbins)
        if clip_limit < 1:
            clip_limit = 1
    else:
        clip_limit = NR_OF_GREY  # Large value, do not clip (AHE)

    # Calculate greylevel mappings for each contextual region
    for y in range(ntiles_y):
        for x in range(ntiles_x):
            sub_img = img_blocks[y, x]
            hist = lut[sub_img.ravel()]
            hist = np.bincount(hist)
            hist = np.append(hist, np.zeros(nbins - hist.size, dtype=int))
            hist = clip_histogram(hist, clip_limit)
            hist = map_histogram(hist, 0, NR_OF_GREY - 1, n_pixels)
            map_array[y, x] = hist

    # Interpolate greylevel mappings to get CLAHE image
    ystart = 0
    for y in range(ntiles_y + 1):
        xstart = 0
        if y == 0:  # special case: top row
            ystep = height / 2.0
            yU = 0
            yB = 0
        elif y == ntiles_y:  # special case: bottom row
            ystep = height / 2.0
            yU = ntiles_y - 1
            yB = yU
        else:  # default values
            ystep = height
            yU = y - 1
            yB = yU + 1

        for x in range(ntiles_x + 1):
            if x == 0:  # special case: left column
                xstep = width / 2.0
                xL = 0
                xR = 0
            elif x == ntiles_x:  # special case: right column
                xstep = width / 2.0
                xL = ntiles_x - 1
                xR = xL
            else:  # default values
                xstep = width
                xL = x - 1
                xR = xL + 1

            mapLU = map_array[yU, xL]
            mapRU = map_array[yU, xR]
            mapLB = map_array[yB, xL]
            mapRB = map_array[yB, xR]

            xslice = np.arange(xstart, xstart + xstep)
            yslice = np.arange(ystart, ystart + ystep)
            interpolate(image, xslice, yslice,
                        mapLU, mapRU, mapLB, mapRB, lut)

            xstart += xstep  # set pointer on next matrix */

        ystart += ystep

    if image.shape != orig_shape:
        image = image[:orig_shape[0], :orig_shape[1]]

    return image
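For reference, the public entry point that wraps this kind of helper in released scikit-image versions is skimage.exposure.equalize_adapthist; a minimal usage sketch (exact tiling parameters vary by version) could be:

from skimage import data, exposure, img_as_float

img = img_as_float(data.moon())
# clip_limit and nbins mirror the parameters of _clahe above
img_eq = exposure.equalize_adapthist(img, clip_limit=0.03, nbins=128)
print(img_eq.shape, float(img_eq.min()), float(img_eq.max()))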
Example #49
import numpy as np
from scipy import ndimage as ndi

from matplotlib import pyplot as plt
import matplotlib.cm as cm

from skimage import data
from skimage import color
from skimage.util import view_as_blocks


# get astronaut from skimage.data in grayscale
l = color.rgb2gray(data.astronaut())

# size of blocks
block_shape = (4, 4)

# see astronaut as a matrix of blocks (of shape block_shape)
view = view_as_blocks(l, block_shape)

# collapse the last two dimensions in one
flatten_view = view.reshape(view.shape[0], view.shape[1], -1)

# resampling the image by taking either the `mean`,
# the `max` or the `median` value of each blocks.
mean_view = np.mean(flatten_view, axis=2)
max_view = np.max(flatten_view, axis=2)
median_view = np.median(flatten_view, axis=2)

# display resampled images
fig, axes = plt.subplots(2, 2, figsize=(8, 8), sharex=True, sharey=True)
ax = axes.ravel()

l_resized = ndi.zoom(l, 2, order=3)
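The snippet is cut off here; a plausible completion of the display step (the panel titles are illustrative, not taken from the original script) would be:

ax[0].imshow(l_resized, cmap=cm.gray)
ax[0].set_title("Original (spline-interpolated)")
ax[1].imshow(mean_view, cmap=cm.gray)
ax[1].set_title("Block mean")
ax[2].imshow(max_view, cmap=cm.gray)
ax[2].set_title("Block max")
ax[3].imshow(median_view, cmap=cm.gray)
ax[3].set_title("Block median")
fig.tight_layout()
plt.show()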
Example #50
def block_reduce(image, block_size, func=np.sum, cval=0):
    """Down-sample image by applying function to local blocks.

    Parameters
    ----------
    image : ndarray
        N-dimensional input image.
    block_size : array_like
        Array containing down-sampling integer factor along each axis.
    func : callable
        Function object which is used to calculate the return value for each
        local block. This function must implement an ``axis`` parameter such
        as ``numpy.sum`` or ``numpy.min``.
    cval : float
        Constant padding value if image is not perfectly divisible by the
        block size.

    Returns
    -------
    image : ndarray
        Down-sampled image with same number of dimensions as input image.

    Examples
    --------
    >>> from skimage.measure import block_reduce
    >>> image = np.arange(3*3*4).reshape(3, 3, 4)
    >>> image # doctest: +NORMALIZE_WHITESPACE
    array([[[ 0,  1,  2,  3],
            [ 4,  5,  6,  7],
            [ 8,  9, 10, 11]],
           [[12, 13, 14, 15],
            [16, 17, 18, 19],
            [20, 21, 22, 23]],
           [[24, 25, 26, 27],
            [28, 29, 30, 31],
            [32, 33, 34, 35]]])
    >>> block_reduce(image, block_size=(3, 3, 1), func=np.mean)
    array([[[ 16.,  17.,  18.,  19.]]])
    >>> image_max1 = block_reduce(image, block_size=(1, 3, 4), func=np.max)
    >>> image_max1 # doctest: +NORMALIZE_WHITESPACE
    array([[[11]],
           [[23]],
           [[35]]])
    >>> image_max2 = block_reduce(image, block_size=(3, 1, 4), func=np.max)
    >>> image_max2 # doctest: +NORMALIZE_WHITESPACE
    array([[[27],
            [31],
            [35]]])
    """

    if len(block_size) != image.ndim:
        raise ValueError("`block_size` must have the same length "
                         "as `image.shape`.")

    pad_width = []
    for i in range(len(block_size)):
        if block_size[i] < 1:
            raise ValueError("Down-sampling factors must be >= 1. Use "
                             "`skimage.transform.resize` to up-sample an "
                             "image.")
        if image.shape[i] % block_size[i] != 0:
            after_width = block_size[i] - (image.shape[i] % block_size[i])
        else:
            after_width = 0
        pad_width.append((0, after_width))

    image = pad(image, pad_width=pad_width, mode='constant',
                constant_values=cval)

    out = view_as_blocks(image, block_size)

    for i in range(len(out.shape) // 2):
        out = func(out, axis=-1)

    return out
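A brief usage sketch, assuming the public skimage.measure.block_reduce API; it shows how non-divisible shapes are padded with cval before pooling:

import numpy as np
from skimage.measure import block_reduce

image = np.arange(5 * 5).reshape(5, 5)
# 5 is not divisible by 2, so the array is padded with cval=0 before pooling
pooled = block_reduce(image, block_size=(2, 2), func=np.max, cval=0)
print(pooled.shape)   # (3, 3)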
Example #51
    def transform(self, X): 

        return np.mean(view_as_blocks(X, self.pool_size), axis=(4, 5, 6, 7))
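A self-contained sketch of the pooling this transform performs (the 4-D input shape and pool_size value are assumptions; the surrounding class is not shown):

import numpy as np
from skimage.util import view_as_blocks

X = np.random.rand(8, 3, 32, 32)        # (batch, channels, height, width)
pool_size = (1, 1, 2, 2)                # hypothetical: pool only the spatial axes
blocks = view_as_blocks(X, pool_size)   # shape (8, 3, 16, 16, 1, 1, 2, 2)
pooled = np.mean(blocks, axis=(4, 5, 6, 7))
print(pooled.shape)                     # (8, 3, 16, 16)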
Example #52
def to_patches(img, patch_size=20):
    """
    Assuming a 2-dimensional image, divide into patch_size * patch_size pixel patches.
    """
    return view_as_blocks(img, (patch_size, patch_size))
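A short usage sketch, assuming the image dimensions are exact multiples of patch_size (view_as_blocks requires exact divisibility):

import numpy as np

img = np.arange(40 * 60).reshape(40, 60)
patches = to_patches(img, patch_size=20)
print(patches.shape)                         # (2, 3, 20, 20): a 2x3 grid of patches
flat_patches = patches.reshape(-1, 20, 20)   # flatten the grid into a list of patches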
Example #53
def _clahe(image, ntiles_x, ntiles_y, clip_limit, nbins=128):
    """Contrast Limited Adaptive Histogram Equalization.

    Parameters
    ----------
    image : array-like
        Input image.
    ntiles_x : int, optional
        Number of tile regions in the X direction.  Ranges between 2 and 16.
    ntiles_y : int, optional
        Number of tile regions in the Y direction.  Ranges between 2 and 16.
    clip_limit : float, optional
        Normalized clipping limit (higher values give more contrast).
    nbins : int, optional
        Number of gray bins for histogram ("dynamic range").

    Returns
    -------
    out : ndarray
        Equalized image.

    The number of "effective" greylevels in the output image is set by `nbins`;
    selecting a small value (e.g. 128) speeds up processing and still produces
    an output image of good quality. The output image will have the same
    minimum and maximum value as the input image. A clip limit smaller than 1
    results in standard (non-contrast limited) AHE.
    """
    ntiles_x = min(ntiles_x, MAX_REG_X)
    ntiles_y = min(ntiles_y, MAX_REG_Y)
    ntiles_y = max(ntiles_y, 2)
    ntiles_x = max(ntiles_x, 2)

    if clip_limit == 1.0:
        return image  # is OK, immediately returns original image.

    map_array = np.zeros((ntiles_y, ntiles_x, nbins), dtype=int)

    y_res = image.shape[0] - image.shape[0] % ntiles_y
    x_res = image.shape[1] - image.shape[1] % ntiles_x
    image = image[: y_res, : x_res]

    x_size = image.shape[1] // ntiles_x  # Actual size of contextual regions
    y_size = image.shape[0] // ntiles_y
    n_pixels = x_size * y_size

    if clip_limit > 0.0:  # Calculate actual cliplimit
        clip_limit = int(clip_limit * (x_size * y_size) / nbins)
        if clip_limit < 1:
            clip_limit = 1
    else:
        clip_limit = NR_OF_GREY  # Large value, do not clip (AHE)

    bin_size = 1 + NR_OF_GREY // nbins
    aLUT = np.arange(NR_OF_GREY)
    aLUT //= bin_size
    img_blocks = view_as_blocks(image, (y_size, x_size))

    # Calculate greylevel mappings for each contextual region
    for y in range(ntiles_y):
        for x in range(ntiles_x):
            sub_img = img_blocks[y, x]
            hist = aLUT[sub_img.ravel()]
            hist = np.bincount(hist)
            hist = np.append(hist, np.zeros(nbins - hist.size, dtype=int))
            hist = clip_histogram(hist, clip_limit)
            hist = map_histogram(hist, 0, NR_OF_GREY - 1, n_pixels)
            map_array[y, x] = hist

    # Interpolate greylevel mappings to get CLAHE image
    ystart = 0
    for y in range(ntiles_y + 1):
        xstart = 0
        if y == 0:  # special case: top row
            ystep = y_size / 2.0
            yU = 0
            yB = 0
        elif y == ntiles_y:  # special case: bottom row
            ystep = y_size / 2.0
            yU = ntiles_y - 1
            yB = yU
        else:  # default values
            ystep = y_size
            yU = y - 1
            yB = yU + 1

        for x in range(ntiles_x + 1):
            if x == 0:  # special case: left column
                xstep = x_size / 2.0
                xL = 0
                xR = 0
            elif x == ntiles_x:  # special case: right column
                xstep = x_size / 2.0
                xL = ntiles_x - 1
                xR = xL
            else:  # default values
                xstep = x_size
                xL = x - 1
                xR = xL + 1

            mapLU = map_array[yU, xL]
            mapRU = map_array[yU, xR]
            mapLB = map_array[yB, xL]
            mapRB = map_array[yB, xR]

            xslice = np.arange(xstart, xstart + xstep)
            yslice = np.arange(ystart, ystart + ystep)
            interpolate(image, xslice, yslice,
                        mapLU, mapRU, mapLB, mapRB, aLUT)

            xstart += xstep  # set pointer on next matrix */

        ystart += ystep

    return image
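Unlike the padded variant in Example #48, this version simply crops the image so that each axis is divisible by the tile count; a small sketch of that tiling step (array sizes are illustrative):

import numpy as np
from skimage.util import view_as_blocks

image = np.arange(103 * 130, dtype=np.uint16).reshape(103, 130)
ntiles_y, ntiles_x = 4, 5
# crop to a multiple of the tile counts, as _clahe above does
image = image[:image.shape[0] - image.shape[0] % ntiles_y,
              :image.shape[1] - image.shape[1] % ntiles_x]
tiles = view_as_blocks(image, (image.shape[0] // ntiles_y,
                               image.shape[1] // ntiles_x))
print(image.shape, tiles.shape[:2])   # (100, 130) (4, 5)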
Example #54
def simplifyReward(reward, block):
    return ski.view_as_blocks(reward, (block, block)).mean(axis=(2, 3))  # simplified (average) reward matrix
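A quick usage sketch, assuming the `ski` alias above refers to skimage.util:

import numpy as np
import skimage.util as ski   # assumption: matches the `ski.` prefix above

reward = np.arange(16, dtype=float).reshape(4, 4)
print(simplifyReward(reward, 2))
# [[ 2.5  4.5]
#  [10.5 12.5]]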
Example #55
File: image.py  Project: zshipko/imagepy
 def blocks(self, size):
     '''blocks are non-overlapping sub images'''
     size = list(size)
     size.append(self.channels)
     return [Image(i) for i in util.view_as_blocks(self, tuple(size))]  # view_as_blocks expects a tuple
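The Image class here belongs to the imagepy project and is not shown; as a rough stand-in, the underlying view_as_blocks call behaves like this on a 3-channel array (block size and image shape are hypothetical):

import numpy as np
from skimage.util import view_as_blocks

rgb = np.zeros((128, 128, 3), dtype=np.uint8)
size = [32, 32]
size.append(rgb.shape[2])                      # keep all channels in one block
sub_images = view_as_blocks(rgb, tuple(size))  # block_shape must be a tuple
print(sub_images.shape)                        # (4, 4, 1, 32, 32, 3)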