Code example #1
    def create_image(self, image_manager):

        x0, y0, x1, y1 = self.box

        phase_img = image_manager.phase_image[x0:x1 + 1, y0:y1 + 1]
        phase_img = gray2rgb(img_as_float(phase_img))
        donor_img = rescale_intensity(image_manager.donor_image)[x0:x1 + 1,
                                                                 y0:y1 + 1]
        donor_img = gray2rgb(img_as_float(donor_img))
        acceptor_img = rescale_intensity(
            image_manager.acceptor_image)[x0:x1 + 1, y0:y1 + 1]
        acceptor_img = gray2rgb(img_as_float(acceptor_img))

        cell_masks = np.concatenate(
            (self.cell_mask, self.cell_mask, self.cell_mask), axis=1)
        septum_masks = np.concatenate(
            (self.sept_mask, self.sept_mask, self.sept_mask), axis=1)

        no_mask = np.concatenate((phase_img, donor_img, acceptor_img), axis=1)
        with_masks = mark_boundaries(no_mask,
                                     img_as_uint(cell_masks),
                                     color=(0, 0, 1),
                                     outline_color=None)
        with_masks = mark_boundaries(with_masks,
                                     img_as_uint(septum_masks),
                                     color=(1, 0, 0),
                                     outline_color=None)
        img = np.concatenate((no_mask, with_masks), axis=0)

        self.donacc_image = img
Code example #2
def save_stacks(vis, yfp, dsred, save_path, fov_name):

    vis_con = io.concatenate_images(img_as_uint(vis))
    tf.imsave(save_path + '/%s_bf_stack.tif' % fov_name, vis_con)
    print("BF stack saved")

    yfp_con = io.concatenate_images(img_as_uint(yfp))
    tf.imsave(save_path + '/%s_yfp_stack.tif' % fov_name, yfp_con)
    print("yfp stack saved")

    dsred_con = io.concatenate_images(img_as_uint(dsred))
    tf.imsave(save_path + '/%s_dsred_stack.tif' % fov_name, dsred_con)
    print("dsred stack saved")
Code example #3
def rescale(img):
    # Rescale the image to an unsigned integer for the Otsu thresholding method
    original_img, rescale_img = color_conversion(img)
    rescaled = rescale_intensity(rescale_img[:, :, 2], out_range=(0, 1))
    int_img = img_as_uint(rescaled)

    return int_img
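
color_conversion is project-specific, so here is a hedged standalone sketch of just the rescale-then-convert core:

import numpy as np
from skimage import img_as_uint
from skimage.exposure import rescale_intensity

channel = np.random.rand(16, 16) * 0.5            # arbitrary float channel
stretched = rescale_intensity(channel, out_range=(0, 1))
print(img_as_uint(stretched).max())               # 65535 once stretched to [0, 1]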
Code example #4
File: base.py Project: AbigailMcGovern/msbrainpy
def resize_z(resampled, filedir, filename, shape, originalRes, finalRes):
    z_scl = originalRes[0] / finalRes[0]
    y_scl = originalRes[1] / finalRes[1]
    x_scl = originalRes[2] / finalRes[2]
    shape1 = [
        int(np.ceil(z_scl * shape[0])),
        int(np.ceil(y_scl * shape[1])),
        int(np.ceil(x_scl * shape[2]))
    ]
    size1 = (int(np.ceil(z_scl * shape[0])), int(np.ceil(y_scl * shape[1])))
    out = np.zeros(shape1, dtype=np.float64)
    for i in range(resampled.shape[2]):
        plane = resampled[:, :, i]
        planeZY = resize(plane,
                         size1,
                         order=3,
                         clip=True,
                         anti_aliasing=True,
                         mode='reflect')
        out[:, :, i] = planeZY
    out = img_as_uint(out)
    print('Values along z were resampled')
    print('The final image has a size of ({}, {}, {})'.format(
        out.shape[0], out.shape[1], out.shape[2]))
    filepath = os.path.join(filedir, filename)
    with TiffWriter(filepath) as tiff:
        tiff.save(out)
    print('The image was saved at {}'.format(filepath))
Code example #5
def save_stacks(translated_images_dict, save_path, fov_name):
    print("Saving stacks...")
    for channel, stack in translated_images_dict.items():
        print("Saving {} stack".format(channel))
        concat_stack = io.concatenate_images(img_as_uint(stack))
        tf.imsave(save_path + '/{}_{}_stack.tif'.format(fov_name, channel),
                  concat_stack)
Code example #6
def flip_rotate(prep_id, tif):
    io.use_plugin('tifffile')
    INPUT = os.path.join(DATA_ROOT, prep_id, TIF)
    OUTPUT = os.path.join(DATA_ROOT, prep_id, ROTATED)
    input_tif = os.path.join(INPUT, tif)
    output_tif = os.path.join(OUTPUT, tif)

    try:
        img = io.imread(input_tif)
    except:
        return 'Bad file size'

    try:
        img = np.rot90(img, 1)
    except:
        print('could not rotate', tif)

    try:
        img = np.fliplr(img)
    except:
        print('could not flip', tif)

    try:
        img = img_as_uint(img)
    except:
        print('could not convert to 16bit', tif)

    try:
        io.imsave(output_tif, img)
    except:
        print('Could not save {}'.format(output_tif))

    return " Flipped and rotated"
Code example #7
File: color.py Project: gitter-badger/WSItk
def rgb2ycbcr(im):
    """
    RGB2YCBCR: converts an RGB image into the YCbCr (YUV) color space.
    
    :param im: numpy.ndarray
      [m x n x 3] image
    """
    
    if im.ndim != 3:
        raise ValueError('Input image must be RGB.')
    h, w, c = im.shape
    if c != 3:
        raise ValueError('Input image must be a 3-channel (RGB) image.')
    
    if im.dtype != np.uint8:
        im = img_as_uint(im)
    
    ycc = np.array([[0.257,  0.439, -0.148],
                    [0.504, -0.368, -0.291],
                    [0.098, -0.071,  0.439]])
    
    im = im.reshape((h*w, c))
    
    r = np.dot(im, ycc).reshape((h, w, c))
    r[:,:,0] += 16
    r[:,:,1:3] += 128
    
    im_res = np.array(np.round(r), dtype=im.dtype)
    
    return im_res
Code example #8
File: color.py Project: gitter-badger/WSItk
def ycbcr2rgb(im):
    """
    YCBCR2RGB: converts a YCbCr (YUV) image into the RGB color space.
    
    :param im: numpy.ndarray
      [m x n x 3] image
    """

    if im.ndim != 3:
        raise ValueError('Input image must be YCbCr.')
    h, w, c = im.shape
    if c != 3:
        raise ValueError('Input image must be a 3-channel (YCbCr) image.')
    
    if im.dtype != np.uint8:
        im = img_as_uint(im)

    iycc = np.array([[1.164,  1.164,  1.164],
                     [0,     -0.391,  2.018],
                     [1.596, -0.813,  0]])
    
    r = im.reshape((h * w, c)).astype(np.float64)  # float copy so the in-place shifts below don't underflow an integer array
    
    r[:, 0] -= 16.0
    r[:, 1:3] -= 128.0
    r = np.dot(r, iycc)
    r[r < 0] = 0
    r[r > 255] = 255
    r = np.round(r)
    #x = r[:,2]; r[:,2] = r[:,0]; r[:,0] = x

    im_res = np.array(r.reshape((h, w, c)), dtype=np.uint8)
    
    return im_res
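
A hedged round-trip sketch for this pair of color-space helpers (assumes rgb2ycbcr from the previous example and numpy as np are in scope; the trip is lossy because of integer rounding):

import numpy as np

rgb = (np.random.rand(8, 8, 3) * 255).astype(np.uint8)
ycc = rgb2ycbcr(rgb)
restored = ycbcr2rgb(ycc)
print(np.abs(restored.astype(int) - rgb.astype(int)).max())  # small rounding error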
Code example #9
File: nanozoomer.py Project: abdelneuhaus/Atlaser
def crop(im, bb, res):
    """
    Crop one brain slice from an ndpi image, given the bounding box around it

    Parameters
    ----------
    im: openslide.OpenSlide
        Open ndpi file
    bb: tuple
        Bounding box
    res: int
        Resolution level wanted

    Returns
    -------
    crop_im: PIL.Image
        Cropped image
    """

    crop_region = im.read_region(
        bb[:2], res, np.intp(bb[2:] / im.level_downsamples[res]
                             ))  # (x,y top-left), resolution, (width, height)

    crop_region = img_as_uint(rgb2gray(np.asarray(crop_region)))

    crop_im = Image.fromarray(crop_region)

    return crop_im
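
A hedged call sketch with a hypothetical .ndpi path (assumes openslide-python, PIL, and an older scikit-image whose rgb2gray accepts the RGBA array returned by read_region):

import numpy as np
import openslide

slide = openslide.OpenSlide('brain_slices.ndpi')  # hypothetical file
bbox = np.array([0, 0, 4096, 4096])               # (x, y, width, height) at level 0
crop_im = crop(slide, bbox, res=2)
crop_im.save('slice_crop.tif')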
Code example #10
File: preparation.py Project: kairosight/kairosight
def reduce_stack(stack_in, reduction=1):
    """Rescale the X,Y dimensions of a stack (3-D array, TYX) of optical data,
    using linear interpolation and gaussian anti-aliasing to effectively bin pixels together.

       Parameters
       ----------
       stack_in : ndarray
            A 3-D array (T, Y, X) of optical data, dtype : uint16 or float
       reduction : int, float
            Factor by which to reduce both dimensions, typically in the range 2-10

       Returns
       -------
       stack_out : ndarray
            A reduced 3-D array (T, Y, X) of optical data, dtype : stack_in.dtype
       """
    reduction_factor = 1 / reduction
    test_frame_reduced = rescale(stack_in[0], reduction_factor, multichannel=False)
    stack_reduced_shape = (stack_in.shape[0], test_frame_reduced.shape[0], test_frame_reduced.shape[1])
    stack_out = np.empty(stack_reduced_shape, dtype=stack_in.dtype)  # empty stack
    print('Reducing stack dimensions by {} from W {} X H {} ... to size W {} X H {} ...'
          .format(reduction,
                  stack_in.shape[2], stack_in.shape[1],
                  test_frame_reduced.shape[1], test_frame_reduced.shape[0]))
    for idx, frame in enumerate(stack_in):
        # print('\r\tFrame:\t{}\t/ {}'.format(idx + 1, stack_in.shape[0]), end='', flush=True)
        #     f_filtered = filter_spatial(frame, kernel=self.kernel)
        frame_reduced = img_as_uint(rescale(frame, reduction_factor, anti_aliasing=True, multichannel=False))
        stack_out[idx, :, :] = frame_reduced

    return stack_out
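
A quick usage sketch with a synthetic stack (assumes the snippet's own imports, including a scikit-image version where transform.rescale still takes the multichannel keyword):

import numpy as np

stack = (np.random.rand(10, 256, 256) * 65535).astype(np.uint16)
reduced = reduce_stack(stack, reduction=4)
print(stack.shape, '->', reduced.shape)  # (10, 256, 256) -> (10, 64, 64)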
Code example #11
def get_trenches(image,
                 rotation,
                 FOV,
                 output_directory,
                 top_bottom=None,
                 min_dist=20,
                 thres=1.4,
                 top_thres_multiplier=1,
                 bottom_thres_multiplier=2):

    test_image = img_as_uint(skimage.transform.rotate(image, rotation))
    bin_image = test_image > threshold_li(test_image) * 1

    cropped_bin = bin_image[int(bin_image.shape[0] * 0.4):, ]

    y_mean_intensity = np.mean(cropped_bin, axis=1)

    top_threshold = np.mean(y_mean_intensity) / top_thres_multiplier
    bottom_threshold = np.max(y_mean_intensity) / bottom_thres_multiplier
    top_threshold_line = np.argmax(y_mean_intensity > top_threshold) - 10
    bottom_threshold_line = np.argmax(y_mean_intensity > bottom_threshold) - 10

    x_mean_intensity = np.mean(
        bin_image[top_threshold_line:bottom_threshold_line], axis=0)

    indexes = peakutils.indexes(x_mean_intensity,
                                thres=thres * np.mean(x_mean_intensity),
                                min_dist=min_dist)

    midpoints = (indexes[1:] + indexes[:-1]) / 2
    #f, ax = plt.subplots(figsize=(10,5))
    #plt.plot(x_mean_intensity)
    #plt.plot(y_mean_intensity)
    #plt.scatter(indexes, x_mean_intensity[indexes])

    f, ax = plt.subplots(figsize=(40, 20))
    plt.imshow(test_image)
    plt.vlines(midpoints, ymin=0, ymax=test_image.shape[0], color="r")
    plt.hlines(top_threshold_line, xmin=0, xmax=test_image.shape[1], color="r")
    plt.hlines(bottom_threshold_line,
               xmin=0,
               xmax=test_image.shape[1],
               color="r")
    plt.xlim(test_image.shape[1], 0)
    plt.ylim(test_image.shape[0], 0)
    plt.axis("off")
    if top_bottom is None:
        plt.savefig(output_directory +
                    "diagnostics/trench_finding/{}.jpeg".format(FOV),
                    bbox_inches="tight")
        plt.close("all")
    else:
        plt.savefig(
            output_directory +
            "diagnostics/trench_finding/{}_{}.jpeg".format(FOV, top_bottom),
            bbox_inches="tight")
        plt.close("all")

    return top_threshold_line, bottom_threshold_line, midpoints
Code example #12
File: pil_plugin.py Project: haohao200609/Hybrid
def ndarray_to_pil(arr, format_str=None):
    """Export an ndarray to a PIL object.

    Parameters
    ----------
    Refer to ``imsave``.

    """
    if arr.ndim == 3:
        arr = img_as_ubyte(arr)
        mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]

    elif format_str in ['png', 'PNG']:
        mode = 'I;16'
        mode_base = 'I'

        if arr.dtype.kind == 'f':
            arr = img_as_uint(arr)

        elif arr.max() < 256 and arr.min() >= 0:
            arr = arr.astype(np.uint8)
            mode = mode_base = 'L'

        else:
            arr = img_as_uint(arr)

    else:
        arr = img_as_ubyte(arr)
        mode = 'L'
        mode_base = 'L'

    if arr.ndim == 2:
        im = Image.new(mode_base, arr.T.shape)
        try:
            im.frombytes(arr.tobytes(), 'raw', mode)
        except AttributeError:
            im.frombytes(arr.tostring(), 'raw', mode)

    else:
        try:
            im = Image.frombytes(mode, (arr.shape[1], arr.shape[0]),
                                 arr.tobytes())
        except AttributeError:
            im = Image.frombytes(mode, (arr.shape[1], arr.shape[0]),
                                  arr.tostring())
    return im
Code example #13
File: pil_plugin.py Project: samH99/LexisNexis
def ndarray_to_pil(arr, format_str=None):
    """Export an ndarray to a PIL object.

    Parameters
    ----------
    Refer to ``imsave``.

    """
    if arr.ndim == 3:
        arr = img_as_ubyte(arr)
        mode = {3: 'RGB', 4: 'RGBA'}[arr.shape[2]]

    elif format_str in ['png', 'PNG']:
        mode = 'I;16'
        mode_base = 'I'

        if arr.dtype.kind == 'f':
            arr = img_as_uint(arr)

        elif arr.max() < 256 and arr.min() >= 0:
            arr = arr.astype(np.uint8)
            mode = mode_base = 'L'

        else:
            arr = img_as_uint(arr)

    else:
        arr = img_as_ubyte(arr)
        mode = 'L'
        mode_base = 'L'

    if arr.ndim == 2:
        im = Image.new(mode_base, arr.T.shape)
        try:
            im.frombytes(arr.tobytes(), 'raw', mode)
        except AttributeError:
            im.frombytes(arr.tostring(), 'raw', mode)

    else:
        try:
            im = Image.frombytes(mode, (arr.shape[1], arr.shape[0]),
                                 arr.tobytes())
        except AttributeError:
            im = Image.frombytes(mode, (arr.shape[1], arr.shape[0]),
                                 arr.tostring())
    return im
Code example #14
File: process.py Project: feldman4/lasagna
def get_nuclei(img, opening_radius=6, block_size=80, threshold_offset=0):
    s = Sample(DOWNSAMPLE)
    binary = threshold_adaptive(s.downsample(img), int(block_size / s.rate), offset=threshold_offset)
    filled = fill_holes(binary)
    opened = opening(filled, selem=disk(opening_radius / s.rate))
    nuclei = apply_watershed(opened)
    nuclei = s.upsample(nuclei)
    return img_as_uint(nuclei)
Code example #15
def save_stacks(vis, gfp, mko, e2crimson, save_path, fov_name):

    vis_con = io.concatenate_images(img_as_uint(vis))
    tf.imsave(save_path + '/%s_bf_stack.tif' % fov_name, vis_con)
    print("BF stack saved")

    gfp_con = io.concatenate_images(img_as_uint(gfp))
    tf.imsave(save_path + '/%s_gfp_stack.tif' % fov_name, gfp_con)
    print("gfp stack saved")

    mko_con = io.concatenate_images(img_as_uint(mko))
    tf.imsave(save_path + '/%s_mko_stack.tif' % fov_name, mko_con)
    print("mko stack saved")

    e2crimson_con = io.concatenate_images(img_as_uint(e2crimson))
    tf.imsave(save_path + '/%s_e2crimson_stack.tif' % fov_name, e2crimson_con)
    print("e2crimson stack saved")
Code example #16
File: generate_rain.py Project: ztime/RainStreakGen
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', '--output', type=os.path.abspath, help='Folder to store results in', required=True)
    parser.add_argument('-n', '--number_of_frames', type=int, default=100, help='How many frames to generate')
    parser.add_argument('-p', '--prefix', type=str, default='rain-frame', help="Prefix of rendered images")
    parser.add_argument('-x', '--dimX', type=int, default=128)
    parser.add_argument('-y', '--dimY', type=int, default=128)
    parser.add_argument('--streak_folder', type=os.path.abspath, default='data/Streaks_Garg06', help='Where the dataset of rain is stored')
    parser.add_argument('--img_channels', type=int, default=3, help='How many channels to generate')
    parser.add_argument('--intensity', type=str, choices=['dense', 'middle','light'], default='dense')
    parser.add_argument('--angle', type=int, default=4, choices=[4,5,6,7,8])
    args = parser.parse_args()
    # the last digit in the image '158-5.png' is an angle, which varies between 4 and 8.
    # this means 10-4.png and 165-4.png are related because they share the same angle.
    if not os.path.isdir(args.output):
        os.mkdir(args.output)
    # Set intensity settings
    if args.intensity == 'dense':
        iterations = 8
    elif args.intensity == 'middle':
        iterations = 4
    elif args.intensity == 'light':
        iterations = 2

    all_frames = np.zeros((args.number_of_frames, args.dimX, args.dimY, args.img_channels))
    for frame_index in range(args.number_of_frames):
        print(f"Working on {frame_index}/{args.number_of_frames}...")
        available_images = glob.glob(f"{args.streak_folder}/*-{args.angle}.png")
        for _ in range(iterations): # We dont need to keep track
            random_streak_img = random.choice(available_images)
            random_streak_img = cv2.imread(random_streak_img)
            # Crop 3 pixel rows to get a picture that can scale evenly
            random_streak_img = random_streak_img[3:, :, :]
            # Flip the width and height here, not sure why
            resize_shape = random_streak_img.shape
            resize_shape = (resize_shape[1] * 4, resize_shape[0] * 4)
            random_streak_img_resized = cv2.resize(random_streak_img, resize_shape)
            # put filtered channels in a new frame
            new_frame = np.zeros_like(random_streak_img_resized, dtype=float)  # np.float was removed in NumPy 1.20
            thresholded_frame = imbinarize_O(cv2.cvtColor(random_streak_img_resized, cv2.COLOR_BGR2GRAY))
            frame_mask = bwareafilt(thresholded_frame)
            for img_channel in range(3):
                one_channel = random_streak_img_resized[:,:,img_channel]
                one_channel = np.multiply(one_channel, frame_mask.astype(np.uint8))
                # No idea about these settings, from the original
                one_channel = gaussian(one_channel, 1, truncate=2)
                new_frame[:,:,img_channel] = one_channel
            # Take the size down again
            new_frame = cv2.resize(new_frame, (args.dimX, args.dimY))
            # Add it to the frame, scaled by a random alpha
            alpha = random.random() * 0.2 + 0.25
            all_frames[frame_index] = all_frames[frame_index] + (new_frame.astype(float) * alpha)
        frame_prepend_zeros = len(str(args.number_of_frames))
        filename = os.path.join(args.output, f"{args.prefix}-{frame_index:0>{frame_prepend_zeros}}.png")
        imsave(filename, img_as_uint(all_frames[frame_index]))

    print(f"Done with {args.number_of_frames} frames!")
Code example #17
File: equalizer.py Project: pmagwene/spotz
def equalize_from_ROI(img, roi_bbox):
    mask = np.zeros(img.shape)
    minr, minc, maxr, maxc = roi_bbox
    mask[minr:maxr, minc:maxc] = 1
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", UserWarning)
        equalized_img = util.img_as_uint(exposure.equalize_hist(img,
                                                                mask=mask))
    return equalized_img
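
A usage sketch with synthetic data and a hypothetical ROI (assumes numpy as np and the skimage util/exposure imports the snippet uses):

import numpy as np

img = (np.random.rand(64, 64) * 65535).astype(np.uint16)
equalized = equalize_from_ROI(img, roi_bbox=(8, 8, 56, 56))
print(equalized.dtype)  # uint16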
Code example #18
    def normalize(self, image, dtype='float'):
        image = image / np.max(image)

        if dtype == 'float':
            return img_as_float(image)
        elif dtype == 'uint8':
            return img_as_ubyte(image)
        elif dtype == 'uint16':
            return img_as_uint(image)
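
The method only depends on its arguments, so a hedged standalone check of the uint16 branch looks like this (assumes numpy as np and the skimage converters):

import numpy as np
from skimage import img_as_uint

img = np.array([[0, 500], [1000, 2000]], dtype=np.uint16)
scaled = img / np.max(img)  # floats in [0, 1]; the brightest pixel becomes 1.0
print(img_as_uint(scaled))  # the brightest pixel maps to 65535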
Code example #19
def transform(src_dir, city, s, gamma, method='CLAHE'):
    """Scales all images of given city
    @param: src_dir is directory the tiles are in
    @param: city is the positive filter for the city you're looking for
    @param: s is vector of color scalings for that city
    @param: gamma is exponent for scaling
    @param: method is the transformation method to apply"""

    imList = [f for f in os.listdir(src_dir) if ('RGB' in f
                                                and city in f
                                                and 'tif' in f)]
    path = src_dir + 'transformed_{}/'.format(method)
    try:
        os.mkdir(path)
    except OSError:
        pass

    if method == 'WB2': #transformations considering entire city of data
        threshold = 0.005
        cum_hist = {}
        for i in range(3): #each color channel
            histSum = 0
            cumSum = 0
            for file in imList:
                image = imio.imread(src_dir + file)
                hist, bins = np.histogram(image[...,i].ravel(), 256, (0,255))
                histSum += hist.sum()
                cumSum += np.cumsum(hist)
            cum_hist[i] = cumSum / histSum

    for file in imList:
        name = file[0].upper() + file[1:]
        image = imio.imread(src_dir + file)

        if method == 'gamma_correction':
            new_image = gammaCorrect(image, s, gamma)
        elif method == 'scale_only':
        new_image = gammaCorrect(image, s, 1.0)  # scale only: a gamma of 1.0 keeps the curve linear
        elif method == 'CLAHE':
            new_image = exposure.equalize_adapthist(image, clip_limit=0.01)
            new_image = img_as_uint(new_image)
        elif method == 'log':
            new_image = exposure.adjust_log(image)
        elif method == 'sigmoid':
            new_image = exposure.adjust_sigmoid(image, gain=4, cutoff=0.35)
        elif method == 'KyleWb2':
            new_image = KyleWB2(image)
        elif method == 'WB2':
            new_image = np.zeros_like(image) #Initialize final image

            for i in range(3): #each color channel
                bmin = np.where(cum_hist[i]>threshold)[0][0]
                bmax = np.where(cum_hist[i]>1-threshold)[0][0]
                new_image[...,i] = np.clip(image[...,i], bmin, bmax)
                new_image[...,i] = (new_image[...,i]-bmin) / (bmax - bmin) * 255
            new_image = new_image.round().astype('uint8')

        print(name)
        imio.imwrite(path + name, new_image)
Code example #20
    def get_cell_images(self, path, label, image_manager, cell_manager,
                        params):
        # The label does not change which directories are created
        filename = self.cell_data_filename
        if not os.path.exists(filename + "/_cell_data/fluor"):
            os.makedirs(filename + "/_cell_data/fluor")

        if image_manager.optional_image is not None:
            if not os.path.exists(filename + "/_cell_data/optional"):
                os.makedirs(filename + "/_cell_data/optional")

        x_align, y_align = params.imageloaderparams.x_align, params.imageloaderparams.y_align

        fluor_img = image_manager.fluor_image
        fluor_w_cells = cell_manager.fluor_w_cells
        optional_image = image_manager.optional_image
        optional_w_cells = cell_manager.optional_w_cells

        for key in cell_manager.cells.keys():
            x0, y0, x1, y1 = cell_manager.cells[key].box
            fluor_cell = np.concatenate(
                (fluor_img[x0:x1 + 1,
                           y0:y1 + 1], fluor_img[x0:x1 + 1, y0:y1 + 1] *
                 cell_manager.cells[key].cell_mask),
                axis=1)
            imsave(filename + "/_cell_data/fluor/" + key + ".png",
                   img_as_uint(fluor_cell))

            if optional_image is not None:
                optional_cell = np.concatenate(
                    (optional_image[x0:x1 + 1, y0:y1 + 1],
                     optional_image[x0:x1 + 1, y0:y1 + 1] *
                     cell_manager.cells[key].cell_mask),
                    axis=1)
                imsave(filename + "/_cell_data/optional/" + key + ".png",
                       img_as_uint(optional_cell))
Code example #21
File: images.py Project: brunomsaraiva/eHooke_1.0
    def overlay_mask_base_image(self):
        """ Creates a new image with an overlay of the mask
        over the base image"""

        x0, y0, x1, y1 = self.clip

        self.base_w_mask = mark_boundaries(self.base_image[x0:x1, y0:y1],
                                           img_as_uint(self.mask),
                                           color=(1, 0, 1),
                                           outline_color=None)
Code example #22
File: images.py Project: brunomsaraiva/eHooke_1.0
    def overlay_mask_optional_image(self):
        """Creates a new image with an overlay of the mask over the fluor
        image"""

        optional_image = color.rgb2gray(self.optional_image)
        optional_image = exposure.rescale_intensity(optional_image)
        optional_image = img_as_float(optional_image)

        self.optional_w_mask = mark_boundaries(optional_image, img_as_uint(
            self.mask), color=(1, 0, 1), outline_color=None)
Code example #23
    def overlay_mask_base_image(self):
        """ Creates a new image with an overlay of the mask
        over the base image"""

        x0, y0, x1, y1 = self.clip

        self.base_w_mask = mark_boundaries(self.base_image[x0:x1, y0:y1],
                                           img_as_uint(self.mask),
                                           color=(0, 1, 1),
                                           outline_color=None)
Code example #24
def step_filter(image):
    imageC = np.copy(image)
    alpha_mask = imageC[:, :, 3]
    image_rgb = imageC[:, :, :3]
    image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)

    image_gray_norm = img_as_float(image_gray)
    filtered_rgb = triple_step_filter(img_as_float(image_rgb), image_gray_norm,
                                      alpha_mask)
    imageC[:, :, :3] = img_as_uint(filtered_rgb)

    return eliminate_mid_transparencies(imageC, alpha_mask)
Code example #25
File: ihc_analysis.py Project: AidanRoss/histology
def rescale(img):
    # Rescale the image to an unsigned integer for the Otsu thresholding method
    # original_img, mask, dab, rr, cc = segment(img)
    # b = mask.data
    # rescaled_mask = rescale_intensity(b, out_range=(0, 1))
    orig, ihc = color_conversion(img)
    rescaled = rescale_intensity(ihc[:, :, 2], out_range=(0, 1))
    int_img = img_as_uint(rescaled)

    # int_mask_data = img_as_uint(rescaled_mask)
    # print 'loop once'
    return int_img, orig, ihc  # , int_mask_data
Code example #26
 def _apply(self, imgmsg, maskmsg):
     bridge = cv_bridge.CvBridge()
     img = bridge.imgmsg_to_cv2(imgmsg)
     if img.ndim == 2:
         img = gray2rgb(img)
     mask = bridge.imgmsg_to_cv2(maskmsg, desired_encoding='mono8')
     mask = mask.reshape(mask.shape[:2])
     mask = gray2rgb(mask)
     # compute label
     roi = closed_mask_roi(mask)
     roi_labels = masked_slic(img=img[roi],
                              mask=mask[roi],
                              n_segments=20,
                              compactness=30)
     if roi_labels is None:
         return
     labels = np.zeros(mask.shape, dtype=np.int32)
     # labels.fill(-1)  # set bg_label
     labels[roi] = roi_labels
     if self.is_debugging:
         # publish debug slic label
         slic_labelmsg = bridge.cv2_to_imgmsg(labels)
         slic_labelmsg.header = imgmsg.header
         self.pub_slic.publish(slic_labelmsg)
     # compute rag
     g = rag_solidity(labels, connectivity=2)
     if self.is_debugging:
         # publish debug rag drawn image
         rag_img = draw_rag(labels, g, img)
         rag_img = img_as_uint(rag_img)
         rag_imgmsg = bridge.cv2_to_imgmsg(rag_img.astype(np.uint8),
                                           encoding='rgb8')
         rag_imgmsg.header = imgmsg.header
         self.pub_rag.publish(rag_imgmsg)
     # merge rag with solidity
     merged_labels = merge_hierarchical(labels,
                                        g,
                                        thresh=1,
                                        rag_copy=False,
                                        in_place_merge=True,
                                        merge_func=_solidity_merge_func,
                                        weight_func=_solidity_weight_func)
     merged_labels += 1
     merged_labels[mask == 0] = 0
     merged_labelmsg = bridge.cv2_to_imgmsg(merged_labels.astype(np.int32))
     merged_labelmsg.header = imgmsg.header
     self.pub.publish(merged_labelmsg)
     if self.is_debugging:
         out = label2rgb(merged_labels, img)
         out = (out * 255).astype(np.uint8)
         out_msg = bridge.cv2_to_imgmsg(out, encoding='rgb8')
         out_msg.header = imgmsg.header
         self.pub_label.publish(out_msg)
Code example #27
    def overlay_mask_optional_image(self):
        """Creates a new image with an overlay of the mask over the fluor
        image"""

        optional_image = color.rgb2gray(self.optional_image)
        optional_image = exposure.rescale_intensity(optional_image)
        optional_image = img_as_float(optional_image)

        self.optional_w_mask = mark_boundaries(optional_image,
                                               img_as_uint(self.mask),
                                               color=(0, 1, 1),
                                               outline_color=None)
Code example #28
def bayer_to_rgb(full_sensor_image):
    """
    Demosaic a full readout of a sensor that has a Bayer-pattern
    filter overlaid on it.
    """

    # Cop out - use someone else's demosaicing algorithms.
    # https://gist.github.com/bbattista/8358ccafecf927ae1c58c944ab470ffb

    bayer = img_as_uint(color.rgb2gray(full_sensor_image))
    demosaic = cv2.cvtColor(bayer, cv2.COLOR_BAYER_BG2RGB)
    return img_as_ubyte(demosaic)
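
A hedged smoke test with synthetic sensor data (assumes cv2, numpy as np, and the skimage color/img_as_* imports used above):

import numpy as np

sensor = np.random.rand(64, 64, 3)  # fake full-sensor readout in [0, 1]
rgb = bayer_to_rgb(sensor)
print(rgb.dtype, rgb.shape)         # uint8 (64, 64, 3)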
Code example #29
def run(params):
    image_location = params['inputImagePath']
    result_location = params['resultPath']
    sigma = float(params['sigma'])
    tCount = int(params['TCount'])
    zCount = int(params['ZCount'])
    if not os.path.exists(image_location):
        print(f"Error: {image_location} does not exist")
        return

    image_data = imread(image_location)
    dims = image_data.shape
    shape_image = np.empty(image_data.shape, dtype=np.float32)

    # 3D+T
    if tCount > 1 and zCount > 1:
        print(f"Applying to 3D+T case with dims: {image_data.shape}")
        for t in range(0, dims[0]):
            for z in range(0, dims[1]):
                shape_image[t, z, :, :] = shape_index(image_data[t, z, :, :],
                                                      sigma=sigma,
                                                      mode='reflect').astype(
                                                          np.float32)
        axes = 'YXZT'
    # 2D+T or 3D
    elif (tCount > 1 and zCount == 1) or (tCount == 1 and zCount > 1):
        print(f"Applying to 2D+T or 3D case with dims: {image_data.shape}")
        for d in range(0, dims[0]):
            shape_image[d, :, :] = shape_index(image_data[d, :, :],
                                               sigma=sigma,
                                               mode='reflect').astype(
                                                   np.float32)
        if tCount > 1:
            axes = 'YXT'
        else:
            axes = 'YXZ'
    # 2D
    else:
        print(f"Applying to 2D case with dims: {image_data.shape}")
        shape_image = shape_index(image_data, sigma=sigma, mode='reflect')
        axes = 'YX'

    # NaNs are usually returned - convert these to possible pixel values
    shape_image = np.nan_to_num(shape_image)

    if image_data.dtype == np.uint16:
        shape_image = img_as_uint(shape_image)
    else:
        shape_image = img_as_ubyte(shape_image)

    imsave(result_location, shape_image, metadata={'axes': axes})
Code example #30
def convert_to_tif(f_name):
    read_file = h5py.File(f_name)
    base_data = read_file['DataSet']

    # THIS ASSUMES THAT YOU HAVE A MULTICOLOR Z STACK IN TIME
    resolution_levels, \
    time_points, n_time_points, \
    channels, n_channels, \
    n_z_levels, z_levels, \
    n_rows, n_cols = get_h5_file_info(base_data)

    # Get the index of the bad frame start
    bad_index_start = get_bad_frame_index(
        np.array(base_data[resolution_levels[0]][time_points[0]][channels[0]]
                 ['Data']))

    banner_text = 'File Breakdown'
    print(banner_text)
    print('_' * len(banner_text))
    print('Channels: %d' % n_channels)
    print('Time Points: %d' % n_time_points)
    print('Z Levels: %d' % (bad_index_start + 1))
    print('Native (rows, cols): (%d,%d)' % (n_rows, n_cols))
    print('_' * len(banner_text))

    with TiffWriter(f_name.rsplit('.', maxsplit=1)[0].split('/')[-1] + '.tif',
                    imagej=True) as out_tif:
        mmap_fname = f_name + '.mmap'
        output_stack = np.memmap(mmap_fname,
                                 dtype=np.uint16,
                                 shape=(n_time_points, bad_index_start,
                                        n_channels, n_rows, n_cols),
                                 mode='w+')

        for i_t, t in enumerate(time_points):
            print('%s/%d' % (t, n_time_points - 1))
            for i_z, z_lvl in enumerate(z_levels[:bad_index_start]):
                print('%s/%d Z %d/%d' %
                      (t, n_time_points - 1, i_z + 1, bad_index_start))
                for i_channel, channel in enumerate(channels):
                    output_stack[i_t, i_z, i_channel] = img_as_uint(
                        np.array(
                            base_data[resolution_levels[0]][time_points[i_t]][
                                channels[i_channel]]['Data'][i_z]))

        # Save the reduced file
        out_tif.save(output_stack)

        # Delete the reduced stack
        del output_stack
        os.remove(mmap_fname)
Code example #31
def apply_transform(moving,
                    target,
                    moving_pts,
                    target_pts,
                    transformer,
                    output_shape_rc=None):
    '''
    :param transformer: transformer object from skimage. See https://scikit-image.org/docs/dev/api/skimage.transform.html for different transformations
    :param output_shape_rc: shape of warped image (row, col). If None, uses shape of target image
    :return: warped_img, warped_pts
    '''
    if output_shape_rc is None:
        output_shape_rc = target.shape[:2]

    # case of transformer
    if str(transformer.__class__
           ) == "<class 'skimage.transform._geometric.SimilarityTransform'>":
        transformer.estimate(moving_pts, target_pts)
        warped_img = transform.warp(moving,
                                    transformer.inverse,
                                    output_shape=output_shape_rc)
        warped_pts = transformer(moving_pts)

    elif str(transformer.__class__
             ) == "<class 'skimage.transform._geometric.PolynomialTransform'>":
        transformer.estimate(target_pts, moving_pts)
        warped_img = transform.warp(moving,
                                    transformer,
                                    output_shape=output_shape_rc)
        ### Restimate to warp points
        transformer.estimate(moving_pts, target_pts)
        warped_pts = transformer(moving_pts)

    else:
        sys.exit(
            f'Error @ apply_transform : handling for this transformer type is not yet implemented {transformer.__class__}.'
        )

    # dtype warped float64 image
    if not (warped_img.dtype.type is np.float64):
        sys.exit(
            f'Error @ keypointregist.apply_transform : warped_img dtype is not np.float64 {warped_img.dtype.type}.\nthis should not happen. fix source code!'
        )
    if (target.dtype.type is np.uint8):
        warped_img = util.img_as_ubyte(warped_img)
    else:
        warped_img = util.img_as_uint(warped_img)

    return warped_img, warped_pts
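
A hedged example call with a similarity transform: synthetic float images and a made-up 2-pixel shift (assumes numpy as np plus the skimage transform/util modules the snippet imports):

import numpy as np
from skimage import transform

moving = np.random.rand(64, 64)
target = np.random.rand(64, 64)
moving_pts = np.array([[0, 0], [0, 63], [63, 0], [63, 63]], dtype=float)
target_pts = moving_pts + 2.0  # pretend the content shifted by 2 px
warped_img, warped_pts = apply_transform(moving, target, moving_pts, target_pts,
                                         transform.SimilarityTransform())
print(warped_img.dtype, warped_pts[0])  # uint16 [2. 2.]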
Code example #32
def run(params):
	RTimageLocation = params['inputRTImagePath']
	GTimageLocation = params['inputGTImagePath']
	resultLocation = params['resultPath']
	resultLocationAdj = params['resultPathAdj']
	
	# Checking existence of temporary files (individual channels)
	if not os.path.exists(RTimageLocation):
		print(f'Error: {RTimageLocation} does not exist')
		return
	if not os.path.exists(GTimageLocation):
		print(f'Error: {GTimageLocation} does not exist')
		return
		
	# Loading input images
	RTData = imread(RTimageLocation)
	GTData = imread(GTimageLocation)
	print(f'Dimensions of Restored image: {RTData.shape}')
	print(f'Dimensions of GT image: {GTData.shape}')
	
	# Histogram matching
	matched_GTData = match_histograms(GTData, RTData).astype(RTData.dtype)
	
	# MSE measurement
	# valMSE = skimage.measure.compare_mse(RTData, GTData) # deprecated in scikit-image 0.18 
	valMSE = mean_squared_error(RTData, matched_GTData)
	print(f'___ MSE = {valMSE} ___')	# Value appears in the log if Verbosity option is set to 'Everything'
	
	# SSIM measurement
	outFullSSIM = structural_similarity(RTData, matched_GTData, full=True)
	
	# Extracting mean value (first item)
	outMeanSSIM = outFullSSIM[0]
	print(f'___ Mean SSIM = {outMeanSSIM} ___')
	
	# Extracting map (second item)
	outSSIM = outFullSSIM[1]
	print(f'Bit depth of SSIM array: {outSSIM.dtype}')
	
	# Convert output array whose range is [0-1] to adjusted bit range (8- or 16-bit)
	if RTData.dtype is np.dtype('u2'):
		outputData = img_as_uint(outSSIM)
	elif RTData.dtype is np.dtype('f4'):
		outputData = img_as_float32(outSSIM)	# necessary?
	else:
		outputData = img_as_ubyte(outSSIM)
	
	imsave(resultLocation, outputData)	
	imsave(resultLocationAdj, matched_GTData)
Code example #33
File: temporal.py Project: danlopez00/crop_predict
def compress_temporal_image(temporal_image):
    """
    Averages frames in the image and then normalizes them to values between [0, 2^16 - 1]

    :param temporal_image: Image to compress
    :return: 2D numpy image array of type uint16
    """

    # Normalize the image to values between [-1 and 1]
    image_center = np.average([np.min(temporal_image), np.max(temporal_image)])
    image_shift = temporal_image - image_center
    image_normalized = image_shift / max(-np.min(image_shift), np.max(image_shift))

    # Convert the image to uint16 and return (img_as_uint clips the negative half)
    return img_as_uint(image_normalized)
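
A hedged check of the output type with synthetic data (assumes numpy as np and img_as_uint are imported as in the snippet):

import numpy as np

frames = np.random.rand(32, 32) * 100.0  # arbitrary float image
compressed = compress_temporal_image(frames)
print(compressed.dtype)  # uint16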
Code example #34
def run(params):
    image_location = params['inputImagePath']
    result_object_location = params['resultObjectPath']
    threshold = int(params['threshold'])
    radius = int(params['radius'])
    tCount = int(params['TCount'])
    zCount = int(params['ZCount'])
    
    if not os.path.exists(image_location):
        print(f'Error: {image_location} does not exist')
        return
    
    if zCount > 1:
        print('This recipe currently supports only 2D and 2D+T images.')
        print('Try using ThresholdWithoutBorders3D.py instead.')
        return
        
    image_data = imread(image_location)
    dims = image_data.shape
    print(dims)
    mask = np.empty(image_data.shape, dtype=image_data.dtype)
    if radius != 0:
        structure = disk(radius)
    
    # 2D+T
    if tCount > 1:
        print(f"Applying to 2D+T case with dims: {image_data.shape}")
        for t in range(0, dims[0]):
            mask[t,:,:] = np.where(image_data[t,:,:] > threshold, 1, 0)
            if radius != 0:
                mask[t,:,:] = closing(mask[t,:,:], selem=structure)
            mask[t,:,:] = clear_border(mask[t,:,:])
        axes = 'YXT'
    # 2D
    else:
        print(f"Applying to 2D case with dims: {image_data.shape}")
        mask = np.where(image_data > threshold, 1, 0)
        if radius != 0:
            mask = closing(mask, selem=structure)
        mask = clear_border(mask)
        axes = 'YX'
    
    if image_data.dtype == np.uint16:
        mask = img_as_uint(mask)
    else:
        mask = img_as_ubyte(mask)

    imsave(result_object_location, mask, metadata={'axes': axes})
Code example #35
def run(params):
    image_location = params['inputImagePath']
    result_location = params['resultPath']
    sigma_min = float(params['sigma_min'])
    sigma_max = float(params['sigma_max'])
    tCount = int(params['TCount'])
    zCount = int(params['ZCount'])

    if not os.path.exists(image_location):
        print(f'Error: {image_location} does not exist')
        return

    image_data = imread(image_location)
    dims = image_data.shape
    meijering_image = np.empty_like(image_data)
    output_data = np.empty_like(image_data)

    sigmas = np.arange(sigma_min, sigma_max,
                       round((sigma_max - sigma_min) / 5, 1))

    if tCount > 1:
        print('Time series are currently not supported.')
        return

    meijering_image = meijering(image_data, sigmas=sigmas, black_ridges=False)

    # Cropping is performed in 2D to get rid of bright pixels at edges of the image.

    if zCount > 1:
        crop_size = max(int(max(list(dims[1:])) / 100), 4)
        meijering_image[:, 0:crop_size, 0:crop_size] = 0
        meijering_image[:, 0:crop_size, -crop_size:] = 0
        meijering_image[:, -crop_size:, 0:crop_size] = 0
        meijering_image[:, -crop_size:, -crop_size:] = 0
    else:
        crop_size = max(int(max(list(dims)) / 100), 4)
        meijering_image[0:crop_size, 0:crop_size] = 0
        meijering_image[0:crop_size, -crop_size:] = 0
        meijering_image[-crop_size:, 0:crop_size] = 0
        meijering_image[-crop_size:, -crop_size:] = 0

    if image_data.dtype == np.uint16:
        output_data = img_as_uint(meijering_image)
    else:
        output_data = img_as_ubyte(meijering_image)

    imsave(result_location, output_data)
Code example #36
def test_adapthist_color():
    """Test an RGB color uint16 image
    """
    img = util.img_as_uint(data.astronaut())
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        hist, bin_centers = exposure.histogram(img)
        assert len(w) > 0
    adapted = exposure.equalize_adapthist(img, clip_limit=0.01)

    assert adapted.min() == 0
    assert adapted.max() == 1.0
    assert img.shape == adapted.shape
    full_scale = exposure.rescale_intensity(img)
    assert_almost_equal(peak_snr(full_scale, adapted), 109.393, 1)
    assert_almost_equal(norm_brightness_err(full_scale, adapted), 0.02, 2)
    return data, adapted
Code example #37
File: imgs2lf.py Project: manuSrep/DisneyDispPy
def imgs2lf(input_dir, lf_file, lf_dataset='lightfield', img_extension = '.png', dtype=np.uint8, RGB=True):
    """
    Convert several images to a lightfield.

    Parameters
    ----------
    input_dir : string
        The directory where the lightfield images are located.
    lf_file: string
        The filename (including the directory) of the output file.
    lf_dataset : string, optional
        The new container name of the hdf5 file.
    img_extension : string, optional
        The file extension of the images to look for.
    dtype : numpy.dtype, optional
        The new data type for the downscaled lightfield. Must be either
        np.float64, np.uint8 or np.uint16.
    RGB : bool, optional
        If True, the output lightfield will be converted to RGB (default).
        Otherwise gray type images are stored.
    """

    # look for images
    files = multiLoading(identifier="*.{e}".format(e=img_extension), path=input_dir)

    # prepare saving
    lf_file = prepareSaving(lf_file, extension=".hdf5")

    # Which dtype should be used?
    if dtype == np.float64:
        img_0 = img_as_float(imread(files[0]))
    elif dtype == np.uint8:
        img_0 = img_as_ubyte(imread(files[0]))
    elif dtype == np.uint16:
        img_0 = img_as_uint(imread(files[0]))
    else:
        raise TypeError('The given data type is not supported!')


    # Do we shall take RGB or gray images?
    if (len(img_0.shape) == 3 and img_0.shape[2] == 3):
        rows, cols, orig_channels = img_0.shape  # automatically determine the images' shapes from the first image.
    elif len(img_0.shape) == 2:
        orig_channels = 1
        rows, cols = img_0.shape  # automatically determine the images' shapes from the first image.
    else:
        raise TypeError('The given images are neither gray nor RGB images!')

    f_out = h5py.File(lf_file, 'w')
    if RGB:
        dataset = f_out.create_dataset(lf_dataset,
                                       shape=(len(files), rows, cols, 3),
                                       dtype=dtype)
    else:
        dataset = f_out.create_dataset(lf_dataset,
                                       shape=(len(files), rows, cols),
                                       dtype=dtype)
    for k in range(len(files)):

        if dtype == np.float64:
            if orig_channels==1 and RGB:
                dataset[k, ...] = img_as_float(gray2rgb(imread(files[k])))
            elif orig_channels==3 and not RGB:
                dataset[k, ...] = img_as_float(rgb2gray(imread(files[k])))
            else:
                dataset[k, ...] = img_as_float(imread(files[k]))
        elif dtype == np.uint8:
            if orig_channels==1 and RGB:
                dataset[k, ...] = img_as_ubyte(gray2rgb(imread(files[k])))
            elif orig_channels==3 and not RGB:
                dataset[k, ...] = img_as_ubyte(rgb2gray(imread(files[k])))
            else:
                dataset[k, ...] = img_as_ubyte(imread(files[k]))
        elif dtype == np.uint16:
            if orig_channels==1 and RGB:
                dataset[k, ...] = img_as_uint(gray2rgb(imread(files[k])))
            elif orig_channels==3 and not RGB:
                dataset[k, ...] = img_as_uint(rgb2gray(imread(files[k])))
            else:
                dataset[k, ...] = img_as_uint(imread(files[k]))
        else:
            raise TypeError('Given dtype not supported.')
    f_out.close()
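
A hedged call with a hypothetical directory of PNG frames (passing the extension without the dot so the "*.{e}" pattern above expands to "*.png"):

imgs2lf('./frames', 'lightfield.hdf5', img_extension='png', dtype=np.uint16, RGB=True)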
Code example #38
File: lf2epi.py Project: manuSrep/DisneyDispPy
def create_epis(lf_in, epi_out, hdf5_dataset_in="lightfield", hdf5_dataset_out="epis", dtype=np.float64, RGB=True):
    """
    Create epis for all resolutions given by the input lightfield.

    Parameters
    ----------
    lf_in : string
        The input hdf5 filename (including the directory) of the lightfield.
    epi_out : string
        The output hdf5 filename (including the directory) of the lightfield
        in all resolutions.
    hdf5_dataset_in: string
        The container name inside the hdf5 file for the lightfield. The same
        name will be used for the new file.
    hdf5_dataset_out: string, optional
        The container name inside the hdf5 file for the epis.
    dtype : numpy.dtype, optional
        The new data type for the epis. Must be either
        np.float64, np.uint8 or np.uint16.
    RGB : bool, optional
        If True, the output epis will be converted to RGB (default).
        Otherwise gray type images are stored.
    """

    # Prepare the input and output filenames
    lf_in = prepareLoading(lf_in)
    epi_out = prepareSaving(epi_out, extension=".hdf5")

    # Initialize the hdf5 file objects
    lf_in = h5py.File(lf_in, 'r')
    epi_out = h5py.File(epi_out, 'w')

    # Read the resolution attribute from the input lightfield
    r_all = lf_in[hdf5_dataset_in].attrs.get('resolutions')[...]

    epi_grp = epi_out.create_group(hdf5_dataset_out)
    epi_grp.attrs.create('resolutions', r_all)


    # Initialize a progress bar to follow the conversion
    widgets = ['Create EPIs: ', Percentage(), ' ', Bar(),' ', ETA(), ' ']
    progress = ProgressBar(widgets=widgets, max_val=r_all.shape[0]).start()
    for r,res in enumerate(r_all):
        progress.update(r)

        set_name = str(res[0]) + 'x' + str(res[1])
        lf_data = lf_in[hdf5_dataset_in + '/' + set_name]

        # Find out what data we have
        if len(lf_data.shape) == 4 and lf_data.shape[-1] == 3:
            OLDRGB = True
        elif len(lf_data.shape) == 3:
            OLDRGB = False
        else:
            raise TypeError(
                'The given lightfield contains neither gray nor RGB images!')

        if RGB:
            epi_data = epi_grp.create_dataset(set_name, shape=(res[0],lf_data.shape[0], res[1],3), dtype=dtype)
        else:
            epi_data = epi_grp.create_dataset(set_name, shape=(res[0],lf_data.shape[0], res[1]), dtype=dtype)


        for v in range(res[0]):

            if dtype == np.float64:
                if RGB and not OLDRGB:
                    epi_data[v] = img_as_float(gray2rgb(lf_data[:,v,])).reshape(epi_data[v].shape)
                elif not RGB and OLDRGB:
                    epi_data[v] = img_as_float(rgb2gray(lf_data[:,v,])).reshape(epi_data[v].shape)
                else:
                    epi_data[v] = img_as_float(lf_data[:,v,...]).reshape(epi_data[v].shape)
            elif dtype == np.uint16:
                if RGB and not OLDRGB:
                    epi_data[v] = img_as_uint(gray2rgb(lf_data[:,v,])).reshape(epi_data[v].shape)
                elif not RGB and OLDRGB:
                    epi_data[v] = img_as_uint(rgb2gray(lf_data[:,v,])).reshape(epi_data[v].shape)
                else:
                    epi_data[v] = img_as_uint(lf_data[:,v,...]).reshape(epi_data[v].shape)
            elif dtype == np.uint8:
                if RGB and not OLDRGB:
                    epi_data[v] = img_as_ubyte(gray2rgb(lf_data[:,v,...])).reshape(epi_data[v].shape)
                elif not RGB and OLDRGB:
                    epi_data[v] = img_as_ubyte(rgb2gray(lf_data[:,v,...])).reshape(epi_data[v].shape)
                else:
                    epi_data[v] = img_as_ubyte(lf_data[:,v,...]).reshape(epi_data[v].shape)
            else:
                raise TypeError('Given dtype not supported.')

    progress.finish()

    # Cleanup
    lf_in.close()
    epi_out.close()
Code example #39
File: lf2epi.py Project: manuSrep/DisneyDispPy
def downsample_lightfield(lf_in, lf_out, hdf5_dataset, r_all):
    """
    Reduces the dimension of the input lightfield to the values given.
    Results are stored in a new hdf5 file.

    Parameters
    ----------
    lf_in : string
        The input hdf5 filename (including the directory) of the lightfield.
    lf_out : string
        The output hdf5 filename (including the directory) of the lightfield
        in all resolutions.
    hdf5_dataset: string
        The container name inside the hdf5 file for the lightfield. The same
        name will be used for the new file.
    r_all: array_like
        All resolutions to create. Each entry is a tuple (u,v) of resolutions.
    """

    # Initialize the hdf5 file objects
    lf_in = prepareLoading(lf_in)
    lf_out = prepareSaving(lf_out, extension=".hdf5")

    lf_in = h5py.File(lf_in, 'r')
    lf_out = h5py.File(lf_out, 'w')

    data_in = lf_in[hdf5_dataset]
    # Find out what data we have
    if len(data_in.shape) == 4 and data_in.shape[-1] == 3:
        RGB = True
    elif len(data_in.shape) == 3:
        RGB = False
    else:
        raise TypeError('The given lightfield contains neither gray nor RGB images!')

    # Which dtype should be used?
    if data_in.dtype == np.float64:
        DTYPE = np.float64
    elif data_in.dtype == np.uint8:
        DTYPE = np.uint8
    elif data_in.dtype == np.uint16:
        DTYPE = np.uint16
    else:
        raise TypeError('The given data type is not supported!')


    # We need to store all resolutions
    grp_out = lf_out.create_group(hdf5_dataset)
    grp_out.attrs.create('resolutions', r_all)

    # Initialize a progress bar to follow the downsampling
    widgets = ['Downscale lightfield: ', Percentage(), ' ', Bar(),' ', ETA(), ' ']
    progress = ProgressBar(widgets=widgets, max_val=r_all.shape[0]).start()

    for r,res in enumerate(r_all):

        if RGB:
            data_out = grp_out.create_dataset(str(res[0]) + 'x' + str(res[1]), shape=(data_in.shape[0], res[0], res[1], data_in.shape[3]), dtype=data_in.dtype)
        else:
            data_out = grp_out.create_dataset(str(res[0]) + 'x' + str(res[1]), shape=(data_in.shape[0], res[0], res[1]), dtype=data_in.dtype)

        if r == 0: # at lowest resolution we take the original image
            for s in range(data_in.shape[0]):
                data_out[s] = img_as_float(data_in[s])
        else: # we smooth the input data
            data_prior = grp_out[str(r_all[r-1][0]) + 'x' + str(r_all[r-1][1])]
            for s in range(data_in.shape[0]):
                data_smoothed = img_as_float(gaussian(data_prior[s], sigma=np.sqrt(0.5), multichannel=True))
                if DTYPE is np.float64:
                    data_out[s] = img_as_float(resize(data_smoothed, (res[0], res[1])))
                elif DTYPE is np.uint16:
                    data_out[s] = img_as_uint(resize(data_smoothed, (res[0], res[1])))
                else:
                    data_out[s] = img_as_ubyte(resize(data_smoothed, (res[0], res[1])))
        
        progress.update(r)
    progress.finish()

    # Cleanup
    lf_in.close()
    lf_out.close()