Code Example #1
def blend(img1, img2, mask, depth=4):
    gaussian1 = [img1]
    for i in range(depth):
        gaussian1.append(
            pyramid_reduce(gaussian1[i], multichannel=True, sigma=2))

    gaussian2 = [img2]
    for i in range(depth):
        gaussian2.append(
            pyramid_reduce(gaussian2[i], multichannel=True, sigma=2))

    mask = gaussian(mask, multichannel=True, sigma=20)
    mask_pyramid = [mask]
    for i in range(depth):
        mask_pyramid.append(
            pyramid_reduce(mask_pyramid[i], multichannel=True, sigma=10))

    reconstructed1 = [gaussian1[-1]]
    for i in range(0, len(gaussian1) - 1):
        reconstructed1.append(
            pyramid_expand(reconstructed1[i], multichannel=True))
    reconstructed1.reverse()

    reconstructed2 = [gaussian2[-1]]
    for i in range(0, len(gaussian2) - 1):
        reconstructed2.append(
            pyramid_expand(reconstructed2[i], multichannel=True))
    reconstructed2.reverse()

    laplacian1 = []
    for i in range(depth + 1):
        laplacian1.append(gaussian1[i] - reconstructed1[i])

    laplacian2 = []
    for i in range(depth + 1):
        laplacian2.append(gaussian2[i] - reconstructed2[i])

    assert len(gaussian1) == len(gaussian2) == len(mask_pyramid) == len(
        laplacian1) == len(laplacian2)

    blended_pyramid = []
    for i in range(len(mask_pyramid) - 1, -1, -1):
        blended_pyramid.append((laplacian1[i] * mask_pyramid[i]) +
                               ((1 - mask_pyramid[i]) * laplacian2[i]))

    first = (mask_pyramid[-1] * reconstructed1[-1]) + (
        (1 - mask_pyramid[-1]) * reconstructed2[-1])
    final = blended_pyramid[0] + first
    for i in range(1, depth + 1):
        final = pyramid_expand(final, multichannel=True, sigma=2)
        final = final + blended_pyramid[i]

    return final
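
A minimal usage sketch for blend() with hypothetical random inputs; it assumes scikit-image <= 0.18 (where pyramid_reduce, pyramid_expand, and gaussian still accept the multichannel keyword) and side lengths divisible by 2**depth:

import numpy as np
from skimage.filters import gaussian
from skimage.transform import pyramid_reduce, pyramid_expand

img1 = np.random.rand(256, 256, 3)  # hypothetical float RGB inputs
img2 = np.random.rand(256, 256, 3)
mask = np.zeros((256, 256, 3))
mask[:, 128:] = 1.0                 # take the right half from img1

out = blend(img1, img2, mask, depth=4)
print(out.shape)                    # (256, 256, 3)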
Code Example #2
def get_sample(gr, x, y, s):
    img = gr.ReadAsArray(x,y,s,s)
    img = img.swapaxes(0,2).swapaxes(0,1)
    if s != 256:
        return (pyramid_reduce(img, downscale=s / 256) * 255).astype(np.uint8)
    else:
        return img
Code Example #3
def scale_images(input_folder = '../train-256', output_folder = '../train-128', pixel_size = 128.):
    input_list = set([file for file in os.listdir(input_folder) if file.endswith('.jpeg')])
    output_list = set([file for file in os.listdir(output_folder) if file.endswith('.jpeg')])
    files_to_process = list(input_list - output_list)
    
    for fn in files_to_process:        
        original_image = io.imread(input_folder + '/' + fn)    
        scale = original_image.shape[0] / pixel_size
        if scale > 1:
            output_image = transform.pyramid_reduce(original_image, downscale=scale)
        elif scale < 1:
            output_image = transform.pyramid_expand(original_image, upscale=1/scale)
        else:
            output_image = original_image

        if output_image.shape != (pixel_size, pixel_size):
            x = int(pixel_size) - output_image.shape[0]
            y = int(pixel_size) - output_image.shape[1]
            if x < 0:
                output_image = output_image[:x, :]
            if y < 0:
                output_image = output_image[:, :y]
    
        output_image = (output_image - output_image.min()) / output_image.max()
    
        try:
            io.imsave(output_folder + '/' + fn, output_image)
        except ValueError:
            print(fn + ' not saved!')
Code Example #4
def load_and_preprocess(filename, new_shape=None, channels="RGB",
    downsample=None, crop=None):
    '''
    Load image and do basic preprocessing.
        - resize image to a specified shape;
        - subtract ImageNet mean;
        - make sure output image data is 0...255 uint8.
    '''
    img = imread(filename) # RGB image
    if downsample is not None:
        img = pyramid_reduce(img, downscale=downsample)
    if img.max() <= 1.0:
        img = img * 255.0 / img.max()
    if crop is not None:
        i = np.random.randint(crop//2, img.shape[0]-crop//2)
        j = np.random.randint(crop//2, img.shape[1]-crop//2)
        img = img[(i-crop//2):(i+crop//2),(j-crop//2):(j+crop//2)]
    if new_shape is not None:
        img = resize(img, new_shape, preserve_range=True)
    # imagenet_mean_bgr = np.array([103.939, 116.779, 123.68])
    imagenet_mean_rgb = np.array([123.68, 116.779, 103.939])
    for i in range(3):
        img[:,:,i] = img[:,:,i] - imagenet_mean_rgb[i]
    # for VGG networks pre-trained on ImageNet, channels are BGR
    # (ports from Caffe)
    if channels == "BGR":
        img = img[:, :, [2, 1, 0]]  # swap channels from RGB to BGR
    return img.astype(np.uint8)  # note: mean subtraction can leave negatives, which wrap when cast to uint8
Code Example #5
File: formula.py Project: sciencywolf/HiRES
def get_square(image, square_size):
    height, width = image.shape
    if (height > width):
        differ = height
    else:
        differ = width
    differ += 2

    # square filler
    mask = np.zeros((differ, differ), dtype="uint8")

    x_pos = int((differ - width) / 2)
    y_pos = int((differ - height) / 2)

    # center image inside the square
    mask[y_pos:y_pos + height, x_pos:x_pos + width] = image[0:height, 0:width]

    # downscale if needed
    if differ / square_size > 1:
        mask = (255 * pyramid_reduce(
            mask, differ / square_size, multichannel=False)).astype(np.uint8)
    else:
        mask = cv2.resize(mask, (square_size, square_size),
                          interpolation=cv2.INTER_AREA)
    return mask
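
A usage sketch under assumed inputs: a hypothetical grayscale uint8 crop, with pyramid_reduce and cv2 imported as the function expects (scikit-image <= 0.18 for the multichannel keyword):

import numpy as np

patch = (np.random.rand(37, 61) * 255).astype("uint8")  # hypothetical crop
square = get_square(patch, square_size=28)
print(square.shape, square.dtype)  # (28, 28) uint8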
Code Example #6
File: _optical_flow_utils.py Project: fmg30/diss
def get_pyramid(I, downscale=2.0, nlevel=10, min_size=16):
    """Construct image pyramid.

    Parameters
    ----------
    I : ndarray
        The image to be preprocessed (Gray scale or RGB).
    downscale : float
        The pyramid downscale factor.
    nlevel : int
        The maximum number of pyramid levels.
    min_size : int
        The minimum size for any dimension of the pyramid levels.

    Returns
    -------
    pyramid : list[ndarray]
        The coarse to fine images pyramid.

    """

    pyramid = [I]
    size = min(I.shape)
    count = 1

    while (count < nlevel) and (size > downscale * min_size):
        J = pyramid_reduce(pyramid[-1], downscale, multichannel=False)
        pyramid.append(J)
        size = min(J.shape)
        count += 1

    return pyramid[::-1]
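
A short usage sketch with a hypothetical grayscale float frame; the multichannel keyword above implies scikit-image <= 0.18:

import numpy as np

I = np.random.rand(480, 640)  # hypothetical grayscale frame
pyramid = get_pyramid(I, downscale=2.0, nlevel=10, min_size=16)
for level in pyramid:
    print(level.shape)  # (30, 40), (60, 80), ..., (480, 640): coarse to fine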
Code Example #7
    def get_image_at_scale(self,
                           image,
                           scale,
                           num_scales=10,
                           coarsest_scale=0.1):
        im = image.copy()
        if num_scales > 1:
            downscale = coarsest_scale**(-1.0 / (num_scales - 1))
            for s in range(scale):
                im = pyramid_reduce(
                    im, downscale,
                    multichannel=True) if im.ndim == 3 else pyramid_reduce(
                        im, downscale, multichannel=False)
        if im.ndim == 3:
            return np.moveaxis(im, -1, 0)
        else:
            return im
Code Example #8
File: transforms.py Project: vishalbelsare/deepscm
def _get_disk(radius: int, scale: int):
    mag_radius = scale * radius
    mag_disk = morphology.disk(mag_radius, dtype=np.float64)
    disk = transform.pyramid_reduce(mag_disk,
                                    downscale=scale,
                                    order=1,
                                    multichannel=False)
    return disk
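
A quick sketch of the supersample-then-reduce idea above: rasterizing the disk at scale times the radius and reducing it back yields fractional, anti-aliased edge values (assumes scikit-image <= 0.18):

import numpy as np
from skimage import morphology, transform

disk = _get_disk(radius=5, scale=4)          # a 41x41 disk reduced 4x
print(disk.shape)                            # (11, 11)
print(float(disk.min()), float(disk.max()))  # edge values lie between 0 and 1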
Code Example #9
def getRandom_sample(g_raster):
    x, y, s = random_loc_size(g_raster.RasterYSize, g_raster.RasterXSize)
    img = g_raster.ReadAsArray(y,x,s,s)
    img = img.swapaxes(0,2).swapaxes(0,1)
    if s != 256:
        return (pyramid_reduce(img, downscale=s / 256) * 255).astype(np.uint8)
    else:
        return img
Code Example #10
    def __prepare_image(image):
        """Prepare image for comparison"""
        image = rgb2gray(image)
        image = adjust_gamma(image)
        image = pyramid_reduce(image, downscale=8)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            image = img_as_ubyte(image)
        return image
Code Example #11
File: rolling_ball.py Project: acorbat/img_manager
def subtract_rolling_ball(image, radius):
    """Subtracts background from image using the Rolling Ball algorithm."""
    subtract = SubtractBall(radius)
    new_radius = subtract.ball.width
    small_image = pyramid_reduce(image, downscale=subtract.ball.shrink_factor)
    background = threshold_local(small_image,
                                 new_radius,
                                 method='generic',
                                 param=subtract.bg)
    background = resize(background, image.shape)
    return image - background
Code Example #12
    def load_video(self, path, to_grey=True, downscale=2):

        videodata = vread(path)
        imgs = []
        for img in tqdm(videodata):
            if to_grey:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            if downscale:
                img = pyramid_reduce(img, downscale=downscale)
            imgs.append(img)
        imgs = np.asarray(imgs)
        return imgs
Code Example #13
def generate_GP(image_copy, gp_level):

    gp_image = [image_copy]
    for i in range(gp_level):

        image_copy = pyramid_reduce(image_copy,
                                    downscale=2,
                                    multichannel=True,
                                    sigma=3)
        gp_image.append(image_copy)

    return gp_image
Code Example #14
def render(triangles, img, color_mode, fill_mode):
    """
    Generates samples points for triangulation of a given image.

    Parameters
    ----------
    triangles : np.array
        The delaunay triangulation of the image
    img : np.array
        The image to create a low-polygon approximation.
    """
    t0 = time.perf_counter()
    low_poly = np.empty(shape=(2 * img.shape[0], 2 * img.shape[1],
                               img.shape[2]),
                        dtype=np.uint8)

    for triangle in triangles:
        if fill_mode == 'wire':
            rr, cc = polygon_perimeter(2 * triangle[:, 0], 2 * triangle[:, 1],
                                       low_poly.shape)
        elif fill_mode == 'solid':
            rr, cc = polygon(2 * triangle[:, 0], 2 * triangle[:, 1],
                             low_poly.shape)

        if color_mode == 'centroid':
            centroid = np.mean(triangle, axis=0, dtype=np.int32)
            color = img[tuple(centroid)]
        elif color_mode == 'mean':
            color = np.mean(img[polygon(triangle[:, 0], triangle[:, 1],
                                        img.shape)],
                            axis=0)

        low_poly[rr, cc] = color
    t1 = time.perf_counter()
    if args.time:
        print(f"Render timer: {round(t1 - t0, 3)} seconds.")

    fig, (ax1, ax2) = plt.subplots(nrows=1,
                                   ncols=2,
                                   figsize=(8, 3),
                                   sharex=True,
                                   sharey=True)
    ax1.imshow(img)
    ax1.axis('off')
    low_poly = pyramid_reduce(low_poly, multichannel=True)
    ax2.imshow(low_poly)
    ax2.axis('off')
    fig.tight_layout()
    # plt.show()

    if args.save:
        name = args.save_name if args.save_name is not None else f"{args.img.replace('.jpg', '')}_tri.png"
        imsave(name, low_poly)
Code Example #15
    def get_mask_at_scale(self,
                          image,
                          scale,
                          num_scales=10,
                          coarsest_scale=0.1,
                          mask_threshold=0.1):
        im = image.copy()
        if num_scales > 1:
            downscale = coarsest_scale**(-1.0 / (num_scales - 1))
            for s in range(scale):
                im = pyramid_reduce(im, downscale,
                                    multichannel=False) > mask_threshold
        return im
Code Example #16
def subsample(X):
    """Downsample ndarray(seconds, channels, length, width) to
    ndarray(seconds, channels, length/2, width/2)."""

    seconds = X.shape[0]
    channels = X.shape[1]
    length = X.shape[2]
    width = X.shape[3]
    Xout = np.zeros((seconds, channels, length // 2, width // 2))
    for i in range(X.shape[0]):
        j = 0
        while j < X.shape[1]:
            # reduce both spatial axes, three channels at a time
            Xout[i, j:j + 3, :, :] = transform.pyramid_reduce(
                transform.pyramid_reduce(
                    X[i, j:j + 3, :, :].transpose(0, 2, 1)).transpose((0, 2, 1))
            )
            j += 3
    return Xout
Code Example #17
def one_level_laplacian(img):
    # work on a copy of the input image
    A = img.copy()

    # Downsample blurred A
    small_A = pyramid_reduce(A, downscale=2, multichannel=True)

    # Upsample small, blurred A
    # insert zeros between pixels, then apply a gaussian low pass filter
    upsampled_A = pyramid_expand(small_A, upscale=2, multichannel=True)

    # generate Laplacian level for A
    laplace_A = A - upsampled_A

    return small_A, upsampled_A, laplace_A
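
A sanity-check sketch: by construction, the Laplacian level plus the upsampled image reconstructs the input (hypothetical even-sized float RGB input; scikit-image <= 0.18 for the multichannel keyword):

import numpy as np
from skimage.transform import pyramid_reduce, pyramid_expand

img = np.random.rand(128, 128, 3)
small, up, lap = one_level_laplacian(img)
print(small.shape, up.shape)       # (64, 64, 3) (128, 128, 3)
print(np.allclose(img, up + lap))  # True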
Code Example #18
    def downscale(self, image) -> np.ndarray:
        """Convenience method to map an image in the hi-res scale down to the original MNIST format.

        Parameters
        ----------
        image : (scale*H, scale*W) array_like
            High-resolution input image.

        Returns
        -------
        (H, W) numpy.ndarray
            Low-resolution `uint8` image.
        """
        image = np.asarray(image)
        down_img = transform.pyramid_reduce(image, downscale=self.scale, order=3)  # type: np.ndarray
        return (255. * down_img).astype(np.uint8)
Code Example #19
def test_prior_boxes_1():
    voc2012 = VOC2012()
    image, boxes = voc2012[random.randrange(len(voc2012))]
    image, boxes = image_padding(image, boxes)
    print('num boxes: {}'.format(len(boxes)))

    limit = {
        0: {
            'min': 0,
            'max': 64
        },
        1: {
            'min': 64,
            'max': 256
        },
        2: {
            'min': 256,
            'max': 512
        }
    }
    for layer in range(3):
        # switch plot
        fig, ax = plt.subplots()
        ax.imshow(image)

        #
        scale = pow(4, layer)

        # loop boxes
        for top, bottom, left, right, name in boxes:
            height, width = bottom - top, right - left
            mid_x, mid_y = (left + right) // 2, (top + bottom) // 2

            if limit[layer]['min'] <= max(height, width) < limit[layer]['max']:
                # prior box in current feature map
                rect = patches.Rectangle((left // scale, top // scale),
                                         width // scale,
                                         height // scale,
                                         edgecolor='red',
                                         linewidth=2,
                                         fill=False)
                ax.add_patch(rect)

        #
        image = transform.pyramid_reduce(image, downscale=4)

    plt.show()
Code Example #20
File: data.py Project: GALI472/deepdecoder
def weight_pyramid(generator, weights=[1, 1, 1]):
    nb_layers = len(weights) - 1
    for batch in generator:
        batch_merged = []
        for img in batch:
            img = img[0]
            lap_pyr = []
            prev = img
            for i in range(nb_layers):
                gauss = pyramid_reduce(prev)
                lap_pyr.append(prev - pyramid_expand(gauss))
                prev = gauss

            merged = gauss*weights[0]
            for i, lap in enumerate(reversed(lap_pyr)):
                merged = pyramid_expand(merged) + weights[i+1]*lap
            batch_merged.append(merged)
        yield np.stack(batch_merged).reshape(batch.shape)
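
A hedged usage sketch with a dummy generator of (N, 1, H, W) float batches, matching the img[0] indexing above; assumes an older scikit-image whose pyramid_reduce treats 2-D input as plain grayscale by default:

import numpy as np

def dummy_batches():  # hypothetical stand-in for the real data generator
    while True:
        yield np.random.rand(4, 1, 64, 64)

gen = weight_pyramid(dummy_batches(), weights=[1, 1, 2])  # boost the finest band
batch = next(gen)
print(batch.shape)  # (4, 1, 64, 64)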
Code Example #21
def spectral_cluster(inp_image,
                     n_clusters=int(config['SPECTRAL']['N_CLUSTERS'])):
    original_shape = inp_image.shape
    downsampled_img = pyramid_reduce(inp_image, 3)
    shape = downsampled_img.shape
    downsampled_img = downsampled_img.reshape(shape[0] * shape[1], shape[2])
    sp = SpectralClustering(n_clusters=n_clusters,
                            eigen_solver=config['SPECTRAL']['EIGEN_SOLVER'],
                            affinity=config["SPECTRAL"]["AFFINITY"])
    sp.fit_predict(downsampled_img)
    clust = sp.labels_
    clust = clust.reshape(shape[0], shape[1])
    # Perform k-means to regenerate clusters after the resize; the original segmentation remains intact.
    clust = k_means_clustering(
        n_clusters,
        resize(clust,
               (original_shape[:-1])).reshape((original_shape[:-1]) + (1, )))
    return clust
Code Example #22
def preprocess_images(input_folder = '../train', output_folder = '../train-256', pixel_size = 256.):
    input_list = set([file for file in os.listdir(input_folder) if file.endswith('.jpeg')])
    output_list = set([file for file in os.listdir(output_folder) if file.endswith('.jpeg')])
    files_to_process = list(input_list - output_list)
    
    for fn in files_to_process:
        original_image = io.imread(input_folder + '/' + fn)
        shape = original_image.shape[1] - original_image.shape[0]
        if shape < 0:
            shape = -shape
            if shape % 2 == 0:
                shape += 1
            kernel = np.ones((shape, 1))
        else:
            if shape % 2 == 0:
                shape += 1
            kernel = np.ones((1, shape))
        output_image = remove_padding(original_image, kernel)
        grayed_image = color.rgb2gray(output_image)
        blurred_image = gaussian_filter(grayed_image, sigma=6, multichannel=False)
        difference_image = equalize_hist(grayed_image - blurred_image)

        scale = difference_image.shape[0] / pixel_size
        if scale > 1:
            output_image = transform.pyramid_reduce(difference_image, downscale=scale)
        elif scale < 1:
            output_image = transform.pyramid_expand(difference_image, upscale=1/scale)
        else:
            output_image = difference_image

        if output_image.shape != (pixel_size, pixel_size):
            x = int(pixel_size) - output_image.shape[0]
            y = int(pixel_size) - output_image.shape[1]
            if x < 0:
                output_image = output_image[:x, :]
            if y < 0:
                output_image = output_image[:, :y]
    
        output_image = (output_image - output_image.min()) / output_image.max()
        
        try:
            io.imsave(output_folder + '/' + fn, output_image)
        except ValueError:
            print(fn + ' not saved!')
Code Example #23
def convert_to_square(image, size, retain_aspect_ratio=False):
    """
    Resizes an image while maintaing aspect ratio

    image: image as numpy array
    size: resize size for square
    retain_aspect_ratio: if true, it will try to resize the image while maintaining it's aspect ratio.

    Note: retain_aspect_ratio is not playing well with image normalization and seems to alter the original image quite a bit
    """

    mask = None

    if retain_aspect_ratio:

        height, width = image.shape
        if (height > width):
            differ = height
        else:
            differ = width
        differ += 4

        # square filler
        background_filler = image[0][0]
        mask = np.zeros((differ, differ), dtype="uint8")
        mask.fill(background_filler)

        x_pos = int((differ - width) / 2)
        y_pos = int((differ - height) / 2)

        # center image inside the square
        mask[y_pos:y_pos + height, x_pos:x_pos + width] = image[0:height,
                                                                0:width]

        # downscale if needed
        # note: pyramid_reduce is normalizing the image by default
        if differ / size > 1:
            mask = pyramid_reduce(mask, differ / size)
            mask = np.round(mask, 1)
    else:
        mask = image

    return cv2.resize(mask, (size, size), interpolation=cv2.INTER_AREA)
Code Example #24
File: morpho.py Project: dccastro/Morpho-MNIST
    def downscale(self, image) -> np.ndarray:
        """Convenience method to map an image in the hi-res scale down to the original MNIST format.

        Parameters
        ----------
        image : (scale*H, scale*W) array_like
            High-resolution input image.

        Returns
        -------
        (H, W) numpy.ndarray
            Low-resolution `uint8` image.
        """
        image = np.asarray(image)
        if self.scale > 1:
            down_img = transform.pyramid_reduce(image, downscale=self.scale, order=3)  # type: np.ndarray
        else:
            down_img = image
        return (255. * down_img).astype(np.uint8)
Code Example #25
File: process.py Project: taroorat/triangler
def process(
    img: Union[ndarray, str],
    coloring: ColorMethod,
    sampling: SampleMethod,
    edging: EdgeMethod,
    points: int,
    blur: int,
    reduce: bool,
) -> np.array:
    if isinstance(img, str):
        img = imread(img)
    sample_points: ndarray = EdgePoints(img, points, edging).get_edge_points(
        sampling=sampling, blur=blur,
    )
    triangulated: Delaunay = Delaunay(sample_points)

    # noinspection PyUnresolvedReferences
    triangles = sample_points[triangulated.simplices]

    res = np.empty(
        shape=(2 * img.shape[0], 2 * img.shape[1], img.shape[2]), dtype=np.uint8
    )

    if coloring is ColorMethod.CENTROID:
        for triangle in triangles:
            i, j = polygon(2 * triangle[:, 0], 2 * triangle[:, 1], res.shape)
            res[i, j] = img[tuple(np.mean(triangle, axis=0, dtype=np.int32))]
    elif coloring is ColorMethod.MEAN:
        for triangle in triangles:
            i, j = polygon(2 * triangle[:, 0], 2 * triangle[:, 1], res.shape)
            res[i, j] = np.mean(
                img[polygon(triangle[:, 0], triangle[:, 1], img.shape)], axis=0
            )
    else:
        raise ValueError(
            "Unexpected coloring method: {}\n"
            "use {} instead: {}".format(
                coloring, ColorMethod.__name__, ColorMethod.__members__
            )
        )

    return pyramid_reduce(res, multichannel=True) if reduce else res
Code Example #26
def get_square(image, square_size):
    height, width = image.shape
    if (height > width):
        differ = height
    else:
        differ = width
    differ += 4

    mask = np.zeros((differ, differ), dtype="uint8")

    x_pos = int((differ - width) / 2)
    y_pos = int((differ - height) / 2)

    mask[y_pos:y_pos + height, x_pos:x_pos + width] = image[0:height, 0:width]

    if differ / square_size > 1:
        mask = pyramid_reduce(mask, differ / square_size)
    else:
        mask = cv2.resize(mask, (square_size, square_size), interpolation=cv2.INTER_AREA)

    return mask
Code Example #27
def resize_cyx_image(image, new_size):
    """
    This function resizes a CYX image.

    :param image: CYX ndarray
    :param new_size: tuple of shape of desired image dimensions in CYX
    :return: image with shape of new_size
    """
    scaling = float(image.shape[1]) / float(new_size[1])
    # get the shape of the image that is resized by the scaling factor
    test_shape = np.ceil(np.divide(image.shape, [1, scaling, scaling]))
    # sometimes the scaling can be rounded incorrectly and scale the image to
    # one pixel too high or too low
    if not np.array_equal(test_shape, new_size):
        # getting the scaling from the other dimension solves this rounding problem
        scaling = float(image.shape[2]) / float(new_size[2])
        test_shape = np.ceil(np.divide(image.shape, [1, scaling, scaling]))
        # if neither scaling factors achieve the desired shape, then the aspect ratio of the image
        # is different than the aspect ratio of new_size
        if not np.array_equal(test_shape, new_size):
            raise ValueError(
                "This image does not have the same aspect ratio as new_size")

    image = image.transpose((2, 1, 0))

    # im_out = t.resize(image, new_size)

    if scaling < 1:
        scaling = 1.0 / scaling
        im_out = t.pyramid_expand(image, upscale=scaling)
    elif scaling > 1:
        im_out = t.pyramid_reduce(image, downscale=scaling)
    else:
        im_out = image

    im_out = im_out.transpose((2, 1, 0))
    assert im_out.shape == new_size

    return im_out
Code Example #28
def spectral_cluster(inp_image, n_clusters=2):
    raise NotImplementedError("SPECTRAL CLUSTERING NOT WORKING!!!!")
    original_shape = inp_image.shape
    downsampled_img = pyramid_reduce(inp_image, 3)
    shape = downsampled_img.shape
    downsampled_img = downsampled_img.reshape(shape[0] * shape[1], 1)
    EIGEN_SOLVER = "arpack"
    AFFINITY = "nearest_neighbors"
    #N_CLUSTERS=5
    #WINDOW_SIZE=2

    sp = SpectralClustering(n_clusters=n_clusters,
                            eigen_solver=EIGEN_SOLVER,
                            affinity=AFFINITY)
    sp.fit_predict(downsampled_img)
    clust = sp.labels_
    clust = clust.reshape(shape[0], shape[1])
    # Perform k-means to regenerate clusters after the resize; the original segmentation remains intact.
    clust = k_means_clustering(
        resize(clust, (original_shape[-1])).reshape(shape[0], shape[1]),
        n_clusters)
    return computeClusterMaxoids(inp_image, clust, n_clusters)
Code Example #29
def process_img(img):

    s = img.shape

    # downsample
    downscale = s[1] // 100
    if downscale > 1:
        thumb = transform.pyramid_reduce(img, downscale=downscale)
        thumb = 255 * thumb
        thumb = thumb.astype('uint8')
    else:
        thumb = img.astype('uint8')

    # slic
    labels = segmentation.slic(thumb, compactness=.1, n_segments=2)

    # predict tail segment
    tail_mask = predict_tail2(labels)

    # upsample mask
    tail_mask_big = transform.resize(255 * tail_mask,
                                     output_shape=(s[0], s[1]),
                                     order=0,
                                     mode='edge')  #refine sizing
    tail_mask_big = skimage.img_as_ubyte(tail_mask_big)

    # dilate tail mask
    tail_mask_big = morphology.binary_dilation(
        tail_mask_big, selem=morphology.disk(radius=downscale))

    # generate full res tail
    alpha = np.expand_dims(255 * tail_mask_big, axis=2)
    img = np.concatenate((img, alpha), axis=2)
    img = skimage.img_as_ubyte(img)

    # returns full-resolution img with an added alpha channel
    return img
Code Example #30
File: preprocess.py Project: sunwoo0119/Study
eval_list = np.loadtxt(os.path.join(base_path, 'list_eval_partition.csv'),
                       dtype=str,
                       delimiter=',',
                       skiprows=1)
#%%
img_sample = cv2.imread(os.path.join(img_base_path, 
                                     eval_list[0][0]))

h, w, _ = img_sample.shape

# crop to a square image
crop_sample = img_sample[int((h-w)/2):int(-(h-w)/2), :]

# downscale the image by a factor of 4 and normalize
resized_sample = pyramid_reduce(crop_sample, 
                                downscale=4,
                                multichannel=True)  # allow color channels

pad = int((crop_sample.shape[0] - resized_sample.shape[0]) / 2)

padded_sample = cv2.copyMakeBorder(resized_sample,
                                   top=pad,
                                   bottom=pad,
                                   left=pad,
                                   right=pad,
                                   borderType=cv2.BORDER_CONSTANT,
                                   value=(0,0,0))

print(crop_sample.shape, padded_sample.shape)

fig, ax = plt.subplots(1,4,figsize=(12,5))
Code Example #31
File: example.py Project: halflings/imagepipe
    def process(self):
        super(Resize, self).process()
        self.resized_image.value = transform.pyramid_reduce(self.original_image.value, downscale=2)
Code Example #32
def test_pyramid_reduce():
    rows, cols, dim = image.shape
    out = pyramid_reduce(image, downscale=2)
    assert_array_equal(out.shape, (rows / 2, cols / 2, dim))
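
The test references a module-level image; a plausible fixture, labeled hypothetical here (scikit-image's own test module defines its own sample image), would be:

import numpy as np
from numpy.testing import assert_array_equal
from skimage import data
from skimage.transform import pyramid_reduce

image = data.astronaut()  # 512x512 RGB sample image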
Code Example #33
    kernels *= diff
    spread_bkg = spread_bkg_obs

comps = detector.mixture.mixture_components()

#theta = kernels.reshape((kernels.shape[0], -1))

llhs = [[] for i in xrange(detector.num_mixtures)] 

print 'Iterating CAD images'

for cad_i, cad_filename in enumerate(cad_files):
    cad = gv.img.load_image(cad_filename)
    f = cad.shape[0]/size[0]
    if f > 1:
        cad = pyramid_reduce(cad, downscale=f)
    elif f < 1:
        cad = pyramid_expand(cad, upscale=1/f)
    mixcomp = comps[cad_i]

    alpha = cad[...,3]
    gray_cad = gv.img.asgray(cad)
    bkg_img = bkg_generator.next()
    
    # Notice, gray_cad is not multiplied by alpha, since most files use premultiplied alpha 
    composite = gray_cad + bkg_img * (1 - alpha)

    # Get features
    X_full = descriptor.extract_features(composite, settings=dict(spread_radii=radii))

    X = gv.sub.subsample(X_full, subsize)
Code Example #34
    from keras.models import Model
    from keras.layers import Input, Conv2D, Activation
    from keras.optimizers import Adam
    from keras.losses import mean_squared_error
    from keras.datasets import cifar10
    import skimage.io
    from skimage.transform import pyramid_reduce
    import numpy as np

    # Download the dataset, then downscale and pad
    (HDimages, ignore), (test_images, ignore) = cifar10.load_data()

    downscaled = np.zeros((HDimages.shape[0], 16, 16, 3))
    downscaled_test = np.zeros((test_images.shape[0], 16, 16, 3))
    for i, image in enumerate(HDimages):
        downscaled[i, :, :, :] = pyramid_reduce(image, 2)

    for i, image in enumerate(test_images):
        downscaled_test[i, :, :, :] = pyramid_reduce(image, 2)

    pad = 3
    padded = np.zeros((downscaled.shape[0], downscaled.shape[1] + 2 * pad,
                       downscaled.shape[2] + 2 * pad, downscaled.shape[3]))
    padded_test = np.zeros(
        (downscaled_test.shape[0], downscaled_test.shape[1] + 2 * pad,
         downscaled_test.shape[2] + 2 * pad, downscaled_test.shape[3]))
    for i, image in enumerate(downscaled):
        padded[i, pad:-1 * (pad), pad:-1 * (pad), :] = image
    for i, image in enumerate(downscaled_test):
        padded_test[i, pad:-1 * (pad), pad:-1 * (pad), :] = image
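
The fragment stops after preparing the inputs. For orientation, a hypothetical sketch (an assumption, not from the source) of a small super-resolution network that these 22x22 padded images could feed, mapping them to the 32x32 CIFAR-10 targets; layer counts and filter widths are illustrative only:

# Hypothetical continuation (assumption, not from the source fragment).
from keras.models import Model
from keras.layers import Input, Conv2D, Activation, UpSampling2D
from keras.optimizers import Adam

inp = Input(shape=(22, 22, 3))
x = UpSampling2D(size=2)(inp)                # 22 -> 44
for _ in range(6):                           # six 'valid' 3x3 convs: 44 -> 32
    x = Conv2D(64, (3, 3), padding='valid')(x)
    x = Activation('relu')(x)
out = Conv2D(3, (1, 1), padding='valid')(x)  # back to 3 channels
model = Model(inp, out)
model.compile(optimizer=Adam(), loss='mean_squared_error')
# model.fit(padded, HDimages / 255.0, validation_data=(padded_test, test_images / 255.0))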
Code Example #35
img_path = './img2_crop.jpg'
window_size = (120, 100)  # heuristically chosen for this image

img = color.rgb2gray(misc.imread(img_path))
h, w = img.shape

delta_x, delta_y = stride  # `stride` is defined elsewhere in the original script

x_list = range(0, w - window_size[1] + 1, delta_x)
y_list = range(0, h - window_size[0] + 1, delta_y)

lis = []
for w in x_list:
    for h in y_list:
        # downscale value also heuristically chosen
        im = pyramid_reduce(img[h:h + window_size[0], w:w + window_size[1]],
                            downscale=5)
        lis.append(np.array(im[2:21, :19] * 255, dtype=np.uint8))

import pickle
pickle.dump(lis, open("test-windows.pkl", "wb"))
#lis = pickle.load( open( "test-windows.pkl", "rb" ) )

##Finding-windows
k = len(lis) // 10  # no of chunks, jobs
iterator = range(0, len(lis) - k, k)
from joblib import Parallel, delayed
from parr_test import myfunc

pdb.set_trace()
results = Parallel(n_jobs=-1)(delayed(myfunc)(lis[i:i + k]) for i in iterator)
Code Example #36
def save_2d_keypoints_and_images(video_name, video_path, npy_path,
                                 rgb_skeleton_data, frame_time_dict):
    mismatch_count = 0
    ## cap = cv2.VideoCapture(video_path)
    ## assert(cap.isOpened() == True)

    container = av.open(video_path)

    for k, fr in enumerate(container.decode(video=0)):
        assert (k == fr.index)
        ## for k in frame_time_dict.keys():
        nearest_idx, nearest_time = find_nearest_frameindex_from_skeleton_file(
            rgb_skeleton_data[..., 0],
            frame_time_dict[k])  # take column 0 (time) from rgb data
        # print("k (video frame) ", k, "\t time", frame_time_dict[k], "\t nearest_idx from skeleton file", nearest_idx, "\t nearest_time", nearest_time)  # print("k=>", k, nearest_idx, "<= nearest_idx")

        if (abs(frame_time_dict[k] - nearest_time) >
                1000000):  # 100 ns ticks, so 1000000 = 0.1sec
            mismatch_count += 1
            continue  # do not add the nearest found index if the difference is really big (>0.1sec)
        else:
            # print(rgb_skeleton_data[nearest_idx])
            if (np.inf not in rgb_skeleton_data[nearest_idx]
                ):  # do not add if there is np.inf in the line

                ## cap.set(cv2.CAP_PROP_POS_FRAMES, k)
                ## success, frame = cap.read()  # frame is read as (h, w, c)

                success = True  # hard-coded for PyAV
                frame = fr.to_image()
                # converting PIL (<class 'PIL.Image.Image'>) to <class 'numpy.ndarray'>
                img = np.asarray(frame)  # h, w, c

                if success:
                    os.makedirs(os.path.join(npy_path, video_name),
                                exist_ok=True)
                    save_dir = os.path.join(npy_path, video_name)

                    # 1
                    # save image with the original resolution
                    # print("kth frame =", k, frame.shape, "\n")
                    # cv2.imwrite(os.path.join(save_dir, video_name + "_vfr_" + str(k) + "_skfr_" + str(nearest_idx) + '.jpg'), frame)

                    # 2
                    # save downsampled image

                    ## bgr to rgb
                    ## img = frame[...,::-1]
                    img_central = img[:, 240:(1920 - 240), :]
                    # downsample by 4.5
                    img_down = pyramid_reduce(
                        img_central, downscale=4.5)  # better than resize
                    # print("img_down shape (h, w, c)", img_down.shape)  # height, width, channels (rgb)
                    skimage.io.imsave(
                        os.path.join(
                            save_dir, video_name + "_vfr_" + str(k) +
                            "_skfr_" + str(nearest_idx) + "_240x320.jpg"),
                        img_down)

                    # 3
                    # save heatmaps and pafs
                    sk_keypoints_with_tracking_info = rgb_skeleton_data[
                        nearest_idx][1:]  # ignore index 0 (time)
                    sk_keypoints = np.delete(
                        sk_keypoints_with_tracking_info,
                        np.arange(0, sk_keypoints_with_tracking_info.size, 3)
                    )  # this is without tracking info, by removing the tracking info
                    # print("sk_kp shape =", sk_keypoints.shape)  # (38, )

                    # for 20 (actually 19 + background) heatmaps =====================================
                    for kpn in range(sk_keypoints.shape[0] // 2):
                        kpx = sk_keypoints[2 * kpn]
                        kpy = sk_keypoints[2 * kpn + 1]  # print(kpx, kpy)

                        index_array = np.zeros((240 // ground_truth_factor,
                                                320 // ground_truth_factor, 2))
                        for i in range(index_array.shape[0]):
                            for j in range(index_array.shape[1]):
                                index_array[i][j] = [
                                    i, j
                                ]  # height (y), width (x) => index_array[:,:,0] = y pixel coordinate and index_array[:,:,1] = x

                        if kpn == 0:
                            heatmap = get_heatmap(
                                index_array, kpx_kpy_transformer([kpx, kpy])
                            )  # /4 because image is 1080 x 1920 and so are the original pixel locations of the keypoints
                        else:
                            heatmap = np.dstack(
                                (heatmap,
                                 get_heatmap(index_array,
                                             kpx_kpy_transformer([kpx, kpy]))))
                        # print("heatmap.shape =", heatmap.shape)

                    # generate background heatmap
                    maxed_heatmap = np.max(
                        heatmap[:, :, :], axis=2
                    )  # print("maxed_heatmap.shape = ", maxed_heatmap.shape)

                    heatmap = np.dstack((heatmap, 1 - maxed_heatmap))
                    # print("final heatmap.shape =", heatmap.shape)
                    np.save(
                        os.path.join(
                            save_dir, video_name + "_vfr_" + str(k) +
                            "_skfr_" + str(nearest_idx) + "_heatmap30x40.npy"),
                        heatmap)

                    # for 18x2 PAFs =====================================
                    for n, pair in enumerate(paf_pairs_indices):
                        # print("writing paf for index", n, pair)
                        index_array = np.zeros((240 // ground_truth_factor,
                                                320 // ground_truth_factor, 2))
                        for i in range(index_array.shape[0]):
                            for j in range(index_array.shape[1]):
                                index_array[i][j] = [
                                    i, j
                                ]  # height (y), width (x) => index_array[:,:,0] = y pixel coordinate and index_array[:,:,1] = x

                        if n == 0:
                            paf = get_pafx_pafy(
                                index_array,
                                kp0xy=kpx_kpy_transformer([
                                    sk_keypoints[2 * pair[0]],
                                    sk_keypoints[2 * pair[0] + 1]
                                ]),
                                kp1xy=kpx_kpy_transformer([
                                    sk_keypoints[2 * pair[1]],
                                    sk_keypoints[2 * pair[1] + 1]
                                ]))
                        else:
                            paf = np.dstack(
                                (paf,
                                 get_pafx_pafy(
                                     index_array,
                                     kp0xy=kpx_kpy_transformer([
                                         sk_keypoints[2 * pair[0]],
                                         sk_keypoints[2 * pair[0] + 1]
                                     ]),
                                     kp1xy=kpx_kpy_transformer([
                                         sk_keypoints[2 * pair[1]],
                                         sk_keypoints[2 * pair[1] + 1]
                                     ]))))
                        # print("paf.shape =", paf.shape)

                    # print("final paf.shape =========================", paf.shape)
                    np.save(
                        os.path.join(
                            save_dir, video_name + "_vfr_" + str(k) +
                            "_skfr_" + str(nearest_idx) + "_paf30x40.npy"),
                        paf)

                    # 4
                    # save the 2d keypoints of shape (38,)
                    # print(rgb_skeleton_data[nearest_idx])
                    # print(save_dir, os.path.join("", video_name + "_vfr_" + str(k) + "_skfr_" + str(nearest_idx) + '.npy'))
                    np.save(
                        os.path.join(
                            save_dir, video_name + "_vfr_" + str(k) +
                            "_skfr_" + str(nearest_idx) + '.npy'),
                        rgb_skeleton_data[nearest_idx][1:]
                    )  # index 0 is time # saving all 57 values 19 * 3 (tracking, x, y)

    ## cap.release()
    print("mismatch_count =", mismatch_count)
Code Example #37
def _process_file(settings, bkg_stack, bkg_stack_num, fn, mixcomp):
    ag.info("Processing file", fn)
    seed = np.abs(hash(fn) % 123124)
    descriptor_name = settings["detector"]["descriptor"]
    img_size = settings["detector"]["image_size"]
    part_size = settings[descriptor_name]["part_size"]
    psize = settings["detector"]["subsample_size"]

    # The 4 is for the edge border that falls off
    # orig_sh = (img_size[0] - part_size[0] - 4 + 1, img_size[1] - part_size[1] - 4 + 1)
    orig_sh = img_size
    sh = gv.sub.subsample_size(np.ones(orig_sh), psize)

    # We need the descriptor to generate and manipulate images
    descriptor = gv.load_descriptor(settings)

    counts = np.zeros(
        (settings["detector"]["num_mixtures"], sh[0], sh[1], descriptor.num_parts + 1, descriptor.num_parts),
        dtype=np.uint16,
    )

    prnds = [np.random.RandomState(seed + i) for i in xrange(5)]

    # Binarize support and Extract alpha
    # color_img, alpha = gv.img.load_image_binarized_alpha(fn)
    color_img = gv.img.load_image(fn)

    from skimage.transform import pyramid_reduce, pyramid_expand

    f = color_img.shape[0] / settings["detector"]["image_size"][0]
    if f > 1:
        color_img = pyramid_reduce(color_img, downscale=f)
    elif f < 1:
        color_img = pyramid_expand(color_img, upscale=1 / f)

    alpha = color_img[..., 3]
    img = gv.img.asgray(color_img)

    # Resize it
    # TODO: This only looks at the first axis

    assert img.shape == settings["detector"]["image_size"], "Target size not achieved: {0} != {1}".format(
        img.shape, settings["detector"]["image_size"]
    )

    # Settings
    bsettings = settings["edges"].copy()
    radius = bsettings["radius"]
    bsettings["radius"] = 0

    # offsets = gv.sub.subsample_offset_shape(sh, psize)

    # locations0 = xrange(offsets[0], sh[0], psize[0])
    # locations1 = xrange(offsets[1], sh[1], psize[1])
    locations0 = xrange(sh[0])
    locations1 = xrange(sh[1])
    # locations0 = xrange(10-4, 10+5)
    # locations1 = xrange(10-4, 10+5)

    # locations0 = xrange(10, 11)
    # locations1 = xrange(10, 11)

    # padded_theta = descriptor.unspread_parts_padded

    # pad = 10
    pad = 5
    size = settings[descriptor_name]["part_size"]
    X_pad_size = (size[0] + pad * 2, size[1] + pad * 2)

    img_pad = ag.util.zeropad(img, pad)

    alpha_pad = ag.util.zeropad(alpha, pad)

    # Iterate every duplicate

    dups = settings["detector"].get("duplicates", 1)

    bkgs = np.empty(((descriptor.num_parts + 1) * dups,) + X_pad_size)
    # cads = np.empty((descriptor.num_parts,) + X_pad_size)
    # alphas = np.empty((descriptor.num_parts,) + X_pad_size, dtype=np.bool)

    radii = settings["detector"]["spread_radii"]
    psize = settings["detector"]["subsample_size"]
    cb = settings["detector"].get("crop_border")
    sett = dict(spread_radii=radii, subsample_size=psize, crop_border=cb)

    plt.clf()
    plt.imshow(img)
    plt.savefig("output/img.png")

    if 0:
        # NEW{
        totfeats = np.zeros(sh + (descriptor.num_parts,) * 2)
        for f in xrange(descriptor.num_parts):
            num = bkg_stack_num[f]

            for d in xrange(dups):
                feats = np.zeros(sh + (descriptor.num_parts,), dtype=np.uint8)

                for i, j in itr.product(locations0, locations1):
                    x = i * psize[0]
                    y = i * psize[1]

                    bkg_i = prnds[4].randint(num)
                    bkg = bkg_stack[f, bkg_i]

                    selection = [slice(x, x + X_pad_size[0]), slice(y, y + X_pad_size[1])]
                    # X_pad = edges_pad[selection].copy()
                    patch = img_pad[selection]
                    alpha_patch = alpha_pad[selection]

                    # patch = np.expand_dims(patch, 0)
                    # alpha_patch = np.expand_dims(alpha_patch, 0)

                    # TODO: Which one?
                    # img_with_bkg = patch + bkg * (1 - alpha_patch)
                    img_with_bkg = patch * alpha_patch + bkg * (1 - alpha_patch)

                    edges_pads = ag.features.bedges(img_with_bkg, **bsettings)
                    X_pad_spreads = ag.features.bspread(edges_pads, spread=bsettings["spread"], radius=radius)

                    padding = pad - 2
                    X_spreads = X_pad_spreads[padding:-padding:, padding:-padding]

                    partprobs = ag.features.code_parts(
                        X_spreads,
                        descriptor._log_parts,
                        descriptor._log_invparts,
                        descriptor.settings["threshold"],
                        descriptor.settings["patch_frame"],
                    )

                    part = partprobs.argmax()
                    if part > 0:
                        feats[i, j, part - 1] = 1

                # Now spread the parts
                feats = ag.features.bspread(feats, spread="box", radius=2)

                totfeats[:, :, f] += feats

        # }

        kernels = totfeats[:, :, 0].astype(np.float32) / (descriptor.num_parts * dups)

        # Subsample kernels
        sub_kernels = gv.sub.subsample(kernels, psize, skip_first_axis=False)

        np.save("tmp2.npy", sub_kernels)
        print "saved tmp2.npy"
        import sys

        sys.exit(0)

    # ag.info("Iteration {0}/{1}".format(loop+1, num_duplicates))
    # ag.info("Iteration")
    for i, j in itr.product(locations0, locations1):
        x = i * psize[0]
        y = i * psize[1]

        print "processing", i, j
        selection = [slice(x, x + X_pad_size[0]), slice(y, y + X_pad_size[1])]
        # X_pad = edges_pad[selection].copy()
        patch = img_pad[selection]
        alpha_patch = alpha_pad[selection]

        patch = np.expand_dims(patch, 0)
        alpha_patch = np.expand_dims(alpha_patch, 0)

        for f in xrange(descriptor.num_parts + 1):
            num = bkg_stack_num[f]

            for d in xrange(dups):
                bkg_i = prnds[4].randint(num)
                bkgs[f * dups + d] = bkg_stack[f, bkg_i]

        img_with_bkgs = patch * alpha_patch + bkgs * (1 - alpha_patch)

        if 0:
            edges_pads = ag.features.bedges(img_with_bkgs, **bsettings)
            X_pad_spreads = ag.features.bspread(edges_pads, spread=bsettings["spread"], radius=radius)

            padding = pad - 2
            X_spreads = X_pad_spreads[:, padding:-padding:, padding:-padding]

        # partprobs = ag.features.code_parts_many(X_spreads, descriptor._log_parts, descriptor._log_invparts,
        # descriptor.settings['threshold'], descriptor.settings['patch_frame'])

        # parts = partprobs.argmax(axis=-1)

        parts = np.asarray([descriptor.extract_features(im, settings=sett)[0, 0] for im in img_with_bkgs])

        for f in xrange(descriptor.num_parts + 1):
            hist = np.bincount(parts[f * dups : (f + 1) * dups].ravel(), minlength=descriptor.num_parts + 1)
            counts[mixcomp, i, j, f] += hist[1:]

        # import pdb; pdb.set_trace()

        # for f in xrange(descriptor.num_parts):
        #    for d in xrange(dups):
        #        # Code parts
        #        #parts = descriptor.extract_parts(X_spreads[f*dups+d].astype(np.uint8))
    #
    #                f_plus = parts[f*dups+d]
    #                if f_plus > 0:
    # tau = self.settings.get('tau')
    # if self.settings.get('tau'):
    # parts = partprobs.argmax(axis=-1)

    # Accumulate and return
    #                    counts[mixcomp,i,j,f,f_plus-1] += 1#parts[0,0]

    if 0:
        kernels = counts[:, :, :, 0].astype(np.float32) / (descriptor.num_parts * dups)

        import pdb

        pdb.set_trace()

        radii = (2, 2)

        aa_log = np.log(1 - kernels)
        aa_log = ag.util.zeropad(aa_log, (0, radii[0], radii[1], 0))

        integral_aa_log = aa_log.cumsum(1).cumsum(2)

        offsets = gv.sub.subsample_offset(kernels[0], psize)

        if 1:
            # Fix kernels
            istep = 2 * radii[0]
            jstep = 2 * radii[1]
            sh = kernels.shape[1:3]
            for mixcomp in xrange(1):
                # Note, we are going in strides of psize, given a certain offset, since
                # we will be subsampling anyway, so we don't need to do the rest.
                for i in xrange(offsets[0], sh[0], psize[0]):
                    for j in xrange(offsets[1], sh[1], psize[1]):
                        p = gv.img.integrate(integral_aa_log[mixcomp], i, j, i + istep, j + jstep)
                        kernels[mixcomp, i, j] = 1 - np.exp(p)

        # Subsample kernels
        sub_kernels = gv.sub.subsample(kernels, psize, skip_first_axis=True)

        np.save("tmp.npy", sub_kernels)
        print "saved tmp.npy"
        import sys

        sys.exit(0)

    if 0:
        for f in xrange(descriptor.num_parts):

            # Pick only one background for this part and file
            num = bkg_stack_num[f]

            # Assumes num > 0

            bkg_i = prnds[4].randint(num)

            bkgmap = bkg_stack[f, bkg_i]

            # Composite
            img_with_bkg = gv.img.composite(patch, bkgmap, alpha_patch)

            # Retrieve unspread edges (with a given background gray level)
            edges_pad = ag.features.bedges(img_with_bkg, **bsettings)

            # Pad the edges
            # edges_pad = ag.util.zeropad(edges, (pad, pad, 0))

            # Do spreading
            X_pad_spread = ag.features.bspread(edges_pad, spread=bsettings["spread"], radius=radius)

            # De-pad
            padding = pad - 2
            X_spread = X_pad_spread[padding:-padding, padding:-padding]

            # Code parts
            parts = descriptor.extract_parts(X_spread.astype(np.uint8))

            # Accumulate and return
            counts[mixcomp, i, j, f] += parts[0, 0]

    # Translate counts to spread counts (since we're assuming independence of samples within one CAD image)

    return counts
Code Example #38
File: IXM_Montage.py Project: uc-clarklab/arrayer
def montageIXM(plate_path, labelled= True, rescale_intensity=True):
	""""Place into plate folder sorted by wavelength. Run. Will create a montages of
	each wavelength separately and a combined RGB overlay with files ordered according
	to the .HTD metadata file from the IXM which contain indicators of sites selected. 
	Function is set to automatically rescale image intensity unless otherwise specified."""
	# Obtains the working directory of the targeted folder.
	directory = plate_path
	
	# Make a list of all subdirectories (time point folders)
	subdirectories = os.walk('.').next()[1]
	#print subdirectories
	
	# Search for relevant metadata file
	files_in_directory = os.listdir(directory)
	#print files_in_directory
	
	for file in files_in_directory:
		htd_check = re.search('.HTD', file)
		if htd_check:
			metadata_file = file
	if 'metadata_file' in locals():
		print 'Metadata file "' + metadata_file +'" identified. Attempting to read...'
	else:
		print 'Error: Metadata file with .HTD extension is missing.'
		return
	
	# Reads the metadata file to a dictionary consisting of line title
	# and value pairs.
	with open(metadata_file, 'r') as csv_metadata:
		csv_reader = csv.reader(csv_metadata)
		csv_dict = dict()
		
		# Clean up messy formatting of lines read from the proprietary 
		# metadata file. Removes unnecessary spaces and quotations to 
		# put information in readily accessible format.
		for line in csv_reader:
			key = line.pop(0)
			#print line
			line = [item.split(' ',1)[1] for item in line]
			#print line
			i = 0
			for item in line:
				line[i] = re.sub('"|"','', item)
				i += 1
			#print line
			csv_dict.update({key:line})
		key_list = csv_dict.keys()
		#print key_list
	print 'Metadata file read successfully.'
	
	# Determine shape of montage based on metadata
	print 'Determining the shape of your montage based on metadata...'
	def list2string2int(list):
		# Converts a list of a single string to an integer
		list = int(re.sub("'|'",'', str(list)).replace('[','').replace(']',''))
		return list

	# Determine max columns and rows for sites selected
	site_selection_list = []
	for key in key_list:
		#print key
		site_selectorcheck = re.search('SiteSelection\d{1,2}', key)
		if site_selectorcheck:
			print site_selectorcheck.group(), ' identified!'
			site_selection_list.append(csv_dict[key])
			
	#print site_selection_list
	col_selector = []
	row_selector = []
	for bool_list in site_selection_list:
		# Identify number of columns selected
		col_selection_counter = bool_list.count('TRUE')
		col_selector.append(col_selection_counter)
		
		# Identify number of rows selected
		index = 0
		for bool in bool_list:
			if bool == 'TRUE':
				row_selector.append(index)
			index += 1
			
	row_selector_counter = []
	for i in row_selector:
		row_selector_counter.append(row_selector.count(i))
	#print row_selector_counter
	
	columns = max(col_selector)
	rows =  max(row_selector_counter)
	print '(columns, rows):', (columns, rows)
	
	# Determine well setup for plate
	XWells = list2string2int(csv_dict['XWells'])
	YWells = list2string2int(csv_dict['YWells'])
	#print XWells, YWells
	grid_shape = (columns*XWells, rows*YWells)
	print 'Montage grid dimensions:', grid_shape
	
	print 'Searching for image wavelength folders...'
	
	for subdirectory in subdirectories:
		
		# Search file name string for site metadata
		wavecheck = re.search('w\d', subdirectory)
		
		if wavecheck:
			
			print 'Image folder ' + str(wavecheck.group()) + ' identified.'
			
			# Create a list of files in the current subdirectory
			files = os.listdir(directory + '/' + subdirectory)
			#print files
			files = [subdirectory + '/' + file for file in files]
			#print files
			
			total_files = len(files)
			print 'There are ' + str(total_files) + ' image files.'
				
	
	###################################################################################
	# METHOD 5 - NumPy binary files 
	# http://docs.scipy.org/doc/numpy/reference/generated/numpy.save.html#numpy.save
	###################################################################################	
	
	# Issue: Works as well as (or better than!) any other method! Now add feature to correct how the montage is displayed
	# Add method to create RGB composite overlay montage of DAP, FITC, and TexasRed channels
	# Add text labelling functionality for verification using PIL.ImageDraw.Draw.text(xy,text,fill=None,font=None,anchor=None)?
	# e.g. https://code.google.com/p/esutil/source/browse/trunk/esutil/sqlite_util.py
	
			montage = wavecheck.group()
			temp = directory + '/tmp'
			# if os.path.exists(temp):
				# shutil.rmtree(temp)
			# os.mkdir(temp)
			# Rescale intensity of images
			sites_per_well = total_files/(XWells*YWells)
			print 'sites_per_well: ', sites_per_well
			
			
			for i in range(XWells):
				row_id = 0 
				counter = 0
				new_row = True
				print 'Starting well: ', i
									
				for j in range(sites_per_well):
					# print 'counter: ', counter
					# print 'new_row: ', new_row
					image = Image.open(files[j+i*sites_per_well])		
					# print 'File:', j+i*sites_per_well			
					image = np.array(image, dtype=np.int16)
					#print 'Converted to NumPy array.'
					
					# To rescale the intensity of EACH image INDEPENDENTLY, uncomment the line below
					# and comment the other exposure.rescale_intensity(montage_np) after the for loop ends
					#image = exposure.rescale_intensity(image)
					
					image = transform.pyramid_reduce(image, downscale=10)
					#print 'Resizing image now.'
					
					if new_row:
						row_builder = image
						# print 'Row initialized.'
						new_row = False
						
					else:
						row_builder = np.hstack((row_builder, image))
					# print 'row_builder: ' + str(row_builder.shape)
						
					
					
					if counter == rows: # If we reached the end of a row
						# print '(counter, rows):', (counter, rows)
						if row_id == 0: # If the row is the first row
							montage_np = row_builder
							#print 'montage_np: '+ str(montage_np.shape)
							
						else: # If the row is any other row
							#print 'montage_np: '+ str(montage_np.shape)
							# print 'row_builder: ', row_builder.shape
							montage_np = np.vstack((montage_np,row_builder))
						counter = 0
						row_id += 1
						new_row = True
						print('Row %d complete!' % row_id)
						# print row_builder.size
						#test = Image.fromarray(montage_np)
						#test.save(temp+'/Montage_%s_%s.tif' % (montage, row_str))
					else:	
						counter += 1
				print 'Well Montage Dimensions:', montage_np.shape	
				if i == 0:
					well_stack = montage_np
				else:
					well_stack = np.hstack((well_stack, montage_np))
				print 'Overall Montage Dimensions:', well_stack.shape
			
			print('Compiling and rescaling the montage for viewing...')
			
			# To rescale the intensity of the TOTAL MONTAGE all at once, allowing visual comparison of image intensities,
			# leave the line below uncommented and comment the exposure.rescale_intensity(image) in the for loop, above
			well_stack = exposure.rescale_intensity(well_stack)
			
			img = Image.fromarray(well_stack)
			print('Montage %s created.' % montage)
			img.save('Montage_%s.tif' % montage)
			print('Montage %s saved!' % montage)