Example #1
    def im2patches(self, id):
        # extracts all patches from image and saves in destination folder
        ims = dict()
        gts = dict()
        self.patchTensors = dict()
        self.gtTensors = dict()
        y_max = (self.height - self.pSize) // self.stride
        x_max = (self.width - self.pSize) // self.stride
        Y = range(0, y_max * self.stride + 1, self.stride)
        X = range(0, x_max * self.stride + 1, self.stride)
        n_patches = len(Y) * len(X)

        for k in self.sequences.keys():

            image = self.sequences[k]['root'] + '/input/in%06d.jpg' % (
                self.sequences[k]['start'] + id)
            ground_truth = self.sequences[k][
                'root'] + '/groundtruth/gt%06d.png' % (
                    self.sequences[k]['start'] + id)

            # read image and prepare patch tensor
            if self.grayScale:
                ims[k] = misc.imresize(
                    np.array(rgb2gray(mpimg.imread(image)) * 255, np.uint8),
                    [self.height, self.width], 'bilinear')
                patchContainer = np.zeros((1, 2, self.pSize, self.pSize, 1),
                                          np.uint8)

            else:
                ims[k] = misc.imresize(mpimg.imread(image, np.uint8),
                                       [self.height, self.width], 'bilinear')
                patchContainer = np.zeros((1, 2, self.pSize, self.pSize, 3),
                                          np.uint8)

            gtContainer = np.zeros((1, self.pSize, self.pSize), np.uint8)
            # prepare groundtruth to label tensor
            gts[k] = misc.imresize(
                np.array(
                    rgb2gray(mpimg.imread(ground_truth, np.uint8)) * 255,
                    np.uint8), [self.height, self.width], 'bilinear')

            for y in Y:
                for x in X:
                    patchContainer[0, :, :, :, :], gtContainer[
                        0, :, :], inROI = self.createPatchXY(
                            ims[k], gts[k], k, x, y)
                    if inROI:
                        self.patchTensors[k] = np.vstack([
                            self.patchTensors[k], patchContainer
                        ]) if k in self.patchTensors.keys() else patchContainer
                        self.gtTensors[k] = np.vstack([
                            self.gtTensors[k], gtContainer
                        ]) if k in self.gtTensors.keys() else gtContainer
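The patch grid in this method comes straight from the image size, patch size, and stride; a minimal standalone sketch of that arithmetic, with illustrative values that are not taken from the example above:

height, width, pSize, stride = 240, 320, 31, 10   # illustrative values only
y_max = (height - pSize) // stride                # vertical steps that still fit a patch
x_max = (width - pSize) // stride                 # horizontal steps that still fit a patch
Y = range(0, y_max * stride + 1, stride)          # top-left y of every patch
X = range(0, x_max * stride + 1, stride)          # top-left x of every patch
n_patches = len(Y) * len(X)                       # patches per image (609 here)
print(n_patches)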
Example #2
def main():
    stack = load_tif_stack(
        "/home/rhein/workspace/chloroplasts/yfp+2386-tc-1-cytoD_decon_stable_Ch1_yfp.ics movie.tif", limit=None
    )
    stack = np.array([rgb2gray(s) for s in stack])

    print stack.shape, stack.dtype

    for i, im in enumerate(stack):
        lne, _ = scanal(im, 0.5 + 2 ** np.arange(2, 3), 4.0, 0.0, np.ones(2))
        #         lne **= .5
        seg = lne > (lne.mean() + 1.0 * lne.std())
        print i, lne.mean(), lne.std()

        skel = skeleton(seg)

        #         plt.figure(str(i))
        #         plt.subplot(311), plt.imshow(im, 'gray', interpolation='nearest')
        #         plt.subplot(312), plt.imshow(skel, 'gray', interpolation='nearest')
        #         plt.subplot(313), plt.imshow(seg, 'gray', interpolation='nearest')

        seg = (seg != 0).astype(float)
        seg *= 255
        seg = seg.astype(np.uint8)
        bioformats.write_image(
            "/home/rhein/workspace/chloroplasts/seg.tif", seg, bioformats.PT_UINT8, z=i, size_z=len(stack)
        )

        skel = (skel != 0).astype(float)
        skel *= 255
        skel = skel.astype(np.uint8)
        bioformats.write_image(
            "/home/rhein/workspace/chloroplasts/skel.tif", skel, bioformats.PT_UINT8, z=i, size_z=len(stack)
        )
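The segmentation step above keeps pixels whose line-enhancement response lies more than one standard deviation above the mean; a tiny self-contained sketch of that thresholding rule (the random array is only a stand-in for lne):

import numpy as np
lne = np.random.rand(64, 64)                    # stand-in for the filter response
seg = lne > (lne.mean() + 1.0 * lne.std())      # boolean mask of strong responses
print(seg.mean())                               # fraction of pixels kept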
Example #3
    def __init__(self, sequence, patchSize, size, stride, grayScale=True):
        assert (patchSize % 2 != 0), 'patchSize should be odd!'

        self.sequences = sequence  # dictionary of sequences
        self.bg_images_color = dict()
        self.bg_images = dict()
        self.ROI = dict()
        # hack with absolute paths
        self.bg_root = '/usr/home/rez/ZM/BG_Model1'  #TODO
        self.data_root = '/usr/home/rez/ZM/CDNet_Dataset/dataset'  #TODO

        self.pSize = patchSize
        self.grayScale = grayScale
        self.stride = stride
        self.height, self.width = size[0], size[1]
        self.x_max = (self.width - self.pSize) // self.stride
        self.y_max = (self.height - self.pSize) // self.stride

        # generate color and grayscale background images
        for id in self.sequences.keys():
            self.ROI.update({
                id:
                misc.imresize(
                    np.array(
                        rgb2gray(
                            mpimg.imread(
                                self.sequences[id]['root'] + '/ROI.png',
                                np.uint8)) * 255, np.uint8),
                    [self.height, self.width], 'nearest')
            })
Example #4
def execute(outputsize):

    faces_dir_path = "data/train_set/48_48_faces_web_augmented"
    bkgs_dir_path = "data/train_set/48_48_nonfaces_aflw"

    target_path = "data/train_set/13"

    faces_dir = join(target_path, "faces")
    nonfaces_dir = join(target_path, "nonfaces")

    os.makedirs(nonfaces_dir)
    os.makedirs(faces_dir)

    img_faces = [f for f in listdir(faces_dir_path) if isfile(join(faces_dir_path, f)) and f.endswith("png")]
    img_bkgs = [f for f in listdir(bkgs_dir_path) if isfile(join(bkgs_dir_path, f)) and f.endswith("jpg")]

    for i, img_name in enumerate(img_faces):
        img_path = join(faces_dir_path,img_name)
        img = imread(img_path)
        resized_img = resize(img,outputsize)     
        ubyte_img = img_as_ubyte(resized_img)   
        imsave(join(faces_dir,img_name), ubyte_img)
        print "processed "+ img_path
        
    for i, img_name in enumerate(img_bkgs):
        img_path = join(bkgs_dir_path,img_name)
        img = imread(img_path)
        gray_img = rgb2gray(img)  
        resized_img = resize(gray_img,outputsize)    
        ubyte_img = img_as_ubyte(resized_img)            
        imsave(join(nonfaces_dir,img_name), ubyte_img)
        print "processed "+ img_path
Example #5
    def __init__(self, sequence, patchSize, size, stride, grayScale=True):
        assert (patchSize % 2 != 0), 'patchSize should be odd!'

        self.sequences = sequence  # dictionary of sequences
        self.bg_images_color = dict()
        self.bg_images = dict()
        self.ROI = dict()
        self.bg_root = '/usr/home/rez/ZM/CNN/bg_cnn_code/bg_cnn_code/linux/data/bg_data'

        self.pSize = patchSize
        self.grayScale = grayScale
        self.stride = stride
        self.height, self.width = size[0], size[1]
        self.x_max = (self.width - self.pSize) // self.stride
        self.y_max = (self.height - self.pSize) // self.stride

        # generate color and grayscale background images
        for id in self.sequences.keys():
            #color_bg = self.generateBgImage(1,250,id)
            tmp_cat, tmp_vid = self.getCategoryVideoFromId(id)
            color_bg = misc.imresize(
                mpimg.imread(
                    self.bg_root + '/' + tmp_cat + '/' + tmp_vid +
                    '/background.jpg', np.uint8), [
                        self.height,
                        self.width,
                    ], 'bilinear')

            #mpimg.imread(self.sequences[id]['root'] + '/ROI.png', np.uint8)
            self.bg_images_color.update({id: color_bg})
            self.bg_images.update(
                {id: np.array(rgb2gray(color_bg) * 255, np.uint8)})

            self.ROI.update({
                id:
                misc.imresize(
                    np.array(
                        rgb2gray(
                            mpimg.imread(
                                self.sequences[id]['root'] + '/ROI.png',
                                np.uint8)) * 255, np.uint8),
                    [self.height, self.width], 'nearest')
            })
Example #6
def grayspace(src):
    if isinstance(src, ndarray):
        from skimage.color.colorconv import rgb2gray
        dst = rgb2gray(src)
    else:
        if src.channels > 1:
            dst = CreateMat(src.height, src.width, CV_8UC1)
            CvtColor(src, dst, CV_BGR2GRAY)
        else:
            dst = src

    return dst
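A minimal usage sketch for the NumPy branch of grayspace (the OpenCV branch expects a legacy cv image and is not exercised here); the random image is only a placeholder:

import numpy as np
rgb = np.random.rand(64, 64, 3)   # placeholder RGB image with values in [0, 1]
gray = grayspace(rgb)             # ndarray input takes the skimage rgb2gray path
print(gray.shape)                 # (64, 64), float grayscale in [0, 1]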
Example #7
def test_warp_identity():
    img = img_as_float(rgb2gray(astronaut()))
    assert len(img.shape) == 2
    assert np.allclose(img, warp(img, AffineTransform(rotation=0)))
    assert not np.allclose(img, warp(img, AffineTransform(rotation=0.1)))
    rgb_img = np.transpose(np.asarray([img, np.zeros_like(img), img]),
                           (1, 2, 0))
    warped_rgb_img = warp(rgb_img, AffineTransform(rotation=0.1))
    assert np.allclose(rgb_img, warp(rgb_img, AffineTransform(rotation=0)))
    assert not np.allclose(rgb_img, warped_rgb_img)
    # assert no cross-talk between bands
    assert np.all(0 == warped_rgb_img[:, :, 1])
Example #8
def grayspace(src):
    if isinstance(src, ndarray):
        from skimage.color.colorconv import rgb2gray
        dst = rgb2gray(src)
    else:
        if src.channels > 1:
            dst = CreateMat(src.height, src.width, CV_8UC1)
            CvtColor(src, dst, CV_BGR2GRAY)
        else:
            dst = src

    return dst
Example #9
def convert_to_gray():
    bkgs_dir_path = "data/train_set/13/nonfaces"
    target_path = "data/train_set/13/nonfaces_gray"

    os.makedirs(target_path)
    img_bkgs = [f for f in listdir(bkgs_dir_path) if isfile(join(bkgs_dir_path, f)) and f.endswith("jpg")]

    for i, img_name in enumerate(img_bkgs):
        img_path = join(bkgs_dir_path,img_name)
        img = imread(img_path)
        gray_img = rgb2gray(img)
        imsave(join(target_path,img_name), gray_img)
Example #10
def convert_to_gray():
    bkgs_dir_path = "data/train_set/13/nonfaces"
    target_path = "data/train_set/13/nonfaces_gray"

    os.makedirs(target_path)
    img_bkgs = [
        f for f in listdir(bkgs_dir_path)
        if isfile(join(bkgs_dir_path, f)) and f.endswith("jpg")
    ]

    for i, img_name in enumerate(img_bkgs):
        img_path = join(bkgs_dir_path, img_name)
        img = imread(img_path)
        gray_img = rgb2gray(img)
        imsave(join(target_path, img_name), gray_img)
Example #11
def load_image(img, as_gray=True):
    fo = io.BytesIO(img)
    img = skimage.io.call_plugin('imread', fo, plugin='pil', as_gray=as_gray)

    if not hasattr(img, 'ndim'):
        return img

    if img.ndim > 2:
        if img.shape[-1] not in (3, 4) and img.shape[-3] in (3, 4):
            img = np.swapaxes(img, -1, -3)
            img = np.swapaxes(img, -2, -3)

        if as_gray:
            img = rgb2gray(img)
    return img
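The two swapaxes calls above reorder a channels-first array into channels-last before the grayscale conversion; a small standalone sketch of just that reordering (the shape is illustrative):

import numpy as np
img = np.zeros((3, 100, 200))     # channels-first (C, H, W)
img = np.swapaxes(img, -1, -3)    # -> (200, 100, 3)
img = np.swapaxes(img, -2, -3)    # -> (100, 200, 3), i.e. (H, W, C)
print(img.shape)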
Example #12
def execute(outputsize):

    faces_dir_path = "data/train_set/48_48_faces_web_augmented"
    bkgs_dir_path = "data/train_set/48_48_nonfaces_aflw"

    target_path = "data/train_set/13"

    faces_dir = join(target_path, "faces")
    nonfaces_dir = join(target_path, "nonfaces")

    os.makedirs(nonfaces_dir)
    os.makedirs(faces_dir)

    img_faces = [
        f for f in listdir(faces_dir_path)
        if isfile(join(faces_dir_path, f)) and f.endswith("png")
    ]
    img_bkgs = [
        f for f in listdir(bkgs_dir_path)
        if isfile(join(bkgs_dir_path, f)) and f.endswith("jpg")
    ]

    for i, img_name in enumerate(img_faces):
        img_path = join(faces_dir_path, img_name)
        img = imread(img_path)
        resized_img = resize(img, outputsize)
        ubyte_img = img_as_ubyte(resized_img)
        imsave(join(faces_dir, img_name), ubyte_img)
        print "processed " + img_path

    for i, img_name in enumerate(img_bkgs):
        img_path = join(bkgs_dir_path, img_name)
        img = imread(img_path)
        gray_img = rgb2gray(img)
        resized_img = resize(gray_img, outputsize)
        ubyte_img = img_as_ubyte(resized_img)
        imsave(join(nonfaces_dir, img_name), ubyte_img)
        print "processed " + img_path
Example #13
def main():
    stack = load_tif_stack(
        '/home/rhein/workspace/chloroplasts/yfp+2386-tc-1-cytoD_decon_stable_Ch1_yfp.ics movie.tif',
        limit=None)
    stack = np.array([rgb2gray(s) for s in stack])

    print stack.shape, stack.dtype

    for i, im in enumerate(stack):
        lne, _ = scanal(im, .5 + 2**np.arange(2, 3), 4., 0., np.ones(2))
        #         lne **= .5
        seg = lne > (lne.mean() + 1. * lne.std())
        print i, lne.mean(), lne.std()

        skel = skeleton(seg)

        #         plt.figure(str(i))
        #         plt.subplot(311), plt.imshow(im, 'gray', interpolation='nearest')
        #         plt.subplot(312), plt.imshow(skel, 'gray', interpolation='nearest')
        #         plt.subplot(313), plt.imshow(seg, 'gray', interpolation='nearest')

        seg = (seg != 0).astype(float)
        seg *= 255
        seg = seg.astype(np.uint8)
        bioformats.write_image('/home/rhein/workspace/chloroplasts/seg.tif',
                               seg,
                               bioformats.PT_UINT8,
                               z=i,
                               size_z=len(stack))

        skel = (skel != 0).astype(float)
        skel *= 255
        skel = skel.astype(np.uint8)
        bioformats.write_image('/home/rhein/workspace/chloroplasts/skel.tif',
                               skel,
                               bioformats.PT_UINT8,
                               z=i,
                               size_z=len(stack))
Example #14
def extract_feature(scenario):
    n_layer = scenario['layer']
    target_directory = get_feature_array_scenario_path(scenario['codename'])

    create_directory(target_directory)
    array_px_files = get_files(target_directory)

    # Skip if the features have already been extracted
    if len(array_px_files) >= 50:
        print "feature " + scenario['codename'] + " already exists. Aborting"
        return

    # Collect all image files
    image_filenames = get_files(directory_path)
    counter = 0
    for image_filename in image_filenames:
#         print "Extracting %s:%s"%(counter, position_file)
        counter += 1
        a = read_image(image_filename)
        gt = read_groundtruth_image(image_filename)
        
        # convert the ground truth to a binary image
        gt = gt > 20
        gt = gt.astype(int)
        image_shape = a.shape
        image_row = image_shape[0]
        image_col = image_shape[1]
        image_layer = image_shape[2]
        
        im_slic = []
        im_disp = []
        im_bound = []
        features = []
        
        # Extract superpixel feature for each layer
        for i in range(n_layer):
            im_slic.append(slic(a, compactness=scenario['settings'][i]['compactness'],
                                n_segments=scenario['settings'][i]['segment'],
                                sigma=scenario['settings'][i]['sigma']))
            im_slic[i] = label(im_slic[i], neighbors=8)
            im_disp.append(np.copy(im_slic[i]))
            im_bound.append(mark_boundaries(a, im_slic[i]))
            temp_feature = regionprops(im_slic[i], intensity_image=rgb2gray(a))
            features.append(list_to_dict(temp_feature))
            
        X_indiv = []
        
        for im_row in range(image_row):
            for im_col in range(image_col):
                # extract position and its corresponding ground-truth label
                posLabel = gt[im_row, im_col]
                current_labels = []
                
                # validate labels: label 0 is not allowed (it would raise a missing-key error)
                valid_position = True
                for i in range(n_layer):
                    current_level_labels = im_slic[i][im_row, im_col] 
                    current_labels.append(current_level_labels)
                    if current_level_labels == 0:
                        valid_position = False
                        break
                
                if not valid_position:
                    continue
                
                # concatenate the properties from all layers
                x_entry = []
                for i in range(n_layer):
                    feat = features[i][current_labels[i]]
                    for att in attributes:
                        if att == 'bbox':
                            (min_row, min_col, max_row, max_col) = feat['bbox']
                            x_entry.append(min_row)
                            x_entry.append(min_col)
                            x_entry.append(max_row)
                            x_entry.append(max_col)
                        else:
                            x_entry.append(feat[att])
                    if posLabel == 1:
                        mark(current_labels[i], 1, im_slic[i], im_disp[i])
                x_entry.append(posLabel)
                X_indiv.append(x_entry)
                
        f = get_feature_array_file(scenario['codename'], image_filename, mode='w')
        
        X_indiv = np.array(X_indiv)
        X_indiv_u = unique_rows(X_indiv)
        np.save(f, X_indiv_u)
        f.close() 
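unique_rows is not defined in this example; a minimal sketch of what it presumably does, assuming it simply drops duplicate feature rows before saving:

import numpy as np

def unique_rows(a):
    # Assumed behavior: remove duplicate rows from the feature array
    return np.unique(a, axis=0)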
Example #15
def crop_sk(eye):
    red_eye = rgb2gray(eye)

    edges = canny(red_eye, sigma=3)
    return do_cropping(eye, edges)
Example #16
import numpy as np

import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt

from skimage.io import imread
from skimage.color.colorconv import rgb2gray

from do import shakti


def compute_laplacian(I):
    if len(I.shape) != 2 or I.dtype != np.float32:
        raise RuntimeError

    L = np.empty(I.shape, dtype=np.float32)
    shakti.compute_laplacian(L, I)
    return L

I = imread('/home/david/Dropbox/wallpapers/Kingfisher.jpg')
I = rgb2gray(I).astype(np.float32)
L = compute_laplacian(I)

plt.imshow(L, interpolation='nearest', cmap=plt.get_cmap('gray'))
plt.show()
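rgb2gray returns a float64 image scaled to [0, 1], which is why the cast to float32 is needed before compute_laplacian's dtype check; a quick illustration reusing the imports above (the synthetic array is an assumption, not the wallpaper image):

rgb = (np.random.rand(8, 8, 3) * 255).astype(np.uint8)   # stand-in RGB image
gray = rgb2gray(rgb)
print(gray.dtype)                     # float64, values in [0, 1]
print(gray.astype(np.float32).dtype)  # float32, as compute_laplacian requires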
Example #17
                res[l_item.label] = l_item
            return res

        for i in range(n_layer):
            im_slic.append(
                slic(
                    a,
                    compactness=scenario["settings"][i]["compactness"],
                    n_segments=scenario["settings"][i]["segment"],
                    sigma=scenario["settings"][i]["sigma"],
                )
            )
            im_slic[i] = label(im_slic[i], neighbors=8)
            im_disp.append(np.copy(im_slic[i]))
            im_bound.append(mark_boundaries(a, im_slic[i]))
            temp_feature = regionprops(im_slic[i], intensity_image=rgb2gray(a))
            features.append(list_to_dict(temp_feature))

        coordinates = features[0]

        def mark(label, value, im_slice, im_display):
            indexes = np.where(im_slice == label)
            for i, v in enumerate(indexes[0]):
                im_display[v, indexes[1][i]] = value

        global labels
        labels = {}
        X_indiv = []

        for im_row in range(image_row):
            for im_col in range(image_col):
Example #18
def _label2rgb_overlay(label,
                       image=None,
                       colors=None,
                       alpha=0.3,
                       bg_label=-1,
                       bg_color=None,
                       image_alpha=1):
    """Return an RGB image where color-coded labels are painted over the image.

    Parameters
    ----------
    label : array, shape (M, N)
        Integer array of labels with the same shape as `image`.
    image : array, shape (M, N, 3), optional
        Image used as underlay for labels. If the input is an RGB image, it's
        converted to grayscale before coloring.
    colors : list, optional
        List of colors. If the number of labels exceeds the number of colors,
        then the colors are cycled.
    alpha : float [0, 1], optional
        Opacity of colorized labels. Ignored if image is `None`.
    bg_label : int, optional
        Label that's treated as the background.
    bg_color : str or array, optional
        Background color. Must be a name in `color_dict` or RGB float values
        between [0, 1].
    image_alpha : float [0, 1], optional
        Opacity of the image.

    Returns
    -------
    result : array of float, shape (M, N, 3)
        The result of blending a cycling colormap (`colors`) for each distinct
        value in `label` with the image, at a certain alpha value.
    """
    if colors is None:
        colors = DEFAULT_COLORS
    colors = [_rgb_vector(c) for c in colors]

    if image is None:
        image = np.zeros(label.shape + (3, ), dtype=np.float64)
        # Opacity doesn't make sense if no image exists.
        alpha = 1
    else:
        if not image.shape[:2] == label.shape:
            raise ValueError("`image` and `label` must be the same shape")

        if image.min() < 0:
            warn("Negative intensities in `image` are not supported")

        image = img_as_float(rgb2gray(image))
        image = gray2rgb(image) * image_alpha + (1 - image_alpha)

    # Ensure that all labels are non-negative so we can index into
    # `label_to_color` correctly.
    offset = min(label.min(), bg_label)
    if offset != 0:
        label = label - offset  # Make sure you don't modify the input array.
        bg_label -= offset

    new_type = np.min_scalar_type(int(label.max()))
    if new_type == bool:  # np.bool was removed from recent NumPy releases
        new_type = np.uint8
    label = label.astype(new_type)

    unique_labels, color_cycle = _match_label_with_color(
        label, colors, bg_label, bg_color)

    if len(unique_labels) == 0:
        return image

    dense_labels = range(max(unique_labels) + 1)
    label_to_color = np.array([c for i, c in zip(dense_labels, color_cycle)])

    result = label_to_color[label] * alpha + image * (1 - alpha)

    # Remove background label if its color was not specified.
    remove_background = bg_label in unique_labels and bg_color is None
    if remove_background:
        result[label == bg_label] = image[label == bg_label]

    return result
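_label2rgb_overlay corresponds to the overlay mode of the public skimage.color.label2rgb; a minimal usage sketch with synthetic data (shapes and values are illustrative only):

import numpy as np
from skimage.color import label2rgb

image = np.random.rand(32, 32)               # grayscale underlay
labels = np.zeros((32, 32), dtype=int)
labels[8:16, 8:16] = 1                       # a single labeled region
overlay = label2rgb(labels, image=image, alpha=0.3, bg_label=0)
print(overlay.shape)                         # (32, 32, 3)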
Example #19
from os import listdir
from os.path import isfile, join, splitext
from skimage.io import imsave

position_files = [f for f in listdir(position_path) if isfile(join(position_path, f))]
counter = 0
for position_file in position_files:
    print "Process-%s:%s"%(counter, position_file)
    counter += 1
    a = data.imread(directory_path + splitext(position_file)[0] + ".jpg")
    output = np.zeros((a.shape[0], a.shape[1]))
    coordinates = np.load(position_path + position_file)
    # Import image, SLIC, Feature extraction, & GUI interface
    im_slic = slic(a, compactness=layer[0]['compactness'], n_segments=layer[0]['segment'], sigma=1)
    im_slic = label(im_slic, neighbors=8)
    temp = regionprops(im_slic, intensity_image=rgb2gray(a))
    
    def list_to_dict(list):
        res = {}
        for l_item in list:
            res[l_item.label] = l_item
        return res
    
    features = list_to_dict(temp)

    def mark(label, value, im_slice, im_display):
        indexes = np.where(im_slice == label)
        for i, v in enumerate(indexes[0]):
            im_display[v, indexes[1][i]] = value
        
Example #20
def multiTest(mean, eigen_vectors, train_weights, covariance=None):

    multi_image = cv2.imread(MULTI_FACE)

    window_size = (int(image_size[1] / 2), int(image_size[0] / 2))
    down_scale = 1.2
    skip_pixels = 10

    coords = []
    for (i, resized) in enumerate(
            pyramid_gaussian(multi_image,
                             downscale=down_scale,
                             multichannel=True)):
        if (resized.shape[1] <= window_size[0]
                or resized.shape[0] <= window_size[1]
                or resized.shape[1] <= 200 or resized.shape[0] <= 200):
            break
        _coords = []
        for (x, y, window) in slide(resized, skip_pixels, window_size):
            if window.shape[::-1][1:] != window_size: continue

            mul_image = bgr2rgb(window) if RGB else rgb2gray(bgr2rgb(window))
            mul_image = cv2.resize(mul_image,
                                   (image_size[1], image_size[0])).flatten()

            test_img = []
            test_img.append(mul_image)

            # Classifier
            mul_weights = getWeights(test_img, mean, eigen_vectors)
            result, similarity = getResults(train_weights,
                                            mul_weights,
                                            covariance=covariance)

            if result[0]:
                _coords.append((x, y, similarity[0]))

            # Show detected face in current scale
            cv_copy = resized.copy()
            cv2.rectangle(cv_copy, (x, y),
                          (x + window_size[0], y + window_size[1]),
                          (0, 255, 0), 1)
            for x, y, sim in _coords:
                cv2.rectangle(cv_copy, (x, y),
                              (x + window_size[0], y + window_size[1]),
                              (0, 0, 255), 1)
                cv2.putText(cv_copy, sim, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (255, 255, 255), 1)
            cv2.imshow("Sliding Window", cv_copy)
            cv2.waitKey(1)

        for x, y, sim in _coords:
            down_size = down_scale**i
            w = int(window_size[0] * down_size)
            h = int(window_size[1] * down_size)
            x, y = int(x * down_size), int(y * down_size)
            coords.append((x, y, x + w, y + h, sim))

    coords = non_max_suppression_fast(np.array(coords))
    # show all detected faces
    for [x1, y1, x2, y2, sim] in coords:

        cv2.rectangle(multi_image, (x1, y1), (x2, y2), (0, 255, 0), 1)
        cv2.putText(multi_image, sim, (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (255, 255, 255), 1)

    cv2.imshow("Detected Faces", multi_image)
    cv2.waitKey(0)
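slide is not defined in this example; a minimal sliding-window generator of the kind the loop above appears to expect, yielding the top-left corner and the window itself (written as an assumption about the helper, not its actual implementation):

def slide(image, step, window_size):
    # Assumed helper: window_size is (width, height), step is the pixel stride
    for y in range(0, image.shape[0], step):
        for x in range(0, image.shape[1], step):
            yield (x, y, image[y:y + window_size[1], x:x + window_size[0]])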
Example #21
def grayspace(src):
    from skimage.color.colorconv import rgb2gray
    dst = rgb2gray(src)
    return dst
Example #22
def grayspace(src):
    from skimage.color.colorconv import rgb2gray
    dst = rgb2gray(src)
    return dst
Example #23
from matplotlib import pyplot as plt
from skimage import measure, filters
from skimage.color.colorconv import rgba2rgb, rgb2gray
from utils import show_image
import numpy as np

image_dice = plt.imread('images/dices.png')

# Make the image grayscale
image_dice = rgb2gray(rgba2rgb(image_dice))

# Obtain the optimal thresh value
thresh = filters.threshold_otsu(image_dice)

# Apply thresholding
binary = image_dice > thresh

# Find contours at a constant value of 0.8
contours = measure.find_contours(binary, 0.8)

# show_image(image_dice)

# Create list with the shape of each contour
shape_contours = [cnt.shape[0] for cnt in contours]

# Set 50 as the maximum size of the dots shape
max_dots_shape = 50

# Count dots in contours excluding bigger than dots size
dots_contours = [cnt for cnt in contours if np.shape(cnt)[0] < max_dots_shape]
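A natural final step, assumed here rather than taken from the original snippet, is to report how many dots were found:

# Assumed continuation, not part of the original code
print("Dots in the image: " + str(len(dots_contours)))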
Example #24
            ptop = max(0, y0 - pad)
            pbottom = min(data.height, y1 + pad)
            pright = min(data.width, x1 + pad)
            pleft = max(0, x0 - pad)

            sub_image = data_array[ptop:pbottom, pleft:pright, :].copy()
            sub_mask = mask[ptop:pbottom, pleft:pright]
            sub_highlight = highlight[ptop:pbottom, pleft:pright]

            H = Homography(sub_image, mask=sub_mask)
            output = H.rectified
            sub_highlight = warp(sub_highlight,
                                 AffineTransform(H.H),
                                 preserve_range=True)

            gs = gray2rgb(rgb2gray(output))

            highlighted = output.copy()
            highlighted[sub_highlight == 0] = 0.5 * gs[sub_highlight == 0]
            highlighted[sub_highlight == 128] = (255, 0, 0)

            projection_matrix = H.H
            metadata['use_quad'] = False
            metadata['projection'] = projection_matrix.tolist()
            metadata['subimage'] = dict(left=ptop,
                                        right=pbottom,
                                        bottom=pright,
                                        top=pleft)

        out_folder = args.ofolder
        out_basename = stem + '-facade-{:02}'.format(j + 1)