def focus_score(self): 
     f_score = (color.rgb2grey(self.img) - erosion(color.rgb2grey(self.img), square(4)))
     non_zero_pixel_area = self.get_nonzero_pixel_area(f_score)
     #print("focus score: " + str(np.sum(f_score) / non_zero_pixel_area))
     #plt.imshow(f_score)
     #plt.show()
     return np.sum(f_score) / non_zero_pixel_area 
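
The same focus measure can be sketched as a standalone function: the grey image minus its erosion highlights fine detail, and averaging that residual over the non-zero pixels (a plausible reading of get_nonzero_pixel_area, which is not shown) yields the score.

import numpy as np
from skimage import color
from skimage.morphology import erosion, square

def focus_score_standalone(img):
    # Residual between the grey image and its erosion: large for sharp detail.
    grey = color.rgb2grey(img)
    residual = grey - erosion(grey, square(4))
    # Assumed equivalent of get_nonzero_pixel_area: count of non-zero residual pixels.
    return residual.sum() / np.count_nonzero(residual)
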
def compare_images(imageA, imageB, title, show_plot=True):
    """
    computes the mean squared error and structural similarity
    """

    # index values for mean squared error
    if VERBOSE: print("comparing mean squared error...")
    m = mse(imageA, imageB)

    # convert the images to grayscale
    if VERBOSE: print("converting to greyscale...")
    imageA_grey = rgb2grey(imageA)
    imageB_grey = rgb2grey(imageB)

    # uses image copies to avoid runtime warning for ssim computation
    img1_grey = np.copy(imageA_grey)
    img2_grey = np.copy(imageB_grey)

    # index values for structural similarity
    if VERBOSE: print("comparing structural similarity...")
    s = ssim(img1_grey, img2_grey)

    if show_plot:
        if VERBOSE: print("plotting images...")
        try:
            import matplotlib.pyplot as plt
        except ImportError:
            print("Error importing pyplot from matplotlib, please install the matplotlib package first...")
            sys.tracebacklimit = 0
            raise Exception("Importing matplotlib failed")

        # setup the figure
        fig, ax = plt.subplots(2, 2)
        fig.suptitle("%s\nMSE: %.5f, SSIM: %.5f" % (title, m, s))

        ax[0][0].text(-10, -10, 'MSE: %.5f' %(m))

        # show first image
        ax[0][0].imshow(imageA, cmap=plt.cm.gray)
        ax[0][0].axis("off")

        # show the second image
        ax[0][1].imshow(imageB, cmap=plt.cm.gray)
        ax[0][1].axis("off")

        ax[1][0].text(-10, -10, 'SSIM: %.5f' %(s))

        # show first grey image
        ax[1][0].imshow(img1_grey, cmap=plt.cm.gray)
        ax[1][0].axis("off")

        # show the second grey image
        ax[1][1].imshow(img2_grey, cmap=plt.cm.gray)
        ax[1][1].axis("off")

        # show the images
        plt.show()

    return m, s
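
compare_images depends on an mse helper and an ssim function that are not shown; a minimal sketch of the missing pieces, assuming equally sized float images and the era-appropriate skimage API used throughout this page:

import numpy as np
from skimage.measure import compare_ssim as ssim  # skimage.metrics.structural_similarity in newer releases

def mse(imageA, imageB):
    # Mean squared error between two images of identical shape.
    err = np.sum((imageA.astype('float') - imageB.astype('float')) ** 2)
    return err / float(imageA.shape[0] * imageA.shape[1])
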
Example #3
def read_image(filename):
    """Read an image from the disk and output data arrays."""
    image_array_rgb = misc.imread(filename, mode='RGB')
    #  image_array_grey = misc.imread(filename, flatten=True, mode='F')
    image_array_grey = color.rgb2grey(image_array_rgb)*255
    image_array_luv = color.rgb2luv(image_array_rgb)
    return image_array_rgb, image_array_grey, image_array_luv
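
scipy.misc.imread was removed in SciPy 1.2; a roughly equivalent read can be sketched with imageio, where the pilmode keyword mirrors the old mode argument (this assumes imageio's PIL-backed plugin handles the file):

import imageio
image_array_rgb = imageio.imread(filename, pilmode='RGB')
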
Example #4
def featurize(img_name):
    """Load an image and convert it into a dictionary of features"""
    img = plt.imread(os.path.join('stimuli', img_name + '.png'))
    height, width, _ = img.shape
    features = defaultdict(int)
    for y in range(height):
        for x in range(width):
            features['red'] += img[y][x][0]
            features['green'] += img[y][x][1]
            features['blue'] += img[y][x][2]
            features['alpha'] += img[y][x][3]

    grey = color.rgb2grey(img)
    for y in range(height):
        for x in range(width):
            for key, value in per_pixel(grey, y, x):
                features[key] += value

    # Normalize over image size
    for key, value in features.items():
        features[key] = float(value) / height / width

    features['blob'] = feature.blob_dog(grey).shape[0]
    features['corners'] = feature.corner_peaks(
        feature.corner_harris(grey)).shape[0]
    return features
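
The per-pixel red/green/blue/alpha loop above can be collapsed into one vectorized sum; a sketch of a drop-in replacement for that loop, assuming the same img and features names:

# Vectorized equivalent of the channel accumulation loop.
channel_sums = img.sum(axis=(0, 1))  # one total per RGBA channel
for key, total in zip(('red', 'green', 'blue', 'alpha'), channel_sums):
    features[key] += total
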
Example #5
def feature_extraction(raw_data):
    image = color.rgb2grey(raw_data)

    fd, hog_image = hog(image, orientations=8, pixels_per_cell=(16, 16),
                        cells_per_block=(1, 1), visualise=True)

    return hog_image
Example #6
def modify(img):
    """Randomly modify an image
    
    This is a preprocessing step for training an OCR classifier. It takes
    in an image and casts it to greyscale, reshapes it, and adds some
    (1) rotations, (2) translations and (3) noise.
    
    If more efficiency is needed, we could factor out some of the initial
    nonrandom transforms.
    """
    
    block_size = np.random.uniform(20, 40)
    rotation = 5*np.random.randn()
    
    #print 'BLOCK SIZE', block_size
    #print 'ROTATION  ', rotation
    
    img = color.rgb2grey(img)
    img = transform.resize(img, output_shape=(50,30))
    img = filter.threshold_adaptive(img, block_size=block_size)  # old skimage API; filters.threshold_local in newer releases
    
    # rotate the image
    img = np.logical_not(transform.rotate(np.logical_not(img), rotation))
    # translate the image
    img = shift(img)
    # add some noise to the image
    img = noise(img)
    
    img = transform.resize(img, output_shape=(25,15))
    return filter.threshold_adaptive(img, block_size=25)
Example #7
def dhash(picture):
    "Compute dhash as uint64."
    img = rgb2grey(resize(picture, (9, 8)))
    h = np.zeros([8], dtype=np.uint8)
    for a in range(8):
        h[a] = TWOS[img[a] > img[a + 1]].sum()
    return (BIGS * h).sum()
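
dhash relies on TWOS and BIGS lookup tables that the snippet does not define; plausible definitions consistent with the arithmetic above are powers of two to pack one row comparison into a byte, and powers of 256 to pack eight bytes into a uint64:

import numpy as np

TWOS = 2 ** np.arange(8, dtype=np.uint64)    # assumed: bit weights within a byte
BIGS = 256 ** np.arange(8, dtype=np.uint64)  # assumed: byte weights within the hash
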
Example #8
def register_feature_calculators():
    return [
        lambda img: GaborFilter.compute_feats(rgb2grey(img), GaborFilter.generate_kernels(2)),
        # lambda img: GLCM.compute_feats(rgb2grey(img), [1, 5, 10, 20], [0, np.pi / 4, np.pi / 2, np.pi * 3 / 4]),
        lambda img: ColorAnalyzer.compute_feats(img, 150, 255, ColorAnalyzer.ColorChannel.Green),
        lambda img: ColorAnalyzer.compute_feats(img, 50, 150, ColorAnalyzer.ColorChannel.Hue),
    ]
Example #9
def _compute_auto_correlation(image, sigma):
    """Compute auto-correlation matrix using sum of squared differences.

    Parameters
    ----------
    image : ndarray
        Input image.
    sigma : float
        Standard deviation used for the Gaussian kernel, which is used as
        weighting function for the auto-correlation matrix.

    Returns
    -------
    Axx : ndarray
        Element of the auto-correlation matrix for each pixel in input image.
    Axy : ndarray
        Element of the auto-correlation matrix for each pixel in input image.
    Ayy : ndarray
        Element of the auto-correlation matrix for each pixel in input image.

    """

    if image.ndim == 3:
        image = img_as_float(rgb2grey(image))

    imx, imy = _compute_derivatives(image)

    # structure tensor
    Axx = ndimage.gaussian_filter(imx * imx, sigma, mode='constant', cval=0)
    Axy = ndimage.gaussian_filter(imx * imy, sigma, mode='constant', cval=0)
    Ayy = ndimage.gaussian_filter(imy * imy, sigma, mode='constant', cval=0)

    return Axx, Axy, Ayy
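
These three tensor elements are usually combined into a corner response; a sketch of the standard Harris measure built from them (k is the usual empirical sensitivity parameter, not part of this module):

def harris_response(Axx, Axy, Ayy, k=0.05):
    # det(A) - k * trace(A)^2, evaluated per pixel.
    det_A = Axx * Ayy - Axy ** 2
    trace_A = Axx + Ayy
    return det_A - k * trace_A ** 2
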
Example #10
def processOneImage(inputPath, outputPath):
    image = io.imread(inputPath)
    greyImage = rgb2grey(image)
    threshold = threshold_otsu(greyImage)
    imgout = closing(greyImage > threshold, square(1))
    imgout = crop(imgout)
    imgout = transform.resize(imgout, (max(imgout.shape), max(imgout.shape)))
    io.imsave(outputPath, imgout)
def analyze_image(args):
    """Analyze all wells from all trays in one image."""
    filename, config = args
    LOGGER.debug(filename)
    rows = config["rows"]
    columns = config["columns"]
    well_names = config["well_names"]

    name = splitext(basename(filename))[0]
    if config["parse_dates"]:
        try:
            index = convert_to_datetime(fix_date(name))
        except ValueError as err:
            return {"error": str(err), "filename": filename}
    else:
        index = name

    try:
        image = rgb2grey(imread(filename))
    except OSError as err:
        return {"error": str(err), "filename": filename}

    plate_images = cut_image(image)

    data = dict()

    for i, plate_name in zip(config["plate_indexes"], config["plate_names"]):
        plate = data[plate_name] = dict()
        plate[config["index_name"]] = index
        plate_image = plate_images[i]
        if i // 3 == 0:
            calibration_plate = config["left_image"]
            positions = config["left_positions"]
        else:
            calibration_plate = config["right_image"]
            positions = config["right_positions"]

        try:
            edge_image = canny(plate_image, CANNY_SIGMA)
            offset = align_plates(edge_image, calibration_plate)

            # Add the offset to get the well centers in the analyzed plate.
            well_centers = generate_well_centers(
                np.array(positions) + offset, config["plate_size"], rows,
                columns)
            assert len(well_centers) == rows * columns
            # Add a minimal value to avoid zero division.
            plate_image /= (1 - plate_image + float_info.epsilon)

            well_intensities = [find_well_intensity(plate_image, center)
                                for center in well_centers]

            for well, intensity in zip(well_names, well_intensities):
                plate[well] = intensity
        except (AttributeError, IndexError) as err:
            return {"error": str(err), "filename": filename}

    return data
def image():
    """Load a single image from the p1 brain directory.

    output: a single image as a numpy array
    """
    inputDir = '{}'.format(all.__path__[0])
    img = load_image('p1-D3-01b.jpg', inputDir)
    img = color.rgb2grey(img)
    return img
Example #13
def apply_watermark(filename):
    img = io.imread(filename) # Image in gray scale
    img = color.rgb2grey(img)
    if img.dtype.name != 'uint8':
        img = img * 255
        img = img.astype(numpy.uint8)
    image = img.copy()
    blocks = []
    width, height = image.shape
    hor_block = width // 4
    ver_block = height // 4
    block_counter = 0
    for x in range(0, hor_block):
        for y in range(0, ver_block):
            x_coor = x * 4
            y_coor = y * 4
            block = image[x_coor: x_coor + 4, y_coor: y_coor + 4]
            blocks.append(block)
            block_counter += 1
    n = block_counter
    k = Functions.get_biggest_prime(n)

    for index in range(0, n):
        block_B = blocks[index]
        block_A = (blocks[Functions.mapping(index + 1, k, n) - 1]).copy()
        for x in range(0, 4):
            for y in range(0, 4):
                block_B[x, y] = Functions.removeLSB(block_B[x, y])
        avg_B = Functions.average(block_B)
        for i in range(0, 2):
            for j in range(0, 2):
                i_coor = i * 2
                j_coor = j * 2
                blockBS = block_B[i_coor: i_coor+2, j_coor: j_coor+2]
                average = Functions.average(blockBS)
                v = 0
                if average >= avg_B:
                    v = 1
                p = 1
                if Functions.ones_in_sixMSB(average) % 2 == 0:
                    p = 0
                subblock_a = block_A[i_coor: i_coor+2, j_coor: j_coor+2].copy()
                avg_as = Functions.average(subblock_a)
                r = Functions.split_binary_sixMSB(avg_as)
                if v == 1:
                    v = 2
                if p == 1:
                    p = 2
                if r[2] == 1:
                    r[2] = 2
                if r[4] == 1:
                    r[4] = 2
                blockBS[0][0] = (blockBS[0][0] + v + r[0])
                blockBS[0][1] = (blockBS[0][1] + p + r[1])
                blockBS[1][0] = (blockBS[1][0] + r[2] + r[3])
                blockBS[1][1] = (blockBS[1][1] + r[4] + r[5])
    return image
def normalize(image, subtractMin):
    """
    @params { array-like } image Any skimage-compatible image
    @return { np.ndarray }
    """
    if subtractMin:
        return color.rgb2grey(image)
    else:
        return np.divide(image, np.max(image))
def stainspace_to_2d_array(ihc_xyz, channel):
    #rescale = rescale_intensity(ihc_xyz[:, :, channel], out_range=(0,1))
    #stain_array = np.dstack((np.zeros_like(rescale), rescale, rescale))

    #try to not reverse engineer rescale right now
    stain_array = ihc_xyz[:, :, channel]
    #plt.imshow(stain_array)
    gray_array = rgb2grey(stain_array)
    #plt.imshow(gray_array)
    return gray_array
Example #16
def load_th_image(image_name):
    image = data.load(image_name)
    grey = rgb2grey(image)

    th = threshold_otsu(grey)

    binary = grey >= th

    binary[50:-50, 50:-50] = 0
    return binary
Example #17
File: hog.py  Project: cooxee/VODOAT
def predict(image_url, fileName=None):
    svmObject = getSVMInstance()
    image_url = image_url.replace(' ', '')[:-1] # to remove \n
    fileName = "temp.png"
    getImageAndSave(image_url)
    image = imread(fileName)
    image = skimage.transform.resize(image, (100,100))
    image = rgb2grey(image)
    fd = hog(image, orientations=9, pixels_per_cell=(16, 16), cells_per_block=(1, 1), visualise=False,normalise=True)
    prediction = svmObject.predict(fd.reshape(1, -1))  # modern sklearn expects a 2D array of samples
    return prediction
Example #18
def preprocess(image, height=50, block_size=50):
    """Turn to greyscale, scale to a height, and then threshold to binary
    """

    image = color.rgb2grey(image)
    size_factor = float(height) / image.shape[0]
    new_size = [int(e * size_factor) for e in image.shape]
    image = transform.resize(image, new_size)
    image = filter.threshold_adaptive(image, block_size=block_size)

    return image
Example #19
 def extract_histogram(self, bins=10):
     """Extract grey intensity histogram."""
     assert len(self.images) > 0, 'No images loaded! Did you call ' \
                                  'load_images() ?'
     histograms = []
     for image in self.images:
         grey = skicol.rgb2grey(image)
         hist_values, bins = np.histogram(grey, range=(0, 1), bins=bins)
         histograms.append(hist_values)
     histograms = np.array(histograms)
     histograms = histograms.astype('float')
     return histograms
def watershed_counter(path_image):
    # load the image and convert it to a floating point data type
    rgb_image = img_as_float(io.imread(path_image))
    image = rgb2grey(rgb_image)

    bin_image = image > threshold_otsu(image)
    fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(8, 2.5))
    ax1.imshow(image, cmap=plt.cm.gray)
    ax1.set_title('Original Image')
    ax1.axis('off')

    ax2.hist(image.ravel(), bins=256)
    ax2.set_title('Otsu Thresholded Histogram')
    ax2.axvline(threshold_otsu(image), color='r')

    ax3.imshow(bin_image, cmap=plt.cm.gray)
    ax3.set_title('Thresholded Image')
    ax3.axis('off')

    # Now we want to separate the two objects in image
    # Generate the markers as local maxima of the distance to the background

    distance = ndi.distance_transform_edt(bin_image)
    local_maxi = peak_local_max(distance, indices=False, footprint=np.ones((3, 3)), labels=bin_image)
    markers = ndi.label(local_maxi)[0]
    labels = watershed(distance, markers, mask=bin_image)

    regions = regionprops(labels)
    regions = [r for r in regions if r.area > 50]
    num = len(regions)

    fig, axes = plt.subplots(ncols=4, figsize=(8, 2.7))
    ax0, ax1, ax2, ax3 = axes

    ax0.imshow(image, cmap=plt.cm.gray, interpolation='nearest')
    ax0.set_title('Overlapping objects')
    ax1.imshow(-distance, cmap=plt.cm.jet, interpolation='nearest')
    ax1.set_title('Distances')
    ax2.imshow(labels, cmap=plt.cm.spectral, interpolation='nearest')
    ax2.set_title(str(num) + ' Total Objects')
    ax3.imshow(rgb_image, cmap=plt.cm.gray, interpolation='nearest')
    ax3.contour(labels, [0.5], linewidths=1.2, colors='y')
    ax3.axis('off')

    for ax in axes:
        ax.axis('off')

    fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
                        right=1)

    print(num)

    plt.show()
Example #21
def punch(img):
    # Identifying the tissue punches in order to crop the image correctly.
    # Canny edges and RANSAC are used to fit a circle to the punch,
    # and a mask is created.

    distance = 0
    r = 0

    float_im, orig, ihc = create_bin(img)
    gray = rgb2grey(orig)
    smooth = gaussian(gray, sigma=3)

    shape = np.shape(gray)
    l = shape[0]
    w = shape[1]

    x = l - 20
    y = w - 20

    rows = np.array([[x, x, x], [x + 1, x + 1, x + 1]])
    columns = np.array([[y, y, y], [y + 1, y + 1, y + 1]])

    corner = gray[rows, columns]

    thresh = np.mean(corner)
    print(thresh)
    binar = (smooth < thresh - 0.01)

    bin = remove_small_holes(binar, min_size=100000, connectivity=2)
    bin1 = remove_small_objects(bin, min_size=5000, connectivity=2)
    bin2 = gaussian(bin1, sigma=3)
    bin3 = (bin2 > 0)

    # eosin = IHC[:, :, 2]
    edges = canny(bin3)
    coords = np.column_stack(np.nonzero(edges))

    model, inliers = ransac(coords, CircleModel, min_samples=4, residual_threshold=1, max_trials=1000)

    # rr, cc = circle_perimeter(int(model.params[0]),
    #                          int(model.params[1]),
    #                          int(model.params[2]),
    #                          shape=im.shape)

    a, b = model.params[0], model.params[1]
    r = model.params[2]
    ny, nx = bin3.shape
    ix, iy = np.meshgrid(np.arange(nx), np.arange(ny))
    distance = np.sqrt((ix - b)**2 + (iy - a)**2)

    mask = np.ma.masked_where(distance > r, bin3)

    return distance, r, float_im, orig, ihc, bin3
def featureExtractOneFileUnit(loc,clusterData):
    size=(128,128)
    im=(resize(rgb2grey(imread(loc)),size)*255) #resize

    rpq1=patchResponseMap(get2DMatrix(extractPatch1(im[0:64,0:64])),clusterData)
    rpq2=patchResponseMap(get2DMatrix(extractPatch1(im[64:,0:64])),clusterData)
    rpq3=patchResponseMap(get2DMatrix(extractPatch1(im[64:,64:])),clusterData)
    rpq4=patchResponseMap(get2DMatrix(extractPatch1(im[0:64,64:])),clusterData)

    # print("patch extraction done", rpq1.shape, rpq2.shape, rpq3.shape, rpq4.shape)

    return np.hstack((rpq1, rpq2, rpq3, rpq4))
Example #23
def imread(fname, as_grey=False, plugin=None, flatten=None,
           **plugin_args):
    """Load an image from file.

    Parameters
    ----------
    fname : string
        Image file name, e.g. ``test.jpg`` or URL.
    as_grey : bool
        If True, convert color images to grey-scale (32-bit floats).
        Images that are already in grey-scale format are not converted.
    plugin : str
        Name of plugin to use (Python Imaging Library by default).

    Other Parameters
    ----------------
    flatten : bool
        Backward compatible keyword, superseded by `as_grey`.
    plugin_args : keywords
        Passed to the given plugin.

    Returns
    -------
    img_array : ndarray
        The different colour bands/channels are stored in the
        third dimension, such that a grey-image is MxN, an
        RGB-image MxNx3 and an RGBA-image MxNx4.

    """
    # Backward compatibility
    if flatten is not None:
        as_grey = flatten

    if is_url(fname):
        _, ext = os.path.splitext(fname)
        with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:
            u = urlopen(fname)
            f.write(u.read())
        img = call_plugin('imread', f.name, plugin=plugin, **plugin_args)
        os.remove(f.name)
    else:
        img = call_plugin('imread', fname, plugin=plugin, **plugin_args)

    if as_grey and getattr(img, 'ndim', 0) >= 3:
        img = rgb2grey(img)

    return img
Example #24
def get_data_batch(selected_files, prev_i, i):
	X = np.empty((BATCH_SIZE, IMAGE_SIZE[0]*IMAGE_SIZE[1]))
	x_idx = 0
	print "Processing:", prev_i, i
	ids = []
	for x_i in range(prev_i, i):
		# Converting to grey for first pass - need to evaluate all three channels
		# separately for color
		img = rgb2grey(imread(BASE_PATH + selected_files[x_i])).reshape((1, IMAGE_SIZE[0]*IMAGE_SIZE[1]))
		X[x_idx,:] = img
		galaxy_id = int(selected_files[x_i].split('.')[0])
		ids.append(galaxy_id)
		x_idx += 1
	return X, ids, (x_i+1)
Example #25
def get_joly_scenes_sementation(imgs, nb_std_above_mean_th=5.):
    """Return a list of potential scene-change segmentations without a perceptual hash function.

    :param imgs: the list of frames (in grayscale) of the video to segment
    :param nb_std_above_mean_th: number of std above the mean of frame-to-frame differences used to set the segmentation threshold
    :type imgs: list(np.array)
    :type nb_std_above_mean_th: float

    :return: the list of indexes of beginning / ending sequences...
    :rtype: list(tuple)
    """
    #compute diff between images
    diffs = [0]
    
    im1 = rgb2grey(resize(imgs[0], (8, 8)))
    im1 = (im1 - im1.min()) / (im1.max() - im1.min()) * 255  # dynamic range expansion
    for i in range(len(imgs) - 1):
        im2 = rgb2grey(resize(imgs[i + 1], (8, 8)))
        im2 = (im2 - im2.min()) / (im2.max() - im2.min()) * 255
        diffs.append(abs(im1 - im2).sum())
        im1 = im2
    
    diffs = np.array(diffs)
    
    scene_change_threshold = diffs.mean() + diffs.std() * nb_std_above_mean_th
    
    #make the scene segmentation
    scenes = []
    changes = diffs > scene_change_threshold #list(bollinger(diffs, lag=5, th=nb_std_above_mean_th)) 
    sequence_beginning = 0
    for i in range(len(changes)):
        if changes[i]:
            scenes.append((sequence_beginning, i))
            sequence_beginning = i
    
    return scenes
    def get_corner_distances(self): 
        a = corner_shi_tomasi(color.rgb2grey(self.img))
#        val = filters.threshold_otsu(self.img)
#        mask = self.img < val 
#        a = peak_local_max(mask)
        print(a.shape)
        print(a)
        d1 = self.get_coord_dist(a[0], a[1])
        d2 = self.get_coord_dist(a[1], a[2])
        d3 = self.get_coord_dist(a[2], a[3])
        d4 = self.get_coord_dist(a[3], a[0])
        print('corner distances')
        print(d1)
        print(d2)
        print(d3)
        print(d4)
        print('std dev: ' + str(np.std([d1, d2, d3, d4])))
Example #27
def phash64(img):
    """Compute a perceptual hash of an image.

    :param img: a rgb image to be hashed

    :type img: numpy.ndarray

    :return: a perceptual hash of img coded on 64 bits
    :rtype: int
    """
    resized = rgb2grey(resize(img, (8, 8)))
    mean = resized.mean()
    boolean_matrix = resized > mean
    hash_lst = boolean_matrix.reshape((1, 64))[0]
    hash_lst = list(map(int, hash_lst))
    im_hash = 0
    for v in hash_lst:
        im_hash  = (im_hash << 1) | v
    return im_hash
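
Perceptual hashes like this are compared by Hamming distance, i.e. the number of differing bits; a small helper sketch:

def hash_distance(h1, h2):
    # Count of bits that differ between two 64-bit hashes.
    return bin(h1 ^ h2).count('1')
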
Example #28
def imread(fname, as_grey=False, plugin=None, flatten=None,
           **plugin_args):
    """Load an image from file.

    Parameters
    ----------
    fname : string
        Image file name, e.g. ``test.jpg``.
    as_grey : bool
        If True, convert color images to grey-scale (32-bit floats).
        Images that are already in grey-scale format are not converted.
    plugin : str
        Name of plugin to use (Python Imaging Library by default).

    Other Parameters
    ----------------
    flatten : bool
        Backward compatible keyword, superseded by `as_grey`.
    plugin_args : keywords
        Passed to the given plugin.

    Returns
    -------
    img_array : ndarray
        The different colour bands/channels are stored in the
        third dimension, such that a grey-image is MxN, an
        RGB-image MxNx3 and an RGBA-image MxNx4.

    """
    # Backward compatibility
    if flatten is not None:
        as_grey = flatten

    img = call_plugin('imread', fname, plugin=plugin, **plugin_args)

    if as_grey and getattr(img, 'ndim', 0) >= 3:
        img = rgb2grey(img)

    return Image(img)
def featureExtractOneFile(loc,clusterData,doRandom=True,randomPixno=1000):
    size=(128,128)
    im=(resize(rgb2grey(imread(loc)),size)*255) #resize
    rp=extractPatch(im)
    print "patch extraction done"
    #for each pixel that has been converted into a 36 pixel patch, we are first going to find out the nearest codebook patch. Then, 
    #we will create an one hot vector. Finally we will sum up the vectors belonging to the same quadrant and concatenate them.
    patchDict={}
    quadrant=0
    #this is too time consuming, we are choosing 5000 pixels randomly
    #randomPixelIndices=sorted(range((128-(PS/2))*(128-(PS/2))), key=os.urandom)[0:5000]
    #print len(randomPixelIndices)
    
    randomPixelIndices = list(range((128 - (PS // 2)) * (128 - (PS // 2))))
    if doRandom: 
        random.shuffle(randomPixelIndices)
        print "shuffling done"
    else:
        randomPixno=len(randomPixelIndices)
    #for i,patch in enumerate(rp):
    for i in randomPixelIndices[:randomPixno]:
        patch=rp[i]      
        divFactor = 128 - (PS // 2)
        x = i // divFactor
        y = i % divFactor
        if x<65 and y<65:
            quadrant=1
        elif x>65 and y<65:
            quadrant=2
        elif x>65 and y>65:
            quadrant=3
        else:
            quadrant=4
        if patch.shape[0]*patch.shape[1]==36:
            patch=standardizePatch(patch)
            if quadrant not in patchDict:
                patchDict[quadrant]=nearestOneHot(patch,clusterData)
            else:
                patchDict[quadrant]=np.sum((patchDict[quadrant],nearestOneHot(patch,clusterData)),axis=0)
    
    #pprint(patchDict)      
    return np.hstack((patchDict[1],patchDict[2],patchDict[3],patchDict[4]))            
def extract_features(data_path, data_csv_path, radius, n_points, threshold, is_train=True, is_retrain=False):
    X = []; y = []
    file = open(data_csv_path, 'r')
    header = next(file)
    for line in file:
        if is_retrain: 
            fname, label, prob = line.strip().split('\t')
            if float(prob) <= threshold: continue
        elif is_train:
            fname, label = line.strip().split(',')
        else:
            fname = line.strip()
        img = plt.imread(data_path + fname)
        img_gray = color.rgb2grey(img)
        lbp = local_binary_pattern(img_gray, n_points, radius, method='default')
        X.append(np.histogram(lbp, bins=np.arange(2**n_points))[0])
        if not is_train: continue
        y.append(int(label))
    file.close()
    return np.array(X), np.array(y)
cur_x, cur_y = (260, 220)
img = img[:, :, 0:3]

#cur_x, cur_y = (135, 135)
#img = img[cur_y - 135:cur_y + 136,
#          cur_x - 135:cur_x + 136, 0:3]

is_orange = np.logical_and.reduce(abs(img - (255, 90, 0)) <= 20, 2)
is_yellow = np.logical_and.reduce(abs(img - (255, 174, 0)) <= 20, 2)
is_yellow = binary_closing(is_yellow, disk(3))

img2 = img.copy()
img2[is_orange] = (255, 0, 255)
img2[is_yellow] = (0, 0, 255)

greyimg = rgb2grey(img)
edges = feature.canny(greyimg, sigma=1.0)
closed = binary_closing(edges, disk(4))
#eroded = binary_erosion(closed, disk(1))
eroded = binary_dilation(closed, disk(2))
final = eroded.copy()
final[128:143, 128:143] = np.logical_and(final[128:143, 128:143],
                                         np.logical_not(disk(7)))

# extend doorways to form cuts in walls
cut_doorways(final, is_yellow)

# fill all unreachable pixels
fill_exterior_pixels(final, cur_x, cur_y)

#final = np.logical_not(final)  # display _walkable_ pixels
Example #32
# 5. For illustrative purposes, mark the centre position of each dice in the image grid so we
# can visualize the results.

import matplotlib.pyplot as plt
import numpy as np
from skimage import data, io, filters
from skimage.color import rgb2grey
from skimage.filters import threshold_otsu, threshold_mean, threshold_li, threshold_yen, threshold_minimum
from skimage.morphology import reconstruction
from skimage.measure import find_contours
from skimage.feature import blob_dog, blob_log, blob_doh
#================= Importing the 'Dice.jpg' image ==============#
original_image = io.imread(
    r'C:\Users\jackh\OneDrive\Documents\College\Python\Images\dice.jpg')
#======================= Processing ============================# takes 1 minute
Black_White_image = rgb2grey(original_image)
thresh = threshold_minimum(Black_White_image)
binary_image = Black_White_image > thresh
contours1 = find_contours(binary_image, 0.1)
binary_image_copy = np.copy(binary_image)
binary_image_copy[1:-1, 1:-1] = 1
filled_image = reconstruction(binary_image_copy,
                              binary_image,
                              method='erosion')
filled_image[0:100, :] = 0
filled_image[:, 0:200] = 0
contours2 = find_contours(filled_image, 0.1)
Blob_centres = blob_log(filled_image, min_sigma=100)

### count dice
#===================== Plotting & Printing =======================#
Example #33
            values, indices = ImgConvNets.predict('dcnn', res_dir,
                                                  'digits2_dcnn', flt_images)

            print('Predicted values for the image:')
            for i in range(0, 10):
                for j in range(0, 10):
                    print('{}\t'.format(indices[i * 10 + j][0]), end='')
                print('\n')

            plot_samples(X_images, img_height=20, img_width=20, shuffle=False)
        else:
            # Predict 10 new hand-written digits
            images = []
            for i in np.arange(0, 10):
                im1 = skio.imread(os.path.join(img_dir, str(i) + 'Test.png'))
                im2 = skcolor.rgb2grey(im1)
                im3 = tsf.resize(im2, (20, 20), order=1, mode='constant')
                images.append(im3.T.reshape(-1))

            X_images = np.asarray(images)
            print("X_images shape: {}".format(X_images.shape))
            flt_images = threshold_filtered_images(X_images, 20, 20)

            values, indices = ImgConvNets.predict('dcnn',
                                                  res_dir,
                                                  'digits2_dcnn',
                                                  flt_images,
                                                  k=3)

            for i in range(0, 10):
                print(
Example #34
    def updateProperties(self, qimg=None, save_properties=True):
        """
        Extracts the properties (regionprops) based on the current segmentation, incorporating
        manually added and excluded regions.
        The qimg parameter allows to call this function for batch processing by providing
        the target image (instead of the currently shown image in CP)

        :param qimg: cp.Image query object
        """

        # if no qimg object is provided - get the currently displayed image from CP
        if qimg is None:
            # get current frame
            self.cframe = self.cp.getCurrentFrame()

            # retrieve data
            self.qimg = self.db.getImage(frame=self.cframe, layer=self.getOption("evaluation_layer"))
        else:
            self.qimg = qimg

        if self.qimg is None:
            raise IndexError("No image found with sort_index %d and layer %s." % (self.cframe, self.getOption("evaluation_layer")))

        if self.getOption("file_source") == "clickpoints":
            img = self.qimg.data
        elif self.getOption("file_source") == "file":
            img = imread(os.path.join(list(self.db.getPaths())[0].path, self.qimg.filename))

        # convert rgb to grayscale
        if img.shape[-1] == 3:
            img = rgb2grey(img)
            # convert to 0-255 range if applicable
            if img.max() <= 1.0:
                img*=255

        # cleanup
        self.db.deleteMarkers(image=self.qimg, type='cell (auto)')

        # get current mask
        cp_mask = self.db.getMask(image=self.qimg)

        # skip processing if no mask is found
        if cp_mask is None:
            print("Addon CellMeasure: No mask found for current image!")
            return

        # get mask data and apply filter for user data
        mask = np.array(cp_mask.data).copy()
        mask[mask == self.MaskType_manual.index] = 1  # set manual marked area to true
        mask[mask == self.MaskType_exclude.index] = 0  # remove manually excluded areas

        # get regionprops
        mask_labeled = label(mask)
        regions = regionprops(mask_labeled, img)

        # iterate over properties and set marker for display
        passed = {}
        cell_markers = {}
        for nr,region in enumerate(regions):
            passed[nr] = True
            text = ""
            for key in self.regionsprops_actOptions:
                if self.getOption(self.regionsprops_actOptions[key]):
                    if self.evalInputs[key][1].value()<=region[key]<=self.evalInputs[key][2].value():
                        text += "\n%s= %.2f"%(key, region[key])
                        pass
                    else:
                        passed[nr] = False
                        break
            if passed[nr]:
                cell_markers[nr] = self.db.setMarker(image=self.qimg, x=region.centroid[1], y=region.centroid[0],
                                  text='Cell $marker_id%s' % (text), type='cell (auto)')

        # update CP display to show marker
        self.cp.reloadMarker()

        # store parameters in Pandas DF
        results = []
        for nr, prop in enumerate(regions):
            # if prop.area > self.inputMinSize.value():
            if passed[nr]:
                tmp = dotdict()
                tmp.frame = cell_markers[nr].image.sort_index
                tmp.nr = cell_markers[nr].id
                tmp.filename = self.qimg.filename
                tmp.centroid_x = prop.centroid[0]
                tmp.centroid_y = prop.centroid[1]

                for key in REGIONPROPS_TYPE:
                    tmp[key]=prop[key]
                # tmp.area = prop.area
                # tmp.mean_intensity = prop.mean_intensity
                # tmp.equivalent_diameter = prop.equivalent_diameter
                # tmp.axis_minor = prop.minor_axis_length
                # tmp.axis_major = prop.major_axis_length

                results.append(tmp)
        columns = ["frame", "nr", "filename", "centroid_x", "centroid_y", *REGIONPROPS_TYPE.keys()]

        df = pd.DataFrame.from_records(results, columns=columns)
        # print(df)

        # if the save flag is set - store results to file
        if save_properties:
            df.to_excel(os.path.splitext(self.qimg.filename)[0] + '_eval.xls')

        return df
Example #35
 def test_rgb2grey_alpha(self):
     x = np.random.rand(10, 10, 4)
     assert rgb2grey(x).ndim == 2
Example #36
 def test_case3(self):
     img3 = rgb2grey(imread('test3.png'))
     i = segment(img3)
     self.assertEqual(len(i[1]),2)
Example #37
 def test_case1(self):
     img1 = rgb2grey(imread('test1.png'))
     i = segment(img1)
     self.assertEqual(len(i[1]),5)
Example #38
        decimal_value = 0
        hex_string = []
        for index, value in enumerate(difference):
            if value:
                decimal_value += 2**(index % 8)
            if (index % 8) == 7:
                hex_string.append(hex(decimal_value)[2:].rjust(2, '0'))
                decimal_value = 0
    
        total_string.extend(hex_string)
    return ''.join(total_string)

img1 = imread('cat_reference.png')
img2 = imread('cat_eye.png')
img3 = imread('pingouin1.png')

# Convert the pictures to grey-scale
greyscale_img1 = rgb2grey(img1)
greyscale_img2 = rgb2grey(img2)
greyscale_img3 = rgb2grey(img3)

# Resize the pictures in 9x8
resized_img1 = resize(greyscale_img1, (9,8))
resized_img2 = resize(greyscale_img2, (9,8))
resized_img3 = resize(greyscale_img3, (9,8))

# Compute the difference matrix
differences1 = resized_img1 > resized_img2

differences2 = resized_img1 > resized_img3
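
Packing one of these boolean difference matrices into the kind of hex digest assembled by the loop fragment above can be sketched compactly (a 9x8 matrix gives 72 bits, hence 18 hex digits):

def to_hex_hash(diff):
    # Flatten the boolean matrix into a bit string, then format it as hex.
    bits = ''.join('1' if v else '0' for v in diff.flatten())
    return '{:0{}x}'.format(int(bits, 2), len(bits) // 4)

print(to_hex_hash(differences1))
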
Example #39
    def test_rgb2grey(self):
        x = np.array([1, 1, 1]).reshape((1, 1, 3)).astype(float)
        g = rgb2grey(x)
        assert_array_almost_equal(g, 1)

        assert_equal(g.shape, (1, 1))
Example #40
img = img_as_float(image)
p2, p98 = np.percentile(img, (0.0, 20))
# p2, p98 = np.percentile(img, (0.2, 100))
img_rescale = exposure.rescale_intensity(img,
                                         in_range=(p2,
                                                   p98))  # Contrast stretching
# img_eq = exposure.equalize_hist(img) #linear, ups noise
# img=img_rescale
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
# img=img_adapteq
img_rescale = exposure.rescale_intensity(img_adapteq,
                                         in_range=(p2,
                                                   p98))  # Contrast stretching

# img=image
img_grey = rgb2grey(img)  #rgb2grey(img) # img_grey = rgb2gray(rgba2rgb(img))
# img = img_as_float(img)
# t = threshold_otsu(img_grey)
t = threshold_yen(img_grey)
print("t:", t)  #0.005
img_binarised = img_grey > t  #<
# img_binarised = img_grey > 0.05 #0.05
img_labelled = measure.label(img_binarised.astype('uint8'))
# img_labelled=img

fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(10, 4))

# ax[0].imshow(img)
# ax[0].set_title('jh', fontsize=12)
# ax[1].imshow(img_labelled, cmap='gray')
# ax[1].set_title(f'Binarised (Using Yen)', fontsize=12)
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 25 20:15:14 2018

@author: Xiaopeng
"""

import numpy as np
from skimage import io, data, color
img = data.coffee()
cutp = img[100:200, 300:400, :]
# Crop the image
io.imsave('C:\\Users\\Xiaopeng\\Desktop\\Picture\\coffeeCut.jpg', cutp)
# Assign one region of the image to another
img[40:70, :] = img[350:380, :]
io.imsave('C:\\Users\\Xiaopeng\\Desktop\\Picture\\coffee=.jpg', img)

# Binarize the image
picture = data.immunohistochemistry()
img_grey = color.rgb2grey(picture)
print(img_grey.shape)
rows, cols = img_grey.shape
for i in range(rows):
    for j in range(cols):
        if img_grey[i, j] <= 0.5:
            img_grey[i, j] = 0
        else:
            img_grey[i, j] = 1
io.imshow(img_grey)
io.imsave('C:\\Users\\Xiaopeng\\Desktop\\Picture\\separate.jpg', img_grey)
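
The double loop above can be collapsed into one vectorized comparison; a sketch producing the same 0/1 image:

img_grey = (img_grey > 0.5).astype(img_grey.dtype)
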
Example #42
#== CANS AT 16 ANGLES (guessed about 360 degrees/16) ==
w,h,s = 14,17, 3
can1, can2 =  getCanImg(M1,s,0,98,46,w,h),  getCanImg(M1,s,10,104,41,w,h)
can3, can4 =  getCanImg(M1,s,16,110,44,w,h),getCanImg(M1,s,19,110,50,w,h)
can5, can6 =  getCanImg(M1,s,22,110,60,w,h),getCanImg(M1,s,26,106,69,w,h)
can7, can8 =  getCanImg(M1,s,31,101,76,w,h),getCanImg(M1,s,35,93,76,w,h)
can9, can10 = getCanImg(M1,s,44,82,49,w,h), getCanImg(M1,s,50,81,38,w,h)
can11,can12 = getCanImg(M1,s,55,80,40,w,h), getCanImg(M1,s,64,82,65,w,h)
can13,can14 = getCanImg(M1,s,74,90,80,w,h), getCanImg(M1,s,79,92,70,w,h)
can15,can16 = getCanImg(M1,s,94,92,41,w,h), getCanImg(M1,s,111,88,73,w,h)

obs = [can1,can2,can3,can4,can5,can6,can7,can8,can9,can10,can11,can12,can13,can14,can15,can16]
imageSetToObjectCoordinates( [ M1[3] ], obs, name='data/cam1_4')

if False: #- alternate way
	imageSetToObjectCoordinates([M1[1]], rgb2grey(M1[0][29][67:114:,58:89,:]), bConv=True, name='data/cam1_4')
	#imageSetToObjectCoordinates([M1[3]], M1[0][29][67:114:,58:89,:], bConv=False, name='cam1_4')

#== CANS AT 16 ANGLES (guessed about 360 degrees/16) ==
w,h,s, oy = 17,20, 3, 10
can1, can2 =  getCanImg(M2,s, 0, 48,108,w,h,offy=oy), getCanImg(M2,s, 5, 61, 93,w,h,offy=oy) 
can3, can4 =  getCanImg(M2,s,12, 81, 69,w,h,offy=oy), getCanImg(M2,s,17, 96, 58,w,h,offy=oy)
can5, can6 =  getCanImg(M2,s,20,104, 59,w,h,offy=oy), getCanImg(M2,s,24,104, 68,w,h,offy=oy)
can7, can8 =  getCanImg(M2,s,29,102, 90,w,h,offy=oy), getCanImg(M2,s,33, 94,105,w,h,offy=oy)
can9, can10 = getCanImg(M2,s,37, 86,114,w,h,offy=oy), getCanImg(M2,s,42, 74,110,w,h,offy=oy)
can11,can12 = getCanImg(M2,s,46, 65, 98,w,h,offy=oy), getCanImg(M2,s,49, 59, 82,w,h,offy=oy)
can13,can14 = getCanImg(M2,s,53, 57, 65,w,h,offy=oy), getCanImg(M2,s,58, 55, 52,w,h,offy=oy)
can15,can16 = getCanImg(M2,s,67, 64, 73,w,h,offy=oy), getCanImg(M2,s,74, 75,107,w,h,offy=oy)
obs = [can1,can2,can3,can4,can5,can6,can7,can8,can9,can10,can11,can12,can13,can14,can15,can16]
plotCanRot(can1,can2,can3,can4,can5,can6,can7,can8,can9,can10,can11,can12,can13,can14,can15,can16,fname='images/canR16_2_4.png')
imageSetToObjectCoordinates( [ M2[3] ], obs, name='data/cam2_4' )
Example #43
def extract_and_describe(img, kmeans):
    features = daisy(rgb2grey(img), step=4).reshape((-1, 200))
    assignments = kmeans.predict(features)
    histogram, _ = np.histogram(assignments, bins=500, range=(0, 499))
    return histogram
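
The kmeans object passed in acts as a visual vocabulary over 200-dimensional daisy descriptors with 500 words (matching the 500-bin histogram); a sketch of fitting such a model, assuming a list of training images:

import numpy as np
from sklearn.cluster import MiniBatchKMeans
from skimage.color import rgb2grey
from skimage.feature import daisy

def build_vocabulary(train_imgs, n_words=500):
    # Stack daisy descriptors from all training images, then cluster them.
    descriptors = np.vstack([daisy(rgb2grey(img), step=4).reshape((-1, 200))
                             for img in train_imgs])
    return MiniBatchKMeans(n_clusters=n_words).fit(descriptors)
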
Example #44
 def test_rgb2grey_on_grey(self):
     rgb2grey(np.random.rand(5, 5))
Example #45
File: test.py  Project: wangdanfeng/KNN
    exit(0)
    path = 'data/img/trains/'
    for file in os.listdir(path):
        file_path = os.path.join(path, file)
        print(file_path)
    exit(0)
    # impath = 'cat.png'
    # image = io.imread(impath, as_gray=True)
    # img = data.hubble_deep_field()
    # img = data.chelsea()
    img = data.astronaut()

    # impath = 'test.png'
    # img = io.imread(impath)
    img2 = data.astronaut()
    img2 = color.rgb2grey(img2)
    # io.imshow(img2)
    # io.imsave('data-img2.png',img2)
    # print(img)  # decimal values represent color intensity
    # print "lbp"
    # print img2
    # exit(0)
    # show(img)
    # Pixel access: show the pixel value at row 20, column 10 of the B channel
    # print (img[20, 10, 2])
    # The following displays the red channel as a single-channel image
    img_R = img[:, :, 0]
    # show(img_R)
    # Pixel modification, e.g. randomly adding salt-and-pepper noise to the astronaut image
    # Randomly generate 5000 salt-and-pepper points
    # img = change_salt(img)
Example #46
def preprocess(state):
    state = rgb2grey(state)
    state = state.reshape([1, 210, 160])
    state /= 255
    return state
Example #47
 def test_case2(self):
     img2 = rgb2grey(imread('test2.png'))
     i = segment(img2)
     self.assertEqual(len(i[1]),3)
Example #48
def harris_detector(image):
    image = rgb2grey(image)
    coords_img = corner_peaks(corner_harris(image), min_distance=5)
    coords_subpix_img = corner_subpix(image, coords_img, window_size=20)
    return coords_img, coords_subpix_img
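
A hedged usage sketch with a bundled scikit-image test picture:

from skimage import data

coords, coords_subpix = harris_detector(data.astronaut())
print(coords.shape)  # (n_corners, 2) row/col positions
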
Example #49
 def test_case4(self):
     img4 = rgb2grey(imread('test4.png'))
     i = segment(img4)
     self.assertEqual(len(i[1]),1)
Example #50
import matplotlib.pyplot as plt
#import skimage.util as util
import skimage.color as color
import skimage.feature as feature
import skimage.io as io
import numpy as np
import morpho  # local module providing morphoErode / morphoDilate (not shown here)

# Make this the path to the file you wish to process
fname = "test1a.png"
"""
Our first script for finding eyes using template matching
"""

img = io.imread(fname)

imgB = color.rgb2grey(img) > 0.333
io.imshow(imgB)
plt.show()
se1 = [[1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1],
       [1, 1, 1, 1, 1]]
se1 = np.asarray(se1)
imgE = morpho.morphoErode(imgB, se1, 0)
io.imshow(imgE)
plt.show()
imgD = morpho.morphoDilate(imgB, se1, 0)
io.imshow(imgD)
plt.show()

# find hit-or-miss transform
imgH = imgE ^ imgD
io.imshow(imgH)
Example #51
    def updateSegmentation(self, qimg=None):
        """
        Update segmentation according to parameters TH and SELEM and display results in CP
        The qimg parameter allows to call this function for batch processing by providing
        the target image (instead of the currently shown image in CP)

        :param qimg: cp.Image query object
        """

        # if no qimg object is provided - get the currently displayed image from CP
        if qimg is None:
            # get current frame
            self.cframe = self.cp.getCurrentFrame()

            # retrieve data
            self.qimg = self.db.getImage(frame=self.cframe, layer=self.getOption("segmentation_layer"))
        else:
            self.qimg = qimg

        if self.qimg is None:
            raise IndexError("No image found with sort_index %d and layer %s." % (self.cframe, self.getOption("segmentation_layer")))

        if self.getOption("file_source") == "clickpoints":
            img = self.qimg.data
        elif self.getOption("file_source") == "file":
            # img = imread(os.path.join(list(self.db.getPaths())[0].path, self.qimg.filename))
            img = imread(os.path.join(self.qimg.path.path, self.qimg.filename))

        # convert rgb to grayscale
        if img.shape[-1] == 3:
            img = rgb2grey(img)
            # convert to 0-255 range if applicable
            if img.max() <= 1.0:
                img*=255

        ### pre processing ###
        if self.inputGauss.value() > 0:
            img = gaussian_filter(img, self.getOption("segmentation_gauss"))
            #print("apply Gauss with value", self.getOption("segmentation_gauss"))


        ### segmentation ###
        """
        NOTE:
        add additional approaches here, please follow the provided samples
        return should be binary mask in uint8 dtype
        """

        mode = self.getOption("segmentation_mode")
        # create binary mask
        mask = np.zeros(img.shape, dtype='uint8')
        if mode == "th_simple":
            if self.getOption("segmentation_invert_mask"):
                mask[img < self.getOption("segmentation_th")] = 1
            else:
                mask[img > self.getOption("segmentation_th")] = 1

        if mode == "th_local":
            local_thresh = threshold_local(img, block_size=self.getOption("segmentation_localth_block_size")*2-1)
            if self.getOption("segmentation_invert_mask"):
                mask[img < local_thresh] = 1
            else:
                mask[img > local_thresh] = 1

        if mode == "th_lukas":
            local_thresh = threshold_local(img, block_size=self.getOption("segmentation_localth_block_size")*2-1)
            img_thresh = (img > local_thresh) + 0
            img_erosion = binary_erosion(img_thresh)
            img_re_small = remove_small_objects(img_erosion, min_size=self.getOption("segmentation_localth_min_size"))
            img_gaus = gaussian(img_re_small)
            otsu_th = threshold_otsu(img_gaus)
            img_otsu = (img_gaus > otsu_th) + 0
            mask = (binary_fill_holes(img_otsu) + 0).astype('uint8')

        if mode == "th_elham":
            struct = disk(self.getOption("segmentation_EL_min_size"))#structural element for binary erosion
            # "segmentation_EL_min_size", "segmentation_EL_max_size","segmentation_EL_low_th","segmentation_EL_high_th"
            mask = canny(img, sigma=1, low_threshold=self.getOption("segmentation_EL_low_th"), high_threshold=self.getOption("segmentation_EL_high_th"),use_quantiles=True)  # edge detection
            mask = binary_fill_holes(mask, structure=struct) #fill holes
            mask = binary_erosion(mask, structure=struct).astype(np.uint8)  # erode to remove lines and small schmutz

        # use open operation to reduce noise
        if self.getOption("segmentation_slm_size") != 0:
            mask_open = opening(mask, disk(self.getOption("segmentation_slm_size")))
        else:
            mask_open = mask

        # add user input
        cp_mask = self.db.getMask(image=self.qimg)
        mask_open[mask_open == 1] = self.MaskType_auto.index
        if not cp_mask is None:
            # add additional information
            mask_open[cp_mask.data == self.MaskType_exclude.index] = self.MaskType_exclude.index
            mask_open[cp_mask.data == self.MaskType_manual.index] = self.MaskType_manual.index

        # print(mask_open)

        self.db.setMask(image=self.qimg, data=mask_open)
        self.cp.reloadMask()
Example #52
def main(argv=None):
    content_layers = FLAGS.content_layers.split(',')
    style_layers = FLAGS.style_layers.split(',')
    style_layers_weights = [
        float(i) for i in FLAGS.style_layers_weights.split(",")
    ]
    #num_steps_decay = 82786 * 2 / FLAGS.batch_size

    dirnames = []
    for root, dirs, files in os.walk(FLAGS.data_path):
        dirnames += [[os.path.join(root, name) for name in files]]

    style_features_t = flow_losses.get_style_features(FLAGS)
    training_path = os.path.join(FLAGS.model_path, FLAGS.naming)
    if not (os.path.exists(training_path)):
        os.makedirs(training_path)

    with tf.Session() as sess:
        """Build Network"""
        network_fn = nets_factory.get_network_fn(FLAGS.loss_model,
                                                 num_classes=1,
                                                 is_training=False)
        image_preprocessing_fn, image_unprocessing_fn = preprocessing_factory.get_preprocessing(
            FLAGS.loss_model, is_training=False)

        image_placeholder = tf.placeholder(
            tf.float32,
            shape=[FLAGS.batch_size, FLAGS.image_size, FLAGS.image_size, 3])
        flow_placeholder = tf.placeholder(tf.float32,
                                          shape=[
                                              FLAGS.batch_size - 1,
                                              FLAGS.image_size,
                                              FLAGS.image_size, 2
                                          ])

        generated = model.net(image_placeholder, FLAGS.alpha)
        processed_generated = [
            image_preprocessing_fn(image, FLAGS.image_size, FLAGS.image_size)
            for image in tf.unstack(generated, axis=0, num=FLAGS.batch_size)
        ]
        processed_generated = tf.stack(processed_generated)
        _, endpoints_dict = network_fn(tf.concat(
            [processed_generated, image_placeholder], 0),
                                       spatial_squeeze=False)
        """Build Losses"""
        content_loss = flow_losses.content_loss(endpoints_dict, content_layers)
        style_loss, style_losses = flow_losses.style_loss(
            endpoints_dict, style_features_t, style_layers,
            style_layers_weights)
        tv_loss = flow_losses.total_variation_loss(
            generated)  # use the unprocessed image
        flow_loss = flow_losses.flow_loss(generated, flow_placeholder,
                                          FLAGS.batch_size, FLAGS.image_size,
                                          FLAGS.image_size)

        content_loss = FLAGS.content_weight * content_loss
        style_loss = FLAGS.style_weight * style_loss
        tv_loss = FLAGS.tv_weight * tv_loss
        flow_loss = FLAGS.flow_weight * flow_loss
        loss = style_loss + content_loss + tv_loss + flow_loss
        """Prepare to Train"""
        global_step = tf.Variable(0, name="global_step", trainable=False)
        variable_to_train = []
        for variable in tf.trainable_variables():
            if not (variable.name.startswith(FLAGS.loss_model)):
                variable_to_train.append(variable)

        lr = tf.train.exponential_decay(learning_rate=1e-3,
                                        global_step=global_step,
                                        decay_steps=100000,
                                        decay_rate=1e-1,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-8)
        train_op = optimizer.minimize(loss,
                                      global_step=global_step,
                                      var_list=variable_to_train)
        #train_op = tf.train.AdamOptimizer(1e-3).minimize(loss, global_step=global_step, var_list=variable_to_train)
        variables_to_restore = []
        for v in tf.global_variables():
            if not (v.name.startswith(FLAGS.loss_model)):
                variables_to_restore.append(v)
        saver = tf.train.Saver(variables_to_restore)
        sess.run([
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ])
        init_func = utils._get_init_fn(FLAGS)
        init_func(sess)
        last_file = tf.train.latest_checkpoint(training_path)
        if last_file:
            print('Restoring model from {}'.format(last_file))
            saver.restore(sess, last_file)
        """Start Training"""
        for filenames in dirnames:
            images = []
            images_gray = []
            for filename in sorted(filenames):
                print(filename)
                image_bytes = io.imread(filename)
                image = image_bytes - MEAN_VALUES
                image_gray = color.rgb2grey(image_bytes)
                images.append(image_bytes)
                images_gray.append(image_gray)
            flows = []
            for i in range(len(images_gray) - 1):
                flow = cv2.calcOpticalFlowFarneback(images_gray[i],
                                                    images_gray[i + 1], 0.5, 3,
                                                    15, 3, 5, 1.2, 0)
                flows.append(flow)

            for i in range(len(images) - FLAGS.batch_size):
                feed_dict = {
                    image_placeholder:
                    np.stack(images[i:i + FLAGS.batch_size]),
                    flow_placeholder:
                    np.stack(flows[i:i + FLAGS.batch_size - 1])
                }

                _, c_loss, s_losses, t_loss, f_loss, total_loss, step = sess.run(
                    [
                        train_op, content_loss, style_losses, tv_loss,
                        flow_loss, loss, global_step
                    ],
                    feed_dict=feed_dict)
                """logging"""
                if step % 10 == 0:
                    print(step, c_loss, s_losses, t_loss, f_loss, total_loss)
                """checkpoint"""
                if step % 10000 == 0:
                    saver.save(sess,
                               os.path.join(training_path, 'flow-loss-model'),
                               global_step=step)
                if step == FLAGS.max_iter:
                    saver.save(
                        sess,
                        os.path.join(training_path, 'flow-loss-model-done'))
                    print("Save flow-loss-model done!")
                    return
        if step < FLAGS.max_iter:
            saver.save(sess, os.path.join(training_path,
                                          'flow-loss-model-done'))
            print("Save flow-loss-model done")
Example #53
def stainspace2array(ihc_xyz, channel):
    rescale = rescale_intensity(ihc_xyz[:, :, channel], out_range=(0, 1))
    stain_array = np.dstack((np.zeros_like(rescale), rescale, rescale))
    grey_array = rgb2grey(stain_array)

    return grey_array
from scipy import ndimage, misc
import matplotlib.pyplot as plt
from skimage import color
from skimage import io
import numpy as np

fig = plt.figure()

plt.gray()  # show the filtered result in grayscale

ax1 = fig.add_subplot(221)  # left side
ax2 = fig.add_subplot(222)  # right side
ax3 = fig.add_subplot(223) 
ax4 = fig.add_subplot(224) 

img_noisy=color.rgb2grey(io.imread(r'C:\Users\Gabriel\Documents\Projects\Basecodeit\machine_learning_playground\images\\coins_noisy.gif'))

# 3x3 box kernel, normalized so the convolution is a true mean filter
kernel = np.ones((3, 3)) / 9.0

result = ndimage.convolve(img_noisy, kernel)
result2= ndimage.median_filter(img_noisy, size=(3,3))
result3 = ndimage.median_filter(result, size=(3,3))

ax1.imshow(img_noisy)
ax2.imshow(result)
ax3.imshow(result2)
ax4.imshow(result3)

plt.show()
Example #55
import numpy as np
import matplotlib.pyplot as plt
from skimage import data, io, transform
from skimage.viewer import ImageViewer
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label, regionprops
from skimage.morphology import closing, square
from skimage.color import rgb2grey, label2rgb

#image = io.imread('sampleAudi.png')
#image = io.imread('sampleAudi2.jpg')
image = io.imread('sampleChevrolet.jpg')
#image = io.imread('sampleHonda.jpg')
#image = io.imread('sampleBenz.jpg')
#image = io.imread('sampleBMW3.jpg')

greyImage = rgb2grey(image)
greyImage = transform.resize(greyImage, (500, 600))
threshold = threshold_otsu(greyImage)
imageOtsu = closing(greyImage >= threshold, square(3))

# Original Binary
plt.figure()
io.imshow(imageOtsu)

# Edge detection
cpImage = imageOtsu.copy()
clearBorder = clear_border(cpImage)
labeled = label(clearBorder)
borders = np.logical_xor(imageOtsu, cpImage)
labeled[borders] = -1
labeledImage = label2rgb(labeled, image=imageOtsu)
Example #56
import pickle
import numpy as np
from pylab import *
from skimage import data, io, color
from mypca import MyPCA

pca = MyPCA('trained_model.out')
with open('transformed_set.out', 'rb') as f:
    X = pickle.load(f)
with open('transformed_id_set.out', 'rb') as f:
    ids = pickle.load(f)

for idx in range(0, 10):
    img = color.rgb2grey(
        io.imread('images_training_rev1/' + str(ids[idx]) + '.jpg'))
    subplot(2, 10, idx + 1)
    xlabel("original" + str(ids[idx]))
    imshow(img)

for idx in range(0, 10):
    img = pca.model.inverse_transform(X[idx]).reshape(424, 424)
    subplot(2, 10, idx + 11)
    xlabel("pca" + str(ids[idx]))
    imshow(img)

show()
Example #57
    def extract_coinHough(self, cam_im, ravers):
        """
        EXTRACT COIN
        """
        # STEP 0: INITIALIZATION AND PARAMETERS
        # Image init
        cam_imasarray = np.asarray(cam_im)
        cam_imasarray.setflags(write=1)
        circle_coordinates = None
        size_gaus = int(
            max(15, 2.0 * round(int(np.shape(cam_im)[0] * 0.03) / 2.0) + 1.0))

        # Image is resized to achieve 200 pixels each side
        scalefactors = max(1, int(np.shape(cam_imasarray)[0] / 200.0))

        # STEP 1: COLOR-BASED CROPPING. Crop around the coin
        img_filtered = cv2.medianBlur(cam_imasarray, size_gaus)

        # Contour extraction
        has_cverge = False
        # number of allowed iterations to find valid boundaries
        max_iter = 5
        cnt_ter = 0
        while (not has_cverge) and (cnt_ter < max_iter):
            cnt_ter = cnt_ter + 1
            #  Find boundaries around the coin and crop once
            __, bornes0EQ, bornes1EQ, has_cverge = self.rmvbvckgnd_otsu(
                img_filtered, kernelscale=0.05)
            img_filtered = img_filtered[bornes1EQ[0]:bornes1EQ[1]:1,
                                        bornes0EQ[0]:bornes0EQ[1]:1, :]
            cam_imasarray = cam_imasarray[bornes1EQ[0]:bornes1EQ[1]:1,
                                          bornes0EQ[0]:bornes0EQ[1]:1, :]

        # Same with different blurring
        img_filtered = cv2.medianBlur(cam_imasarray,
                                      2 * int(size_gaus / 4) + 1)
        has_cverge = False
        cnt_ter = 0
        while (not has_cverge) and (cnt_ter < max_iter):
            cnt_ter = cnt_ter + 1
            #  Find boundaries around the coin and crop
            __, bornes0EQ, bornes1EQ, has_cverge = self.rmvbvckgnd_otsu(
                img_filtered, kernelscale=0.05)
            img_filtered = img_filtered[bornes1EQ[0]:bornes1EQ[1]:1,
                                        bornes0EQ[0]:bornes0EQ[1]:1, :]
            cam_imasarray = cam_imasarray[bornes1EQ[0]:bornes1EQ[1]:1,
                                          bornes0EQ[0]:bornes0EQ[1]:1, :]

        # STEP 2: HOUGH PROCESSING. Fitting the coin inside a circle.
        # Preprocessing to improve Hough performance: resizing, median blurring
        cam_imasarray = downscale_local_mean(
            cam_imasarray, (scalefactors, scalefactors, 1)).astype(np.uint8)
        img_filtered = gaussian(rgb2grey(cam_imasarray), size_gaus)
        # Preprocessing: contrast improvement
        img_filtered = Preprocessing.Preprocessing.contrast_stretching(
            img_filtered)
        # cv2 Otsu thresholding and HoughCircles expect an 8-bit image, so
        # stretch whatever range contrast_stretching returns to [0, 255]
        img_filtered = cv2.normalize(img_filtered, None, 0, 255,
                                     cv2.NORM_MINMAX).astype(np.uint8)
        otsu_threshold, _ = cv2.threshold(img_filtered, 0, 255,
                                          cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        # Parameters for Hough
        __l = min(img_filtered.shape[0], img_filtered.shape[1])
        # Margins to extract revers and avers
        marge_ravers = int(0.05 * __l)
        # Minimum distance between the centers of the detected circles
        min_dist_center = max(0.5 * __l, 1)
        # Margin range in % to look for the right diameter
        range_margin = 0.00
        param2scale = 1.0
        size_gaus = 2 * int(0.05 * __l / 2.0) + 1

        try:
            # Try to find circles with increasing radius range
            while circle_coordinates is None:

                min_radius = max(20,
                                 int((0.95 - range_margin) / 4.0 * float(__l)))
                max_radius = int(0.55 * float(__l))

                # Hough transform to detect coins. OpenCV is used rather than
                # scikit-image, since it shows superior circle-detection
                # performance.
                circle_coordinates = cv2.HoughCircles(
                    img_filtered,
                    cv2.HOUGH_GRADIENT,
                    dp=1,
                    minDist=min_dist_center,
                    param1=int(param2scale * otsu_threshold),
                    param2=int(param2scale * otsu_threshold / 2.0),
                    minRadius=min_radius,
                    maxRadius=max_radius)

                # Get circles only within the original image frame. We allow extra margin
                if circle_coordinates is not None:
                    index_good_circles = np.logical_and(
                        np.logical_and(
                            1 < (circle_coordinates[0, :, 0] -
                                 circle_coordinates[0, :, 2]),
                            (circle_coordinates[0, :, 0] +
                             circle_coordinates[0, :, 2]) <
                            img_filtered.shape[1] + 1),
                        np.logical_and(
                            1 < (circle_coordinates[0, :, 1] -
                                 circle_coordinates[0, :, 2]),
                            (circle_coordinates[0, :, 1] +
                             circle_coordinates[0, :, 2]) <
                            img_filtered.shape[0] + 1))
                    circle_coordinates = circle_coordinates[:,
                                                            index_good_circles, :]

                    # If list is empty,  then no circles were found
                    if not (circle_coordinates.any()):
                        circle_coordinates = None

                # Expand the radius range and relax the thresholds
                range_margin = range_margin + 0.01
                min_dist_center = max(min_dist_center, 5.0)
                param2scale = param2scale / 1.05

        except Exception:  # Watershed fallback if the Hough transform failed
            pass

        # STEP 3: EXTRACTION OF THE CIRCLE AND ALLOCATION OF THE IMAGE.
        # After an acceptable candidate has been found, we update the coordinates.
        # circle_coordinates[0, :, 2].argmax(0) picks the circle with the
        # largest radius. Detection and cropping now share the same
        # (downscaled) frame, so no coordinate rescaling is needed here.
        circle_coordinates = np.int16(np.around(
            circle_coordinates[0, circle_coordinates[0, :, 2].argmax(0), :]))

        # Circle boundaries in the original picture; store the detected image.
        # Inflate the detected radius by 2% as a safety margin.
        circle_coordinates[2] = int(circle_coordinates[2] * 1.02)
        radius = circle_coordinates[2]
        taille = 2 * (marge_ravers + circle_coordinates[2])
        getattr(self, ravers).im_BRG = np.zeros(
            (taille, taille, 3), dtype=np.uint8)

        # Copy image
        bornes = [
            circle_coordinates[0] - circle_coordinates[2] - marge_ravers,
            circle_coordinates[0] + circle_coordinates[2] + marge_ravers,
            circle_coordinates[1] - circle_coordinates[2] - marge_ravers,
            circle_coordinates[1] + circle_coordinates[2] + marge_ravers
        ]

        boundaries = [
            int(max(0, bornes[0])),
            int(min(cam_imasarray.shape[1], bornes[1])),
            int(max(0, bornes[2])),
            int(min(cam_imasarray.shape[0], bornes[3]))
        ]

        # Update image (clamped so the copy stays inside both arrays)
        getattr(self, ravers).im_BRG[
            max(0, -bornes[2]):(taille -
                                max(0, bornes[3] - cam_imasarray.shape[0])):1,
            max(0, -bornes[0]):(
                taille - max(0, bornes[1] - cam_imasarray.shape[1])
            ):1, ::] = cam_imasarray[boundaries[2]:boundaries[3]:1,
                                     boundaries[0]:boundaries[1]:1, ::]

        # Set background to white
        im_brg_blurred = cv2.medianBlur(
            getattr(self, ravers).im_BRG, size_gaus)

        # Remove black background
        getattr(self, ravers).im_BRG = self.black2white(getattr(self,
                                                                ravers).im_BRG,
                                                        im_brg_blurred,
                                                        satL=8,
                                                        valU=15)

        # Update attributes
        getattr(self, ravers).xc = float(marge_ravers + circle_coordinates[2])
        getattr(self, ravers).yc = float(marge_ravers + circle_coordinates[2])
        getattr(self, ravers).radius = float(radius)

        # Set to white everything that is not inside the circle.
        # im_BRG is square (taille x taille), so one range serves both axes.
        x_coords, y_coords = np.meshgrid(
            range(0, int(getattr(self, ravers).im_BRG.shape[0]), 1),
            range(0, int(getattr(self, ravers).im_BRG.shape[0]), 1))
        # Transposition
        x_coords = x_coords.T
        y_coords = y_coords.T

        for channel in (0, 1, 2):
            g_ind = (((x_coords - getattr(self, ravers).xc)**2 +
                      (y_coords - getattr(self, ravers).yc)**2) > radius**2)
            x_coords = x_coords[g_ind]
            y_coords = y_coords[g_ind]
            # Transformation to linear indices
            index_ = np.ravel_multi_index(np.concatenate(
                ([np.ravel(x_coords,
                           order='C')], [np.ravel(y_coords, order='C')],
                 [np.ravel(y_coords, order='C') * 0 + channel])),
                                          dims=getattr(self,
                                                       ravers).im_BRG.shape,
                                          order='C')
            np.ravel(getattr(self, ravers).im_BRG, order='C')[index_] = 255
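
The ravel_multi_index bookkeeping in the last step can be expressed more compactly with a boolean mask over an open grid; a minimal standalone sketch of the same whiten-outside-the-circle operation (the function name and arguments are assumptions):

import numpy as np

def whiten_outside_circle(img, xc, yc, radius):
    """Set every pixel outside the circle (xc, yc, radius) to white."""
    rows, cols = img.shape[:2]
    yy, xx = np.ogrid[:rows, :cols]
    outside = (xx - xc) ** 2 + (yy - yc) ** 2 > radius ** 2
    img[outside] = 255  # broadcasts across all three channels
    return img
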
Example #58
                              levels=256,
                              normed=True)
        glcm[i - 1, :8] = greycoprops(matrix, 'dissimilarity').ravel()
        glcm[i - 1, 8:16] = greycoprops(matrix, 'correlation').ravel()

    return np.mean(glcm, axis=0) / 50


def multiappend(seq_features):
    result = seq_features[0]
    for feature in seq_features[1:]:
        result = np.append(result, feature, axis=0)

    return result
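

For 1-D features like these, multiappend is equivalent to a single np.concatenate along axis 0; a quick equivalence check with illustrative arrays:

import numpy as np

a, b, c = np.ones(3), np.zeros(2), np.arange(4.0)
assert np.array_equal(multiappend([a, b, c]),
                      np.concatenate([a, b, c], axis=0))
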


if __name__ == '__main__':
    alltypes = open("classes.txt").read().splitlines()
    clf = joblib.load('clf_final.pkl')
    print(clf)
    img = io.imread("t4.jpg")
    if img.shape != (512, 512, 3):
        img = resize(img, (512, 512, 3))
    greyimg = color.rgb2grey(img)
    area = np.count_nonzero(greyimg)
    BGR = BGRCues_img(img)
    HSV = HSVCues_img(img)
    Hist5, Hist3 = HistCues_img(greyimg, area)
    glcm = GLCM(greyimg)
    test = multiappend([BGR, HSV, Hist5, Hist3, glcm])
    prediction = clf.predict(test.reshape(1, -1))[0]
    print(alltypes[int(prediction)])
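
The top of the GLCM helper was cut off in the listing. A self-contained sketch of the same kind of texture feature follows; the distances and angles are assumptions, chosen so that greycoprops yields the eight values per property that the surviving fragment expects:

import numpy as np
from skimage.feature import greycomatrix, greycoprops

def glcm_features(greyimg):
    # Quantize the float grey image to 8-bit levels for the co-occurrence matrix.
    quantized = (greyimg * 255).astype(np.uint8)
    # 4 distances x 2 angles = 8 values per property (matching glcm[:, :8]).
    matrix = greycomatrix(quantized, distances=[1, 2, 3, 4],
                          angles=[0, np.pi / 2], levels=256, normed=True)
    features = np.empty(16)
    features[:8] = greycoprops(matrix, 'dissimilarity').ravel()
    features[8:] = greycoprops(matrix, 'correlation').ravel()
    return features
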
Example #59
    def test_rgb2grey_contiguous(self):
        x = np.random.rand(10, 10, 3)
        # The result must be C-contiguous even when the input is a
        # non-contiguous view such as x[:5, :5].
        assert rgb2grey(x).flags["C_CONTIGUOUS"]
        assert rgb2grey(x[:5, :5]).flags["C_CONTIGUOUS"]
Example #60
def _scharr(img):
    # Invert the image before edge detection. (The Scharr magnitude itself
    # is unchanged by inversion; only the gradient sign flips.)
    img = 1. - img
    grey = skc.rgb2grey(img)
    return skf.scharr(grey)
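
A usage sketch, assuming skc and skf alias skimage.color and skimage.filters (as the names suggest) and that _scharr is reachable at module scope:

import skimage.color as skc
import skimage.filters as skf
from skimage import data

edges = _scharr(data.astronaut() / 255.0)  # float RGB in [0, 1]
print(edges.shape, float(edges.max()))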