Example #1
import math

import numpy as np
from numpy import array
from PIL import Image
from skimage.morphology import erosion, dilation, square


def get_distorted(image, params, orient="horizont"):
  shifts = []
  np_image = array(image.convert("L"))
  for el in params:
    # el is bound as a default argument: a plain closure over the loop
    # variable would see only its final value when the shift is applied
    if el[0] == "sin":
      shifts.append(lambda x, el=el: np_image.shape[0] / el[1] * \
        np.sin(x * el[2] / np_image.shape[1]))
    if el[0] == "cos":
      shifts.append(lambda x, el=el: np_image.shape[0] / el[1] * \
        np.cos(x * el[2] / np_image.shape[1]))
    if el[0] == "triang":
      # sawtooth shift; note the lambda must be appended like the others
      shifts.append(lambda x, el=el: np_image.shape[0] / el[1] * \
        (x / (el[2] / np_image.shape[1]) -
         math.floor(x / (el[2] / np_image.shape[1]))))
    if el[0] == "erosion":
      np_image = erosion(np_image, square(el[1]))
    if el[0] == "dilation":
      np_image = dilation(np_image, square(el[1]))

  if orient == "horizont":
    for idx in range(np_image.shape[0]):
      for shift in shifts:
        np_image[idx,:] = np.roll(np_image[idx,:], int(shift(idx)))
  if orient == "vert":
    for idx in range(np_image.shape[1]):
      for shift in shifts:
        np_image[:, idx] = np.roll(np_image[:, idx], int(shift(idx)))

  return Image.fromarray(np_image)
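A minimal usage sketch; the input file name and the params tuples below are hypothetical (get_distorted expects a PIL image):

img = Image.open("input.png")  # hypothetical input file
# one sine shift (amplitude h/8, frequency factor 20) followed by a 3x3 erosion
distorted = get_distorted(img, [("sin", 8, 20), ("erosion", 3)], orient="horizont")
distorted.save("distorted.png")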
Example #2
def get_symbols(image):
  dil_eros = bin_search(dilatation_cross_numb, [image], (1, 16), 1.0, "dec")
  block_size = 50
  binary_adaptive_image = erosion(dilation(threshold_adaptive(
    array(image.convert("L")), block_size, offset=10),
      square(dil_eros)), square(dil_eros))

  all_labels = label(binary_adaptive_image, background = True)
  objects = find_objects(all_labels)

  av_width = av_height = 0
  symbols = []

  for obj in objects:
    symb = (binary_adaptive_image[obj], (obj[0].start, obj[1].start))
    symbols.append(symb)
    av_height += symb[0].shape[0]
    av_width += symb[0].shape[1]

  av_width /= float(len(objects))
  av_height /= float(len(objects))

  symbols = [symb for symb in symbols
    if symb[0].shape[0] >= av_height and symb[0].shape[1] >= av_width]

  return symbols
Example #3
def morphoNoiseRemoval(img):
    "Removes noise by succession of 5 opening/closing morphological operators"
    for i in range(0,5):
        img = opening2(img, square(3))
        img = closing2(img, square(3))
        
    return img
Example #4
File: task5a.py  Project: niklasmh/ntnu
def removeChessboard(img):

    # Get the major lines in the image
    edges, dilatedEdges, (h, theta, d) = findLines(img)

    # Create an all-ones image in which the lines will be filled in
    lines = np.ones(img.shape[:2])

    # Add lines to image as zeroes
    for _, angle, dist in zip(*hough_line_peaks(h, theta, d)):
        y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)
        y1 = (dist - img.shape[1] * np.cos(angle)) / np.sin(angle)
        x, y = line(int(y1), 0, int(y0), img.shape[1] - 1)
        x = np.clip(x, 0, img.shape[0] - 1)
        y = np.clip(y, 0, img.shape[1] - 1)
        lines[x, y] = 0

    # Remove border edges from image with all edges
    w = 4
    edges = np.pad(edges[w:img.shape[0] - w, w:img.shape[1] - w], w, mode='constant')

    # Erode the ones image so the zero-valued lines grow wide enough to cover the originals
    lines = erosion(lines, square(13))

    # Remove major lines and close shape paths
    removedChessboard = closing(edges * lines, square(8))

    return removedChessboard
Example #5
def calculate_masked_stats():
    plate_no = "59798"
    parsed = get_plate_files(plate_no)
    for w in ['w2']:
        files = filter(lambda f: f.wave == w[1], parsed)
        # accum = np.zeros((2160, 2160), dtype=np.uint32)
        # files = filter(lambda x: 's1' not in x and 's7' not in x, all_files)
        nof = len(files)
        for i, frame in enumerate(files[0:5], 1):
            LogHelper.logText(frame.fullpath)
            img = imread(frame.fullpath)
            t = filters.threshold_yen(img)
            b1 = img > t
            b2 = binary_erosion(b1, square(2))
            b3 = binary_dilation(b2, square(10))
            b4 = binary_closing(b3, square(3))
            imm = np.ma.masked_where(b4, img)
            mn, mx = np.percentile(imm, (1, 99))
            LogHelper.logText(
                '%3d of %d, %4d-%4d-%4d-%5d, %.0f-%.0f'
                % (i, nof, imm.min(), mn, mx, imm.max(), imm.mean(), imm.std())
            )
            im2 = imm.filled(int(imm.mean()))
            out_name = "{0}\\{5}-{1}{2}-{3}-{4}.tif".format(ROOT_DIR, frame.row, frame.column, frame.site, LogHelper.init_ts, frame.experiment)
            imsave(out_name, im2)
Example #6
def morph(img, tparams):
    ops = [mor.grey.erosion, mor.grey.dilation]
    t = np.random.randint(2)  # index of the operation to apply (0 = erosion, 1 = dilation)
    if t == 0:
        selem = mor.square(np.random.randint(1, tparams['selem_size'][0]))
    else:
        selem = mor.square(np.random.randint(1, tparams['selem_size'][1]))
    return ops[t](img, selem)
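A usage sketch; the tparams layout is inferred from the function body and the values below are made up:

import numpy as np

img = np.random.randint(0, 255, (64, 64)).astype(np.uint8)  # synthetic grayscale patch
tparams = {'selem_size': (4, 7)}  # hypothetical upper bounds for the kernel sizes
out = morph(img, tparams)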
Example #7
 def seg_sect(self, img):
     img_canny = canny(img, sigma=self.sigma,
                       low_threshold=self.low_threshold)
     
     img_dilate = binary_dilation(img_canny, square(3))
     img_erode = binary_erosion(img_dilate, square(3))
     img_fill = binary_fill_holes(img_erode)
     
     return img_fill
Example #8
def process_cell(img):

    # binarize if the image is in grayscale
    if not img.dtype == 'bool':
        img = img > 0  # binarize

    # Compute masks to clean long vertical lines
    h_k = 0.8
    sum0 = np.sum(img, 0)  # flatten the matrix to a row holding each column's sum
    thr0 = sum0 < h_k * img.shape[0]
    thr0 = thr0.reshape(len(thr0), 1)  # reshape as a column vector

    # Compute masks to clean long horizontal lines
    w_k = 0.5
    sum1 = np.sum(img, 1)
    thr1 = sum1 < w_k * img.shape[1]
    thr1 = thr1.reshape(len(thr1), 1)

    mask = thr0.transpose() * thr1  # final mask for the cell
    mask_lines = mask.copy()

    elem = morphology.square(5)
    mask = morphology.binary_erosion(mask, elem)  # remove noise

    img1 = np.bitwise_and(mask, img)  # filtered image

    # segmentation of the digit block
    kerw = 5  # kernel width
    thr_k = 0.8

    # Compute the mask marking the horizontal start and end of the digit region
    sum0 = np.sum(img1, 0)
    sum0 = signal.medfilt(sum0, kerw)
    thr0 = sum0 > thr_k * np.median(sum0)
    thr0 = np.bitwise_and(thr0.cumsum() > 0, np.flipud(np.flipud(thr0).cumsum() > 0))
    thr0 = thr0.reshape(len(thr0), 1)

    # Compute the mask marking the vertical start and end of the digit region
    sum1 = np.sum(img1, 1)
    sum1 = signal.medfilt(sum1, kerw)
    thr1 = sum1 > thr_k * np.median(sum1)
    thr1 = np.bitwise_and(thr1.cumsum() > 0, np.flipud(np.flipud(thr1).cumsum() > 0))
    thr1 = thr1.reshape(len(thr1), 1)

    # Final mask for the start and end of the characters (bounding box of the digit region)
    mask = thr0.transpose() * thr1
    mask = morphology.binary_dilation(mask, morphology.square(2))


    img = np.bitwise_and(mask_lines.astype(img.dtype), img)  # apply the mask to remove the lines
    img = morphology.binary_dilation(img, morphology.disk(1))  # dilate to rejoin digits broken by the previous mask
    img = morphology.binary_erosion(img, morphology.disk(1))  # back to the 'original' shape with the edges joined

    return np.bitwise_and(mask, img)
Example #9
def _getPoseMask(peaks, height, width, radius=4, var=4, mode='Solid'):
    ## MSCOCO Pose part_str = [nose, neck, Rsho, Relb, Rwri, Lsho, Lelb, Lwri, Rhip, Rkne, Rank, Lhip, Lkne, Lank, Leye, Reye, Lear, Rear, pt19]
    # find connection in the specified sequence, center 29 is in the position 15
    # limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], \
    #            [10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17], \
    #            [1,16], [16,18], [3,17], [6,18]]
    # limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], \
    #            [10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17], \
    #            [1,16], [16,18]] # , [9,12]
    # limbSeq = [[3,4], [4,5], [6,7], [7,8], [9,10], \
    #            [10,11], [12,13], [13,14], [2,1], [1,15], [15,17], \
    #            [1,16], [16,18]] # 
    limbSeq = [[2,3], [2,6], [3,4], [4,5], [6,7], [7,8], [2,9], [9,10], \
                         [10,11], [2,12], [12,13], [13,14], [2,1], [1,15], [15,17], \
                         [1,16], [16,18], [2,17], [2,18], [9,12], [12,6], [9,3], [17,18]] #
    indices = []
    values = []
    for limb in limbSeq:
        p0 = peaks[limb[0] -1]
        p1 = peaks[limb[1] -1]
        if 0!=len(p0) and 0!=len(p1):
            r0 = p0[0][1]
            c0 = p0[0][0]
            r1 = p1[0][1]
            c1 = p1[0][0]
            ind, val = _getSparseKeypoint(r0, c0, 0, height, width, radius, var, mode)
            indices.extend(ind)
            values.extend(val)
            ind, val = _getSparseKeypoint(r1, c1, 0, height, width, radius, var, mode)
            indices.extend(ind)
            values.extend(val)
        
            distance = np.sqrt((r0-r1)**2 + (c0-c1)**2)
            sampleN = int(distance/radius)
            # sampleN = 0
            if sampleN>1:
                for i in range(1, sampleN):
                    r = r0 + (r1-r0)*i/sampleN
                    c = c0 + (c1-c0)*i/sampleN
                    ind, val = _getSparseKeypoint(r, c, 0, height, width, radius, var, mode)
                    indices.extend(ind)
                    values.extend(val)

    shape = [height, width, 1]
    ## Fill body
    dense = np.squeeze(_sparse2dense(indices, values, shape))
    ## TODO
    # im = Image.fromarray((dense*255).astype(np.uint8))
    # im.save('xxxxx.png')
    # pdb.set_trace()
    dense = dilation(dense, square(5))
    dense = erosion(dense, square(5))
    return dense
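The loop above densifies each limb by placing intermediate keypoints every radius pixels along the segment between two peaks; a standalone sketch of that sampling arithmetic:

import numpy as np

r0, c0, r1, c1, radius = 10, 10, 40, 50, 4  # two keypoints and the sampling step
distance = np.sqrt((r0 - r1) ** 2 + (c0 - c1) ** 2)
sampleN = int(distance / radius)
points = [(r0 + (r1 - r0) * i / sampleN, c0 + (c1 - c0) * i / sampleN)
          for i in range(1, sampleN)]
print(len(points))  # 11 intermediate points for this 50-pixel segment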
Example #10
def getRegions():
    """Geocode an address and retrieve an image centered
    around its lat/long"""
    address = request.args.get('address')
    results = Geocoder.geocode(address)
    lat, lng = results[0].coordinates
    zip_code = results[0].postal_code

    map_url = 'https://maps.googleapis.com/maps/api/staticmap?center={0},{1}&size=640x640&zoom=19&sensor=false&maptype=roadmap&&style=visibility:simplified|gamma:0.1'
    request_url = map_url.format(lat, lng)
    req = urllib.urlopen(request_url)
    img = io.imread(req.geturl(),flatten=True)
    labels, numobjects = ndimage.label(img)
    image = filter.canny(img, sigma=3)
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(image_label_overlay)
Example #11
def estimate_rotation(img):
    assert(img.dtype == 'bool')

    # remove filled blocks to speed up line detection
    elem = morphology.square(2)
    # morphological gradient; xor is the boolean equivalent of the subtraction
    aux = morphology.binary_dilation(img, elem) ^ morphology.binary_erosion(img, elem)

    # line detection using the probabilistic Hough transform
    thres = 50
    minlen = 0.1 * min(aux.shape)
    maxgap = 0.01 * minlen
    lines = transform.probabilistic_hough(aux, threshold=thres, line_length=minlen, line_gap=maxgap)

    # make sure the first point of each line is the one closest to the origin,
    # and keep the normalized endpoints so the orientation pass sees them
    norm_lines = []
    for lin in lines:
        (x0, y0), (x1, y1) = lin
        if x1*x1 + y1*y1 < x0*x0 + y0*y0:
            (x0, y0), (x1, y1) = (x1, y1), (x0, y0)
        norm_lines.append(((x0, y0), (x1, y1)))

    # dominant orientation
    angle_half_range = np.pi / 4
    nbins = int(2 * angle_half_range * (180. / np.pi) / 0.2)

    orient = []
    for (x0, y0), (x1, y1) in norm_lines:
        orient.append(np.arctan2(y1 - y0, x1 - x0))

    (h, binval) = np.histogram(orient, range=(-angle_half_range, angle_half_range), bins=nbins)
    alpha = binval[h.argmax()] * (180. / np.pi)
    return alpha + 0.5 * (binval[1] - binval[0]) * (180. / np.pi)
Example #12
def process_image(image):
    tic = time.perf_counter()
    # rescale intensity
    p2, p98 = np.percentile(image, (1, 99.9))
    image = rescale_intensity(1.0*image, in_range=(p2, p98))

    # do simple filter based on color value
    thresh = 0.5*threshold_func(image)
    filtered_image = np.zeros_like(image,dtype=np.uint8) # set up all-zero image
    filtered_image[image > thresh] = 1 # filtered values set to 1

    # perform watershed transform to split clusters
    distance = ndi.distance_transform_edt(filtered_image)
    local_maxi = peak_local_max(distance, indices=False, footprint=morphology.square(7),
                            labels=filtered_image, exclude_border=False)
    markers = ndi.label(local_maxi)[0]

    # segment and label particles
    labels = morphology.watershed(-distance, markers, mask=filtered_image)
    backup_labels = labels.copy()

    # remove boundaries and restore any small particles deleted in this process
    labels[find_boundaries(labels)] = 0
    for i in np.unique(backup_labels)[1:]:
        if np.count_nonzero(labels[backup_labels == i]) == 0:
            labels[backup_labels == i] = i
    toc = time.perf_counter()
    procTime = toc - tic
    return image, labels, procTime
Example #13
File: align.py  Project: atbd/PythonUtile
    def run4(self):
        """ This function realigns the images using SURF and RANSAC; works well. """
        for x in xrange(len(self.stack)-1):
            print('Processing image ' + str(x+1))
            im1,im2 = 255.*gaussian_filter(self.stack[x,...], sqrt(self.initial_sigma**2 - 0.25)), 255.*gaussian_filter(self.stack[x+1,...], sqrt(self.initial_sigma**2 - 0.25))
            im1,im2 = enhance_contrast(normaliser(im1), square(5)), enhance_contrast(normaliser(im2), square(5))
            im1, im2 = normaliser(im1), normaliser(im2)
            
            b = cv2.SURF()
            #b.create("Feature2D.BRISK")
            
            k1,d1 = b.detectAndCompute(im1,None)
            k2,d2 = b.detectAndCompute(im2,None)
            
            bf = cv2.BFMatcher()
            matches = bf.knnMatch(d1,d2, k=2)

            # Apply ratio test
            good = []
            for m,n in matches:
                if m.distance < 0.75*n.distance:
                    good.append(m)
            
            g1,g2 = [],[]
            for i in good:
                g1.append(k1[i.queryIdx].pt)
                g2.append(k2[i.trainIdx].pt)

            model, inliers = ransac((np.array(g1), np.array(g2)), AffineTransform, min_samples=3, residual_threshold=self.min_epsilon, max_trials=self.max_trials, stop_residuals_sum=self.min_inlier_ratio)
            
            self.stack[x+1,...] = warp(self.stack[x+1,...], AffineTransform(rotation=model.rotation, translation=model.translation), output_shape=self.stack[x+1].shape)

        self.stack = self.stack.astype(np.uint8)
Example #14
File: dehaze.py  Project: LouisK130/oii
def dark_channel(image, structure=square(15)):
    (h, w, c) = image.shape
    dark_channels = np.zeros_like(image)
    for i in range(c):
        dark_channels[:, :, i] = minimum_filter(image[:, :, i], footprint=structure)
    dark_channel = np.min(dark_channels, axis=2)
    return dark_channel
Example #15
def edge_confidence(epi, window=9, threshold=0.02):
    """
    Calculates the edge confidence according to eq. 2.
    This is a simple measurement which designates an edge where the gray
    values change. An edge is determined by the sum of squared differences
    in pixel intensity between a central pixel and all pixels in a 1D window
    of size window. An edge is assumed if that sum is greater than a given
    threshold.

    Parameters
    ----------
    epi : numpy.array [v,u]
        Set of all gray-value epis for scanline s_hat.
    window : int, optional
        The 1D window size in pixels. As the window should be centered
        symmetrically around the pixel to process, the value should be odd.
        For even numbers the next higher odd number is chosen.
    threshold : float, optional
        The threshold giving the smallest difference in EPI luminance which
        must be exceeded to designate an edge.

    Returns
    -------
    Ce : numpy.array [v,u]
         Edge confidence values for each EPI pixel.
    Me : numpy.array [v,u] of boolean.
        True means an edge was discovered at that pixel.
    """

    v_dim = epi.shape[0]
    u_dim = epi.shape[1]
    # Check dimensions of input data
    assert epi.shape == (v_dim, u_dim,), 'Input EPI has wrong shape in function \'edge_confidence\'.'

    # Make window size odd
    if window % 2 == 0:
        warnings.warn(
            'window should be an odd number in function \'edge_confidence\'. Window size {g} was given but {u} is used instead.'.format(
                g=window, u=window + 1))
        window += 1

    # We avoid the border problem by padding the epi.
    padded_epi = np.pad(epi, ((0, 0), (int(window // 2), int(window // 2))), 'edge')
    assert padded_epi.shape == (v_dim, u_dim + window - 1,), 'Padded epi has wrong shape in function \'edge_confidence\'.'

    # Calculate confidence values
    Ce = np.zeros(epi.shape, dtype=np.float32)  # initiate array
    for k in range(window):
        Ce += (epi[...] - padded_epi[:, k:epi.shape[1] + k]) ** 2
    Me = Ce > threshold  # create confidence Mask
    Me = binary_opening(Me, selem=square(2), out=Me)  # work with square to avoid aliasing

    # Let's see if our results have reasonable meaning
    assert np.all(
        Ce >= 0), 'Negative edge confidence found in function \'edge_confidence\'.'
    assert Ce.shape == (v_dim,
                        u_dim,), 'Ce output has incorrect shape in function \'edge_confidence\'.'
    assert Me.shape == (v_dim,
                        u_dim,), 'Me output has incorrect shape in function \'edge_confidence\'.'
    return Ce, Me
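A usage sketch on a synthetic EPI (the array below is made up; only numpy is needed besides the function itself):

import numpy as np

epi = np.zeros((64, 128), dtype=np.float32)  # synthetic gray-value EPI
epi[:, 64:] = 1.0                            # vertical step edge

Ce, Me = edge_confidence(epi, window=9, threshold=0.02)
print(Ce.shape, Me.sum())  # per-pixel confidences and the number of edge pixels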
Example #16
 def get_rough_detection(self, img, bigsize=40.0, smallsize=4.0, thresh=0):
     diff = self.difference_of_gaussian(-img, bigsize, smallsize)
     diff[diff>thresh] = 1
     
     se = morphology.square(4)
     ero = morphology.erosion(diff, se)
     
     labimage = label(ero)
     #rec = morphology.reconstruction(ero, img, method='dilation').astype(np.dtype('uint8'))
     
     # connectivity=1 corresponds to 4-connectivity.
     morphology.remove_small_objects(labimage, min_size=600, connectivity=1, in_place=True)
     #res = np.zeros(img.shape)
     ero[labimage==0] = 0
     ero = 1 - ero
     labimage = label(ero)
     morphology.remove_small_objects(labimage, min_size=400, connectivity=1, in_place=True)
     ero[labimage==0] = 0
     res = 1 - ero
     res[res>0] = 255
     
     #temp = 255 - temp
     #temp = morphology.remove_small_objects(temp, min_size=400, connectivity=1, in_place=True)
     #res = 255 - temp
     
     return res
Example #17
 def focus_score(self):
     f_score = (color.rgb2grey(self.img) - erosion(color.rgb2grey(self.img), square(4)))
     non_zero_pixel_area = self.get_nonzero_pixel_area(f_score)
     #print("focus score: " + str(np.sum(f_score) / non_zero_pixel_area))
     #plt.imshow(f_score)
     #plt.show()
     return np.sum(f_score) / non_zero_pixel_area 
Example #18
File: test.py  Project: vadez/SOP
def segmentation(image):
	"""Executes image segmentation based on various features of the video stream"""
	gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
	blurred = cv2.GaussianBlur(gray, (5, 5), 0)

	ret, bw = cv2.threshold(blurred, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
	
	# Close binary image
	result = cv2.dilate(bw, square(10), iterations = 1)
	result = cv2.erode(result, square(10), iterations = 1)
	
	# Open binary image
	result = cv2.erode(result, square(10), iterations = 1)
	result = cv2.dilate(result, square(10), iterations = 1)
	
	return label(result) * 50
Example #19
File: align.py  Project: atbd/PythonUtile
    def run3(self):
        """ This function tests alternatives to SIFT and ORB. Does not work. """
        for x in xrange(len(self.stack)-1):
            print('Processing image ' + str(x+1))
            im1,im2 = 255.*gaussian_filter(self.stack[x,...], sqrt(self.initial_sigma**2 - 0.25)), 255.*gaussian_filter(self.stack[x+1,...], sqrt(self.initial_sigma**2 - 0.25))
            im1,im2 = enhance_contrast(normaliser(im1), square(3)), enhance_contrast(normaliser(im2), square(3))
            im1, im2 = normaliser(im1), normaliser(im2)
            
            b = cv2.BRISK()
            #b.create("Feature2D.BRISK")
            
            k1,d1 = b.detectAndCompute(im1,None)
            k2,d2 = b.detectAndCompute(im2,None)
            
            bf = cv2.BFMatcher(cv2.NORM_HAMMING)
            matches = bf.match(d1,d2)
            
            g1,g2 = [],[]
            for i in matches:
                g1.append(k1[i.queryIdx].pt)
                g2.append(k2[i.trainIdx].pt)

            model, inliers = ransac((np.array(g1), np.array(g2)), AffineTransform, min_samples=3, residual_threshold=self.min_epsilon, max_trials=self.max_trials, stop_residuals_sum=self.min_inlier_ratio)
            
            self.stack[x+1,...] = warp(self.stack[x+1,...], AffineTransform(rotation=model.rotation, translation=model.translation), output_shape=self.stack[x+1].shape)

        self.stack = self.stack.astype(np.uint8)
Example #20
    def updateRasterInfo(self, **kwargs):
        kwargs['output_info']['statistics'] = ()
        kwargs['output_info']['histogram'] = ()

        self.window = square(int(kwargs.get('size', 3)))
        m = kwargs.get('measure', 'Mean').lower()
        if m == 'minimum':
            self.func = rank.minimum
        elif m == 'maximum':
            self.func = rank.maximum
        elif m == 'mean':
            self.func = rank.mean
        elif m == 'bilateral mean':
            self.func = rank.mean_bilateral
        elif m == 'median':
            self.func = rank.median
        elif m == 'sum':
            self.func = rank.sum
        elif m == 'entropy':
            self.func = rank.entropy
        elif m == 'threshold':
            self.func = rank.threshold
        elif m == 'autolevel':
            self.func = rank.autolevel
        return kwargs
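The elif chain above is a name-to-function table; an equivalent dict-based lookup, sketched here outside the original class:

from skimage.filters import rank

RANK_FUNCS = {
    'minimum': rank.minimum, 'maximum': rank.maximum, 'mean': rank.mean,
    'bilateral mean': rank.mean_bilateral, 'median': rank.median,
    'sum': rank.sum, 'entropy': rank.entropy, 'threshold': rank.threshold,
    'autolevel': rank.autolevel,
}
measure = 'mean'  # stands in for kwargs.get('measure', 'Mean').lower()
func = RANK_FUNCS[measure]  # same function the chain above selects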
Example #21
    def plot_preprocessed_image(self):
        """
        plots pre-processed image. The plotted image is the same as obtained at the end
        of the get_text_candidates method.
        """
        image = restoration.denoise_tv_chambolle(self.image, weight=0.1)
        thresh = threshold_otsu(image)
        bw = closing(image > thresh, square(2))
        cleared = bw.copy()

        label_image = measure.label(cleared)
        borders = np.logical_xor(bw, cleared)

        label_image[borders] = -1
        image_label_overlay = label2rgb(label_image, image=image)

        fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(12, 12))
        ax.imshow(image_label_overlay)

        for region in regionprops(label_image):
            if region.area < 10:
                continue

            minr, minc, maxr, maxc = region.bbox
            rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                      fill=False, edgecolor='red', linewidth=2)
            ax.add_patch(rect)

        plt.show()
Example #22
def median_filter(image, selem=None):
    if selem is None:
        # default mask is a 5x5 square
        selem = square(5)
    depth = image.shape[2]
    # pass a list, not a generator: np.dstack expects a sequence of arrays
    return np.dstack([median(channel[..., 0], selem)
                      for channel in np.dsplit(image, depth)]) / 255.
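A usage sketch on a random RGB image (assuming median here is a skimage median filter that preserves the uint8 dtype, so dividing by 255 yields values in [0, 1]):

import numpy as np

rgb = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)  # synthetic image
smoothed = median_filter(rgb)  # per-channel 5x5 median, rescaled to [0, 1]
print(smoothed.shape)          # (32, 32, 3)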
Example #23
def filter_img(img):
    selem = square(11)
    img[:, :, 0] = rank.mean(img[:, :, 0], selem=selem)
    img[:, :, 1] = rank.mean(img[:, :, 1], selem=selem)
    img[:, :, 2] = rank.mean(img[:, :, 2], selem=selem)
    #return np.array(img, dtype=float)
    return img_as_float(img)
Example #24
def squareMask(maskImg, square_width): #both odd and even square_width are allowed
    boxsize = maskImg.get_xsize()
    maskArray = EMNumPy.em2numpy(maskImg)

    if (boxsize <= square_width):
        print("ERROR: the width of the square cannot be larger than the boxsize of particles.")
        sys.exit()
        
    #from skimage.morphology import square
    #Generates a flat, square-shaped structuring element.
    #Every pixel along the perimeter has a chessboard distance no greater than radius (radius=floor(width/2)) pixels.
    squareArray = square(square_width, dtype=np.uint8)
    m, n = squareArray.shape
    assert m==n
    
    if (m%2 == 0):
        pad_before = (boxsize - m) // 2
        pad_after = (boxsize - m) // 2
    else:
        pad_before = (boxsize - m) // 2
        pad_after = (boxsize - m) // 2 + 1
    #pad_width = (boxsize - square_width)/2
    #print "m, n, pad_before, pad_after", m, n, pad_before, pad_after
    #squareArrayPad = np.pad(squareArray, pad_width, mode='constant')
    squareArrayPad = np.pad(squareArray, (pad_before, pad_after), mode='constant')
    
    squareImg = EMNumPy.numpy2em(squareArrayPad)
    return squareImg
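The padding arithmetic splits boxsize - square_width between the two sides, putting the extra pixel after the square when the width is odd; a worked check with hypothetical sizes:

boxsize, m = 100, 15                 # boxsize and an odd square width
pad_before = (boxsize - m) // 2      # 42
pad_after = (boxsize - m) // 2 + 1   # 43
assert pad_before + m + pad_after == boxsize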
Example #25
def roofRegion(image):
    """Estimate regions based on the edges of the roof region
    """
    # apply threshold
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(image_label_overlay)

    for region in regionprops(label_image):

        # skip small images
        if region.area < 100:
            continue

        # draw rectangle around segmented coins
        minr, minc, maxr, maxc = region.bbox
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=2)
        ax.add_patch(rect)

    plt.show()
Example #26
def blobs(image, remove_mb = None, val = 160, size = 100):
    """ Convolve a kernel and a Gaussian filter on the image to highlight blobs,
    find blobs using the Difference of Gaussian, and optionally remove the
    blobs that sit at the membrane.
    Returns the set of remaining blob coordinates.
    """

    thresh = threshold_otsu(image)

    #Find all the blobs in the image using Difference of Gaussian
    blobs_in_image = feature.blob_dog(image, min_sigma=0.01,
                        max_sigma=3, threshold=thresh)
    blob_list = []
    for blob in blobs_in_image:
        y, x, r = blob
        blob_list.append((y, x))



    if remove_mb is None:
        blob_in_image_after_binary = set(blob_list)

    else:
        #Create a mask to remove blobs that are at the membrane and surrounded
        #by bright big object
        binary = image >= val*thresh/100
        binary = dilation(binary, square(3))
        binary = remove_small_objects(binary, min_size=size)
        # Create a list of coordinate with the binary image
        coor_binary = np.nonzero(binary)
        list_blob_masked = zip(*coor_binary)
        #Subtract the coordinate list of the binary image from the list of blobs
        blob_in_image_after_binary = (set(blob_list) - set(list_blob_masked))

    return blob_in_image_after_binary
Example #27
def main():
    for file_path in glob.glob("/home/lucas/Downloads/Lucas/GSK 10uM/*.JPG"):

        img = data.imread(file_path, as_grey=True)

        img = transform.resize(img, [600, 600])
        img_color = transform.resize(data.imread(file_path), [600, 600])

        img[img >img.mean()-0.1] = 0

        # io.imshow(img)
        # io.show()
        #
        edges = canny(img)
        bordas_fechadas = closing(img > 0.1, square(15)) # closing gaps
        fill_cells = ndi.binary_fill_holes(bordas_fechadas)
        # io.imshow(fill_cells)
        # io.show()
        img_label = label(fill_cells, background=0)
        n = 0
        for x in regionprops(img_label):
            if x.area < 2000 and x.area > 300:
                n += 1
                print(x.area)
                minr, minc, maxr, maxc = x.bbox
                try:
                    out_path_name = file_path.split("/")[-1].rstrip(".JPG")
                    io.imsave("out/cell_{}_pic_{}_area_{}.png".format(n, out_path_name, str(round(x.area))),img_color[minr-3: maxr+3, minc-3: maxc+3])
                    #io.show()
                except:
                    pass
Example #28
def threshold_image(image, threshold=0):
	"""
	This function takes out any values in an image's RGB matrix that are
	below the threshold value.

	Inputs:
	- image: a matrix describing an image with only one channel represented.
	- threshold: a value, between 0 and 1, for which if an image matrix's
				 value is below, will be set to 0, and if above, will be 
				 set to 1.

				 If the threshold is set to 0, then an Otsu thresholding will
				 be returned.

	Outputs:
	- thresholded_image: a matrix representation of the thresholded image.
						 this is essentially a black and white image.
	- thresh: the threshold value

	To screen: the black-and-white image representation.
	"""
	if threshold == 0:
		thresh = threshold_otsu(image)
	else:
		thresh = threshold

	thresholded_image = closing(image > thresh, square(3), out=None)
	imshow(thresholded_image)

	return thresholded_image, thresh
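A usage sketch (the image is synthetic; imshow is assumed to be available in the module's namespace, since the function calls it internally):

import numpy as np

image = np.random.rand(64, 64)                   # synthetic single-channel image
bw, t = threshold_image(image)                   # threshold=0 -> Otsu threshold
bw2, t2 = threshold_image(image, threshold=0.5)  # explicit threshold
print(t, t2)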
Example #29
File: locator.py  Project: waliens/sldc
def mask_to_objects_2d(mask, background=0, offset=None):
    """Convert 2D (binary or label) mask to polygons. Generates borders fitting in the objects.
    Parameters
    ----------
    mask: ndarray
        2D mask array. Expected shape: (height, width).
    background: int
        Value used for encoding background pixels.
    offset: tuple (optional, default: None)
        (x, y) coordinate offset to apply to all the extracted polygons.
    Returns
    -------
    extracted: list of AnnotationSlice
        Each object slice represents an object from the image. Fields time and depth of AnnotationSlice are set to None.
    """
    if mask.ndim != 2:
        raise ValueError("Cannot handle image with ndim different from 2 ({} dim. given).".format(mask.ndim))
    if offset is None:
        offset = (0, 0)
    # opencv only supports contour extraction for binary masks: clean mask and binarize
    mask_cpy = np.zeros(mask.shape, dtype=np.uint8)
    mask_cpy[mask != background] = 255
    # create an artificial separation between adjacent objects touching each other, then clean
    contours = dilation(mask, square(3)) - mask
    mask_cpy[np.logical_and(contours > 0, mask > 0)] = background
    mask_cpy = clean_mask(mask_cpy, background=background)
    # extract polygons and labels
    polygons = _locate(mask_cpy, offset=offset)
    objects = list()
    for polygon in polygons:
        # loop for handling multipart geometries
        for curr in flatten_geoms(polygon.geoms) if hasattr(polygon, "geoms") else [polygon]:
            x, y = get_polygon_inner_point(curr)
            objects.append((polygon, mask[y - offset[1], x - offset[0]]))
    return objects
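A usage sketch on a toy label mask, assuming the extracted geometries are shapely polygons (so .bounds exists) and the module's helpers (_locate, clean_mask, ...) are importable:

import numpy as np

mask = np.zeros((10, 10), dtype=np.uint8)  # two touching labeled objects
mask[1:5, 1:5] = 1
mask[5:9, 5:9] = 2

for geom, value in mask_to_objects_2d(mask, background=0, offset=(100, 200)):
    print(value, geom.bounds)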
Example #30
def label_particles_edge(im, sigma=2, closing_size=0, **extra_args):
    """ Segment image using Canny edge-finding filter.

        parameters
        ----------
        im : image in which to find particles
        sigma : size of the Canny filter
        closing_size : size of the closing filter

        returns
        -------
        labels : an image array of uniquely labeled segments
    """
    from skimage.morphology import square, binary_closing, skeletonize
    if skimage_version < StrictVersion('0.11'):
        from skimage.filter import canny
    else:
        from skimage.filters import canny
    edges = canny(im, sigma=sigma)
    if closing_size > 0:
        edges = binary_closing(edges, square(closing_size))
    edges = skeletonize(edges)
    labels = sklabel(edges)
    print("found {} segments".format(labels.max()))
    # in np.ma.array, mask=True hides a value, so non-edge pixels are masked out
    labels = np.ma.array(labels, mask=edges == 0)
    return labels
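A usage sketch (the input image is synthetic noise, and the module-level sklabel helper referenced above is assumed to be importable):

import numpy as np

im = np.random.rand(128, 128)  # stand-in for a particle image
labels = label_particles_edge(im, sigma=2, closing_size=3)
print(labels.max())            # masked label array; non-edge pixels are hidden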
Example #31
    def _get_batches_of_transformed_samples(self, index_array):
        batch_x = []
        batch_y = []

        for batch_index, image_index in enumerate(index_array):
            _idx = self.image_ids[image_index]

            img0 = all_images[_idx].copy()
            msk0 = all_masks[_idx].copy()
            lbl0 = all_labels[_idx].copy()
            good4copy = all_good4copy[_idx]

            x0 = random.randint(0, img0.shape[1] - input_shape[1])
            y0 = random.randint(0, img0.shape[0] - input_shape[0])
            img = img0[y0:y0 + input_shape[0], x0:x0 + input_shape[1], :]
            msk = msk0[y0:y0 + input_shape[0], x0:x0 + input_shape[1], :]

            if len(good4copy) > 0 and random.random() > 0.75:
                num_copy = random.randrange(1, min(6, len(good4copy) + 1))
                lbl_max = lbl0.max()
                for i in range(num_copy):
                    lbl_max += 1
                    l_id = random.choice(good4copy)
                    lbl_msk = all_labels[_idx] == l_id
                    row, col = np.where(lbl_msk)
                    y1, x1 = np.min(np.where(lbl_msk), axis=1)
                    y2, x2 = np.max(np.where(lbl_msk), axis=1)
                    lbl_msk = lbl_msk[y1:y2 + 1, x1:x2 + 1]
                    lbl_img = img0[y1:y2 + 1, x1:x2 + 1, :]
                    if random.random() > 0.5:
                        lbl_msk = lbl_msk[:, ::-1, ...]
                        lbl_img = lbl_img[:, ::-1, ...]
                    rot = random.randrange(4)
                    if rot > 0:
                        lbl_msk = np.rot90(lbl_msk, k=rot)
                        lbl_img = np.rot90(lbl_img, k=rot)
                    x1 = random.randint(
                        max(0, x0 - lbl_msk.shape[1] // 2),
                        min(img0.shape[1] - lbl_msk.shape[1],
                            x0 + input_shape[1] - lbl_msk.shape[1] // 2))
                    y1 = random.randint(
                        max(0, y0 - lbl_msk.shape[0] // 2),
                        min(img0.shape[0] - lbl_msk.shape[0],
                            y0 + input_shape[0] - lbl_msk.shape[0] // 2))
                    tmp = erosion(lbl_msk, square(5))
                    lbl_msk_dif = lbl_msk ^ tmp
                    tmp = dilation(lbl_msk, square(5))
                    lbl_msk_dif = lbl_msk_dif | (tmp ^ lbl_msk)
                    lbl0[y1:y1 + lbl_msk.shape[0],
                         x1:x1 + lbl_msk.shape[1]][lbl_msk] = lbl_max
                    img0[y1:y1 + lbl_msk.shape[0],
                         x1:x1 + lbl_msk.shape[1]][lbl_msk] = lbl_img[lbl_msk]
                    full_diff_mask = np.zeros_like(img0[..., 0], dtype='bool')
                    full_diff_mask[y1:y1 + lbl_msk.shape[0],
                                   x1:x1 + lbl_msk.shape[1]] = lbl_msk_dif
                    img0[..., 0][full_diff_mask] = median(
                        img0[..., 0], mask=full_diff_mask)[full_diff_mask]
                    img0[..., 1][full_diff_mask] = median(
                        img0[..., 1], mask=full_diff_mask)[full_diff_mask]
                    img0[..., 2][full_diff_mask] = median(
                        img0[..., 2], mask=full_diff_mask)[full_diff_mask]
                img = img0[y0:y0 + input_shape[0], x0:x0 + input_shape[1], :]
                lbl = lbl0[y0:y0 + input_shape[0], x0:x0 + input_shape[1]]
                msk = create_mask(lbl)

            if 'ic100_' in all_ids[_idx] or 'gnf_' in all_ids[_idx]:
                data = self.random_transformers[1](image=img[..., ::-1],
                                                   mask=msk)
            else:
                data = self.random_transformers[0](image=img[..., ::-1],
                                                   mask=msk)

            img = data['image'][..., ::-1]
            msk = data['mask']

            msk = msk.astype('float')
            msk[..., 0] = (msk[..., 0] > 127) * 1
            msk[..., 1] = (msk[..., 1] > 127) * (msk[..., 0] == 0) * 1
            msk[..., 2] = (msk[..., 1] == 0) * (msk[..., 0] == 0) * 1
            otp = msk

            img = np.concatenate([img, bgr_to_lab(img)], axis=2)
            batch_x.append(img)
            batch_y.append(otp)
        batch_x = np.array(batch_x, dtype="float32")
        batch_y = np.array(batch_y, dtype="float32")
        batch_x = preprocess_inputs(batch_x)
        return self.transform_batch_x(batch_x), self.transform_batch_y(batch_y)
Example #32
'''
Created on Aug 6, 2019

@author: jsaavedr
Median Filter
'''

import matplotlib.pyplot as plt
import skimage.filters as filters
import skimage.morphology as morphology
import scipy.ndimage.filters as nd_filters
import utils
import pai_io

if __name__ == '__main__':
    filename = '../images/gray/ruido.tif'
    image = pai_io.imread(filename, as_gray=True)
    strel = morphology.square(3)
    image_median = filters.median(image, strel)
    g_kernel = utils.get_gaussian2d(2, 6)
    image_g = nd_filters.convolve(image, g_kernel, mode='constant', cval=0)
    fig, xs = plt.subplots(1, 3)
    for i in range(3):
        xs[i].set_axis_off()
    xs[0].imshow(image, cmap='gray', vmin=0, vmax=255)
    xs[0].set_title('Image')
    xs[1].imshow(image_g, cmap='gray', vmin=0, vmax=255)
    xs[1].set_title('Gaussian Filter')
    xs[2].imshow(image_median, cmap='gray', vmin=0, vmax=255)
    xs[2].set_title('Median Filter')
    plt.show()
Example #33
M = dict_imp.transform(M)
M = normalize(M)
print('Number of samples in Dictionary', len(names))
M = remove_bands(M, bands)
data = remove_bands(data, bands)
recon_img = np.reshape(data.transpose(),
                       (data.shape[1], img.shape[0], img.shape[1]))
#spec.view_cube(recon_img)
print('Dictionary Shape', M.shape)
print('Data reduced shape', data.shape)
pad_img = add_padding(recon_img, 3)
print('Padded image', pad_img.shape)

M = M.transpose()
data = data.transpose()

mu = .004
lamb = .1
gamma = .006
n_iter = 300
width = 5
strel = square(width)
#data = np.transpose(data)

X = morph_opt(M, data, lamb, gamma, mu, strel, n_iter=n_iter)
#M = np.transpose(M)
print(X.shape)
recon = np.dot(M, X)
#recon = np.transpose(recon)
recon_img = np.reshape(recon, (250, 190, 188))
Example #34
def erosion(img):
    # return greyscale morphological erosion of an image
    return mp.erosion(img, mp.square(2, dtype=np.uint8))
Example #35
def remove_dead_pixels(vid, thresh=1.1):
    for frame in tqdm(range(vid.shape[0]), desc='Removing Dead Pixels'):
        med = skimage.filters.median(vid[frame, :, :], square(10)).ravel()
        img = vid[frame, :, :].ravel()
        img[img > thresh * med] = med[img > thresh * med]
        vid[frame, :, :] = img.reshape(vid.shape[1], vid.shape[2])
Example #36
def Bfx_lbp(I, R=None, options={}):
    """
     X, Xn, options = Bfx_lbp(I, R, options)

     Toolbox: Balu
        Local Binary Patterns features

        X is the features vector, Xn is the list of feature names (see Example
        to see how it works).

        It calculates the LBP over a regular grid of patches. The function
        uses scikit-image's local_binary_pattern implementation
        (see http://scikit-image.org/docs/dev/api/skimage.feature.html#local-binary-pattern).

        It returns a matrix of uniform lbp82 descriptors for I, made by
        concatenating histograms of each grid cell in the image.
        Grid size is options['hdiv'] * options['vdiv']

        R is a binary image or empty. If R is given, the LBP will be computed
        with the pixels of I where R==0 set to 0.

        options['mappingtype'] can take one of these values: {'nri_uniform', 'uniform', 'ror', 'default'}.
        If no option is provided, 'nri_uniform', which produces histograms of 59 bins, is used.

         Output:
         X is a matrix of size ((hdiv*vdiv) x 59), each row has a
             histogram corresponding to a grid cell. We use 59 bins.
         options['x'] of size hdiv*vdiv is the x coordinates of center of ith grid cell
         options['y'] of size hdiv*vdiv is the y coordinates of center of ith grid cell
         Both coordinates are calculated as if image was a square of side length 1.

         References:
         Ojala, T.; Pietikainen, M. & Maenpaa, T. Multiresolution gray-scale
         and rotation invariant texture classification with local binary
         patterns. IEEE Transactions on Pattern Analysis and Machine
         Intelligence, 2002, 24, 971-987.

         Mu, Y. et al (2008): Discriminative Local Binary Patterns for Human
         Detection in Personal Album. CVPR-2008.

         Example 1:
            import numpy as np
            from balu.ImagesAndData import balu_imageload
            from balu.FeatureExtraction import Bfx_lbp
            from balu.InputOutput import Bio_printfeatures
            from matplotlib.pyplot import bar, figure, show

            options = {
                'weight': 0,                    # weight of the histogram bins
                'vdiv': 3,                      # number of vertical divisions
                'hdiv':  3,                     # number of horizontal divisions
                'samples': 8,                   # number of neighbor samples
                'mappingtype': 'nri_uniform'    # uniform LBP
            }
            I = balu_imageload('testimg1.jpg')  # input image
            J = I[119:219, 119:239, 1]          # region of interest (green)
            figure(1)
            imshow(J, cmap='gray')              # image to be analyzed
            X, Xn = Bfx_lbp(J, None, options)   # LBP features
            figure(2);
            bar(np.arange(X.shape[1]), X[0, :])
            Bio_printfeatures(X, Xn)
            show()

         Example 2:
            import numpy as np
            from balu.ImagesAndData import balu_imageload
            from balu.FeatureExtraction import Bfx_lbp
            from balu.InputOutput import Bio_printfeatures
            from matplotlib.pyplot import bar, figure, show

            options = {
                'weight': 0,                    # weight of the histogram bins
                'vdiv': 3,                      # number of vertical divisions
                'hdiv':  3,                     # number of horizontal divisions
                'samples': 8,                   # number of neighbor samples
                'mappingtype': 'uniform'        # uniform LBP
            }
            I = balu_imageload('testimg1.jpg')  # input image
            J = I[119:219, 119:239, 1]          # region of interest (green)
            figure(1)
            imshow(J, cmap='gray')              # image to be analyzed
            X, Xn = Bfx_lbp(J, None, options)   # LBP features
            figure(2);
            bar(np.arange(X.shape[1]), X[0, :])
            Bio_printfeatures(X, Xn)
            show()

       See also Bfx_gabor, Bfx_clp, Bfx_fourier, Bfx_dct.

     (c) GRIMA-DCCUC, 2011
     http://grima.ing.puc.cl

     With collaboration from:
     Diego Patiño ([email protected]) -> Translated implementation into python (2017)
    """

    if R is None:
        R = np.ones(I.shape)

    if 'show' not in options:
        options['show'] = False

    if 'normalize' not in options:
        options['normalize'] = False

    if options['show']:
        print('--- extracting local binary patterns features...')

    if 'samples' not in options:
        options['samples'] = 8

    if 'integral' not in options:
        options['integral'] = False

    if 'radius' not in options:
        options['radius'] = np.log(options['samples']) / np.log(2.0) - 1

    if 'weight' not in options:
        options['weight'] = 0

    LBPst = 'LBP'

    if 'mappingtype' not in options:
        options['mappingtype'] = 'nri_uniform'

    if options['mappingtype'] == 'ror':
        num_patterns = 256
    elif options['mappingtype'] == 'uniform':
        num_patterns = 10
    elif options['mappingtype'] == 'nri_uniform':
        num_patterns = 59
    else:
        options['mappingtype'] = 'default'
        num_patterns = 256

    st = '{0},{1}'.format(options['samples'], options['mappingtype'])

    # Get lbp image
    if R is not None:
        I[np.where(R == 0)] = 0

    radius = options['radius']
    P = options['samples']
    LBP = local_binary_pattern(I, P=P, R=radius, method=options['mappingtype'])
    n1, n2 = LBP.shape
    options['Ilbp'] = LBP

    if options['integral']:
        options['Hx'] = Bim_inthist(LBP, num_patterns)

    vdiv = options['vdiv']
    hdiv = options['hdiv']

    modn1 = n1 % vdiv
    if modn1 != 0:
        LBP = np.concatenate((LBP.T, np.zeros((LBP.shape[1], vdiv - modn1))),
                             1).T
        I = np.concatenate((I.T, np.zeros((I.shape[1], vdiv - modn1))), 1).T

    modn2 = n2 % hdiv
    if modn2 != 0:
        LBP = np.concatenate((LBP, np.zeros((LBP.shape[0], hdiv - modn2))), 1)
        I = np.concatenate((I, np.zeros((I.shape[0], hdiv - modn2))), 1)

    n1, n2 = LBP.shape

    ylen = int(np.round(n1 / vdiv))
    xlen = int(np.round(n2 / hdiv))
    # split image into blocks (saved as columns)
    grid_img = view_as_blocks(LBP, block_shape=(ylen, xlen))
    if options['weight'] > 0:
        LBPst = 'w' + LBPst
        mt = int(2 * radius - 1)
        mt2 = float(mt**2)
        Id = I.astype(int)

        weight = options['weight']
        if weight == 1:
            W = np.abs(
                convolve2d(Id, np.ones((mt, mt)) / mt2, mode='same') - Id)
        elif weight == 2:
            W = (np.abs(
                convolve2d(Id, np.ones((mt, mt)) / mt2, mode='same') -
                Id)) / (Id + 1)
        elif weight == 3:
            W = np.abs(median(Id, square(mt)) - Id)
        elif weight == 4:
            W = np.abs(median(Id, square(mt)) - Id) / (Id + 1)
        elif weight == 5:
            W = np.abs(order_filter(Id, np.ones((mt, mt)), 0) - Id)
        elif weight == 6:
            W = np.abs(order_filter(Id, np.ones((mt, mt)), 0) - Id) / (Id + 1)
        elif weight == 7:
            Id = convolve2d(Id, np.ones((mt, mt)) / mt2, mode='same')
            W = np.abs(order_filter(Id, np.ones((mt, mt)), 0) - Id) / (Id + 1)
        elif weight == 8:
            Id = median(Id, square(mt))
            W = np.abs(order_filter(Id, np.ones((mt, mt)), 0) - Id) / (Id + 1)
        elif weight == 9:
            Id = median(Id, square(mt))
            W = np.abs(order_filter(Id, np.ones((mt, mt)), 1) - Id) / (Id + 1)
        else:
            print("Bfx_lbp does not recognize options['weight'] = {0}.".format(
                options['weight']))

        grid_W = view_as_blocks(W, block_shape=(ylen, xlen))
        num_rows_blocks, num_cols_blocks = grid_W.shape[0:2]
        desc = np.zeros((num_patterns, num_cols_blocks * num_rows_blocks))
        p = 0
        for br in range(num_rows_blocks):
            for bc in range(num_cols_blocks):
                x = grid_img[br, bc].astype(int).ravel()
                y = grid_W[br, bc].ravel()
                d = np.zeros(num_patterns)
                for k in range(ylen * xlen):
                    d[x[k]] += y[k]
                desc[:, p] = d
                p += 1
    else:
        desc = (np.histogram(grid_img.ravel(), num_patterns)[0])[None]
        # calculate coordinates of descriptors as if it was square w/ side=1

    dx = 1.0 / float(hdiv)
    dy = 1.0 / float(vdiv)
    x = np.linspace(dx / 2.0, 1 - dx / 2.0, hdiv)
    y = np.linspace(dy / 2.0, 1 - dy / 2.0, vdiv)
    options['x'] = x
    options['y'] = y

    D = desc.T

    M, N = D.shape
    Xn = (N * M) * [None]
    X = np.zeros((1, N * M))
    k = 0
    for i in range(M):
        for j in range(N):
            Xn[k] = '{0}({1},{2})[{3}]                       '.format(
                LBPst, i, j, st)
            X[0, k] = D[i, j]
            k += 1

    if options['normalize']:
        X = X / np.sum(X)

    return X, Xn
Example #37
	plt.savefig(path + '/outlines/' + input_no_ext + '.jpg')
	plt.close()

path = sys.argv[1]
input_images =  sorted([f for f in os.listdir(path) if f.endswith('.tif')])
print(input_images)
mask_images = sorted([f for f in os.listdir(path) if f.endswith('ties_.png')])
all_dfs = []

print('Paths are correct')
for image_name, mask_name in zip(input_images, mask_images) :

	mask= io.imread(path +'/' + mask_name, as_gray = True)
	mask_bw = rgb2gray(mask)
	thresh = threshold_otsu(mask_bw)
	bw = closing(mask_bw > thresh, square(3))

	cleared = clear_border(bw)

	label_image = measure.label(cleared)
	area = []
	length = []
	width = []
	solidity = []
	centroid = []
	for region in measure.regionprops(label_image):
		area.append(region.area)
		length.append(region.major_axis_length)
		width.append(region.minor_axis_length)
		solidity.append(region.solidity)
		centroid.append(region.centroid)
Example #38
File: mask.py  Project: CosmiQ/solaris
def boundary_mask(footprint_msk=None,
                  out_file=None,
                  reference_im=None,
                  boundary_width=3,
                  boundary_type='inner',
                  burn_value=255,
                  **kwargs):
    """Convert a dataframe of geometries to a pixel mask.

    Note
    ----
    This function requires creation of a footprint mask before it can operate;
    therefore, if there is no footprint mask already present, it will create
    one. In that case, additional arguments for :func:`footprint_mask` (e.g.
    ``df``) must be passed.

    By default, this function draws boundaries *within* the edges of objects.
    To change this behavior, use the `boundary_type` argument.

    Arguments
    ---------
    footprint_msk : :class:`numpy.array`, optional
        A filled in footprint mask created using :func:`footprint_mask`. If not
        provided, one will be made by calling :func:`footprint_mask` before
        creating the boundary mask, and the required arguments for that
        function must be provided as kwargs.
    out_file : str, optional
        Path to an image file to save the output to. Must be compatible with
        :class:`rasterio.DatasetReader`. If provided, a `reference_im` must be
        provided (for metadata purposes).
    reference_im : :class:`rasterio.DatasetReader` or `str`, optional
        An image to extract necessary coordinate information from: the
        affine transformation matrix, the image extent, etc. If provided,
        `affine_obj` and `shape` are ignored
    boundary_width : int, optional
        The width of the boundary to be created **in pixels.** Defaults to 3.
    boundary_type : ``"inner"`` or ``"outer"``, optional
        Where to draw the boundaries: within the object (``"inner"``) or
        outside of it (``"outer"``). Defaults to ``"inner"``.
    burn_value : `int`, optional
        The value to use for labeling objects in the mask. Defaults to 255 (the
        max value for ``uint8`` arrays). The mask array will be set to the same
        dtype as `burn_value`. Ignored if `burn_field` is provided.
    **kwargs : optional
        Additional arguments to pass to :func:`footprint_mask` if one needs to
        be created.

    Returns
    -------
    boundary_mask : :class:`numpy.array`
        A pixel mask with 0s for non-object pixels and the same value as the
        footprint mask `burn_value` for the boundaries of each object.

    """
    if out_file and not reference_im:
        raise ValueError(
            'If saving output to file, `reference_im` must be provided.')
    if reference_im:
        reference_im = _check_rasterio_im_load(reference_im)
    # need to have a footprint mask for this function, so make it if not given
    if footprint_msk is None:
        footprint_msk = footprint_mask(reference_im=reference_im,
                                       burn_value=burn_value,
                                       **kwargs)

    # perform dilation or erosion of `footprint_mask` to get the boundary
    strel = square(boundary_width)
    if boundary_type == 'outer':
        boundary_mask = dilation(footprint_msk, strel)
    elif boundary_type == 'inner':
        boundary_mask = erosion(footprint_msk, strel)
    else:
        raise ValueError('`boundary_type` must be "inner" or "outer".')
    # use xor operator between border and footprint mask to get _just_ boundary
    boundary_mask = boundary_mask ^ footprint_msk
    # scale the `True` values to burn_value and return
    boundary_mask = boundary_mask > 0  # need to binarize to get burn val right
    output_arr = boundary_mask.astype('uint8') * burn_value

    if out_file:
        meta = reference_im.meta.copy()
        meta.update(count=1)
        meta.update(dtype='uint8')
        with rasterio.open(out_file, 'w', **meta) as dst:
            dst.write(output_arr, indexes=1)

    return output_arr
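A usage sketch with a precomputed footprint mask (synthetic footprint; no out_file, so reference_im can be omitted):

import numpy as np

footprint = np.zeros((32, 32), dtype='uint8')  # synthetic footprint mask
footprint[8:24, 8:24] = 255

inner = boundary_mask(footprint_msk=footprint, boundary_width=3)                         # inner ring
outer = boundary_mask(footprint_msk=footprint, boundary_width=3, boundary_type='outer')  # outer ring
print(inner.sum(), outer.sum())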
Example #39
F = I[1525:3000, 1450:1800]
imshow(F, cmap='gray')
show()

print("binarized cropped image")

borde2 = sobel(F, mask=None)
Z = (borde2 > 0.1).astype('uint32')
imshow(Z, cmap='gray')
show()

plt.hist(borde2.ravel(), 256, [0, 1])
plt.show()

selSquare = square(14)
selDisk = disk(12)
selRectangle = rectangle(6, 20)

IDilationSquare = dilation(Z, selem=selSquare)
#imshow(IDilationSquare, cmap='gray')
#show()
IDilationDisk = dilation(Z, selem=selDisk)
#imshow(IDilationDisk, cmap='gray')
#show()
IDilationRectangle = dilation(Z, selem=selRectangle)
imshow(IDilationRectangle, cmap='gray')
show()

IClosingSquare = closing(IDilationDisk, selem=selSquare)
#imshow(IClosingSquare, cmap='gray')
Example #40
    def optimalThreshold(self,
                         color_im,
                         method='r_small_objects',
                         remove_spots=True,
                         level=200,
                         debug=True):
        im = cv2.cvtColor(color_im, cv2.COLOR_BGR2GRAY)
        ret, thresh = cv2.threshold(im, 0, 255,
                                    cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        if debug:
            cv2.imwrite('binary.jpg', thresh)

        thresh = filters.median(thresh, morphology.square(7))
        cv2.imwrite('binary_med.jpg', thresh)
        #MORPHOLOGICAL_REMOVE_SMALL_OBJECTS
        imglab = morphology.label(thresh)  # create labels in segmented image
        min_s = int(im.shape[0])
        try:
            cleaned = morphology.remove_small_objects(imglab,
                                                      min_size=min_s,
                                                      connectivity=8)
        except UserWarning:
            pass
        res_small = np.zeros(cleaned.shape)  # create array of size cleaned
        res_small[cleaned > 0] = 255
        res_small = np.uint8(res_small)
        if debug:
            cv2.imwrite("cleaned.jpg", res_small)

        #MORPOLOGICAL_OPENING_OPENCV
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (32, 32))
        opened_mask = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel)

        if debug:
            cv2.imwrite("opened_mask.jpg", opened_mask)

        if remove_spots:
            blurred = cv2.GaussianBlur(im, (11, 11), 0)
            spots = cv2.threshold(blurred, level, 255, cv2.THRESH_BINARY)[1]
            res_small = res_small - spots
            opened_mask = opened_mask - spots
            if debug:
                cv2.imwrite("cleaned_spots.jpg", res_small)
                cv2.imwrite("opened_mask_spots.jpg", opened_mask)
        img, contours, h = cv2.findContours(res_small, cv2.RETR_TREE,
                                            cv2.CHAIN_APPROX_SIMPLE)

        max_c = []
        max_a = 0
        result = np.zeros(img.shape)
        for contour in contours:
            if cv2.contourArea(contour) > max_a:
                max_c = contour
                max_a = cv2.contourArea(contour)

        bounding = cv2.boundingRect(max_c)
        print(bounding)
        if debug:
            masked_img = cv2.bitwise_and(color_im, color_im, mask=opened_mask)
            masked_img2 = cv2.bitwise_and(color_im, color_im, mask=res_small)
            cv2.imwrite("masked_img.jpg", masked_img)
            cv2.imwrite("masked_img_small.jpg", masked_img2)
        min_x = bounding[1]
        min_y = bounding[0]
        w_x = bounding[1] + bounding[3]
        h_y = bounding[0] + bounding[2]
        return res_small, min_x, min_y, w_x, h_y
Example #41
def binary_image(folder, image_file, threshold=2, figsize=(10, 10),
                 ajar=False, close=False, show=False,
                 channel=None, imname=None):
    """Create binary image from input image with optional opening step.

    Parameters
    ----------
    folder : string
        Directory containing image_file
    image_file : string
        Filename of image to be analyzed.
    threshold : int or float
        Intensity threshold of binary image.
    figsize : tuple of int or float
        Size of output figure.
    ajar : bool
        If True, opens the binary image by performing an erosion followed
        by a dilation (a morphological opening).
    close : bool
        If True, closes the binary image by performing a dilation followed
        by an erosion (a morphological closing).
    show : bool
        If True, outputs image to Jupyter notebook display.
    channel : int
        Channel of image to read in for multichannel images e.g.
        testim[:, :, channel]
    imname : string
        Desired name of output file. Defaults to 'clean_' + image_file.

    Returns
    -------
    op_image : numpy.ndarray
        Output image.

    Examples
    --------

    """

    fname = '{}/{}'.format(folder, image_file)
    if channel is None:
        test_image = sio.imread(fname)
    else:
        test_image = sio.imread(fname)[:, :, channel]

    bi_image = test_image > threshold

    if ajar is True:
        op_image = opening(bi_image, square(3))
    else:
        op_image = bi_image

    if close is True:
        op_image = closing(op_image, square(3))

    if show:
        fig, ax = plt.subplots(figsize=figsize)
        ax.imshow(op_image, cmap='gray')
        ax.axis('off')

    op_image = op_image.astype('uint8')*255

    if imname is None:
        output = "clean_{}".format(image_file)
    else:
        output = imname

    sio.imsave('{}/{}'.format(folder, output), op_image)

    return op_image
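A minimal usage sketch, assuming a folder './data' containing 'cells.tif' (both placeholders):

# binarize, open, then close the mask; writes './data/clean_cells.tif'
op = binary_image('./data', 'cells.tif', threshold=4,
                  ajar=True, close=True, show=False)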
예제 #42
0
    def remove_background(self, method, *args, **kwargs):
        """Perform background subtraction via multiple methods.

        Parameters
        ----------
        method : string
            Specify the method used to determine the direct beam position.

            * 'h-dome' - Uses an h-dome regional filter to remove the
                                background, then renormalises the result.
            * 'gaussian_difference' - Uses a difference between two gaussian
                                convolutions to determine where the peaks are,
                                and sets all other pixels to 0.
            * 'median' - Use a median filter for background removal
            * 'reference_pattern' - Subtract a user-defined reference pattern
                from every diffraction pattern.

        sigma_min : int, float
            Standard deviation for the minimum gaussian convolution
            (gaussian_difference only)
        sigma_max : int, float
            Standard deviation for the maximum gaussian convolution
            (gaussian_difference only)
        footprint : int
            Size of the window that is convoluted with the array to determine
            the median. Should be large enough that it is about 3x as big as the
            size of the peaks (median only).
        implementation : 'scipy' or 'skimage'
            (median only) See expt_utils.subtract_background_median
            for details; if not specified, 'scipy' is used.
        bg : array
            Background array extracted from vacuum. (subtract_reference only)
        *args:
            Arguments to be passed to map().
        **kwargs:
            Keyword arguments to be passed to map().

        Returns
        -------
        bg_subtracted : :obj:`ElectronDiffraction2D`
            A copy of the data with the background subtracted (the maps are
            run with inplace=False, so the original signal is unchanged).

        """
        if method == 'h-dome':
            scale = self.data.max()
            self.data = self.data / scale
            bg_subtracted = self.map(regional_filter,
                                     inplace=False,
                                     *args,
                                     **kwargs)
            bg_subtracted.map(filters.rank.mean, selem=square(3))
            bg_subtracted.data = bg_subtracted.data / bg_subtracted.data.max()

        elif method == 'gaussian_difference':
            bg_subtracted = self.map(subtract_background_dog,
                                     inplace=False,
                                     *args,
                                     **kwargs)

        elif method == 'median':
            if 'implementation' in kwargs.keys():
                if kwargs['implementation'] != 'scipy' and kwargs[
                        'implementation'] != 'skimage':
                    raise NotImplementedError(
                        "Unknown implementation `{}`".format(
                            kwargs['implementation']))

            bg_subtracted = self.map(subtract_background_median,
                                     inplace=False,
                                     *args,
                                     **kwargs)

        elif method == 'reference_pattern':
            bg_subtracted = self.map(subtract_reference,
                                     inplace=False,
                                     *args,
                                     **kwargs)

        else:
            raise NotImplementedError(
                "The method specified, '{}', is not implemented. See "
                "documentation for available implementations.".format(method))

        return bg_subtracted
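A hypothetical usage sketch; `dp` is assumed to be an ElectronDiffraction2D-like signal exposing this method, and the parameter values are illustrative only:

# median background removal with a 19-pixel window (scipy implementation)
dp_median = dp.remove_background('median', footprint=19, implementation='scipy')
# difference-of-gaussians background removal
dp_dog = dp.remove_background('gaussian_difference', sigma_min=2, sigma_max=8)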
예제 #43
0
def clean_image(folder, image_file, threshold=2, figsize=(10, 10),
                ajar=False, close=False, show=False,
                area_thresh=50, channel=None, imname=None):
    """Create binary image from input image with optional opening step.

    Parameters
    ----------
    folder : string
        Directory containing image_file
    image_file : string
        Filename of image to be analyzed.
    threshold : int or float
        Intensity threshold of binary image.
    figsize : tuple of int or float
        Size of output figure.
    ajar : bool
        If True, opens binary image by performing a dilation followed by
        an erosion.
    close : bool
        If True, closes binary image by performing an erosion followed by a
        dilation.
    show : bool
        If True, outputs image to Jupyter notebook display.
    area_thresh : int or float
        Minimum square pixels for object to be included in final image.
    channel : int
        Channel of image to read in for multichannel images e.g.
        testim[:, :, channel]
    imname : string
        Desired name of output file. Defaults to 'short_' + image_file.

    Returns
    -------
    short_image : numpy.ndarray
        Output binary image. All small objects (area < area_thresh) are
        filtered out.
    short_props : list of skimage.measure.RegionProperties
        Properties of all objects retained in the image (area >= area_thresh).

    Examples
    --------

    """

    fname = '{}/{}'.format(folder, image_file)
    if channel is None:
        test_image = sio.imread(fname)
    else:
        test_image = sio.imread(fname)[:, :, channel]
    bi_image = test_image > threshold

    if ajar is True:
        op_image = opening(bi_image, square(3))
    else:
        op_image = bi_image

    if close is True:
        op_image = closing(op_image, square(3))

    op_image = op_image.astype('uint8')*255

#     if default_name:
#         output = "clean_{}.png".format(image_file.split('.')[0])
#     else:
#         output = fname

#     sio.imsave(folder+'/'+output, op_image)

    # Labelling and cleaning up image.
    test_image = op_image
    labels = label(test_image)
    props = regionprops(labels)

    short_image = np.zeros(labels.shape)
    counter = 0
    skip = 0
    short_props = []
    for i in range(0, len(props)):
        area = props[i]['area']
        if area < area_thresh:
            skip = skip + 1
        else:
            short_props.append(props[i])
            test_coords = props[i]['coords'].tolist()
            for coord in test_coords:
                short_image[coord[0], coord[1]] = True
            counter = counter + 1

    if show:
        fig, ax = plt.subplots(figsize=figsize)
        ax.imshow(short_image, cmap='gray')
        ax.axis('off')

    short_image = short_image.astype('uint8')*255

    if imname is None:
        output = "short_{}".format(image_file)
    else:
        output = imname

    sio.imsave(folder+'/'+output, short_image)

    return short_image, short_props
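A minimal usage sketch with the same placeholder inputs as above, adding the area filter:

# keep only objects of at least 50 square pixels
short_image, short_props = clean_image('./data', 'cells.tif', threshold=4,
                                       ajar=True, area_thresh=50)
print(len(short_props), 'objects kept')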
예제 #44
0
import numpy as np
import skimage.morphology as mp  # assumed alias for the `mp` used below

def dilation(img):
    # return greyscale morphological dilation of an image
    return mp.dilation(img, mp.square(2, dtype=np.uint8))
예제 #45
0
def merge_overlapping_images(metadata, inputs):
    """
    Merge simultaneous overlapping images that cover the area of interest.
    When the area of interest is located at the boundary between 2 images, there
    will be overlap between the 2 images and both will be downloaded from Google
    Earth Engine. This function merges the 2 images, so that the area of interest
    is covered by only 1 image.

    KV WRL 2018

    Arguments:
    -----------
    metadata: dict
        contains all the information about the satellite images that were downloaded
    inputs: dict with the following keys
        'sitename': str
            name of the site
        'polygon': list
            polygon containing the lon/lat coordinates to be extracted,
            longitudes in the first column and latitudes in the second column,
            there are 5 pairs of lat/lon with the fifth point equal to the first point:
            ```
            polygon = [[[151.3, -33.7],[151.4, -33.7],[151.4, -33.8],[151.3, -33.8],
            [151.3, -33.7]]]
            ```
        'dates': list of str
            list that contains 2 strings with the initial and final dates in
            format 'yyyy-mm-dd':
            ```
            dates = ['1987-01-01', '2018-01-01']
            ```
        'sat_list': list of str
            list that contains the names of the satellite missions to include:
            ```
            sat_list = ['L5', 'L7', 'L8', 'S2']
            ```
        'filepath_data': str
            filepath to the directory where the images are downloaded

    Returns:
    -----------
    metadata_updated: dict
        updated metadata

    """

    # only for Sentinel-2 at this stage (not sure if this is needed for Landsat images)
    sat = 'S2'
    filepath = os.path.join(inputs['filepath'], inputs['sitename'])
    filenames = metadata[sat]['filenames']

    # find the pairs of images that are within 5 minutes of each other
    time_delta = 5 * 60  # 5 minutes in seconds
    dates = metadata[sat]['dates'].copy()
    pairs = []
    for i, date in enumerate(metadata[sat]['dates']):
        # dummy value so it does not match it again
        dates[i] = pytz.utc.localize(datetime(1, 1, 1) + timedelta(days=i + 1))
        # calculate time difference
        time_diff = np.array(
            [np.abs((date - _).total_seconds()) for _ in dates])
        # find the matching times and add to pairs list
        boolvec = time_diff <= time_delta
        if np.sum(boolvec) == 0:
            continue
        else:
            idx_dup = np.where(boolvec)[0][0]
            pairs.append([i, idx_dup])
    # because there could be triplicates among the S2 images, adjust the pairs for consecutive merges
    for i in range(1, len(pairs)):
        if pairs[i - 1][1] == pairs[i][0]:
            pairs[i][0] = pairs[i - 1][0]

    # for each pair of image, first check if one image completely contains the other
    # in that case keep the larger image. Otherwise merge the two images.
    for i, pair in enumerate(pairs):
        # get filenames of all the files corresponding to each image in the pair
        fn_im = []
        for index in range(len(pair)):
            fn_im.append([
                os.path.join(filepath, 'S2', '10m', filenames[pair[index]]),
                os.path.join(filepath, 'S2', '20m',
                             filenames[pair[index]].replace('10m', '20m')),
                os.path.join(filepath, 'S2', '60m',
                             filenames[pair[index]].replace('10m', '60m')),
                os.path.join(
                    filepath, 'S2', 'meta',
                    filenames[pair[index]].replace('_10m',
                                                   '').replace('.tif', '.txt'))
            ])
        # get polygon for first image
        polygon0 = SDS_tools.get_image_bounds(fn_im[0][0])
        im_epsg0 = metadata[sat]['epsg'][pair[0]]
        # get polygon for second image
        polygon1 = SDS_tools.get_image_bounds(fn_im[1][0])
        im_epsg1 = metadata[sat]['epsg'][pair[1]]
        # check if epsg are the same
        if not im_epsg0 == im_epsg1:
            print(
                'WARNING: there was an error as two S2 images do not have the same epsg,'
                +
                ' please open an issue on Github at https://github.com/kvos/CoastSat/issues'
                + ' and include your script so we can find out what happened.')
            break
        # check if one image contains the other one
        if polygon0.contains(polygon1):
            # if polygon0 contains polygon1, remove files for polygon1
            for k in range(4):  # remove the 3 .tif files + the .txt file
                os.chmod(fn_im[1][k], 0o777)
                os.remove(fn_im[1][k])
            # print('removed 1')
            continue
        elif polygon1.contains(polygon0):
            # if polygon1 contains polygon0, remove image0
            for k in range(4):  # remove the 3 .tif files + the .txt file
                os.chmod(fn_im[0][k], 0o777)
                os.remove(fn_im[0][k])
            # print('removed 0')
            # adjust the order in case of triplicates
            if i + 1 < len(pairs):
                if pairs[i + 1][0] == pair[0]: pairs[i + 1][0] = pairs[i][1]
            continue
        # otherwise merge the two images after masking the nodata values
        else:
            for index in range(len(pair)):
                # read image
                im_ms, georef, cloud_mask, im_extra, im_QA, im_nodata = SDS_preprocess.preprocess_single(
                    fn_im[index], sat, False)
                # in Sentinel2 images close to the edge of the image there are some artefacts,
                # that are squares with constant pixel intensities. They need to be masked in the
                # raster (GEOTIFF). It can be done using the image standard deviation, which
                # indicates values close to 0 for the artefacts.
                if len(im_ms) > 0:
                    # calculate image std for the first 10m band
                    im_std = SDS_tools.image_std(im_ms[:, :, 0], 1)
                    # convert to binary
                    im_binary = np.logical_or(im_std < 1e-6, np.isnan(im_std))
                    # dilate to fill the edges (which have high std)
                    mask10 = morphology.dilation(im_binary,
                                                 morphology.square(3))
                    # mask the 10m .tif file (add no_data where mask is True)
                    SDS_tools.mask_raster(fn_im[index][0], mask10)
                    # now calculate the mask for the 20m band (SWIR1)
                    # for the older version of the ee api calculate the image std again
                    if int(ee.__version__[-3:]) <= 201:
                        # calculate std to create another mask for the 20m band (SWIR1)
                        im_std = SDS_tools.image_std(im_extra, 1)
                        im_binary = np.logical_or(im_std < 1e-6,
                                                  np.isnan(im_std))
                        mask20 = morphology.dilation(im_binary,
                                                     morphology.square(3))
                    # for the newer versions just resample the mask for the 10m bands
                    else:
                        # create mask for the 20m band (SWIR1) by resampling the 10m one
                        mask20 = ndimage.zoom(mask10, zoom=1 / 2, order=0)
                        mask20 = transform.resize(mask20,
                                                  im_extra.shape,
                                                  mode='constant',
                                                  order=0,
                                                  preserve_range=True)
                        mask20 = mask20.astype(bool)
                    # mask the 20m .tif file (im_extra)
                    SDS_tools.mask_raster(fn_im[index][1], mask20)
                    # create a mask for the 60m QA band by resampling the 20m one
                    mask60 = ndimage.zoom(mask20, zoom=1 / 3, order=0)
                    mask60 = transform.resize(mask60,
                                              im_QA.shape,
                                              mode='constant',
                                              order=0,
                                              preserve_range=True)
                    mask60 = mask60.astype(bool)
                    # mask the 60m .tif file (im_QA)
                    SDS_tools.mask_raster(fn_im[index][2], mask60)
                    # make a figure for quality control/debugging
                    # im_RGB = SDS_preprocess.rescale_image_intensity(im_ms[:,:,[2,1,0]], cloud_mask, 99.9)
                    # fig,ax= plt.subplots(2,3,tight_layout=True)
                    # ax[0,0].imshow(im_RGB)
                    # ax[0,0].set_title('RGB original')
                    # ax[1,0].imshow(mask10)
                    # ax[1,0].set_title('Mask 10m')
                    # ax[0,1].imshow(mask20)
                    # ax[0,1].set_title('Mask 20m')
                    # ax[1,1].imshow(mask60)
                    # ax[1,1].set_title('Mask 60 m')
                    # ax[0,2].imshow(im_QA)
                    # ax[0,2].set_title('Im QA')
                    # ax[1,2].imshow(im_nodata)
                    # ax[1,2].set_title('Im nodata')
                else:
                    continue

            # once all the pairs of .tif files have been masked with no_data, merge them using gdal_merge
            fn_merged = os.path.join(filepath, 'merged.tif')
            for k in range(3):
                # merge masked bands
                gdal_merge.main(
                    ['', '-o', fn_merged, '-n', '0', fn_im[0][k], fn_im[1][k]])
                # remove old files
                os.chmod(fn_im[0][k], 0o777)
                os.remove(fn_im[0][k])
                os.chmod(fn_im[1][k], 0o777)
                os.remove(fn_im[1][k])
                # rename new file
                fn_new = fn_im[0][k].split('.')[0] + '_merged.tif'
                os.chmod(fn_merged, 0o777)
                os.rename(fn_merged, fn_new)

            # open both metadata files
            metadict0 = dict([])
            with open(fn_im[0][3], 'r') as f:
                metadict0['filename'] = f.readline().split('\t')[1].replace(
                    '\n', '')
                metadict0['acc_georef'] = float(
                    f.readline().split('\t')[1].replace('\n', ''))
                metadict0['epsg'] = int(f.readline().split('\t')[1].replace(
                    '\n', ''))
            metadict1 = dict([])
            with open(fn_im[1][3], 'r') as f:
                metadict1['filename'] = f.readline().split('\t')[1].replace(
                    '\n', '')
                metadict1['acc_georef'] = float(
                    f.readline().split('\t')[1].replace('\n', ''))
                metadict1['epsg'] = int(f.readline().split('\t')[1].replace(
                    '\n', ''))
            # check if both images have the same georef accuracy
            if np.any(
                    np.array([
                        metadict0['acc_georef'], metadict1['acc_georef']
                    ]) == -1):
                metadict0['acc_georef'] = -1
            # add new name
            metadict0['filename'] = metadict0['filename'].split(
                '.')[0] + '_merged.tif'
            # remove the old metadata.txt files
            os.chmod(fn_im[0][3], 0o777)
            os.remove(fn_im[0][3])
            os.chmod(fn_im[1][3], 0o777)
            os.remove(fn_im[1][3])
            # rewrite the .txt file with a new metadata file
            fn_new = fn_im[0][3].split('.')[0] + '_merged.txt'
            with open(fn_new, 'w') as f:
                for key in metadict0.keys():
                    f.write('%s\t%s\n' % (key, metadict0[key]))

            # update filenames list (in case there are triplicates)
            filenames[pair[0]] = metadict0['filename']

    print(
        '%d out of %d Sentinel-2 images were merged (overlapping or duplicate)'
        % (len(pairs), len(filenames)))

    # update the metadata dict
    metadata_updated = get_metadata(inputs)

    return metadata_updated
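A hypothetical call, assuming `metadata` was returned by the download step. Note that the docstring lists 'filepath_data' but the code reads inputs['filepath'], so that key is used here; the site name and polygon are placeholders:

inputs = {'sitename': 'SITE1',
          'polygon': [[[151.3, -33.7], [151.4, -33.7], [151.4, -33.8],
                       [151.3, -33.8], [151.3, -33.7]]],
          'dates': ['1987-01-01', '2018-01-01'],
          'sat_list': ['S2'],
          'filepath': './data'}
metadata = merge_overlapping_images(metadata, inputs)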
예제 #46
0
draw.set_color(img,[rr,cc],[255,0,0])
plt.imshow(img,plt.cm.gray)
'''8. Hollow ellipse'''
plt.subplot(428)
plt.title('hollow ellipse')
img=data.chelsea()
rr, cc=draw.ellipse_perimeter(150, 150, 30, 80)
draw.set_color(img,[rr,cc],[255,0,0])
plt.imshow(img,plt.cm.gray)
plt.show()
# =============================================================================
print('''11. Basic morphological filtering''')
# =============================================================================
img=data.checkerboard()
'''1. Dilation'''
dst1=sm.dilation(img,sm.square(5))  # dilate with a 5x5 square structuring element
dst2=sm.dilation(img,sm.square(15))  # dilate with a 15x15 square structuring element
fig = plt.figure('morphology',figsize=(12,4))
fig.suptitle('dilation')
plt.subplot(131)
plt.title('origin image')
plt.imshow(img,plt.cm.gray)
plt.subplot(132)
plt.title('morphological image')
plt.imshow(dst1,plt.cm.gray)
plt.subplot(133)
plt.title('morphological image')
plt.imshow(dst2,plt.cm.gray)
plt.show()
'''2. Erosion'''
dst1=sm.erosion(img,sm.square(5))  # erode with a 5x5 square structuring element
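Opening and closing combine the two primitives above. A minimal sketch in the same style (display code omitted, same variable conventions):

'''3. Opening and closing (minimal sketch)'''
dst_open=sm.opening(img,sm.square(5))   # erosion then dilation: removes small bright specks
dst_close=sm.closing(img,sm.square(5))  # dilation then erosion: fills small dark holes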
예제 #47
0
def merge_overlapping_images(metadata, inputs):
    """
    Merge simultaneous overlapping images that cover the area of interest.
    When the area of interest is located at the boundary between 2 images, there 
    will be overlap between the 2 images and both will be downloaded from Google
    Earth Engine. This function merges the 2 images, so that the area of interest 
    is covered by only 1 image.
    
    KV WRL 2018
        
    Arguments:
    -----------
    metadata: dict
        contains all the information about the satellite images that were downloaded
    inputs: dict with the following keys
        'sitename': str
            name of the site
        'polygon': list
            polygon containing the lon/lat coordinates to be extracted,
            longitudes in the first column and latitudes in the second column,
            there are 5 pairs of lat/lon with the fifth point equal to the first point:
            ```
            polygon = [[[151.3, -33.7],[151.4, -33.7],[151.4, -33.8],[151.3, -33.8],
            [151.3, -33.7]]]
            ```
        'dates': list of str
            list that contains 2 strings with the initial and final dates in 
            format 'yyyy-mm-dd':
            ```
            dates = ['1987-01-01', '2018-01-01']
            ```
        'sat_list': list of str
            list that contains the names of the satellite missions to include: 
            ```
            sat_list = ['L5', 'L7', 'L8', 'S2']
            ```
        'filepath_data': str
            filepath to the directory where the images are downloaded
        
    Returns:
    -----------
    metadata_updated: dict
        updated metadata
            
    """

    # only for Sentinel-2 at this stage (not sure if this is needed for Landsat images)
    sat = 'S2'
    filepath = os.path.join(inputs['filepath'], inputs['sitename'])
    filenames = metadata[sat]['filenames']
    # find the pairs of images that are within 5 minutes of each other
    time_delta = 5 * 60  # 5 minutes in seconds
    dates = metadata[sat]['dates'].copy()
    pairs = []
    for i, date in enumerate(metadata[sat]['dates']):
        # dummy value so it does not match it again
        dates[i] = pytz.utc.localize(datetime(1, 1, 1) + timedelta(days=i + 1))
        # calculate time difference
        time_diff = np.array(
            [np.abs((date - _).total_seconds()) for _ in dates])
        # find the matching times and add to pairs list
        boolvec = time_diff <= time_delta
        if np.sum(boolvec) == 0:
            continue
        else:
            idx_dup = np.where(boolvec)[0][0]
            pairs.append([i, idx_dup])

    # for each pair of image, create a mask and add no_data into the .tif file (this is needed before merging .tif files)
    for i, pair in enumerate(pairs):
        fn_im = []
        for index in range(len(pair)):
            # get filenames of all the files corresponding to each image in the pair
            fn_im.append([
                os.path.join(filepath, 'S2', '10m', filenames[pair[index]]),
                os.path.join(filepath, 'S2', '20m',
                             filenames[pair[index]].replace('10m', '20m')),
                os.path.join(filepath, 'S2', '60m',
                             filenames[pair[index]].replace('10m', '60m')),
                os.path.join(
                    filepath, 'S2', 'meta',
                    filenames[pair[index]].replace('_10m',
                                                   '').replace('.tif', '.txt'))
            ])
            # read that image
            im_ms, georef, cloud_mask, im_extra, im_QA, im_nodata = SDS_preprocess.preprocess_single(
                fn_im[index], sat, False)
            # im_RGB = SDS_preprocess.rescale_image_intensity(im_ms[:,:,[2,1,0]], cloud_mask, 99.9)

            # in Sentinel2 images close to the edge of the image there are some artefacts,
            # that are squares with constant pixel intensities. They need to be masked in the
            # raster (GEOTIFF). It can be done using the image standard deviation, which
            # indicates values close to 0 for the artefacts.
            if len(im_ms) > 0:
                # calculate image std for the first 10m band
                im_std = SDS_tools.image_std(im_ms[:, :, 0], 1)
                # convert to binary
                im_binary = np.logical_or(im_std < 1e-6, np.isnan(im_std))
                # dilate to fill the edges (which have high std)
                mask10 = morphology.dilation(im_binary, morphology.square(3))
                # mask all 10m bands
                for k in range(im_ms.shape[2]):
                    im_ms[mask10, k] = np.nan
                # mask the 10m .tif file (add no_data where mask is True)
                SDS_tools.mask_raster(fn_im[index][0], mask10)

                # create another mask for the 20m band (SWIR1)
                im_std = SDS_tools.image_std(im_extra, 1)
                im_binary = np.logical_or(im_std < 1e-6, np.isnan(im_std))
                mask20 = morphology.dilation(im_binary, morphology.square(3))
                im_extra[mask20] = np.nan
                # mask the 20m .tif file (im_extra)
                SDS_tools.mask_raster(fn_im[index][1], mask20)

                # use the 20m mask to create a mask for the 60m QA band (by resampling)
                mask60 = ndimage.zoom(mask20, zoom=1 / 3, order=0)
                mask60 = transform.resize(mask60,
                                          im_QA.shape,
                                          mode='constant',
                                          order=0,
                                          preserve_range=True)
                mask60 = mask60.astype(bool)
                # mask the 60m .tif file (im_QA)
                SDS_tools.mask_raster(fn_im[index][2], mask60)

            else:
                continue

            # make a figure for quality control
            # fig,ax= plt.subplots(2,2,tight_layout=True)
            # ax[0,0].imshow(im_RGB)
            # ax[0,0].set_title('RGB original')
            # ax[1,0].imshow(mask10)
            # ax[1,0].set_title('Mask 10m')
            # ax[0,1].imshow(mask20)
            # ax[0,1].set_title('Mask 20m')
            # ax[1,1].imshow(mask60)
            # ax[1,1].set_title('Mask 60 m')

        # once all the pairs of .tif files have been masked with no_data, merge them using gdal_merge
        fn_merged = os.path.join(filepath, 'merged.tif')

        # merge masked 10m bands and remove duplicate file
        gdal_merge.main(
            ['', '-o', fn_merged, '-n', '0', fn_im[0][0], fn_im[1][0]])
        os.chmod(fn_im[0][0], 0o777)
        os.remove(fn_im[0][0])
        os.chmod(fn_im[1][0], 0o777)
        os.remove(fn_im[1][0])
        os.chmod(fn_merged, 0o777)
        os.rename(fn_merged, fn_im[0][0])

        # merge masked 20m band (SWIR band)
        gdal_merge.main(
            ['', '-o', fn_merged, '-n', '0', fn_im[0][1], fn_im[1][1]])
        os.chmod(fn_im[0][1], 0o777)
        os.remove(fn_im[0][1])
        os.chmod(fn_im[1][1], 0o777)
        os.remove(fn_im[1][1])
        os.chmod(fn_merged, 0o777)
        os.rename(fn_merged, fn_im[0][1])

        # merge QA band (60m band)
        gdal_merge.main(
            ['', '-o', fn_merged, '-n', '0', fn_im[0][2], fn_im[1][2]])
        os.chmod(fn_im[0][2], 0o777)
        os.remove(fn_im[0][2])
        os.chmod(fn_im[1][2], 0o777)
        os.remove(fn_im[1][2])
        os.chmod(fn_merged, 0o777)
        os.rename(fn_merged, fn_im[0][2])

        # remove the metadata .txt file of the duplicate image
        os.chmod(fn_im[1][3], 0o777)
        os.remove(fn_im[1][3])

    print('%d pairs of overlapping Sentinel-2 images were merged' % len(pairs))

    # update the metadata dict
    metadata_updated = copy.deepcopy(metadata)
    idx_removed = []
    idx_kept = []
    for pair in pairs:
        idx_removed.append(pair[1])
    for idx in np.arange(0, len(metadata[sat]['dates'])):
        if idx not in idx_removed: idx_kept.append(idx)
    for key in metadata_updated[sat].keys():
        metadata_updated[sat][key] = [
            metadata_updated[sat][key][_] for _ in idx_kept
        ]

    return metadata_updated
예제 #48
0
def main():
    parser=argparse.ArgumentParser(description='Image-file processing to identify total number of droplets',
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('input_image', type=str, 
                      help='input image file')
    parser.add_argument('template_image', type=str, 
                      help='template image file')
    parser.add_argument('--outfile', type=str,
                      help='output image name.  Will default to input_image_processed.png')
    parser.add_argument('--cutoff_area', type=int,
                      help='rectangle size. only detected rectangles above this size will be displayed')
    parser.add_argument('--include', action='store_true', dest="include_edge", 
                      help='Include edge or boundary or border droplets; incomplete droplets found on the edge of the picture will be included too.')
    parser.add_argument("--exclude", action="store_false", dest="include_edge",
                      help='Exclude edge or boundary or border droplets; incomplete droplets found on the edge of the picture will be excluded.')
    parser.add_argument('--threshold', type=float, default=0.6,
                      help='thresholding parameter, tweak from 0 to 1 and visually examine results, depends on the image, default is 0.6')


    args=parser.parse_args()
    
    if args.outfile==None:
        args.outfile=''.join([path.splitext(args.input_image)[0],"_border_",str(args.include_edge),'_processed']) ## leaving out the file extension; adding it later just before writing out the image file

    filename=args.input_image
    rectsize=args.cutoff_area


    print "...."
    print "image file being processed..."
    print "..."
    
   
    from skimage import img_as_float
    image = io.imread(filename, flatten=True) # convert 3D to 2D image by using flatten=True
    image = img_as_float(image)
    #io.imshow(image)   ## check the image
    #io.show()   ## check the image

    from skimage.color import rgb2gray
    img_gray = rgb2gray(image)
    #io.imshow(img_gray)   ## check the image
    #io.show()   ## check the image
    
    image = gaussian_filter(image, 1)
    seed = np.copy(image)
    seed[1:-1, 1:-1] = image.min()
    mask = image
    
    dilated = reconstruction(seed, mask, method='dilation')
    

    """    
    fig, (ax0, ax1, ax2) = plt.subplots(nrows=1,
                                        ncols=3,
                                        figsize=(8, 2.5),
                                        sharex=True,
                                        sharey=True)
    
    ax0.imshow(image, cmap='gray')
    ax0.set_title('original image')
    ax0.axis('off')
    ax0.set_adjustable('box-forced')
    
    ax1.imshow(dilated, vmin=image.min(), vmax=image.max(), cmap='gray')
    ax1.set_title('dilated')
    ax1.axis('off')
    ax1.set_adjustable('box-forced')
    
    ax2.imshow(image - dilated, cmap='gray')
    ax2.set_title('image - dilated')
    ax2.axis('off')
    ax2.set_adjustable('box-forced')
    
    fig.tight_layout()
    """

    
    print "...."
    print "background correction..."
    print "..."
    
    h = 0.4
    seed = image - h
    dilated = reconstruction(seed, mask, method='dilation')
    hdome = image - dilated
    #io.imshow(hdome)   ## check the image
    #io.show()   ## check the image

    
    """
    fig, (ax0, ax1, ax2) = plt.subplots(nrows=1, ncols=3, figsize=(8, 2.5))
    yslice = 197
    
    ax0.plot(mask[yslice], '0.5', label='mask')
    ax0.plot(seed[yslice], 'k', label='seed')
    ax0.plot(dilated[yslice], 'r', label='dilated')
    ax0.set_ylim(-0.2, 2)
    ax0.set_title('image slice')
    ax0.set_xticks([])
    ax0.legend()
    
    ax1.imshow(dilated, vmin=image.min(), vmax=image.max(), cmap='gray')
    ax1.axhline(yslice, color='r', alpha=0.4)
    ax1.set_title('dilated')
    ax1.axis('off')
    
    ax2.imshow(hdome, cmap='gray')
    ax2.axhline(yslice, color='r', alpha=0.4)
    ax2.set_title('image - dilated')
    ax2.axis('off')
    
    fig.tight_layout()
    plt.show()
    """    
    
    
    print "...."
    print "edge detection..."
    print "..."
    
    im = hdome
    edges1 = feature.canny(image, sigma=3)
    edges2 = feature.canny(im, sigma=3)
    #io.imshow(edges1)   ## check the image
    #io.show()   ## check the image
    #io.imshow(edges2)   ## check the image
    #io.show()   ## check the image



    """
    # display results
    fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3),
                                        sharex=True, sharey=True)
    ax1.imshow(im, cmap=plt.cm.gray)
    ax1.axis('off')
    ax1.set_title('Original image', fontsize=10)
    
    ax2.imshow(edges1, cmap=plt.cm.gray)
    ax2.axis('off')
    ax2.set_title('Canny filter on original image, $\sigma=3$', fontsize=10)
    
    ax3.imshow(edges2, cmap=plt.cm.gray)
    ax3.axis('off')
    ax3.set_title('Canny filter on background subtracted image, $\sigma=3$', fontsize=10)
    
    fig.tight_layout()
    plt.show()
    """    
        

 
#    
### check how good are the original and processed images by selecting corresponding image here
#   
    image=image
    #image=edges2
    #image=hdome
    
    ## apply threshold
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(2))
    #io.imshow(bw)   ## check the image
    #io.show()   ## check the image
    
    
    print "... are we including incomplete droplets at the edge of image?..."
    print ".............................................................", args.include_edge
    if args.include_edge is False:
        ## remove artifacts connected to image border
        cleared = clear_border(bw)    ## drops the incomplete droplets at the boundary/edge of the picture frame
    else:
        cleared = bw  ## keeps all droplets in the image, even incomplete ones at the boundary/edge of the picture frame
    
    
    #io.imshow(cleared)   ## check the image
    #io.show()   ## check the image
        
    
    # label image regions
    label_image = label(cleared)
    image_label_overlay = label2rgb(label_image, image=image)
    #io.imshow(image_label_overlay)   ## check the image
    #io.show()   ## check the image
    
    
    fig, ax = plt.subplots(figsize=(10, 6))
    #ax.imshow(label_image)
    #ax.imshow(image_label_overlay)


    targetimagefile=args.input_image
    templateimagefile=args.template_image
    thresholdval=args.threshold

    beads_count=0
    droplet_count=0
    outfile=0
    for region in regionprops(label_image):
        # take regions with large enough areas; should correspond to droplets        
        if region.area >= rectsize:
            # draw rectangle around segmented droplets
            droplet_count=droplet_count+1
            minr, minc, maxr, maxc = region.bbox
            rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                      fill=False, edgecolor='yellow', linewidth=2)
            #print region.bbox
            try:
                # offset the bounding box in all directions; seems better this way based on visual examination
                crop_image = image[max(0, minr-50):maxr+50, max(0, minc-50):maxc+50]
            except ValueError:  # raised if the offset takes the box out of bounds
                crop_image = image[minr:maxr, minc:maxc]  # fall back to the exact bounding box
            #io.imshow(crop_image)   ## check the image
            #io.show()   ## check the image
            outfile=outfile+1
            ## TODO: pass the cropped image that already exists here instead of
            ## passing whole-image file names to the findtemplate function;
            ## keeping the working behaviour for now and tweaking slowly
            beads=findtemplate(templateimagefile, targetimagefile, region.bbox, outfile, thresholdval)
        
            ax.add_patch(rect)
              
    ax.set_axis_off()
    plt.tight_layout()
    outfile=args.outfile + "_totaldroplets-" + str(droplet_count) + ".png"
    print "...saving image file...."
    print outfile
    print "........................"
    plt.savefig(outfile)
    #plt.show()      ## activate this if you want to examine how the processed images are turning out and stop/start with different input parameters; key one being --cutoff_area

 
    
    print "...total droplets identified in the image:"
    print droplet_count
    print "..."
예제 #49
0
    def __getitem__(self, idx):
        fileName = self.fileList[idx]
        # load image
        imgName = os.path.join(self.dataFolder, fileName + '_mlt.png')
        image = io.imread(imgName)
        if len(image.shape) == 2:
            image = np.tile(image[..., None], (1, 3))
        image = np.float32(image) / 255.0

        # load albedo
        albedoName = os.path.join(self.albedoFolder,
                                  fileName + '_mlt_albedo.png')
        albedo = io.imread(albedoName)
        if len(albedo.shape) == 2:
            albedo = np.tile(albedo[..., None], (1, 3))
        albedo = np.float32(albedo) / 255.0
        albedo[albedo < 1e-6] = 1e-6

        # --------------------------------------------------------------------
        # complicated code copied from CGI
        # I don't really think this block of code is totally correct
        # get shading and mask according to the code
        maskName = os.path.join(self.dataFolder, fileName + "_mlt_mask.png")
        mask = io.imread(maskName)
        mask = np.float32(mask) / 255.0

        gt_R_gray = np.mean(albedo, 2)
        mask[gt_R_gray < 1e-6] = 0
        mask[np.mean(image, 2) < 1e-6] = 0
        mask = skimage.morphology.binary_erosion(mask, square(11))
        mask = np.expand_dims(mask, axis=2)
        mask = np.repeat(mask, 3, axis=2)
        albedo[albedo < 1e-6] = 1e-6

        rgb_img = image**2.2
        shading = rgb_img / albedo

        # in the CGI code this threshold is set to 10, but I think it is wrong
        mask[shading > 10] = 0
        #mask[shading > 20] = 0
        mask[shading < 1e-4] = 0

        shading[shading < 1e-4] = 1e-4
        shading[shading > 20] = 20

        if np.sum(mask) < 10:
            max_S = 1.0
        else:
            max_S = np.percentile(shading[mask > 0.5], 90)

        shading = shading / max_S
        mask = np.float32(np.abs(np.sum(mask, axis=2) / 3.0 - 1.0) < 1e-6)
        #------------------------------------------------------------------------

        ## shading saved as raw
        #shadingName = os.path.join(self.shadingFolder, fileName + '.tiff')
        #shading = imageio.imread(shadingName)
        #if len(shading.shape)==2:
        #    shading = np.tile(shading[...,None], (1, 3))
        #shading = shading/20.0

        if fileName in self.missingList:
            # no normal
            imgHeight = image.shape[0]
            imgWidth = image.shape[1]
            normal = np.zeros((imgHeight, imgWidth, 3))
            normalMask = np.zeros((imgHeight, imgWidth))
        else:
            normalName = os.path.join(self.normalFolder,
                                      fileName + '_norm_camera.png')
            normal = io.imread(normalName)
            normalMaskName = os.path.join(self.normalFolder,
                                          fileName + '_valid.png')
            normalMask = io.imread(normalMaskName)

        if self.transform:
            image, albedo, shading, normal, mask, normalMask = \
                    self.transform([image, albedo,  shading, normal, mask, normalMask])
        return image, albedo, shading, normal, mask, normalMask
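The heart of this loader is the intrinsic-image relation image**2.2 = albedo * shading. A minimal standalone sketch of that step (the function name is an assumption):

import numpy as np

def shading_from_albedo(image, albedo, gamma=2.2):
    # gamma-decode the input, then divide out the albedo to recover shading
    albedo = np.clip(albedo, 1e-6, None)   # avoid division by zero
    shading = (image ** gamma) / albedo
    return np.clip(shading, 1e-4, 20.0)    # same clamp range as the snippet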
예제 #50
0
    def _get_batches_of_transformed_samples(self, index_array):
        batch_x = []
        batch_y = []

        #print(index_array)

        for batch_index, image_index in enumerate(index_array):
            row = self.image_table.iloc[image_index]
            img_id = row.ImageId

            if img_id in exclude_list:
                continue

            img0 = cv2.imread(path.join(images_folder, '{0}'.format(img_id)),
                              cv2.IMREAD_COLOR)
            mask_path = path.join(masks_folder, '{0}.png'.format(img_id[:-4]))
            if not os.path.exists(mask_path):
                msk0 = np.zeros((768, 768, 3))
                lbl0 = np.zeros((768, 768))
            else:
                msk0 = cv2.imread(mask_path, cv2.IMREAD_UNCHANGED)
                lbl0 = cv2.imread(
                    path.join(labels_folder, '{0}.tif'.format(img_id[:-4])),
                    cv2.IMREAD_UNCHANGED)

                if img0.shape[0] == 0 or msk0.shape[0] == 0 or lbl0.shape[
                        0] == 0:
                    img0 = np.zeros((768, 768, 3))
                    msk0 = np.zeros((768, 768, 3))
                    lbl0 = np.zeros((768, 768))
#             print('mask_path', mask_path)
#             print('img_shape', img0.shape, 'mask_shape', msk0.shape)
            tmp = np.zeros_like(msk0[..., 0], dtype='uint8')
            tmp[1:-1, 1:-1] = msk0[1:-1, 1:-1, 0]
            good4copy = list(
                set(np.unique(lbl0[lbl0 > 0])).symmetric_difference(
                    np.unique(lbl0[(lbl0 > 0) & (tmp == 0)])))

            x0 = random.randint(0, img0.shape[1] - self.input_shape[1])
            y0 = random.randint(0, img0.shape[0] - self.input_shape[0])
            img = img0[y0:y0 + self.input_shape[0],
                       x0:x0 + self.input_shape[1], :]
            msk = msk0[y0:y0 + self.input_shape[0],
                       x0:x0 + self.input_shape[1], :]

            if len(good4copy) > 0 and random.random() > 0.75:
                num_copy = random.randrange(1, min(6, len(good4copy) + 1))
                lbl_max = lbl0.max()
                for i in range(num_copy):
                    lbl_max += 1
                    l_id = random.choice(good4copy)
                    lbl_msk = lbl0 == l_id
                    row, col = np.where(lbl_msk)
                    y1, x1 = np.min(np.where(lbl_msk), axis=1)
                    y2, x2 = np.max(np.where(lbl_msk), axis=1)
                    lbl_msk = lbl_msk[y1:y2 + 1, x1:x2 + 1]
                    lbl_img = img0[y1:y2 + 1, x1:x2 + 1, :]
                    if random.random() > 0.5:
                        lbl_msk = lbl_msk[:, ::-1, ...]
                        lbl_img = lbl_img[:, ::-1, ...]
                    rot = random.randrange(4)
                    if rot > 0:
                        lbl_msk = np.rot90(lbl_msk, k=rot)
                        lbl_img = np.rot90(lbl_img, k=rot)
                    x1 = random.randint(
                        max(0, x0 - lbl_msk.shape[1] // 2),
                        min(img0.shape[1] - lbl_msk.shape[1],
                            x0 + self.input_shape[1] - lbl_msk.shape[1] // 2))
                    y1 = random.randint(
                        max(0, y0 - lbl_msk.shape[0] // 2),
                        min(img0.shape[0] - lbl_msk.shape[0],
                            y0 + self.input_shape[0] - lbl_msk.shape[0] // 2))
                    tmp = erosion(lbl_msk, square(5))
                    lbl_msk_dif = lbl_msk ^ tmp
                    tmp = dilation(lbl_msk, square(5))
                    lbl_msk_dif = lbl_msk_dif | (tmp ^ lbl_msk)
                    lbl0[y1:y1 + lbl_msk.shape[0],
                         x1:x1 + lbl_msk.shape[1]][lbl_msk] = lbl_max
                    img0[y1:y1 + lbl_msk.shape[0],
                         x1:x1 + lbl_msk.shape[1]][lbl_msk] = lbl_img[lbl_msk]
                    full_diff_mask = np.zeros_like(img0[..., 0], dtype='bool')
                    full_diff_mask[y1:y1 + lbl_msk.shape[0],
                                   x1:x1 + lbl_msk.shape[1]] = lbl_msk_dif
                    img0[..., 0][full_diff_mask] = median(
                        img0[..., 0], mask=full_diff_mask)[full_diff_mask]
                    img0[..., 1][full_diff_mask] = median(
                        img0[..., 1], mask=full_diff_mask)[full_diff_mask]
                    img0[..., 2][full_diff_mask] = median(
                        img0[..., 2], mask=full_diff_mask)[full_diff_mask]
                img = img0[y0:y0 + self.input_shape[0],
                           x0:x0 + self.input_shape[1], :]
                lbl = lbl0[y0:y0 + self.input_shape[0],
                           x0:x0 + self.input_shape[1]]
                msk = create_mask(lbl)


#             return img, msk
            data = self.random_transformers[0](image=img[..., ::-1], mask=msk)

            img = data['image'][..., ::-1]
            msk = data['mask']

            msk = msk.astype('float')
            msk[..., 0] = (msk[..., 0] > 127) * 1
            msk[..., 1] = (msk[..., 1] > 127) * (msk[..., 0] == 0) * 1
            msk[..., 2] = (msk[..., 1] == 0) * (msk[..., 0] == 0) * 1
            otp = msk

            img = np.concatenate([img, bgr_to_lab(img)], axis=2)
            batch_x.append(img)
            batch_y.append(otp)
        batch_x = np.array(batch_x, dtype="float32")
        batch_y = np.array(batch_y, dtype="float32")
        batch_x = preprocess_input(batch_x)
        return self.transform_batch_x(batch_x), self.transform_batch_y(batch_y)
예제 #51
0
"""
Display a labels layer on top of an image layer using the add_labels and
add_image APIs
"""

from skimage import data
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label
from skimage.morphology import closing, square, remove_small_objects
from napari import ViewerApp
from napari.util import app_context

with app_context():
    image = data.coins()[50:-50, 50:-50]

    # apply threshold
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(4))

    # remove artifacts connected to image border
    cleared = remove_small_objects(clear_border(bw), 20)

    # label image regions
    label_image = label(cleared)

    # initialise viewer with astro image
    viewer = ViewerApp(coins=image, multichannel=False)
    viewer.layers[0].colormap = 'gray'

    # add the labels
    label_layer = viewer.add_labels(label_image, name='segmentation')
예제 #52
0
    def __getitem__(self, idx):
        _idx = self.train_idxs[idx]

        fn = self.all_files[_idx]

        img = cv2.imread(fn, cv2.IMREAD_COLOR)
        img2 = cv2.imread(fn.replace('_pre_disaster', '_post_disaster'),
                          cv2.IMREAD_COLOR)

        msk0 = cv2.imread(fn.replace('/images/', '/masks/'),
                          cv2.IMREAD_UNCHANGED)
        lbl_msk1 = cv2.imread(
            fn.replace('/images/', '/masks/').replace('_pre_disaster',
                                                      '_post_disaster'),
            cv2.IMREAD_UNCHANGED)
        msk1 = np.zeros_like(lbl_msk1)
        msk2 = np.zeros_like(lbl_msk1)
        msk3 = np.zeros_like(lbl_msk1)
        msk4 = np.zeros_like(lbl_msk1)
        msk2[lbl_msk1 == 2] = 255
        msk3[lbl_msk1 == 3] = 255
        msk4[lbl_msk1 == 4] = 255
        msk1[lbl_msk1 == 1] = 255

        if random.random() > 0.5:
            img = img[::-1, ...]
            img2 = img2[::-1, ...]
            msk0 = msk0[::-1, ...]
            msk1 = msk1[::-1, ...]
            msk2 = msk2[::-1, ...]
            msk3 = msk3[::-1, ...]
            msk4 = msk4[::-1, ...]

        if random.random() > 0.05:
            rot = random.randrange(4)
            if rot > 0:
                img = np.rot90(img, k=rot)
                img2 = np.rot90(img2, k=rot)
                msk0 = np.rot90(msk0, k=rot)
                msk1 = np.rot90(msk1, k=rot)
                msk2 = np.rot90(msk2, k=rot)
                msk3 = np.rot90(msk3, k=rot)
                msk4 = np.rot90(msk4, k=rot)

        if random.random() > 0.9:
            shift_pnt = (random.randint(-320, 320), random.randint(-320, 320))
            img = shift_image(img, shift_pnt)
            img2 = shift_image(img2, shift_pnt)
            msk0 = shift_image(msk0, shift_pnt)
            msk1 = shift_image(msk1, shift_pnt)
            msk2 = shift_image(msk2, shift_pnt)
            msk3 = shift_image(msk3, shift_pnt)
            msk4 = shift_image(msk4, shift_pnt)

        if random.random() > 0.6:
            rot_pnt = (img.shape[0] // 2 + random.randint(-320, 320),
                       img.shape[1] // 2 + random.randint(-320, 320))
            scale = 0.9 + random.random() * 0.2
            angle = random.randint(0, 20) - 10
            if (angle != 0) or (scale != 1):
                img = rotate_image(img, angle, scale, rot_pnt)
                img2 = rotate_image(img2, angle, scale, rot_pnt)
                msk0 = rotate_image(msk0, angle, scale, rot_pnt)
                msk1 = rotate_image(msk1, angle, scale, rot_pnt)
                msk2 = rotate_image(msk2, angle, scale, rot_pnt)
                msk3 = rotate_image(msk3, angle, scale, rot_pnt)
                msk4 = rotate_image(msk4, angle, scale, rot_pnt)

        if random.random() > 0.985:
            img = shift_channels(img, random.randint(-5, 5),
                                 random.randint(-5, 5), random.randint(-5, 5))
        elif random.random() > 0.985:
            img2 = shift_channels(img2, random.randint(-5, 5),
                                  random.randint(-5, 5), random.randint(-5, 5))

        if random.random() > 0.985:
            img = change_hsv(img, random.randint(-5, 5), random.randint(-5, 5),
                             random.randint(-5, 5))
        elif random.random() > 0.985:
            img2 = change_hsv(img2, random.randint(-5, 5),
                              random.randint(-5, 5), random.randint(-5, 5))

        if random.random() > 0.98:
            if random.random() > 0.985:
                img = clahe(img)
            elif random.random() > 0.985:
                img = gauss_noise(img)
            elif random.random() > 0.985:
                img = cv2.blur(img, (3, 3))
        elif random.random() > 0.98:
            if random.random() > 0.985:
                img = saturation(img, 0.9 + random.random() * 0.2)
            elif random.random() > 0.985:
                img = brightness(img, 0.9 + random.random() * 0.2)
            elif random.random() > 0.985:
                img = contrast(img, 0.9 + random.random() * 0.2)

        if random.random() > 0.98:
            if random.random() > 0.985:
                img2 = clahe(img2)
            elif random.random() > 0.985:
                img2 = gauss_noise(img2)
            elif random.random() > 0.985:
                img2 = cv2.blur(img2, (3, 3))
        elif random.random() > 0.98:
            if random.random() > 0.985:
                img2 = saturation(img2, 0.9 + random.random() * 0.2)
            elif random.random() > 0.985:
                img2 = brightness(img2, 0.9 + random.random() * 0.2)
            elif random.random() > 0.985:
                img2 = contrast(img2, 0.9 + random.random() * 0.2)

        if random.random() > 0.983:
            el_det = self.elastic.to_deterministic()
            img = el_det.augment_image(img)

        if random.random() > 0.983:
            el_det = self.elastic.to_deterministic()
            img2 = el_det.augment_image(img2)

        msk0 = msk0[..., np.newaxis]
        msk1 = msk1[..., np.newaxis]
        msk2 = msk2[..., np.newaxis]
        msk3 = msk3[..., np.newaxis]
        msk4 = msk4[..., np.newaxis]

        msk = np.concatenate([msk0, msk1, msk2, msk3, msk4], axis=2)
        msk = (msk > 127)

        msk[..., 0] = False
        msk[..., 1] = dilation(msk[..., 1], square(5))
        msk[..., 2] = dilation(msk[..., 2], square(5))
        msk[..., 3] = dilation(msk[..., 3], square(5))
        msk[..., 4] = dilation(msk[..., 4], square(5))
        msk[..., 1][msk[..., 2:].max(axis=2)] = False
        msk[..., 3][msk[..., 2]] = False
        msk[..., 4][msk[..., 2]] = False
        msk[..., 4][msk[..., 3]] = False
        msk[..., 0][msk[..., 1:].max(axis=2)] = True
        msk = msk * 1

        lbl_msk = msk.argmax(axis=2)

        img = np.concatenate([img, img2], axis=2)
        img = preprocess_inputs(img)

        img = torch.from_numpy(img.transpose((2, 0, 1))).float()
        msk = torch.from_numpy(msk.transpose((2, 0, 1))).long()

        sample = {'img': img, 'msk': msk, 'lbl_msk': lbl_msk, 'fn': fn}
        return sample
예제 #53
0
                               sharey=True)
ax0.imshow(np.squeeze(test_img).astype('uint8'), cmap='gray')
ax1.imshow(overlay)
plt.show()

# # general way

# In[19]:

# display the remaining defects
defects_

# In[20]:

start_time = time.time()
bin_img = morphology.binary_closing((pred_img > 0), morphology.square(3))
label_img = measure.label(bin_img > 0)
regions = measure.regionprops(label_img)
proc_time = time.time() - start_time
print('proc time = ', proc_time)

overlay = np.squeeze(test_img).copy().astype('uint8')
for region in regions:

    label = region.label
    area = region.area

    if area < 0:
        continue

    y0, x0, y1, x1 = region.bbox
예제 #54
0
    def use_mask_tif():
        for one_color in unique_colors:
            # print("one_color",one_color)
            # (0, 255, 127)
            # new_img=np.where(loaded_img_mask[:,:,:3]==np.array(one_color),1.0,0.0)

            loaded_img_mask_proc = loaded_img_mask[:, :, :3]

            # (0, 255, 127) locations are True
            new_img = np.all(loaded_img_mask_proc == np.array(one_color),
                             axis=2)
            # print("new_img",new_img.shape)
            # print("new_img",new_img)

            new_img = skimage.morphology.binary_dilation(new_img, square(3))
            new_img = new_img[:, :, np.newaxis].astype("uint8")
            # non-(0, 255, 127) locations become 1
            new_img = np.where(new_img == 0, 1, 0)
            # print("new_img",new_img)

            masked_img = loaded_img_crop * new_img
            stacked = np.vstack(masked_img)
            # # print("stacked",stacked.shape)
            # # (234971, 3)
            # # afaf

            # # stacked=loaded_img_crop_mask.reshape((-1,3))
            # # print("stacked",stacked.shape)
            # # (234971, 3)

            idx = np.all(stacked == [0, 0, 0], 1)
            # # print("idx",idxs.shape)
            # # idx (234971,)
            # # print("idx",idx)
            # # afaf

            # a1=np.vstack(loaded_img_crop)

            stacked[idx] = [0, 255, 0]
            # # print("a1",a1.shape)
            # # a1 (234971, 3)

            loaded_img_crop_new = stacked.reshape(loaded_img_crop.shape[0],
                                                  loaded_img_crop.shape[1], 3)
            # print("loaded_img_crop",loaded_img_crop.shape)

            # loaded_img_crop_masked=loaded_img_crop[loaded_img_crop_mask]
            # print("loaded_img_crop_masked",loaded_img_crop_masked)
            # # loaded_img_crop[loaded_img_crop_mask] (18, 3)
            # # loaded_img_crop[loaded_img_crop_mask] (387, 3)
            # # loaded_img_crop[loaded_img_crop_mask] (234971, 3)
            # # loaded_img_crop[loaded_img_crop_mask] (1629, 3)
            # loaded_img_crop[loaded_img_crop_mask]=[0,250,0]

            # # print("loaded_img_crop",loaded_img_crop)

            # plt.imshow(loaded_img_crop*nesw_img)
            plt.imshow(loaded_img_crop_new)
            plt.title("File: 1_Region 1_mask.tif, " + str(one_color))

            plt.show()
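
A condensed, standalone sketch of the color-masking step above, useful for testing it in isolation (mask_rgb and image_rgb are hypothetical stand-ins for the loaded mask and image crop):

import numpy as np
from skimage.morphology import binary_dilation, square

one_color = (0, 255, 127)                              # example color
hit = np.all(mask_rgb == np.array(one_color), axis=2)  # True where the mask matches
hit = binary_dilation(hit, square(3))                  # grow the matched region slightly
keep = (~hit)[:, :, np.newaxis].astype(np.uint8)       # 1 outside the color region
masked = image_rgb * keep                              # zero out the matched pixels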
Example #55
0
def dist_map(x, y, z, xmin, xmax, ymin, ymax, dx, delta_s):
    """function for centerline rasterization and distance map calculation
    inputs:
    x,y,z - coordinates of centerline
    xmin, xmax, ymin, ymax - x and y coordinates that define the area of interest
    dx - gridcell size (m)
    delta_s - distance between points along centerline (m)
    returns:
    cl_dist - distance map (distance from centerline)
    x_pix, y_pix, z_pix - x,y, and z pixel coordinates of the centerline
    s_pix - along-channel distance in pixels
    z_map - map of reference channel thalweg elevation (elevation of closest point along centerline)
    x, y, z - x,y,z centerline coordinates clipped to the 3D model domain"""
    y = y[(x > xmin) & (x < xmax)]
    z = z[(x > xmin) & (x < xmax)]
    x = x[(x > xmin) & (x < xmax)]
    dummy, dy, dz, ds, s = compute_derivatives(x, y, z)
    if len(np.where(ds > 2 * delta_s)[0]) > 0:
        inds = np.where(ds > 2 * delta_s)[0]
        inds = np.hstack((0, inds, len(x)))
        lengths = np.diff(inds)
        long_segment = np.where(lengths == max(lengths))[0][0]
        start_ind = inds[long_segment] + 1
        end_ind = inds[long_segment + 1]
        if end_ind < len(x):
            x = x[start_ind:end_ind]
            y = y[start_ind:end_ind]
            z = z[start_ind:end_ind]
        else:
            x = x[start_ind:]
            y = y[start_ind:]
            z = z[start_ind:]
    xdist = xmax - xmin
    ydist = ymax - ymin
    iwidth = int((xmax - xmin) / dx)
    iheight = int((ymax - ymin) / dx)
    xratio = iwidth / xdist  # pixels per metre; the same in x and y since grid cells are square
    # create list with pixel coordinates:
    pixels = []
    for i in range(0, len(x)):
        px = int(iwidth - (xmax - x[i]) * xratio)
        py = int(iheight - (ymax - y[i]) * xratio)
        pixels.append((px, py))
    # create image and numpy array:
    img = Image.new("RGB", (iwidth, iheight), "white")
    draw = ImageDraw.Draw(img)
    draw.line(pixels, fill="rgb(0, 0, 0)")  # draw centerline as black line
    pix = np.array(img)
    cl = pix[:, :, 0]
    cl[cl == 255] = 1  # set background to 1 (centerline is 0)
    y_pix, x_pix = np.where(cl == 0)
    x_pix, y_pix = order_cl_pixels(x_pix, y_pix)
    # The next block is kind of a hack: it looks for, and eliminates, 'bad' centerline pixels.
    img = np.array(img)
    img = img[:, :, 0]
    img[img == 255] = 1
    img1 = morphology.binary_dilation(img,
                                      morphology.square(2)).astype(np.uint8)
    if len(np.where(img1 == 0)[0]) > 0:
        x_pix, y_pix = eliminate_bad_pixels(img, img1)
        x_pix, y_pix = order_cl_pixels(x_pix, y_pix)
    img1 = morphology.binary_dilation(
        img, np.array([[1, 0, 1], [1, 1, 1]], dtype=np.uint8)).astype(np.uint8)
    if len(np.where(img1 == 0)[0]) > 0:
        x_pix, y_pix = eliminate_bad_pixels(img, img1)
        x_pix, y_pix = order_cl_pixels(x_pix, y_pix)
    img1 = morphology.binary_dilation(
        img, np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1]],
                      dtype=np.uint8)).astype(np.uint8)
    if len(np.where(img1 == 0)[0]) > 0:
        x_pix, y_pix = eliminate_bad_pixels(img, img1)
        x_pix, y_pix = order_cl_pixels(x_pix, y_pix)
    # redo the distance calculation (x_pix and y_pix do not always contain all the points in cl):
    cl[cl == 0] = 1
    cl[y_pix, x_pix] = 0
    cl_dist, inds = ndimage.distance_transform_edt(cl, return_indices=True)
    dx, dy, dz, ds, s = compute_derivatives(x, y, z)
    # dx_pix,dy_pix,ds_pix,s_pix = compute_derivatives(x_pix,y_pix) # needed for s_pix only
    dx_pix = np.gradient(x_pix)
    dy_pix = np.gradient(y_pix)
    ds_pix = np.sqrt(dx_pix**2 + dy_pix**2)
    s_pix = np.cumsum(ds_pix)
    f = scipy.interpolate.interp1d(s, z)
    snew = s_pix * s[-1] / s_pix[-1]
    if snew[-1] > s[-1]:
        snew[-1] = s[-1]
    snew[snew < s[0]] = s[0]
    z_pix = f(snew)
    # create z_map:
    z_map = np.zeros(np.shape(cl_dist))
    z_map[y_pix, x_pix] = z_pix
    xinds = inds[1, :, :]
    yinds = inds[0, :, :]
    for i in range(0, len(x_pix)):
        z_map[(xinds == x_pix[i]) & (yinds == y_pix[i])] = z_pix[i]
    return cl_dist, x_pix, y_pix, z_pix, s_pix, z_map, x, y, z
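
The final loop assigns z_map values pixel by pixel and is O(len(x_pix) * H * W); an equivalent vectorized lookup through the index arrays returned by distance_transform_edt (a sketch using the same inds, y_pix, x_pix, and z_pix as above):

    z_at_cl = np.zeros(np.shape(cl_dist))
    z_at_cl[y_pix, x_pix] = z_pix
    z_map = z_at_cl[inds[0], inds[1]]  # z of the nearest centerline pixel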
Example #56
0
def run_model(file_to_predict, cnn_model):

    model = keras.models.load_model(cnn_model)

    # note: image_path points at one specific image;
    ## for a user interface, image_path should be set automatically from the image imported by the user
    ## batch prediction could also be supported
    Predicting_image_path = file_to_predict

    # depends on whether the image has been normalized
    image_path = Predicting_image_path
    image = cv2.imread(image_path, 0)
    image = cv2.resize(image, (576, 576))
    image = img_as_float(image)
    image *= 255.0 / image.max()  # stretch to the full 0-255 range
    image = image / 255.0         # then scale to [0, 1]
    
    image = np.array(image)

    img = mpimg.imread(image_path)
    # imgplot = plt.imshow(img)
    
    y = np.expand_dims(image, axis=0)

    result = model.predict(y)
    result = np.squeeze(result, axis=0)
    result = np.squeeze(result, axis=-1)

    plt.imshow(result, cmap = 'gray', interpolation = 'bicubic')
    plt.xticks([]), plt.yticks([])
    dir_path = os.path.dirname(os.path.realpath(__file__))
    print("Output for dir path: " , dir_path)
    save_file = dir_path + "/" + "result_output_test.tif"
    plt.gca().set_axis_off()
    plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0,
            hspace = 0, wspace = 0)
    plt.margins(0,0)
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.savefig(save_file, bbox_inches='tight', pad_inches=0)
    
    prediction_data = result
    
    # only show the boundary:
    ## Critical: the CNN output is a map of continuous pixel values; the
    ## higher the accuracy, the closer they are to the pixel values
    ## designated in the masks, so the threshold "0.7" used here is
    ## arbitrary and depends on the accuracy of the CNN (with high
    ## accuracy it should be close to 1.0).
    ### The threshold influences the quantification result by thinning or
    ### expanding the boundary/membrane areas; other predicted ROIs could
    ### also be shown to give the user more options.
    prediction_data[prediction_data >= 0.7] = 1
    prediction_data[prediction_data < 0.7] = 0
    
    prediction_data = prediction_data.astype('int')
    prediction_data = morphology.remove_small_objects(prediction_data.astype(bool),64)
    prediction_data = prediction_data.astype('float32')

    prediction_data = skimage.morphology.closing(prediction_data, square(3))

    ## whiten the first and last six rows/columns: because of bad annotation,
    ## boundaries at the image margin would otherwise be misclassified
    ROW = [0, 1, 2, 3, 4, 5]
    for I in ROW:
      prediction_data[I, :] = 1
      prediction_data[:, I] = 1
      prediction_data[-(I + 1), :] = 1  # -I alone would hit row 0 again when I == 0
      prediction_data[:, -(I + 1)] = 1

    #change the interior area to value 1
    prediction_data[prediction_data == 1.0] = 0.5
    prediction_data[prediction_data == 0.0] = 1.0
    prediction_data[prediction_data == 0.5] = 0.0

    s = generate_binary_structure(2,2)

    # num_features
    # how many ROI/unconnected object it find
    labeled_array, num_features = label(prediction_data, structure=s)

    unique, counts = np.unique(labeled_array, return_counts=True)
    dict(zip(unique, counts))  # pixel count per label (result is built but never used)

    for i in range(1, num_features + 1):  # skip label 0 (background)
      if np.count_nonzero(labeled_array == i) > 1000:  # discard small ROIs
        a = np.count_nonzero(labeled_array == i)
        # print('pixel area =', a)
        b = ndimage.sum(image, labeled_array, index=[i])
        # print('pixel intensity =', b)
        arr_1 = (labeled_array == i).astype(int)
        # expand region i by 8 pixels in each of the four axis directions
        a1 = np.roll(arr_1, 8, axis=0)
        a2 = np.roll(arr_1, -8, axis=0)
        a3 = np.roll(arr_1, 8, axis=1)
        a4 = np.roll(arr_1, -8, axis=1)
        a5 = a1 + a2 + a3 + a4
        a5[a5 > 0] = 1  # binarize the union (the original threshold i - 0.1 varied with the label index)
        c = ndimage.sum(image, a5, index=[1]) - b  # intensity in the expanded band only
        # print('pixel intensity in boundary =', c)
        ## Critical: the value "8" in np.roll means region i is expanded by
        ## 8 pixels; it corresponds to the length of the cell membranes.
        ## The prediction accuracy of the CNN also influences this parameter,
        ## together with the threshold chosen in the "only show the boundary" step above.

        slice_x, slice_y = ndimage.find_objects(labeled_array == i)[0]
        roi = labeled_array[slice_x, slice_y]
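
The four np.roll shifts above approximate a band around each region; an alternative sketch using an explicit morphological dilation (the disk radius mirrors the "8" above, and the names follow the loop variables):

from skimage.morphology import binary_dilation, disk

region_mask = labeled_array == i                             # binary mask of region i
ring = binary_dilation(region_mask, disk(8)) & ~region_mask  # ~8 px band around it
boundary_intensity = ndimage.sum(image, ring.astype(int), index=1)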
Example #57
0
for z in range(1, 5):
    first_patient = load_scan(INPUT_FOLDER + patients[z])
    first_patient_pixels = get_pixels_hu(first_patient)
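    # HU of air is -1024, so adding 1024 and dividing by 8 maps roughly
    # [-1024, 1016] HU onto [0, 255] for an 8-bit preview slice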
    im = Image.fromarray(
        (first_patient_pixels[np.shape(first_patient_pixels)[0] // 2] + 1024) /
        8)
    #im.show()
    im = im.convert(mode="L")
    im = im.filter(ImageFilter.MedianFilter(15))
    #im.show()
    pxl = np.array(im)
    thresh = threshold_otsu(pxl)
    pxl2 = 255 * (pxl > thresh)
    #im2 = Image.fromarray(pxl2)
    #im2.show()
    selem = square(150)
    closed = closing(pxl2, selem)
    #im2 = Image.fromarray(closed)
    #im2.show()
    a = closed - pxl2
    selem = square(20)
    closed = closing(a, selem)
    selem = disk(6)
    #closed = erosion(closed, selem)
    #im2 = Image.fromarray(closed)
    #im2.show()
    mul = (closed > 150) * pxl
    mask = (255 - closed) + mul
    mul = 255 * (mul > .44 * (np.max(mul) - np.min(mask)))
    #mul = 255*(mul>thresh)
    mul = opening(mul, selem)
Example #58
0
def framefeats(movie, frame, labels, counter, ring_flag):

    labels = labels.astype(int)
    labels_bin = labels == 0
    features_temp = []
    ims = []

    for j in range(movie.channels):

        im = movie.read_raw(j, frame)
        features_temp.append(regionprops(labels, im))
        if ring_flag:
            ims.append(im)

    features = dict()
    features['tracking'] = np.zeros((len(features_temp[0]), 13))
    features['data'] = np.zeros((len(features_temp[0]), 22))

    new_label = np.zeros((movie.dims[0], movie.dims[1]))

    for j in range(len(features_temp[0])):

        # Tracking Features

        cell_temp = features_temp[0][j]
        ypos = cell_temp.centroid[0]
        xpos = cell_temp.centroid[1]

        features['tracking'][j, 0] = counter
        features['tracking'][j, 2] = xpos
        features['tracking'][j, 3] = ypos
        features['tracking'][j, 4] = min(
            [ypos, movie.dims[0] - ypos, xpos, movie.dims[1] - xpos])

        # Morphology Features

        features['data'][j, 0] = cell_temp.area
        features['data'][j, 1] = cell_temp.eccentricity
        features['data'][j, 2] = cell_temp.major_axis_length
        features['data'][j, 3] = cell_temp.perimeter

        # Determine region for dilation

        if ring_flag:
            r = int(np.round(cell_temp.perimeter / 5))

            bbox = cell_temp.bbox
            bbox_dil = (np.maximum(0, bbox[0] - r), np.maximum(0, bbox[1] - r),
                        np.minimum(movie.dims[0], bbox[2] + r - 1),
                        np.minimum(movie.dims[1], bbox[3] + r - 1))
            pad = ((bbox[0] - bbox_dil[0], bbox_dil[2] - bbox[2]),
                   (bbox[1] - bbox_dil[1], bbox_dil[3] - bbox[3]))

            image_dil = np.pad(cell_temp.image, pad, 'constant')
            image_dil = dilation(image_dil, square(r))

            bin_roi = labels_bin[bbox_dil[0]:bbox_dil[2],
                                 bbox_dil[1]:bbox_dil[3]]
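            # the ring is the dilated cell footprint restricted to background
            # pixels, so intensity is sampled around the cell without
            # including neighbouring cells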
            ring_region = np.logical_and(image_dil, bin_roi)

        # Intensity Measurements for classification

        for k in range(0, movie.channels):

            cell_temp = features_temp[k][j]
            mu = cell_temp.mean_intensity
            im_temp = cell_temp.intensity_image.flatten()
            bin_temp = cell_temp.image.flatten()
            im_temp = im_temp[bin_temp]

            ind = k * 6 + 4
            features['data'][j, ind] = mu
            features['data'][j, ind + 1] = np.median(im_temp)
            features['data'][j, ind + 2] = np.std(im_temp)
            features['data'][j, ind + 3] = np.std(im_temp[im_temp > mu])

            if ring_flag:

                im_roi = ims[k][bbox_dil[0]:bbox_dil[2],
                                bbox_dil[1]:bbox_dil[3]]
                ring_region_vals = im_roi[ring_region].flatten()

                features['data'][j, ind + 4] = np.mean(ring_region_vals)
                features['data'][j, ind + 5] = np.median(ring_region_vals)

        new_label[labels == cell_temp.label] = counter
        counter += 1

    features['data'][np.isinf(features['data'])] = 0
    features['data'][np.isnan(features['data'])] = 0

    return features, new_label, counter
Example #59
0
             "/home/apages/pysrc/KernelPheno/data/DSC05389.jpeg",
             "/home/apages/pysrc/KernelPheno/data/DSC05384.jpeg"]

br_thresh_sum = 0
num_images = 0

for path in img_files:
    col = imread(path)
    gray = rgb2gray(col)
    thresh = threshold_otsu(gray)

    num_images += 1
    br_thresh_sum += thresh

    print("Threshold: " + str(thresh))
    bw = closing(gray > thresh, square(3))

    masked = col.copy()
    masked[np.where(bw)] = [0,0,0]

    # plt.imshow(masked)
    # plt.show()

avg = br_thresh_sum / num_images
print("Average: " + str(avg))

for path in img_files:
    col = imread(path)
    gray = rgb2gray(col)
    thresh = threshold_otsu(gray)
    bw = closing(gray > thresh, square(3))
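    # The second loop is cut off in the source; presumably it applies the
    # averaged threshold computed above (using avg here is an assumption;
    # plt is assumed imported from matplotlib)
    bw_avg = closing(gray > avg, square(3))
    masked = col.copy()
    masked[np.where(bw_avg)] = [0, 0, 0]
    plt.imshow(masked)
    plt.show()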
Example #60
0
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from skimage import data, filters, segmentation, measure, morphology, color

# load and crop the coins sample image
imgname1 = "009-b.jpg"
imgname2 = "003-b.jpg"
img1 = cv2.imread(imgname1)
img2 = cv2.imread(imgname2)

image = data.coins()[50:-50, 50:-50]  # sample image; overwritten just below
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
image = gray1

thresh = filters.threshold_otsu(image)  # Otsu threshold segmentation
bw = morphology.closing(image > thresh, morphology.square(3))  # morphological closing

cleared = bw.copy()  # copy
cleared = segmentation.clear_border(cleared)  # remove objects touching the border (the return value must be assigned)

label_image = measure.label(cleared)  # label connected regions
borders = np.logical_xor(bw, cleared)  # XOR: the border-touching objects that were removed
label_image[borders] = -1
image_label_overlay = color.label2rgb(label_image, image=image)  # show each label in a different color

fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(8, 6))
ax0.imshow(cleared, plt.cm.gray)
ax1.imshow(image_label_overlay)

for region in measure.regionprops(label_image):  # iterate over each connected region's properties
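    # The loop body is truncated here. In the standard skimage labeling
    # demo this snippet follows, each sufficiently large region gets a
    # rectangle drawn on the overlay, roughly (area threshold assumed):
    if region.area < 100:
        continue
    minr, minc, maxr, maxc = region.bbox
    rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                              fill=False, edgecolor='red', linewidth=2)
    ax1.add_patch(rect)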