Example #1
    def fft_kernel(self, f_):
        selected_frame = self.video[f_, :, :]
        im_fft = np.fft.fftshift(np.fft.fft2(selected_frame))
        amplitude = np.abs(im_fft)
        phase = np.angle(im_fft)

        amplitude_noiseless = np.multiply(self.mask, amplitude)
        im_fft_noiseless = np.multiply(amplitude_noiseless, np.exp(phase * 1j))
        im_fft_noiseless = np.fft.ifftshift(im_fft_noiseless)  # undo the earlier fftshift before the inverse FFT
        im_noiseless = np.fft.ifft2(im_fft_noiseless)

        im_noise = self.video[f_, :, :] - np.real(im_noiseless)

        if self.direction == 'Horizontal':
            selem = rectangle(nrows=1, ncols=5)

        elif self.direction == 'Vertical':
            selem = rectangle(nrows=5, ncols=1)

        filter_img = im_noise.copy()
        for i_ in range(self.max_iterations):
            try:
                filter_img = filters.gaussian(filter_img,
                                              sigma=1.2,
                                              preserve_range=True)
                # filter_img = rank.mean(filter_img, selem=selem)

            except Exception:
                print(
                    f"{self.WARNING}\nThe Gaussian filter could not be applied!{self.ENDC}"
                )

        img_FPNc = np.real(im_noiseless) + np.real(filter_img)

        return img_FPNc
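
For reference, a minimal sketch (not part of the example above) of what the two branches build: skimage.morphology.rectangle returns an array of ones with the requested shape, so swapping the arguments swaps the orientation of the smoothing kernel.

# Minimal sketch: the structuring elements selected by the two branches above.
import numpy as np
from skimage.morphology import rectangle

horizontal = rectangle(1, 5)   # shape (1, 5): a row-oriented element
vertical = rectangle(5, 1)     # shape (5, 1): a column-oriented element
assert horizontal.shape == (1, 5) and np.all(horizontal == 1)
assert vertical.shape == (5, 1) and np.all(vertical == 1)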
Example #2
def filterseriesall(oyscacld0, sizey, sizen, sizec):
	[nday, nr, nc] = oyscacld0.shape
	oyscacldl = oyscacld0.swapaxes(1,2).reshape(oyscacld0.shape[0], -1)
	[nday, npix] = oyscacldl.shape
	oyscacldl = oyscacldl.astype(np.int8)
	oyscacldl2 = deepcopy(oyscacldl)
	oyscacldyl = 1*(oyscacldl >=1)
	oyscacldyl2 = np.vstack((np.zeros((1, npix)), oyscacldyl, np.zeros((1, npix))))
	oyscacldnl = 1*(oyscacldl <=1)
	oyscacldnl2 = np.vstack((np.zeros((1, npix)), oyscacldnl, np.zeros((1, npix))))
	oyscacldcl = 1*(oyscacldl ==1)
	oyscacldcl2 = np.vstack((np.zeros((1, npix)), oyscacldcl, np.zeros((1, npix))))
	ccc = ndimage.binary_hit_or_miss(oyscacldcl2, np.ones((sizec,1)))
	se = rectangle(sizec, 1)
	ccc = ndimage.binary_dilation(ccc, se)*1
	ccc = ccc[1:-1,:]
	
	yyy = ndimage.binary_hit_or_miss(oyscacldyl2, np.ones((sizey,1)))
	se = rectangle(sizey, 1)
	yyy = ndimage.binary_dilation(yyy, se)*1
	yyy = yyy[1:-1,:]
	yyy = yyy & 1*(ccc == 0)
	oyscacldl2[yyy==1] = 2
	
	nnn = ndimage.binary_hit_or_miss(oyscacldnl2, np.ones((sizen,1)))
	se = rectangle(sizen, 1)
	nnn = ndimage.binary_dilation(nnn, se)*1
	nnn = nnn[1:-1,:]
	nnn = nnn & 1*(ccc == 0)
	oyscacldl2[nnn==1] = 0
	oyscacldl2[(np.logical_and(yyy==1, nnn==1))] = 1
	oyscacld = oyscacldl2.reshape(nday, nr, nc, order='F')
	return oyscacld
def get_background_mask(mask, method="rectangle", r=10):
    """Get the local-background mask around a neuron mask.

    Args:
        mask   : 2D array; the mask for which to find the local background.
        method : "rectangle" builds a rectangle surrounding the input mask,
                 with dimensions twice the dimensions of the mask on either side.
                 "disk" dilates the input mask with a disk-shaped kernel.
                 "dilation" dilates the input mask with a square-shaped kernel.
                 TODO: add alternative versions that scale by a factor instead
                 of by a fixed radius.
        r      : radius of the disk, or edge size of the square kernel.

    Returns:
        2D array with the same shape as `mask`, in which the local background
        is 1 and both the input mask and the external background are 0.
    """
    if method == "rectangle":
        dims = get_mask_shape(mask)
        bgmask = get_mask_rectangle(mask)
        bgmask = dilation(bgmask, rectangle(dims[0] + 1, dims[1] + 1))
        return np.logical_xor(bgmask, mask)
    elif method == "disk":
        dkern = disk(r)
        bgmask = dilation(mask, dkern)
        return np.logical_xor(bgmask, mask)
    elif method == "dilation":
        dkern = rectangle(r, r)
        bgmask = dilation(mask, dkern)
        return np.logical_xor(bgmask, mask)
    else:
        # default to rectangle
        print('Incorrect method specified, defaulting to rectangle')
        dims = get_mask_shape(mask)
        bgmask = get_mask_rectangle(mask)
        bgmask = dilation(bgmask, rectangle(dims[0], dims[1]))
        return np.logical_xor(bgmask, mask)
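
A hypothetical usage sketch for the "disk" branch, which depends only on scikit-image (the "rectangle" branch also needs the helpers get_mask_shape and get_mask_rectangle, which are not shown here); it assumes the function above and its morphology imports are in scope.

# Hypothetical usage of the "disk" branch of get_background_mask.
import numpy as np

mask = np.zeros((40, 40), dtype=bool)
mask[15:25, 15:25] = True                      # a 10x10 "neuron" mask
bg = get_background_mask(mask, method="disk", r=5)
assert bg.shape == mask.shape
assert not np.any(bg & mask)                   # the background ring excludes the mask itself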
Example #4
 def predict(self, path):
     try:
         img_path = path
         img = self.read_image(img_path)
         org = img.copy()
         img_t = np.expand_dims(img, axis=0)
         mask = self.model.predict(img_t)
         mask = np.squeeze(mask, axis=0)
         mask = np.squeeze(mask, axis=-1)
         mask = misc.imresize(mask, [512, 512])
         #union
         print(mask.dtype)
         mask = cv2.erode(mask, rectangle(3, 3))
         edge = cv2.Canny(mask, 30, 100)
         edge = cv2.dilate(edge, rectangle(3, 3))
         org = cv2.cvtColor(org, cv2.COLOR_GRAY2BGR)
         org[edge != 0] = [0, 127, 255]
         cv2.imwrite('temp/mask.jpg', mask)
         cv2.imwrite('temp/edge.jpg', edge)
         cv2.imwrite('temp/2.jpg', org)
         result = 1
         print('inference success!')
     except Exception:
         result = 0
         print('inference failed!')
     return result
Example #5
def overlay_masks_with_borders(images_dir, subdir_name, target_dir, borders_size=3, dilation_size=5):
    train_dir = os.path.join(images_dir, subdir_name)
    for mask_dirname in tqdm(glob.glob('{}/*/masks'.format(train_dir))):
        masks = []
        for ind, image_filepath in enumerate(glob.glob('{}/*'.format(mask_dirname))):
            image = np.asarray(Image.open(image_filepath))
            image = np.where(image > 0, ind + 1, 0)
            masks.append(image)
        labeled_masks = np.sum(masks, axis=0)
        overlayed_masks = np.where(labeled_masks, 1, 0)

        selem = rectangle(dilation_size, dilation_size)
        dilated_mask = dilation(overlayed_masks, selem=selem)
        watershed_mask = watershed((dilated_mask >= 0).astype(bool), labeled_masks, watershed_line=True)

        if watershed_mask.max() == watershed_mask.min():
            masks_with_borders = overlayed_masks
        else:
            borders = (watershed_mask == 0) & (dilated_mask > 0)
            selem = rectangle(borders_size, borders_size)
            dilated_borders = dilation(borders, selem=selem)
            masks_with_borders = np.where(dilated_borders, 2, overlayed_masks)

        target_filepath = '/'.join(mask_dirname.replace(images_dir, target_dir).split('/')[:-1]) + '.png'
        os.makedirs(os.path.dirname(target_filepath), exist_ok=True)
        imwrite(target_filepath, masks_with_borders)
def ex_3_c():
    # 9.5)
    scale = 16  # resolution of shape

    # draw original image
    shape = np.zeros((8 * scale, 8 * scale), dtype=int)
    shape[1 * scale:7 * scale, 1 * scale:3 * scale] = 1
    shape[1 * scale:7 * scale, 5 * scale:7 * scale] = 1
    shape[int(5.3 * scale):7 * scale, 1 * scale:7 * scale] = 1
    plt.imshow(shape)

    edore_a_struct = morphology.square(scale)
    edore_a_origin = (scale // 2 - 1, scale // 2 - 1)
    edore_a = ndimage.binary_erosion(shape, structure=edore_a_struct, origin=edore_a_origin)
    plt.imshow(edore_a, cmap='binary_r')

    edore_b = ndimage.binary_erosion(shape,
                                     structure=morphology.rectangle(int(5 * scale), int(.5 * scale)),
                                     origin=(int(2 * scale), 0))
    plt.imshow(edore_b)

    edore_c_struct = morphology.rectangle(scale * 2, scale * 2)
    edore_c = ndimage.binary_erosion(shape, structure=edore_c_struct)
    dilate_c_struct = morphology.disk(scale // 2)
    shape_c = ndimage.binary_dilation(edore_c, structure=dilate_c_struct)
    plt.imshow(edore_c, cmap='binary_r')

    dilate_d_struct = ndimage.binary_dilation(shape, structure=morphology.disk(scale // 2))
    edore_d_struct = ndimage.binary_erosion(dilate_d_struct, structure=morphology.disk(scale // 4))
    plt.imshow(edore_d_struct)
def get_simple_eroded_dilated_mask(mask, erode_selem_size, dilate_selem_size, small_annotations_size):
    if mask.sum() > small_annotations_size**2:
        selem = rectangle(erode_selem_size, erode_selem_size)
        mask_ = binary_erosion(mask, selem=selem)
    else:
        selem = rectangle(dilate_selem_size, dilate_selem_size)
        mask_ = binary_dilation(mask, selem=selem)
    return mask_
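
A small usage sketch of the size test above: masks larger than small_annotations_size**2 are eroded, smaller ones are dilated. It assumes the snippet's scikit-image imports (binary_erosion, binary_dilation, rectangle) are in scope and that the installed version still accepts the selem= keyword.

# Hypothetical check of the two branches of get_simple_eroded_dilated_mask.
import numpy as np

big = np.zeros((64, 64), dtype=bool)
big[8:56, 8:56] = True                          # area 2304 > 14**2 -> eroded
tiny = np.zeros((64, 64), dtype=bool)
tiny[30:33, 30:33] = True                       # area 9 <= 14**2 -> dilated

assert get_simple_eroded_dilated_mask(big, 3, 3, 14).sum() < big.sum()
assert get_simple_eroded_dilated_mask(tiny, 3, 3, 14).sum() > tiny.sum()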
Example #8
def get_col_row(img):
    rows, cols = img.shape
    scale = 80
    col_selem = morphology.rectangle(cols // scale, 1)
    img_cols = dil2ero(img, col_selem)
    row_selem = morphology.rectangle(1, rows // scale)
    img_rows = dil2ero(img, row_selem)
    return img_cols, img_rows
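
dil2ero is not shown above, but the idea of the two line-shaped elements can be illustrated with a plain morphological opening: a tall rectangle(cols // scale, 1) keeps only long vertical strokes (table column lines) and a wide rectangle(1, rows // scale) keeps only long horizontal strokes. A sketch, for illustration only:

# Illustration only (not the original dil2ero): opening with line-shaped
# elements keeps long strokes of the matching orientation.
import numpy as np
from skimage.morphology import opening, rectangle

img = np.zeros((80, 80), dtype=np.uint8)
img[10:70, 40] = 255            # long vertical stroke
img[40, 10:70] = 255            # long horizontal stroke
img[20, 20] = 255               # isolated noise pixel

vertical_only = opening(img, rectangle(20, 1))     # keeps the vertical stroke
horizontal_only = opening(img, rectangle(1, 20))   # keeps the horizontal stroke
assert vertical_only[30, 40] == 255 and vertical_only[40, 30] == 0
assert horizontal_only[40, 30] == 255 and horizontal_only[30, 40] == 0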
Example #9
def pre_process_image(image_file):
    #get the image and resize
    image_data = ndi.imread(image_file, mode='L')
    resized_image = resize(image_data, (200, 200))

    up_left, low_right = cropper(resized_image, 40, 0.6)

    resized_image = resize(
        resized_image[up_left[0]:low_right[0] + 1,
                      up_left[1]:low_right[1] + 1], (200, 200))
    binar = binarize(resized_image, 0.4)

    undilated = deepcopy(binar)

    #dilate the binarized image
    selem = rectangle(1, 2)
    dil = dilation(binar, selem)

    #binarize dilation
    dil = binarize(dil)

    #final = dil

    final = deepcopy(dil)
    for i in range(4):
        for j in range(4):
            final[i * 50 + 3:i * 50 + 25,
                  j * 50 + 3:j * 50 + 44] = undilated[i * 50 + 3:i * 50 + 25,
                                                      j * 50 + 3:j * 50 + 44]

    #Try to remove all borders and grid lines in the image.
    #Do this by scanning over rows and cols: if at least 48 of the 200
    #pixels (~25%) are 0 (black), set the entire row/column to 1 (white)

    #first rows
    for row in range(len(final)):
        count = 0
        for pixel in final[row, :]:
            if pixel == 0:
                count += 1
        if count >= 48:
            final[row, :] = final[row, :] * 0 + 1

    #columns
    for col in range(len(final[0, :])):
        count = 0
        for pixel in final[:, col]:
            if pixel == 0:
                count += 1
        if count >= 48:
            final[:, col] = final[:, col] * 0 + 1

    #add some final erosion (black) to fill out numbers and ensure they're connected
    final = binarize(erosion(final, rectangle(1, 2)), .0000001)

    return final
Example #10
def process_image(pil_image, values):
    img = np.array(pil_image)
    selected_channels = [
        values["-OTSU RED CHANNEL-"], values["-OTSU GREEN CHANNEL-"],
        values["-OTSU BLUE CHANNEL-"]
    ]

    if np.where(selected_channels)[0].size == 1 and len(img.shape) == 2:
        if values["-APPLY LOCAL OTSU-"]:
            radius = values["-LOCAL OTSU SIZE-"]
            local_mask = disk(
                radius
            ) if values["-LOCAL OTSU SHAPE-"] == "Disk" else rectangle(
                int(radius), int(radius))
            otsu_img = img >= rank.otsu(img, local_mask)
        else:
            otsu_img = img >= threshold_otsu(img)

        otsu_np_img = 255 * np.asarray(otsu_img, dtype=np.uint8)
        if values["-OTSU OVERLAY WITH IMAGE-"]:
            cnts = cv2.findContours(otsu_np_img, cv2.RETR_LIST,
                                    cv2.CHAIN_APPROX_NONE)[0]
            cv2.drawContours(img, cnts, -1, 255, 1)
            otsu_np_img = img
        ret_img = Image.fromarray(otsu_np_img)

    elif np.where(selected_channels)[0].size == 1:
        if values["-APPLY LOCAL OTSU-"]:
            radius = values["-LOCAL OTSU SIZE-"]
            local_mask = disk(
                radius
            ) if values["-LOCAL OTSU SHAPE-"] == "Disk" else rectangle(
                int(radius), int(radius))
            otsu_img = img[...,
                           np.where(selected_channels)[0][0]] >= rank.otsu(
                               img[..., np.where(selected_channels)[0][0]],
                               local_mask)
        else:
            threshold_global_otsu = threshold_otsu(
                img[..., np.where(selected_channels)[0][0]])
            otsu_img = img[..., np.where(selected_channels
                                         )[0][0]] >= threshold_global_otsu

        otsu_np_img = 255 * np.asarray(otsu_img, dtype=np.uint8)
        if values["-OTSU OVERLAY WITH IMAGE-"]:
            cnts = cv2.findContours(otsu_np_img, cv2.RETR_LIST,
                                    cv2.CHAIN_APPROX_NONE)[0]
            cv2.drawContours(img, cnts, -1, 255, 1)
            otsu_np_img = img
        ret_img = Image.fromarray(otsu_np_img)

    else:
        ret_img = pil_image

    return ret_img
Example #11
def pre_process_image(image_file):
    #get the image and resize
    image_data = ndi.imread(image_file, mode = 'L')
    resized_image = resize(image_data, (200,200))

    up_left,low_right = cropper(resized_image,40,0.6)

    resized_image = resize(resized_image[up_left[0]:low_right[0]+1,up_left[1]:low_right[1]+1], (200,200))
    binar = binarize(resized_image, 0.4)

    undilated = deepcopy(binar)

    #dilate the binarized image
    selem = rectangle(1,2)
    dil = dilation(binar, selem)

    #binarize dilation
    dil = binarize(dil)

    #final = dil

    final = deepcopy(dil)
    for i in range(4):
        for j in range(4):
            final[i*50+3:i*50+25,j*50+3:j*50+44] = undilated[i*50+3:i*50+25,j*50+3:j*50+44]

    #Try to remove all borders and grid lines in the image.
    #Do this by scanning over rows and cols: if at least 48 of the 200
    #pixels (~25%) are 0 (black), set the entire row/column to 1 (white)

    #first rows
    for row in range(len(final)):
        count = 0
        for pixel in final[row,:]:
            if pixel == 0:
                count += 1
        if count >= 48:
            final[row,:] = final[row,:]*0 + 1

    #columns
    for col in range(len(final[0,:])):
        count = 0
        for pixel in final[:,col]:
            if pixel == 0:
                count += 1
        if count >= 48:
            final[:,col] = final[:,col]*0 + 1

    #add some final erosion (black) to fill out numbers and ensure they're connected
    final = binarize(erosion(final, rectangle(1,2)),.0000001)

    return final
Example #12
def blurHorizon(res1, maskA):
    # takes as input maskA and res1 and returns the two partial
    # images, to be combined within main() into the final image
    
    strel = skm.rectangle(5, 5)
    
    # now to manipulate the mask A until we have just the horizon line left
    lineAdil = skm.dilation(maskA, selem = strel)
    horizonMaskMessy = lineAdil ^ maskA     # because mask B was created from mask A anyway
    horizonMask = lgstComp(horizonMaskMessy)
    
    #these three comment lines below can be deleted, it's just another way to merge the two pieces:
    #lineAerr = skm.erosion(maskA, selem = strel)    # because mask B was created from mask A anyway
    #horizonMaskMessyB = lineAerr ^ maskA
    #horizonMaskMessy = horizonMaskMessyA | horizonMaskMessyB

    
    line = res1.copy()     # line will - eventually - have the masked horizon line only (with the median filter)
    split = res1.copy()    # split will contain the rest of the image "res1" (without the median filter)
    
    lineBlurred = sknf.median_filter(line, footprint=np.ones((4, 4, 3)))
    
    lineBlurred[~horizonMask] = 0
    split[horizonMask] = 0
    
    
    # now to convert it to a uniform format so that both images line up
    
    split2 = ske.rescale_intensity(split)
    lineBlurred2 = ske.rescale_intensity(lineBlurred)
    
    
    return lineBlurred2, split2
Example #13
def convert_dfds_to_video_events(dfdims, params):
    """Perform filtering, and peak finding
    """

    # convert dfds into an "image"
    dfdims = np.expand_dims(np.array(dfdims), 0)
    # line structure element
    selem = morph.rectangle(1, params['filter_lengths'])
    # modified top-hat
    ocdfd = morph.opening(morph.closing(dfdims, selem), selem)
    mtdfd = dfdims - np.minimum(ocdfd, dfdims)

    # make peaks sharper by diffing twice
    doublediff_mtdfd = np.diff(np.diff(mtdfd))
    use = np.abs(doublediff_mtdfd.squeeze())

    # find peaks
    sharpchange = np.where(use > params['diffthresh'])[0].tolist()
    changeloc = []
    k = 0
    while k < len(sharpchange):
        flag = k
        a = sharpchange[k]
        while (sharpchange[k] - a) < params['proximity']:
            k += 1
            if k == len(sharpchange):
                break

        changeloc.append(int(round(np.mean(sharpchange[flag:k]))))

    changeloc = [c + 2 for c in changeloc]
    # add one to compensate for the double diff
    # and one more to compensate for k being 0 indexed

    return changeloc
Example #14
def open_image(img, mask_length):
    # Morphological opening on greyscale/binary image
    
    img = img.astype(np.uint8)
    img = opening(img, rectangle(mask_length,1))
    
    return img
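
A brief usage sketch: rectangle(mask_length, 1) is mask_length rows tall and one column wide, so the opening removes connected blobs shorter than mask_length along the row axis (it assumes opening and rectangle from skimage.morphology are imported, as the snippet requires).

# Hypothetical usage of open_image with a vertical line element of length 10.
import numpy as np

img = np.zeros((30, 30))
img[5:25, 10] = 1       # 20 pixels tall: survives the opening
img[5:8, 20] = 1        # 3 pixels tall: removed
opened = open_image(img, mask_length=10)
assert opened[15, 10] == 1 and opened[6, 20] == 0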
Example #15
def processVarianceVector(varVect, filterBlur):

    filtVect = filters.gaussian(varVect, sigma=filterBlur)
    scaleVect = (filtVect - np.amin(filtVect)) / (np.amax(filtVect) -
                                                  np.amin(filtVect))

    # Choose start and end for slice
    # Defined as first and last point that are above threshold of 0.5 in scaled blurred edge-transformed image

    varThresh = scaleVect > 0.5
    varClose = morphology.binary_closing(varThresh, morphology.rectangle(3, 1))

    # May be a good idea to confirm that there is a single large contiguous 'true' block here
    # If this is split or there are multiple blocks above threshold the next steps may return spurious values

    startEnd = [np.amin(np.where(varClose)[0]), np.amax(np.where(varClose)[0])]
    # Find first minimum to left of start, first to right of end
    localMin = argrelextrema(scaleVect, np.less)

    conservativeStartEnd = [
        localMin[0][(np.amax(np.where(((localMin[0] - startEnd[0]) < 0))))],
        localMin[0][(np.amin(np.where(((localMin[0] - startEnd[1]) > 0))))]
    ]

    return startEnd, conservativeStartEnd
Example #16
def preprocess(kernel_size=7):
    """
    Filter all images in the specified folder using median filter

    :param kernel_size: size of squared kernel used in median filter
    :return:
    """

    # path to original data
    PATH_ORIG = '/Users/mikhail/projects/edu/research/denoising/data/origin/noisy/'
    PATH_CLEAN = '/Users/mikhail/projects/edu/research/denoising/data/origin/clean/'

    images_names = os.listdir(PATH_ORIG)

    for i, image_name in enumerate(images_names):
        #img = cv2.imread(PATH_ORIG + image_name)
        # img_clean = cv2.medianBlur(img, 7)
        # cv2.imwrite(PATH_CLEAN + new_name, img_clean)

        img = skimage.io.imread(PATH_ORIG + image_name, as_gray=True)
        # filter image using median filter

        img_clean = median(img, selem=rectangle(kernel_size, kernel_size))

        # save image in another folder with different name
        new_name = 'F' + image_name[1:]
        skimage.io.imsave(PATH_CLEAN + new_name, img_clean)

        if (i % 200 == 0) and (i != 0):
            print(f'{i} images have been filtered')
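
Note for newer installs: recent scikit-image releases renamed the selem argument of median (and of the morphology functions used throughout these examples) to footprint, so on such versions the call above would read:

# Equivalent call on scikit-image versions that use `footprint` instead of `selem`.
img_clean = median(img, footprint=rectangle(kernel_size, kernel_size))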
Example #17
    def postProcessing(self, predictions, frame_size, holeFilling, areaFiltering, P, connectivity, Morph):

        nFrames, nPixels = predictions.shape

        if connectivity == 8:
            se = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]  # 8-connectivity
        else:
            se = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]  # 4-connectivity

        for frame in range(0, nFrames):
            actualFrame = np.reshape(predictions[frame, :], (frame_size[0], frame_size[1]))

            if holeFilling:
                #cv2.imwrite('Before_holefilling.png', 255 * actualFrame)
                actualFrame = binary_fill_holes(actualFrame.astype(int), structure=se)
                #cv2.imwrite('After_holefilling.png', 255 * actualFrame)

            if areaFiltering:
                actualFrame = remove_small_objects(actualFrame, P)

            if Morph:
                # SE = disk(2,2)
                SE = rectangle(4,2)
            
                #cv2.imwrite('Before_closing.png', 255 * actualFrame)
                actualFrame = binary_closing(actualFrame.astype(int), selem=SE, out=None)
                # cv2.imwrite('Results/After_closing'+str(frame)+'.png', 255 * actualFrame)

                actualFrame = binary_fill_holes(actualFrame.astype(int), structure=se)

            predictions[frame, :] = np.reshape(actualFrame.astype(int), (1, frame_size[0] * frame_size[1]))
        return predictions
Example #18
def image_opening(image, strel='rectangle', size_strel=3):
    if strel == 'rectangle':
        elem = morphology.rectangle(size_strel // 2, size_strel)
    elif strel == 'square':
        elem = morphology.square(size_strel)
    elif strel == 'diagonal':
        elem = np.zeros((size_strel, size_strel), int)
        np.fill_diagonal(elem, 1)
        elem = np.fliplr(elem)
    elif strel == 'diamond':
        elem = morphology.diamond(size_strel)
    elif strel == 'horizontal_line':
        elem = morphology.rectangle(1, size_strel)
    else:
        raise ValueError("unknown structuring element type: %s" % strel)
    output_image = morphology.opening(image, elem)

    return output_image
def get_simple_eroded_mask(mask, selem_size, small_annotations_size):
    if mask.sum() > small_annotations_size**2:
        selem = rectangle(selem_size, selem_size)
        mask_eroded = binary_erosion(mask, selem=selem)
    else:
        mask_eroded = mask
    return mask_eroded
def identify_lanes(bf, bf_cropped):
    if bf == 'na':
        lane_mask = np.ones(bf_cropped.shape,dtype = int)
        lane_binary_mask = lane_mask
    else:
        bf_sobel_h = sobel_h(bf_cropped)
        if len(np.unique(bf_sobel_h)) > 1:
            bf_sobel_h_threshold = threshold_li(bf_sobel_h)
            bf_thresholded = bf_sobel_h > bf_sobel_h_threshold
        else:
            bf_thresholded = bf_sobel_h
        bf_closed = closing(bf_thresholded, rectangle(10,20))
        bf_small_removed = remove_small_objects(bf_closed)
        for point in range(len(bf_small_removed[:bf_cropped.shape[0] - 30, bf_cropped.shape[1] // 2])):
            if bf_small_removed[point, bf_cropped.shape[1] // 2] and bf_small_removed[point + 30, bf_cropped.shape[1] // 2]:
                for i in range(30):
                    bf_small_removed[point + i, bf_cropped.shape[1] // 2] = True
        bf_dilated = binary_dilation(bf_small_removed)
        bf_small_holes_removed = remove_small_holes(bf_dilated,30000)
        bf_label_image = label(bf_small_holes_removed)
        lane_mask = np.zeros(bf_cropped.shape,dtype = int)
        mask_label = 1
        for region in regionprops(bf_label_image):
            if region.area > 10000:
                if region.bbox[2] == 0 or region.bbox[2] == bf_cropped.shape[0]:
                    for coord in region.coords:
                        [x,y] = coord
                        lane_mask[x,y] = 0
                else:
                    for coord in region.coords:
                        [x,y] = coord
                        lane_mask[x,y] = mask_label
                mask_label += 1
        lane_binary_mask = lane_mask > 0        
    return lane_mask,lane_binary_mask
Example #21
def test_watershed(image):
    images = {}
    img = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    images['grayscale'] = img

    r = 3
    img = opening(img, disk(r))
    images['opening' + str(r)] = img

    w = 3
    h = 3
    img = closing_by_reconstruction(img, rectangle(w, h))
    images['closing_by_reconstruction_' + str(w) + 'x' + str(h)] = img
    img = np.uint8(img)

    sigma = 3
    img = feature.canny(img, sigma=sigma)
    images['canny_' + str(sigma)] = img

    img = np.uint8(img)
    ret, thresh = cv.threshold(img, 0, 255,
                               cv.THRESH_BINARY_INV + cv.THRESH_OTSU)
    images['threshold'] = thresh

    r = 5
    img = opening(img, disk(r))
    images['opening_' + str(r)] = img

    # noise removal
    kernel = np.ones((3, 3), np.uint8)
    opened = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations=2)
    images['opened'] = opened
    # sure background area
    sure_bg = cv.dilate(opened, kernel, iterations=3)
    images['sure_bg'] = sure_bg
    # Finding sure foreground area
    dist_transform = cv.distanceTransform(opened, cv.DIST_L2, 5)
    ret, sure_fg = cv.threshold(dist_transform, 0.7 * dist_transform.max(),
                                255, 0)
    images['dist_transform'] = dist_transform
    images['sure_fg'] = sure_fg
    # Finding unknown region
    sure_fg = np.uint8(sure_fg)
    unknown = cv.subtract(sure_bg, sure_fg)
    images['unknown'] = unknown
    # Marker labelling
    ret, markers = cv.connectedComponents(sure_fg)
    images['markers'] = markers
    # Add one to all labels so that sure background is not 0, but 1
    markers = markers + 1
    images['markers+1'] = markers
    # Now, mark the region of unknown with zero
    markers[unknown == 255] = 0
    images['markers_unknown'] = markers
    markers = cv.watershed(image, markers)
    images['markers_watershed'] = markers
    image[markers == -1] = [255, 0, 0]
    images['image'] = image

    plot_images(images, 6, 3, cmap='gray')
Example #22
def make_entropy_vars(wells_df, logs, l_foots):

    new_df = pd.DataFrame()
    grouped = wells_df.groupby(['Well Name'])

    for key in grouped.groups.keys():

        depth = grouped.get_group(key)['Depth']
        temp_df = pd.DataFrame()
        temp_df['Depth'] = depth

        for log in logs:
            temp_data = grouped.get_group(key)[log]
            image = np.vstack((temp_data, temp_data, temp_data))
            image -= np.median(image)
            image /= np.max(np.abs(image))
            image = img_as_ubyte(image)

            for l_foot in l_foots:
                footprint = rectangle(l_foot, 3)
                temp_df[log + '_entropy_foot' + str(l_foot)] = entropy(
                    image, footprint)[0, :]

        new_df = new_df.append(temp_df)

    new_df = new_df.sort_index()
    new_df = new_df.drop(['Depth'], axis=1)
    return new_df
Example #23
def post_process_v2(mask):
    """Mainly remove the convex areas"""

    bw = label(mask == 1)
    # 1) detach mislabeled pixels
    bw = binary_opening(bw, rectangle(2, 20))
    # 2) remove small objects
    bw = remove_small_objects(bw, min_size=4096, connectivity=2)
    # 3) solve the defeat, typically the convex outline
    coords = corner_peaks(corner_harris(bw, k=0.2), min_distance=5)
    valid = [c for c in coords
             if 100 < c[1] < 476]  # only cares about this valid range
    if valid:
        y, x = zip(*valid)
        # corners appear in pair
        if len(y) % 2 == 0:
            # select the lowest pair
            left_x, right_x = [func(x[0], x[1]) for func in (min, max)]
            sep_x = np.arange(left_x, right_x + 1).astype(int)
            sep_y = np.floor(np.linspace(y[0], y[1] + 1,
                                         len(sep_x))).astype(int)
            # make the gap manually
            bw[sep_y, sep_x] = 0
            bw = binary_opening(bw, disk(6))
        else:
            mask = np.zeros_like(bw)
            mask[y, x] = 1
            chull = convex_hull_image(mask)
            bw = np.logical_xor(chull, bw)
            bw = binary_opening(bw, disk(6))
    return bw
def identify_lanes(bf_img, bf_cropped):
    bf_sobel_h = sobel_h(bf_cropped)
    #    fig, ax = try_all_threshold(bf_sobel_h, figsize=(15,12), verbose=False)
    #    plt.show()
    #    if len(bf_img.shape) == 3:
    #        bf_sobel_h_threshold = threshold_otsu(bf_sobel_h)
    #    else:
    bf_sobel_h_threshold = threshold_li(bf_sobel_h)
    bf_thresholded = bf_sobel_h > bf_sobel_h_threshold
    bf_closed = closing(bf_thresholded, rectangle(10, 20))
    bf_dilated = binary_dilation(bf_closed)
    bf_small_holes_removed = remove_small_holes(bf_dilated, 15000)
    bf_small_removed = remove_small_objects(bf_small_holes_removed)
    bf_label_image = label(bf_small_removed)
    row = bf_small_removed.shape[0]
    col = bf_small_removed.shape[1]
    lane_mask = np.zeros([row, col], dtype=int)
    mask_label = 1
    for region in regionprops(bf_label_image):
        if region.area > 10000:
            if region.bbox[2] == 0 or region.bbox[2] == row:
                for coord in region.coords:
                    [x, y] = coord
                    lane_mask[x, y] = 0
            else:
                for coord in region.coords:
                    [x, y] = coord
                    lane_mask[x, y] = mask_label
            mask_label += 1
    lane_binary_mask = lane_mask > 0
    return lane_mask, lane_binary_mask, row, col
Example #25
    def bw_transform(img_src):  # To finish
        tval = threshold_otsu(img_src)
        bn_src = (img_src > tval)
        # Reducing horizontal and vertical lines
        r_vertical = rectangle(bn_src.shape[1], 1)
        r_horizontal = rectangle(1, bn_src.shape[0])

        # Reducing horizontal lines
        bn_src = white_tophat(bn_src, r_horizontal)

        # Reducing vertical lines
        bn_src = white_tophat(bn_src, r_vertical)

        # Reducing salt
        bn_src = remove_small_objects(bn_src, connectivity=2, min_size=9)
        return bn_src
Example #26
def fix_background(num):
    #print('f')
    img = session['cerenkovradii']
    tim = time.time()
    val = img.copy()
    img -= np.min(img)
    img += .001
    ideal_r = 25
    #print(ideal_r)
    b4 = morphology.opening(img, morphology.disk(ideal_r - 10))
    b0 = morphology.closing(img, morphology.disk(ideal_r))
    b1 = morphology.opening(img, morphology.disk(ideal_r + 5))
    b2 = morphology.opening(img, morphology.disk(ideal_r))
    b3 = morphology.opening(img, morphology.disk(ideal_r - 5))
    b = b1.copy()
    c = ideal_r * 2
    arr = np.array([[-1 * ideal_r, 1], [ideal_r - 10, 1], [0, 1],
                    [ideal_r - 5, 1], [ideal_r, 1], [ideal_r + 5, 1]])
    arr = np.linalg.pinv(arr)
    for i in range(len(b)):

        for j in range(len(b[i])):
            if abs(b3[i][j] - b1[i][j]) > 20:
                arr2 = arr @ np.log(
                    np.array([
                        b0[i][j], b4[i][j], img[i][j], b3[i][j], b2[i][j],
                        b1[i][j]
                    ]))
                x = arr2[0]
                y = arr2[1]
                b[i][j] = np.exp(y) * np.exp(x * c)
    b = filters.median(b, selem=morphology.rectangle(40, 2))
    b = filters.median(b, selem=morphology.rectangle(2, 40))
    img -= b
    img -= np.median(img)
    path = retrieve_image_path('cerenkovcalc', num)
    os.remove(path)
    np.save(path, img)
    img -= np.min(img)
    img /= np.max(img)
    #print(time.time()-tim)
    img = Image.fromarray((np.uint8(plt.get_cmap('viridis')(img) * 255)))
    filepath = retrieve_image_path('cerenkovdisplay', num)
    os.remove(filepath)
    img.save(filepath)

    return {'r': 2}
def text_segments(img, min_h=20, max_h=50):
    gray_scale_img = rgb2grayscale(img)

    binarized_adaptive_img = threshold_adaptive(gray_scale_img, block_size=40, offset=20)
    dilated = dilation(~binarized_adaptive_img, rectangle(1, 15))
    for segment in extract_segments(dilated.copy()):
        if min_h < height(segment) < max_h:
            yield segment
def get_completed_external_bearing_walls(input_img):

    image = get_external_bearing_walls(input_img)

    # fill horizontal gaps
    selem_horizontal = morphology.rectangle(1, 50)
    img_filtered = morphology.closing(image, selem_horizontal)

    # fill vertical gaps
    selem_vertical = morphology.rectangle(80, 1)
    img_filtered = morphology.closing(img_filtered, selem_vertical)

    # plt.imshow(img_filtered, cmap="gray")
    # plt.gca().axis("off")
    # plt.show()

    return img_filtered
Example #29
 def func(frame):
     _dtype = frame.dtype
     kernel = mor.disk(3)
     frameWP = frame - mor.white_tophat(frame, kernel) * (mor.white_tophat(frame, kernel) > 1000).astype(float)
     kernel = mor.rectangle(25, 1)
     closed = mor.closing(frameWP, kernel)
     opened = mor.opening(closed, kernel)
     result = ((frameWP.astype(float) / opened.astype(float)) * 3000.0)
     return result.astype(_dtype)
Example #30
def refine_image(img, max_size=2000):
    h, w = np.size(img, 0), np.size(img, 1)
    result = np.empty(img.shape, 'uint8')
    if max(h, w) > max_size:
        result = img
    else:
        for c in range(np.size(img, 2)):
            result[:, :, c] = filters.rank.autolevel(img[:, :, c],
                                                     kernel.rectangle(h, w))
    return result
Example #31
def BinMM(img,thresh,size):
    #threshold at sliderval (param1)
    img = thrhold(img,thresh)
    
    structElement = morphology.rectangle(size,size)
    #structElement = morphology.disk(size)
    #structElement = morphology.diamond(size)

    img = morphology.binary_opening(np.squeeze(img), structElement).astype(np.uint8)
    return img[:,:,np.newaxis]
Example #32
 def get_rectangle(width, height):
     """
     :param width: Width of rectangle
     :type width: int
     :param height: Height of rectangle
     :type height: int
     :return: A structuring element consisting only of ones, i.e. every pixel belongs to the neighborhood.
     :rtype: numpy.ndarray
     """
     return rectangle(width, height)
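
Worth noting: in scikit-image the first argument of rectangle() is the number of rows and the second the number of columns, so this wrapper returns an array of shape (width, height). A minimal check:

# rectangle(a, b) is an a-by-b array of ones.
import numpy as np
from skimage.morphology import rectangle

assert rectangle(2, 5).shape == (2, 5)
assert np.all(rectangle(2, 5) == 1)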
Example #33
def process(filename, plot=False):
    # read in image filename
    imagepath = os.path.join(os.getcwd(), filename)
    orig_img = io.imread(filename, True, 'pil')
    # binarize image
    img = orig_img > 0.9  # binary threshold
    # convert to grayscale (easier for viewing)
    img = rgb2gray(img)
    imshow(img)
    # use erosion to expand black areas in the document. This will
    # group together small text and make it easier to find contours
    # of signatures, which are usually separated from text
    eroded_img = binary_erosion(img, rectangle(30, 10))
    # get contours of eroded image
    contours, lengths = compute_contours(eroded_img)
    # compute X and Y gradients over the contours. We expect that signatures
    # will have a constant gradient that is close to the length of
    # the contour. We take the sum of the X-gradient to remove contours
    # that are vertically biased
    c_grad_x = map(lambda x: np.gradient(x[:, 1]), contours)
    c_grad_y = map(lambda x: np.gradient(x[:, 0]), contours)
    stuffs = []
    for i, (x, y) in enumerate(zip(c_grad_x, c_grad_y)):
        stuffs.append((sum(map(abs, x)), len(x)))
    d = pd.DataFrame.from_records(stuffs)
    d['diff'] = abs(d[0] - d[1])
    d = d[d['diff'] > d['diff'].mean()]
    contours = [contours[i] for i in d.index]
    # compute bounding boxes for resulting contours
    boxes = get_boundingboxes(contours)

    # given a box, does sobel on the bounding box of that block
    # computes contours within the subimage and returns them
    def process_block(box):
        mask = get_mask_from_boundingbox(img, box)
        sobel_img = sobel(img, mask)
        contours, lengths = compute_contours(sobel_img)
        return len(contours), contours, lengths

    # get all blocks
    blocks = [process_block(box) for box in boxes]
    # retrieve the blocks that have the fewest number of contours
    num_contours = pd.Series(block[0] for block in blocks)
    num_contours = num_contours[num_contours < num_contours.mean()]
    # plot only those contours
    ret_contours = []
    for block in [blocks[i] for i in num_contours.index]:
        len_con, contours, lengths = block
        lengths = pd.Series(lengths)
        lengths = lengths[lengths > lengths.mean()]
        for i in lengths.index:
            contour = contours[i]
            ret_contours.append(contour)
            plt.plot(contour[:, 1], contour[:, 0])
    return ret_contours
Example #34
def overlay_eroded_masks_from_annotations(annotations, image_size, selem_size):
    mask = np.zeros(image_size)
    selem = rectangle(selem_size, selem_size)
    for ann in annotations:
        rle = cocomask.frPyObjects(ann['segmentation'], image_size[0],
                                   image_size[1])
        m = cocomask.decode(rle)
        m = m.reshape(image_size)
        m = binary_erosion(m, selem=selem)
        mask += m
    return np.where(mask > 0, 1, 0).astype('uint8')
def process(filename, plot=False):
    # read in image filename
    imagepath = os.path.join(os.getcwd(), filename)
    orig_img = io.imread(filename,True,'pil')
    # binarize image
    img = orig_img > 0.9 # binary threshold
    # convert to grayscale (easier for viewing)
    img = rgb2gray(img)
    imshow(img)
    # use erosion to expand black areas in the document. This will
    # group together small text and make it easier to find contours
    # of signatures, which are usually separated from text
    eroded_img = binary_erosion(img,rectangle(30,10))
    # get contours of eroded image
    contours, lengths = compute_contours(eroded_img)
    # compute X and Y gradients over the contours. We expect that signatures
    # will have a constant gradient that is close to the length of
    # the contour. We take the sum of the X-gradient to remove contours
    # that are vertically biased
    c_grad_x = map(lambda x: np.gradient(x[:,1]), contours)
    c_grad_y = map(lambda x: np.gradient(x[:,0]), contours)
    stuffs = []
    for i,(x,y) in enumerate(zip(c_grad_x,c_grad_y)):
        stuffs.append( (sum(map(abs, x)), len(x)) )
    d = pd.DataFrame.from_records(stuffs)
    d['diff'] = abs(d[0] - d[1])
    d = d[d['diff'] > d['diff'].mean()]
    contours = [contours[i] for i in d.index]
    # compute bounding boxes for resulting contours
    boxes = get_boundingboxes(contours)
    # given a box, does sobel on the bounding box of that block
    # computes contours within the subimage and returns them
    def process_block(box):
        mask = get_mask_from_boundingbox(img,box)
        sobel_img = sobel(img,mask)
        contours, lengths = compute_contours(sobel_img)
        return len(contours),contours,lengths
    # get all blocks
    blocks = [process_block(box) for box in boxes]
    # retrieve the blocks that have the fewest number of contours
    num_contours = pd.Series(block[0] for block in blocks)
    num_contours = num_contours[num_contours < num_contours.mean()]
    # plot only those contours
    ret_contours = []
    for block in [blocks[i] for i in num_contours.index]:
        len_con,contours,lengths = block
        lengths = pd.Series(lengths)
        lengths = lengths[lengths > lengths.mean()]
        for i in lengths.index:
            contour = contours[i]
            ret_contours.append(contour)
            plt.plot(contour[:,1],contour[:,0])
    return ret_contours
Example #36
def GrayMM(img,thresh,size):
    structElement = morphology.rectangle(size,size)
  
    img[img == -9999] = 0
    img = img/5000
    img[img < 0] = 0
    img[img > 1] = 1
    
    outdata = morphology.opening(img, structElement)
    #threshold after bin
    imgOut = outdata > 255*thresh/5000
    
    return imgOut
def color_based_crop(test_image):

    median_ = np.median(test_image[:, :, 0], axis=(0, 1))

    test_image_med = median(test_image[:, :, 0], disk(10))
    test_image_treshold = (test_image_med[:, :] > (median_ + 10)) & (test_image[:, :, 0] < 180)
    eroded_thr = erosion(test_image_treshold, rectangle(5, 5))
    regions = list(regionprops(label(1 - eroded_thr)))
    if len(regions) != 0:
        biggest_region = max(regions, key=lambda x: x.area)
        minr, minc, maxr, maxc = biggest_region.bbox
        test_image = test_image[minr:maxr, minc:maxc, :]

    return transform.resize(color.rgb2gray(test_image), (100, 100))
def region_filter_crop(rgbImage):
    rgbImage = rgbImage[100:-100, 100:-100]
    rgbImage = transform.resize(rgbImage, (1000, 1000))
    eroded_mask = erosion(yen_mask(rgbImage), rectangle(20, 20))

    regions = list(regionprops(label(eroded_mask)))
    if len(regions) == 0:
        return rgbImage
    biggest_region = max(regions, key=lambda x: x.area)

    minr, minc, maxr, maxc = biggest_region.bbox
    rgbImage = rgbImage[minr:maxr, minc:maxc, :]

    return rgbImage
Example #39
    def dilate_skimage(self):
        """perform the dilation of the image"""

        # set up structuring element
        # (@Giacomo, is (1, 90) and (1, 0) different? using rectangle here...
        struct_env = rectangle(1, 1)

        # perform algorithm with given environment,
        # store in same memory location
        image = dilation(self.current_image, selem=struct_env,
                         out=self.current_image)

        # update current image
        self.current_image = image

        # append function to logs
        self.logs.add_log('dilate - skimage')

        return image
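
Regarding the question left in the comment above: rectangle(1, 1) is a single-pixel structuring element, so this dilation leaves the image unchanged; elements such as rectangle(1, 90) and rectangle(90, 1) would instead dilate along a row versus along a column. A quick check, for illustration only:

# rectangle(1, 1) is a single pixel, so dilating with it is a no-op.
import numpy as np
from skimage.morphology import dilation, rectangle

img = np.random.randint(0, 255, (32, 32), dtype=np.uint8)
assert np.array_equal(dilation(img, rectangle(1, 1)), img)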
Example #40
def get_clustering_image(sample4x4_crop):
    binar = binarize(sample4x4_crop)

    ################################################
    #####APPLY FILTERS TO REMOVE NUM/SYMBOLS########
    ################################################
    selem = rectangle(2,2)
    dil = dilation(binar, selem)
    #dil = erosion(dil)
    #plt.imshow(dil, cmap=mpl.cm.Greys_r)

    dil = binarize(dil)
    #plt.imshow(dil, cmap=mpl.cm.Greys_r)

    cluster_image = deepcopy(dil)

    for i in range(4):
        for j in range(4):
            cluster_image[i*50+5:i*50+40,j*50+3:j*50+38] = np.zeros((35,35))+1

    return cluster_image
Example #41
 def to_gray_scale(self, img, imbin, colorvalue=(255, 0, 0), alpha=1.0,
                   gradient=True):
     max_color = max(colorvalue)
     if max_color > 1.0:
         colorvalue = [a / max_color for a in colorvalue]
     colim = color.gray2rgb(img)
     
     if gradient:
         #se = morphology.disk(1)
         se = morphology.rectangle(3,3)
         imvis = imbin - morphology.erosion(imbin, se)
     else:
         imvis = imbin
     for i, col in enumerate(colorvalue):
         channel_img = colim[:,:,i]
         
         channel_img[imvis>0] = (1-alpha) * channel_img[imvis>0]
         
         colim[:,:,i] = alpha*col*imvis + channel_img
     
     return colim
Example #42
def rectMask(maskImg, width, height):
    boxsize = maskImg.get_xsize()
    maskArray = EMNumPy.em2numpy(maskImg)
    
    if (boxsize <= width or boxsize <= height):
        print "ERROR: the width or height of the rectangle cannot be larger than the boxsize of particles."
        sys.exit()
        
    #from skimage.morphology import rectangle
    #Generates a flat, rectangular-shaped structuring element of a given width and height.
    #Every pixel in the rectangle belongs to the neighboorhood.
    rectArray = rectangle(height, width, dtype=np.uint8)
    m, n = rectArray.shape
    
    if (m%2 == 0):
        padRow_before = (boxsize - m)//2
        padRow_after = (boxsize - m)//2
    else:
        padRow_before = (boxsize - m)//2
        padRow_after = (boxsize - m)//2+1

    if (n%2 == 0):
        padCol_before = (boxsize - n)//2
        padCol_after = (boxsize - n)//2
    else:
        padCol_before = (boxsize - n)//2
        padCol_after = (boxsize - n)//2+1
    
    #pad_x = (boxsize - height)/2
    #pad_y = (boxsize - width)/2
    #rectArrayPad = np.pad(rectArray, ((pad_x, pad_x), (pad_y, pad_y)), mode='constant')
    rectArrayPad = np.pad(rectArray, ((padRow_before, padRow_after), (padCol_before, padCol_after)), mode='constant')
    #m, n = rectArrayPad.shape
    #print m, n
    
    #convert numpy to em image
    rectImg = EMNumPy.numpy2em(rectArrayPad)
    return rectImg    
Example #43
    3,
    nw_corner,
    se_corner,
    list(range(2005, 2012)),
    'Bulk Order 397884/L4-5 TM'
)
temporal_nvdi = (temporal_band_4 - temporal_band_3) / (temporal_band_4 + temporal_band_3)
field_mask = compress_temporal_image(temporal_nvdi)
imsave(fname_template.format('temporal_nvdi', 'png'), field_mask)

# Detect Fields
print('Detecting Fields')
field_mask = field_mask >= 10

# Find the area containing the fields
field_area = binary_closing(binary_erosion(field_mask, rectangle(5, 5)), rectangle(50, 50))
between_fields = logical_and(field_area, logical_not(field_mask))

# Find the roads and separate the fields
# Separate out into smaller blocks
print('Separating Fields in image')
for stride in [100, 200, 400]:  # pixels
    num_row_strides = int_(between_fields.shape[0]/stride)
    num_col_strides = int_(between_fields.shape[1]/stride)
    r_stride = int_(between_fields.shape[0]/num_row_strides)
    c_stride = int_(between_fields.shape[1]/num_col_strides)

    for r in range(num_row_strides+1):
        for c in range(num_col_strides+1):
            h, theta, d = hough_line(between_fields[r*r_stride:(r+1)*r_stride, c*c_stride:(c+1)*c_stride])
            threshold = 0  #0.0005*max(h)
Example #44
image=io.imread(file_path)

# load classifier
clf=joblib.load("digits_cls.pkl")

""" convert image from rgb to gray_scale"""
from skimage.color import rgb2gray
gray_image=rgb2gray(image)

""" apply gaussian filter """
try:
	from skimage import filters
except ImportError:
	from skimage import filter as filters
gaussian_img=filters.gaussian_filter(gray_image, sigma=(5, 5))

""" set threshold_otsu for convert image in binary image image """
# it optimaly set threshold value
from skimage.filters import threshold_otsu
thresh=threshold_otsu(gaussian_img)
binary=gaussian_img>thresh

""" find contours in image"""
from skimage import measure
new_b=binary
contours=measure.find_contours(binary)

# make rectangle for digits
from skimage.morphology import rectangle
rectangles=[rectangle(contour) for contour in contours]
Example #45
 def morphElement(self, width, height):
     return morphology.rectangle(width, height)
Example #46
def smoothing_func(lower_val_beta,upper_val_beta,lower_val_alpha,upper_val_alpha):
	
	"""
	This function eliminates the blocking effects appeared from the previous stages.
	The input arguments are explained in the image_normalization function.
	"""
	print 'The parameters are found successfully, and they are saved.'
	print 'Please wait to do smoothing over the parameters...'
	smooth_ngbh_x=5
	smooth_ngbh_y=5
	selem=rectangle(smooth_ngbh_x,smooth_ngbh_y)
	accuracy1=0.0025
	accuracy2=.05
	r=r_org/num_partitions_x
	c=c_org/num_partitions_y
	max_shared_array_size = 33000000

	def do_bilateral():
		
		num_threads = num_parallel
		volume_size = np.prod([r_org,c_org,s])
		volume_parts = np.ceil(np.float(volume_size)/ (100*max_shared_array_size))
		value_total = np.zeros((r_org,c_org,s))
		for z_division in np.arange(volume_parts):
			s_portion = int(s/volume_parts)
			s_portion_prev = s_portion
			if z_division==volume_parts:
				s_portion = s - volume_parts*s_portion_prev
			value_portion, value_portion_arr = make_shared_farray([r_org,c_org,s_portion])
			v_queue = multiprocessing.Queue()
			finished_count = multiprocessing.Value(ctypes.c_long, 0)
			buckets=s_portion
			for bucket in range(buckets):
				v_queue.put(bucket)

			def thread_proc():
				while True:
					try:
						bucket = v_queue.get_nowait()
					except:
						break
					
					beta_mat_block=beta[:,:,(z_division*s_portion)+bucket]
					alpha_mat_block=alpha[:,:,(z_division*s_portion)+bucket]
		
					correction_mat_pixel=scsp.diags(Dimg_org[((z_division*s_portion)+bucket)*r_org*c_org:((z_division*s_portion)+bucket+1)*r_org*c_org],0)
					correction_mat2_pixel=scsp.eye(r_org*c_org,r_org*c_org)
					correction_mat_pixel=scsp.hstack([correction_mat_pixel,correction_mat2_pixel])
					del correction_mat2_pixel
		
					beta_mat_block=np.repeat(beta_mat_block,window_size_x,axis=0)	
					beta_mat=np.repeat(beta_mat_block,window_size_y,axis=1)	
					del beta_mat_block
					beta_mat=np.uint16((float(1)/float(accuracy1))*beta_mat)
					beta_changed=rank.bilateral_mean(beta_mat,selem=selem,s0=lower_val_beta,s1=upper_val_beta)
					beta_changed=(np.float32(beta_changed))*float(accuracy1)
					del beta_mat
					beta_vec=np.reshape(beta_changed,(r_org*c_org,1),order='F')
					alpha_mat_block=np.repeat(alpha_mat_block,window_size_x,axis=0)	
					alpha_mat=np.repeat(alpha_mat_block,window_size_y,axis=1)	
					del alpha_mat_block
					alpha_mat=alpha_mat+cnst
					alpha_mat=np.uint16((float(1)/float(accuracy2))*alpha_mat)
					alpha_changed=rank.bilateral_mean(alpha_mat,selem=selem,s0=lower_val_alpha,s1=upper_val_alpha)
					alpha_changed=((np.float32(alpha_changed))*float(accuracy2))-cnst
					del alpha_mat
					alpha_vec=np.reshape(alpha_changed,(r_org*c_org,1),order='F')
					param_vec=np.vstack([beta_vec,alpha_vec])
					corrected_data_smoothed=correction_mat_pixel*param_vec
					corrected_data_smoothed=np.reshape(corrected_data_smoothed,(r_org,c_org),order='F')
			
					num_finished = 0
					with finished_count.get_lock():
						finished_count.value += 1
						num_finished = finished_count.value
					#print '  Finished %d/%d' % (num_finished, buckets)
					with value_portion_arr.get_lock():
						value_portion[...][:,:,bucket]=corrected_data_smoothed
				
	
			procs = []
			for i in range(num_threads):
				p = multiprocessing.Process(target = thread_proc)
				p.start()
				procs.append(p)
			for p in procs:
				p.join()
			value_total[:,:,(z_division*s_portion_prev):(z_division*s_portion_prev+s_portion)]=value_portion
		return value_total

	alpha=np.zeros((r_org/window_size_x,c_org/window_size_y,s))
	beta=np.zeros((r_org/window_size_x,c_org/window_size_y,s))
	#---------------------------------------------------------------------------
	for partnum_y in np.arange(num_partitions_y):
		for partnum_x in np.arange(num_partitions_x):
			partnum=num_partitions_x*partnum_y+partnum_x
			f=h5py.File('%s/parameters_final_partition%s_%s.h5' %(output_path,step,partnum),'r')
			param=f[group_name].value
			alpha_part=np.reshape(param[len(param)/2:],(r/window_size_x,c/window_size_y,s),order='F')
			beta_part=np.reshape(param[0:len(param)/2],(r/window_size_x,c/window_size_y,s),order='F')
			alpha[(partnum_x*r/window_size_x):((partnum_x+1)*r/window_size_x),(partnum_y*c/window_size_y):((partnum_y+1)*c/window_size_y),:]=alpha_part
			beta[(partnum_x*r/window_size_x):((partnum_x+1)*r/window_size_x),(partnum_y*c/window_size_y):((partnum_y+1)*c/window_size_y),:]=beta_part
			
			
	Corrected_data_smoothed=np.zeros((r_org,c_org,s))
	#---------------------------------------------------
	del alpha_part
	del beta_part
	Corrected_data_smoothed=do_bilateral()
	
	var=np.max(Corrected_data_smoothed)-np.min(Corrected_data_smoothed)
	Corrected_data_smoothed=(Corrected_data_smoothed-np.min(Corrected_data_smoothed))*float(255)/float(var)
	
	f=h5py.File('%s/normalized_data_final.h5'%output_path,'w')
	dset=f.create_dataset(group_name,data=Corrected_data_smoothed)
	f.close()
	
	
	
	return Corrected_data_smoothed
    roi_y1 = image.shape[1]
    roi_y2 = 0
    
image = image[roi_x2:roi_x1, roi_y2:roi_y1]
ground_truth = ground_truth[roi_x2:roi_x1, roi_y2:roi_y1]


if image.ndim == 3:
    print("Extract NIR channel")
    image = image[:,:,0]
else:
    print("Single-channel image")

print("OTSU Threshold with sliding window")

selem = rectangle(100,100)
local_otsu = rank.otsu(image, selem)


#
if soil_removed:
	soil_removed_image=io.imread(str(sys.argv[3]),as_grey=True)
	if (roi == True):
		soil_removed_image = soil_removed_image[roi_x2:roi_x1, roi_y2:roi_y1]
	ii, jj = np.where(soil_removed_image==0)
	image[ii, jj] = 0
 
binary_image = image > local_otsu


print "Distance Transform"