Code Example #1
File: nn1.py Project: 722C/nn-art-critic
    def handle(self, *args, **options):
        better_thans = BetterThan.objects.all() #.filter(pk__lte=50)

        ds = SupervisedDataSet(204960, 1)
        for better_than in better_thans:
            bt = imread(better_than.better_than.image.file)
            wt = imread(better_than.worse_than.image.file)
            better_than.better_than.image.file.close()
            better_than.worse_than.image.file.close()

            bt = filters.sobel(bt)
            wt = filters.sobel(wt)

            bt_input_array = np.reshape(bt, (bt.shape[0] * bt.shape[1]))
            wt_input_array = np.reshape(wt, (wt.shape[0] * wt.shape[1]))
            input_1 = np.append(bt_input_array, wt_input_array)
            input_2 = np.append(wt_input_array, bt_input_array)
            ds.addSample(input_1, [-1])
            ds.addSample(input_2, [1])
        
        net = buildNetwork(204960, 2, 1)

        train_ds, test_ds = ds.splitWithProportion(options['train_test_split'])
        _, test_ds = ds.splitWithProportion(options['test_split'])

        trainer = BackpropTrainer(net, ds)

        avgerr = trainer.testOnData(dataset=test_ds)
        print('untrained avgerr: {0}'.format(avgerr))

        trainer.train()

        avgerr = trainer.testOnData(dataset=test_ds)
        print('trained avgerr: {0}'.format(avgerr))
Code Example #2
def nrOfEdgePixels(rgbimage, intensityImage):
    redEdges = sobel(rgbimage[:,:,0])
    grayEdges = sobel(intensityImage)
    t = redEdges - grayEdges
    t[t < 0.05] = 0
    t[t >= 0.05] = 1
    return convolve2d(t, np.ones((17,17)), mode="same")
Code Example #3
File: OCR.py Project: morteano/TDT4173
def getSubImages(img, pixels, size):
    subImages = []
    originals = []
    for i in range(len(img)):
        subImageRow = []
        originalRow = []
        for j in range(len(img[i])):
            if i % pixels == 0 and j % pixels == 0 and i+size-1 < len(img) and j+size-1 < len(img[i]):
                subImage = []
                for k in range(i, i+size, int(size/20)):
                    line = []
                    for l in range(j, j+size, int(size/20)):
                        line.append(img[k][l])
                    subImage.append(line)
                originalRow.append(subImage)
                if preprocess == preprocessing.SOBEL:
                    subImage = denoise_bilateral(subImage, sigma_range=0.1, sigma_spatial=15)
                    subImage = sobel(subImage)
                elif preprocess == preprocessing.HOG:
                    subImage = useHoG(subImage)
                else:
                    subImage = denoise_bilateral(subImage, sigma_range=0.1, sigma_spatial=15)
                    subImage = sobel(subImage)
                    subImage = useHoG(subImage)
                subImageRow.append(subImage)
        if len(subImageRow) > 0:
            subImages.append(subImageRow)
            originals.append(originalRow)
    return subImages, originals
Code Example #4
File: tools.py Project: mjirik/lisa
def sobel(data, sliceId=2):
    edges = np.zeros(data.shape)
    if sliceId == 2:
        for idx in range(data.shape[2]):
            edges[:, :, idx] = skifil.sobel(data[:, :, idx])
    elif sliceId == 0:
        for idx in range(data.shape[0]):
            edges[idx, :, :] = skifil.sobel(data[idx, :, :])
    return edges
Code Example #5
File: colorimage.py Project: xingnix/learning
def color_edge():
        image=data.astronaut()
        r=np.abs(filters.sobel(image[:,:,0]))
        r=np.uint8(r/r.max()*255)
        io.imsave('astronautedger.png',r)
        g=np.abs(filters.sobel(image[:,:,1]))
        g=np.uint8(g/g.max()*255)
        io.imsave('astronautedgeg.png',g)
        b=np.abs(filters.sobel(image[:,:,2]))
        b=np.uint8(b/b.max()*255)
        io.imsave('astronautedgeb.png',b)
Code Example #6
 def transform(self,X):
     imgs = []
     for x in X:
         if x.ndim == 3:
             x =self.rgb2gray(x)
         imgs.append(sobel(x).ravel())
     return np.vstack(imgs)
Code Example #7
File: read_image.py Project: MPMakris/Photo_Pro
    def _calc_crispness(self, grey_array):
        """Calculate three measures of the crispness of an channel.

        PARAMETERS
        ----------
        grey_array : 2D numpy array
            Raw data for the grey channel.

        PRODUCES
        --------
        crispnesses : list
            Three measures of the crispness in the grey channel of types:

            - ``sobel``, ``canny``, and ``laplace``
        """
        grey_array = grey_array/255
        sobel_var = filters.sobel(grey_array).var()
        canny_array = feature.canny(grey_array, sigma=1)
        canny_ratio = np.sum(canny_array == True)/float(
                                                    len(canny_array.flatten()))
        laplace_var = filters.laplace(grey_array, ksize=3).var()
        self.feature_data.extend([sobel_var, canny_ratio, laplace_var])
        if self.columns_out:
            self.column_names.extend(['crisp_sobel', 'crisp_canny',
                                      'crisp_laplace'])
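The method above folds the three crispness measures into a feature vector; a minimal standalone sketch of the same three numbers (assuming scikit-image's sample camera image and the same sigma=1 / ksize=3 settings) could look like:

from skimage import data, feature, filters

grey = data.camera() / 255.0                              # scale to [0, 1] like the method above
sobel_var = filters.sobel(grey).var()                     # variance of the Sobel edge response
canny_mask = feature.canny(grey, sigma=1)                 # boolean edge mask
canny_ratio = canny_mask.sum() / float(canny_mask.size)   # fraction of edge pixels
laplace_var = filters.laplace(grey, ksize=3).var()        # variance of the Laplacian response
print(sobel_var, canny_ratio, laplace_var)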
Code Example #8
def main():
    """Load image, apply sobel (to get x/y gradients), plot the results."""
    img = data.camera()

    sobel_y = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
    sobel_x = np.rot90(sobel_y)  # rotates counter-clockwise

    # apply x/y sobel filter to get x/y gradients
    img_sx = signal.correlate(img, sobel_x, mode="same")
    img_sy = signal.correlate(img, sobel_y, mode="same")

    # combine x/y gradients to gradient magnitude
    # scikit-image's implementation divides by sqrt(2), not sure why
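    # (likely so the output stays within [0, 1]: each normalized axis kernel responds with
    # at most 1 on a unit step, so the combined magnitude can reach sqrt(2))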
    img_s = np.sqrt(img_sx ** 2 + img_sy ** 2) / np.sqrt(2)

    # create binarized image
    threshold = np.average(img_s)
    img_s_bin = np.zeros(img_s.shape)
    img_s_bin[img_s > threshold] = 1

    # generate ground truth (scikit-image method)
    ground_truth = skifilters.sobel(data.camera())

    # plot
    util.plot_images_grayscale(
        [img, img_sx, img_sy, img_s, img_s_bin, ground_truth],
        [
            "Image",
            "Sobel (x)",
            "Sobel (y)",
            "Sobel (magnitude)",
            "Sobel (magnitude, binarized)",
            "Sobel (Ground Truth)",
        ],
    )
Code Example #9
def op_vs_ip(subid, image_types, imagepaths, op_direc, overlays):
	
	
	img_data_group=[]
	img_shape_group=[]
	ol_data_group=[]
	ol_shape_group=[]
	for i, path in enumerate(imagepaths):	

		axial_slice, cor_slice, sag_slice, img_aspect_axial, img_aspect_cor, img_aspect_sag = pull_midslices(path)
		if os.path.isfile(overlays[i]):
			axial_slice_ol, cor_slice_ol, sag_slice_ol, img_aspect_axial_ol, img_aspect_cor_ol, img_aspect_sag_ol = pull_midslices(overlays[i])
			ol_data_group.append([axial_slice_ol, cor_slice_ol, sag_slice_ol])
			ol_shape_group.append([img_aspect_axial_ol, img_aspect_cor_ol, img_aspect_sag_ol])
		else:
			ol_data_group.append(['null','null','null'])
			ol_shape_group.append(['null','null','null'])
		## Append to Matrices
		img_data_group.append([axial_slice, cor_slice, sag_slice])
		img_shape_group.append([img_aspect_axial,img_aspect_cor,img_aspect_sag])
		


	my_cmap=plt.cm.gray


	fig, axarr = plt.subplots(ncols=np.shape(img_shape_group)[1], nrows=np.shape(img_shape_group)[0], figsize=(np.shape(img_shape_group)[0]*5,np.shape(img_shape_group)[1]*5))
	plt.suptitle(subid+' File Comparison', fontsize=20)	
	
	titlearray=['Axial', 'Coronal', 'Sagittal']
	
	for x in range(0,np.shape(img_shape_group)[0]):
		for y in range(0,np.shape(img_shape_group)[1]):
			im = axarr[x, y].imshow(img_data_group[x][y], cmap=my_cmap, aspect=img_shape_group[x][y])
			axarr[x, y].set_xlabel('(Right) Radiological Convention (Left)', fontsize=10)
			axarr[x, y].set_title(image_types[x]+' '+titlearray[y])
			#divider = make_axes_locatable(axarr[x, y])
			#cax_ = divider.append_axes("right", size="5%", pad=0.05)
			#cbar = plt.colorbar(im, cax=cax_, ticks=MultipleLocator(round(np.max(img_data_group[x][y])/5, 1)))
			axarr[x, y].xaxis.set_visible(False)
			axarr[x, y].yaxis.set_visible(False)




			if os.path.isfile(overlays[x]):
				if x == 1:
					thresh=0.25
				if x == 2:
					thresh=0.4
				sl=np.array(ol_data_group[x][y]).astype(np.float64)
				sl=filters.sobel(sl)
				sl=preprocessing.binarize(sl, np.max(sl)*thresh)
				sl[sl < 1] = 'Nan'
				axarr[x, y].imshow(sl, cmap='autumn', aspect=ol_shape_group[x][y])

	#plt.show()
	plt.tight_layout()
	plt.autoscale()
	plt.savefig(op_direc)
Code Example #10
def filter_bank(img, coeff_resolution):
    """
    Calculates the responses of an image to M filters.
    Returns a 2-D array of the vectorial responses.
    """

    h, w = img.shape

    im = np.reshape(img, (h*w, 1))

    e1 = np.reshape(entropy(img, disk(coeff_resolution*5)), (h*w, 1))
    e2 = np.reshape(entropy(img, disk(coeff_resolution*8)), (h*w, 1))
    e3 = np.reshape(entropy(img, disk(coeff_resolution*10)), (h*w, 1))

    g1 = np.reshape(gradient(img, disk(1)), (h*w, 1))
    g2 = np.reshape(gradient(img, disk(coeff_resolution*3)), (h*w, 1))
    g3 = np.reshape(gradient(img, disk(coeff_resolution*5)), (h*w, 1))

    m1 = np.reshape(ndi.maximum_filter(256-img, size=coeff_resolution*2, mode='constant'), (h*w, 1))
    m2 = np.reshape(ndi.maximum_filter(256-img, size=coeff_resolution*4, mode='constant'), (h*w, 1))
    m3 = np.reshape(ndi.maximum_filter(256-img, size=coeff_resolution*7, mode='constant'), (h*w, 1))

    #c = np.reshape(canny(img), (h*w, 1))
    s = np.reshape(sobel(img), (h*w, 1))

    return np.column_stack((im, e1, e2, e3, g1, g2, g3, m1, m2, m3, s))
Code Example #11
def number_nucleus(image):

    elevation_map = sobel(image)
    markers = np.zeros_like(image)
    markers[image < 250] = 1
    markers[image > 2000] = 2

    segmentation = watershed(elevation_map, markers)
    label_img = label(segmentation)
    prop = regionprops(label_img)

    width, height = plt.rcParams['figure.figsize']
    plt.rcParams['image.cmap'] = 'gray'

    image_label_overlay = label2rgb(label_img, image=image)

    fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(15, 8))
    ax1.imshow(image_label_overlay)
    ax2.imshow(image, cmap=plt.cm.gray, interpolation='nearest')

    # keep only the regions with area > 5000
    image_labeled = [region for region in prop if region.area > 5000]


    return len(image_labeled)
Code Example #12
File: filter_spatial.py Project: yunjunz/PySAR
def filter(data,filtType,par):

    if   filtType == "sobel":       filt_data = sobel(data)
    elif filtType == "roberts":     filt_data = roberts(data)
    elif filtType == "canny":       filt_data = canny(data)
    elif filtType == "lowpass_avg":
        from scipy import ndimage
        p=int(par)
        kernel = np.ones((p,p),np.float32)/(p*p)
        filt_data = ndimage.convolve(data, kernel)
    elif filtType == "highpass_avg":
        from scipy import ndimage
        p=int(par)
        kernel = np.ones((p,p),np.float32)/(p*p)
        lp_data = ndimage.convolve(data, kernel)
        filt_data = data - lp_data
    elif filtType == "lowpass_gaussian":
        filt_data = gaussian(data, sigma=float(par))
    elif filtType == "highpass_gaussian":
        lp_data   = gaussian(data, sigma=float(par))
        filt_data = data - lp_data

    #elif filtType ==  "gradient":
       
    return filt_data
Code Example #13
def testSkimage():
    img = Image.open('../img/1.png')
    img = np.array(img)
    imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # (thresh, imgbw) = cv2.threshold(imggray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

    # canny detector
    # from skimage.feature import canny
    # edges = canny(imggray/ 255.)
    from scipy import ndimage as ndi
    # fill_imgbw = ndi.binary_fill_holes(edges)
    # label_objects, nb_labels = ndi.label(fill_imgbw)
    # sizes = np.bincount(label_objects.ravel())
    # mask_sizes = sizes > 20
    # mask_sizes[0] = 0
    # cleaned_imgbw = mask_sizes[label_objects]

    markers = np.zeros_like(imggray)
    markers[imggray < 120] = 1
    markers[imggray > 150] = 2

    from skimage.filters import sobel
    elevation_map = sobel(imggray)
    from skimage.morphology import watershed
    segmentation = watershed(elevation_map, markers)

    # from skimage.color import label2rgb
    # segmentation = ndi.binary_fill_holes(segmentation - 10)
    # labeled_coins, _ = ndi.label(segmentation)
    # image_label_overlay = label2rgb(labeled_coins, image=imggray)
    plt.imshow(segmentation, cmap='gray')
    plt.show()
    return
Code Example #14
File: api.py Project: SPKhan/sarai-pest-diseases
def pestFeatureExtraction(filename):
	selem = disk(8)
	image = data.imread(filename,as_grey=True)
	thresh = threshold_otsu(image)
	elevation_map = sobel(image)
	markers = np.zeros_like(image)

	if ((image<thresh).sum() > (image>thresh).sum()):
		markers[image < thresh] = 1
		markers[image > thresh] = 2
	else:
		markers[image < thresh] = 2
		markers[image > thresh] = 1

	segmentation = morphology.watershed(elevation_map, markers)
	segmentation = dilation(segmentation-1, selem)
	segmentation = ndimage.binary_fill_holes(segmentation)

	segmentation = np.logical_not(segmentation)
	image[segmentation]=0;

	hist = np.histogram(image.ravel(),256,[0,1])

	hist = list(hist[0])
	hist[:] = [float(x) / (sum(hist) - hist[0]) for x in hist]
	hist.pop(0)

	features = np.empty( (1, len(hist)), 'float' )
	
	a = np.array(list(hist))
	f = a.astype('float')
	features[0,:]=f[:]

	return features
Code Example #15
File: deshake.py Project: pmoret/deshake
def prepare(img):
    """
    Pre-process the image before translation detection, here we transform to black and white and use edge-detection.
    :param img: An image (as numpy array)
    :return: The preprocessed image (as numpy array)
    """
    return sobel(rgb2gray(img))
Code Example #16
File: detection.py Project: afrutig/Moloreader
def _segment_watershed(image):
	elevation_map = sobel(image)
	markers = np.zeros(image.shape) # initialize markers as zero array 

	
	# determine thresholds for markers
	sorted_pixels = np.sort(image, axis=None)
	max_int = np.mean(sorted_pixels[-10:])
	min_int = np.mean(sorted_pixels[:10])
	#max_int = np.max(orig_image)
	#min_int = np.min(orig_image)
	
	alpha_min = 0.01
	alpha_max = 0.4
	thresh_background = (1-alpha_min)*min_int	+	alpha_min*max_int
	thresh_spots = 		(1-alpha_max)*min_int	+	alpha_max*max_int
	
	markers[image < thresh_background] = 1 # mark background
	markers[image > thresh_spots] = 2 # mark spots
	
	segmentation = watershed(elevation_map, markers)
	segmentation = segmentation-1
	segmentation = ndi.binary_fill_holes(segmentation)	# fill holes
	
	return segmentation
Code Example #17
File: test_edges.py Project: lotteanna/scikit-image
def test_sobel_vertical():
    """Sobel on a vertical edge should be a vertical line."""
    i, j = np.mgrid[-5:6, -5:6]
    image = (j >= 0).astype(float)
    result = filters.sobel(image) * np.sqrt(2)
    j[np.abs(i) == 5] = 10000
    assert (np.all(result[j == 0] == 1))
    assert (np.all(result[np.abs(j) > 1] == 0))
Code Example #18
    def __init__(self):
        self.logo = scipy_logo.ScipyLogo(radius=self.radius)
        self.mask_1 = self.logo.get_mask(self.image.shape, 'upper left')
        self.mask_2 = self.logo.get_mask(self.image.shape, 'lower right')

        edges = np.array([sobel(img) for img in self.image.T]).T
        # truncate and stretch intensity range to enhance contrast
        self.edges = rescale_intensity(edges, in_range=(0, 0.4))
Code Example #19
File: planes3.py Project: Yamadads/kck
def getContourImage(imageFile):
    planeImage = data.imread(imageFile,True)
    imageArray = np.asarray(planeImage)
    averageColor = np.mean(imageArray)
    imageArray = getBlackAndWhiteImage(imageArray,averageColor*0.85)
    imageArray = filters.sobel(imageArray)
    imageArray = morphology.dilation(imageArray,morphology.disk(3))
    return imageArray
Code Example #20
File: primitives.py Project: grayhem/inspection_port
def sobel(frame):
    """
    return the sobel importance of an rgb image
    """
    frame = grayscale(frame)
    frame = filters.sobel(frame)
    # print(frame.max())
    return normalize(frame)
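grayscale() and normalize() are project helpers that are not part of this snippet; a plausible stand-in (an assumption, not the project's actual code) that keeps sobel() above runnable might look like:

import numpy as np
from skimage import color

def grayscale(frame):
    """Collapse an RGB frame to a single-channel float image."""
    return color.rgb2gray(frame)

def normalize(frame):
    """Stretch a float image to the full 8-bit range and return uint8."""
    frame = frame - frame.min()
    peak = frame.max()
    if peak > 0:
        frame = frame / peak
    return (frame * 255).astype(np.uint8)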
Code Example #21
File: image_tests.py Project: dgreenfi/ImageRec
def main():
    from skimage import data, io, filters
    testfolder='/Users/davidgreenfield/Downloads/pics_boots/'
    testimage='B00A0GVP8A.jpg'
    image = io.imread(testfolder+testimage,flatten=True) # or any NumPy array!
    edges = filters.sobel(image)
    io.imshow(edges)
    io.show()
Code Example #22
File: red_cell_count.py Project: GNZ/micro-api
    def analyse(self, **kwargs):
        image_object = kwargs['image']

        if image_object is None:
            raise RuntimeError()

        # Read the image
        image = cv2.imread(self.image_utils.getOutputFilename(image_object.id))

        if image is None:
            print('File not found')
            return

        # Work on green channel
        gray = image[:, :, 1]

        # Apply otsu thresholding
        thresh = filters.threshold_otsu(gray)
        gray[gray < thresh] = 0

        # Apply histogram equalization
        gray = exposure.equalize_adapthist(gray) * 255

        # Create elevation map
        elevation_map = filters.sobel(gray)

        gray = gray.astype(int)

        # Create cell markers
        markers = numpy.zeros_like(gray)
        markers[gray < 100] = 2  # seen as white in plot
        markers[gray > 150] = 1  # seen as black in plot

        # Segment with watershed using elevation map
        segmentation = morphology.watershed(elevation_map, markers)
        segmentation = ndi.binary_fill_holes(segmentation - 1)
        # labeled_image, n = ndi.label(segmentation)

        # Watershed with distance transform
        kernel = numpy.ones((5, 5), numpy.uint8)

        distance = ndi.distance_transform_edt(segmentation)
        distance2 = cv2.erode(distance, kernel)
        distance2 = cv2.dilate(distance2, kernel)
        local_max = peak_local_max(distance2, num_peaks=1, indices=False, labels=segmentation)
        markers2 = ndi.label(local_max)[0]
        labels = morphology.watershed(-distance2, markers2, mask=segmentation)

        # Extract regions (caching uses more memory)
        regions = regionprops(labels, cache=True)

        # Filter out big wrong regions
        regions = [region for region in regions if region.area < 2000]

        # Set result
        result = str(len(regions))

        return result
Code Example #23
File: preprocess.py Project: OpenPIV/openpiv-python
def dynamic_masking(image,method='edges',filter_size=7,threshold=0.005):
    """ Dynamically masks out the objects in the PIV images
    
    Parameters
    ----------
    image: image
        a two dimensional array of uint16, uint8 or similar type
        
    method: string
        'edges' or 'intensity':
        'edges' method is used for relatively dark and sharp objects, with visible edges, on 
        dark backgrounds, i.e. low contrast
        'intensity' method is useful for smooth bright objects or dark objects or vice versa, 
        i.e. images with high contrast between the object and the background
    
    filter_size: integer
        a scalar that defines the size of the Gaussian filter
    
    threshold: float
        a value of the threshold to segment the background from the object
        default value: None, replaced by skimage.filters.threshold_otsu value
            
    Returns
    -------
    image : array of the same datatype as the incoming image with the object masked out
        as a completely black region(s) of zeros (integers or floats).
    
    
    Example
    --------
    frame_a  = openpiv.tools.imread( 'Camera1-001.tif' )
    imshow(frame_a) # original
    
    frame_a = dynamic_masking(frame_a,method='edges',filter_size=7,threshold=0.005)
    imshow(frame_a) # masked 
        
    """
    imcopy = np.copy(image)
    # stretch the histogram
    image = exposure.rescale_intensity(img_as_float(image), in_range=(0, 1))
    # blur the image, low-pass
    blurback = img_as_ubyte(gaussian_filter(image,filter_size))
    if method == 'edges':
        # identify edges
        edges = sobel(blurback)
        blur_edges = gaussian_filter(edges,21)
        # create the boolean mask 
        bw = (blur_edges > threshold)
        bw = img_as_ubyte(binary_fill_holes(bw))
        imcopy -= blurback
        imcopy[bw] = 0.0
    elif method == 'intensity':
        background = gaussian_filter(median_filter(image,filter_size),filter_size)
        imcopy[background > threshold_otsu(background)] = 0

        
    return imcopy #image
Code Example #24
File: test_edges.py Project: lotteanna/scikit-image
def test_sobel_horizontal():
    """Sobel on a horizontal edge should be a horizontal line."""
    i, j = np.mgrid[-5:6, -5:6]
    image = (i >= 0).astype(float)
    result = filters.sobel(image) * np.sqrt(2)
    # Fudge the eroded points
    i[np.abs(j) == 5] = 10000
    assert_allclose(result[i == 0], 1)
    assert (np.all(result[np.abs(i) > 1] == 0))
Code Example #25
File: Script_ComplexABWidget.py Project: Jothy/RTQA
    def updateCMap(self):
        self.mplwidget.axes.clear()
        self.cmapType=str(self.cmap.currentText())
        self.mplwidget.axes.imshow(self.Img1,cmap=self.cmapType,alpha=0.5)

        edge_sobel = sobel(self.Img1)
        self.mplwidget.axes.hold(True)
        self.mplwidget.axes.imshow(edge_sobel,cmap='gray',alpha=0.5)
        self.mplwidget.draw()
Code Example #26
def test_hsv_value_with_non_float_output():
    # Since `rgb2hsv` returns a float image and the filtered result is
    # inserted back into the HSV image, we want to make sure there isn't
    # a dtype mismatch.
    filtered = edges_hsv_uint(COLOR_IMAGE)
    filtered_value = color.rgb2hsv(filtered)[:, :, 2]
    value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
    # Reduce tolerance because dtype conversion.
    assert_allclose(filtered_value, filters.sobel(value), rtol=1e-5, atol=1e-5)
Code Example #27
File: primitives.py Project: grayhem/inspection_port
def sobel_triple(frame):
    """
    compute horizontal/ vertical sobel intensities and convert to red/ blue values. green channel
    will get the un-directed sobel filter. very pleasing effect.
    """
    output = np.zeros(frame.shape, dtype=np.uint8)
    frame = grayscale(frame)
    output[:, :, 0] = normalize(np.abs(filters.sobel_h(frame)))
    output[:, :, 1] = normalize(filters.sobel(frame))
    output[:, :, 2] = normalize(np.abs(filters.sobel_v(frame)))
    return output
Code Example #28
File: segmenting.py Project: ntadej/CenturyOfTheSun
def segmentize(image):
    # make segmentation using edge-detection and watershed
    edges = sobel(image)
    markers = np.zeros_like(image)
    foreground, background = 1, 2
    markers[image == 0] = background
    markers[image == 1] = foreground

    ws = watershed(edges, markers)

    return ndi.label(ws == foreground)
Code Example #29
File: Script_PyramidWidget.py Project: Jothy/RTQA
    def updateCMap2(self):
        #self.pyramid2.axes.clear()
        self.cmapType2=str(self.cmap2.currentText())
        self.pyramid2.axes.imshow(self.Img2,cmap=self.cmapType2,alpha=0.5)

        edge_sobel = sobel(self.Img2)
        self.pyramid2.axes.hold(True)
        self.pyramid2.axes.imshow(edge_sobel,cmap='gray',alpha=0.5)
        self.pyramid2.axes.hold(True)
        self.pyramid2.axes.set_xlabel('Pixel No. A-B')
        self.pyramid2.axes.set_ylabel('Pixel No. T-G')
        self.pyramid2.draw()
Code Example #30
File: image_processing_lab.py Project: rostar/rostar
 def _get_edges(self):
     img_edg, mask = self.processed_image
     if self.edge_detection_method == 'canny':
         img_edg = canny(img_edg, sigma=self.canny_sigma,
                     low_threshold=self.canny_low,
                     high_threshold=self.canny_high)
     elif self.edge_detection_method == 'roberts':
         img_edg = roberts(img_edg)
     elif self.edge_detection_method == 'sobel':
         img_edg = sobel(img_edg)
     img_edg = img_edg > 0.0
     return img_edg
Code Example #31
from scipy import ndimage,misc
from skimage import filters
from PIL import Image
import scipy

a = Image.open('images/moon.jpg')
b = filters.sobel(a)

b = scipy.misc.toimage(b)
b.save('images/moon_sobel.jpg')
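scipy.misc.toimage has been removed from modern SciPy, so the snippet above only runs on old versions; an equivalent save path using PIL directly (a sketch, assuming images/moon.jpg can be read as a single-channel image) would be:

import numpy as np
from PIL import Image
from skimage import filters, img_as_ubyte

a = np.array(Image.open('images/moon.jpg').convert('L'))  # force a 2-D grayscale array
b = img_as_ubyte(filters.sobel(a))                         # edge magnitude rescaled to uint8
Image.fromarray(b).save('images/moon_sobel.jpg')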
Code Example #32
# -*- coding: utf-8 -*-
import cv2
from skimage.filters import sobel

image1 = cv2.imread('plane.jpg')  # read the image
image2 = cv2.imread('insect.png')  # read the image

gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)  # convert to grayscale
eq1 = cv2.equalizeHist(gray1)  # histogram-equalized grayscale image

cv2.imshow("Gray1", gray1)  # show the grayscale image
cv2.imshow("Histogram Equalization1", eq1)  # show the equalized image
sobel1 = sobel(gray1)  # compute the edge image
cv2.imshow("Sobel operator1", sobel1)  # show the edge image
################################################## same as above
gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
eq2 = cv2.equalizeHist(gray2)

cv2.imshow("Gray2", gray2)
cv2.imshow("Histogram Equalization2", eq2)
sobel2 = sobel(gray2)
cv2.imshow("Sobel operator2", sobel2)
##################################################
cv2.waitKey()  # prevent the display from freezing
cv2.destroyAllWindows()  # prevent the display from freezing
Code Example #33
     exitStr = "(applying canny-edge)"
     img = img_as_ubyte(
         canny(img, float(transform[1]), float(transform[2]),
               float(transform[3])))
     #io.imshow(img)
     #io.show()
 elif transform[0].lower() == "rank-order":
     exitStr = "(applying rank_order)"
     img, _ = rank_order(img)
     #io.imshow(img)
     #io.show()
     #elif transform[0].lower() == "resize":
     #    img = resize(img, <tuple>)
 elif transform[0].lower() == "sobel":
     exitStr = "(applying sobel)"
     img = img_as_ubyte(sobel(img))
     #io.imshow(img)
     #io.show()
 elif transform[0].lower() == "erosion":
     exitStr = "(applying erosion)"
     img = img_as_ubyte(erosion(img, square(int(transform[1]))))
     #io.imshow(img)
     #io.show()
 elif transform[0].lower() == "threshold-adaptive":
     exitStr = "(applying threshold_adaptive)"
     img = img_as_ubyte(
         threshold_adaptive(img, int(transform[1]), transform[2]))
     #io.imshow(img)
     #io.show()
 elif transform[0].lower() == "radon":
     theta = np.linspace(0., 180., max(img.shape), endpoint=False)
Code Example #34
def load_validation_dataset(car_ids_list):
    min_mask = np.ones((16, 320, 480))
    max_mask = np.zeros((16, 320, 480))
    for angle_id in range(0, 16):
        min_mask[angle_id] = misc.imread(
            'min_and_max_masks/' + 'min_' + str(angle_id) + '.jpg',
            flatten=True) / 255.
        max_mask[angle_id] = misc.imread(
            'min_and_max_masks/' + 'max_' + str(angle_id) + '.jpg',
            flatten=True) / 255.
    i = 0
    X = np.empty((len(car_ids_list) * 16, 320, 480, 12), dtype=np.float32)
    Y = np.empty((len(car_ids_list) * 16, 320, 480, 1), dtype=np.float32)
    for car_id in car_ids_list:
        for angle in [
                '01', '02', '03', '04', '05', '06', '07', '08', '09', '10',
                '11', '12', '13', '14', '15', '16'
        ]:
            image_rgb = misc.imread('resized_data/val/' + car_id + '_' +
                                    angle + '.jpg') / 255.
            image_bw = misc.imread(
                'resized_data/val/' + car_id + '_' + angle + '.jpg',
                flatten=True) / 255.
            image_all_mask = misc.imread(
                'train_all_angle_masks_resized/' + car_id + '.jpg',
                flatten=True) / 255.
            image_mask = misc.imread(
                'train_masks_resized/' + car_id + '_' + angle + '_mask.jpg',
                flatten=True).astype(int) / 255.
            image_mask = np.reshape(image_mask, (320, 480, 1))
            X[i, ..., :3] = image_rgb
            X[i, ..., 3] = image_all_mask
            X[i, ..., 4] = min_mask[int(angle) - 1]
            X[i, ..., 5] = max_mask[int(angle) - 1]
            high_contrast = filters.rank.enhance_contrast(
                image_bw, np.ones((10, 10)))
            high_contrast = filters.sobel(high_contrast)
            high_contrast = canny(high_contrast)
            high_contrast = ndi.binary_closing(high_contrast,
                                               structure=np.ones((5, 5)))
            high_contrast = ndi.binary_fill_holes(high_contrast, np.ones(
                (3, 3)))
            X[i, ..., 6] = high_contrast
            normal_contrast = filters.sobel(image_bw)
            normal_contrast = canny(normal_contrast)
            normal_contrast = ndi.binary_closing(normal_contrast,
                                                 structure=np.ones((5, 5)))
            X[i, ..., 7] = ndi.binary_fill_holes(normal_contrast,
                                                 np.ones((3, 3)))
            threshold_otsu_val = filters.threshold_otsu(image_bw)
            threshold_otsu = image_bw > threshold_otsu_val
            X[i, ..., 8] = threshold_otsu
            image_index = np.where(image_bw >= 0)
            df = pd.DataFrame()
            df['l1_dist_y'] = abs((image_index[0] - 159.5) / 159.5)
            df['l1_dist_x'] = abs((image_index[1] - 239.5) / 239.5)
            df['l2_dist'] = np.sqrt((df['l1_dist_x'])**2 +
                                    (df['l1_dist_y'])**2) / np.sqrt(2)
            X[i, ..., 9] = df.l2_dist.reshape((320, 480))
            X[i, ..., 10] = df.l1_dist_x.reshape((320, 480))
            X[i, ..., 11] = df.l1_dist_y.reshape((320, 480))
            Y[i, ...] = image_mask
            i = i + 1
    return X, Y
Code Example #35
cv2.waitKey()

# In[716]:

#plot the histogram of the grayscale image and obtain the quantile histogram values
hist = np.histogram(gray, bins=np.arange(0, 256))
plt.plot(hist[1][:-1], hist[0], lw=2)
x = np.quantile(hist[0], 0.25, axis=0)
print(x)
y = np.quantile(hist[0], 0.75, axis=0)
print(y)

# In[717]:

#apply sobel operator
elevation_map = sobel(gray)
plt.imshow(elevation_map, cmap=plt.cm.gray, interpolation='nearest')
plt.show()

# In[718]:

#Prepare masks based on the histogram quantile values and display the result
markers = np.zeros_like(gray)
markers[gray < 34] = 1
markers[gray > 158] = 2
plt.imshow(markers, cmap=plt.cm.gray, interpolation='nearest')
plt.show()

# In[719]:

#Apply watershed algorithm and display the image
Code Example #36
File: test_edges.py Project: ChiLi90/LifetimeFit
def test_sobel_zeros():
    """Sobel on an array of all zeros."""
    result = filters.sobel(np.zeros((10, 10)), np.ones((10, 10), bool))
    assert (np.all(result == 0))
Code Example #37
def train_data_generator(batch_size):
    image_list = os.listdir('resized_data/train/')
    np.random.shuffle(image_list)
    min_mask = np.ones((16, 320, 480))
    max_mask = np.zeros((16, 320, 480))
    for angle_id in range(0, 16):
        min_mask[angle_id] = misc.imread(
            'min_and_max_masks/' + 'min_' + str(angle_id) + '.jpg',
            flatten=True) / 255.
        max_mask[angle_id] = misc.imread(
            'min_and_max_masks/' + 'max_' + str(angle_id) + '.jpg',
            flatten=True) / 255.
    while 1:
        for batch_num in range(len(image_list) // batch_size):
            batch_images = image_list[batch_num *
                                      batch_size:(batch_num * batch_size) +
                                      (batch_size)]
            X = np.empty((batch_size, 320, 480, 12), dtype=np.float64)
            Y = np.empty((batch_size, 320, 480, 1), dtype=np.float64)
            for i, image_name in zip(range(batch_size), batch_images):
                image_rgb = misc.imread('resized_data/train/' +
                                        image_name) / 255.
                image_bw = misc.imread('resized_data/train/' + image_name,
                                       flatten=True) / 255.
                image_all_mask = misc.imread('train_all_angle_masks_resized/' +
                                             image_name.split('_')[0] + '.jpg',
                                             flatten=True) / 255.
                image_mask = misc.imread(
                    'train_masks_resized/' + image_name.split('.')[0] +
                    '_mask.jpg',
                    flatten=True) / 255.
                image_mask = np.reshape(image_mask, (320, 480, 1))
                X[i, ..., :3] = image_rgb
                X[i, ..., 3] = image_all_mask
                X[i, ...,
                  4] = min_mask[int(image_name.split('.')[0].split('_')[1]) -
                                1]
                X[i, ...,
                  5] = max_mask[int(image_name.split('.')[0].split('_')[1]) -
                                1]
                high_contrast = filters.rank.enhance_contrast(
                    image_bw, np.ones((10, 10)))
                high_contrast = filters.sobel(high_contrast)
                high_contrast = canny(high_contrast)
                high_contrast = ndi.binary_closing(high_contrast,
                                                   structure=np.ones((5, 5)))
                high_contrast = ndi.binary_fill_holes(high_contrast,
                                                      np.ones((3, 3)))
                X[i, ..., 6] = high_contrast
                normal_contrast = filters.sobel(image_bw)
                normal_contrast = canny(normal_contrast)
                normal_contrast = ndi.binary_closing(normal_contrast,
                                                     structure=np.ones((5, 5)))
                X[i, ..., 7] = ndi.binary_fill_holes(normal_contrast,
                                                     np.ones((3, 3)))
                threshold_otsu_val = filters.threshold_otsu(image_bw)
                threshold_otsu = image_bw > threshold_otsu_val
                X[i, ..., 8] = threshold_otsu
                image_index = np.where(image_bw >= 0)
                df = pd.DataFrame()
                df['l1_dist_y'] = abs((image_index[0] - 159.5) / 159.5)
                df['l1_dist_x'] = abs((image_index[1] - 239.5) / 239.5)
                df['l2_dist'] = np.sqrt((df['l1_dist_x'])**2 +
                                        (df['l1_dist_y'])**2) / np.sqrt(2)
                X[i, ..., 9] = df.l2_dist.reshape((320, 480))
                X[i, ..., 10] = df.l1_dist_x.reshape((320, 480))
                X[i, ..., 11] = df.l1_dist_y.reshape((320, 480))
                Y[i, ...] = image_mask
                i = i + 1
            yield X, Y
Code Example #38
File: GUI.py Project: mfox94/Hist-Net
def Applysobel(im):
    curr_im = filters.sobel(color.rgb2gray(im))
    return curr_im
Code Example #39
def edg(img):
    res = np.zeros_like(img, dtype=float)
    for i in range(img.shape[2]):
        res[:, :, i] = sobel(img[:, :, i])
    res = norm(np.linalg.norm(res, axis=2))
    return res
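sobel and norm are assumed to come from the surrounding module (together with numpy as np); a plausible stand-in for norm (an assumption, not the original helper) that rescales the combined edge magnitude to [0, 1]:

import numpy as np
from skimage.filters import sobel  # assumed import for the snippet above

def norm(arr):
    """Rescale an array to the [0, 1] range (returned unchanged if constant)."""
    arr = arr - arr.min()
    peak = arr.max()
    return arr / peak if peak > 0 else arr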
Code Example #40
File: watershed.py Project: mycarta/imagepy
        s = step(img, mark1d, pts, s, level, up, nbs)

    conner = [[v>>i&1 for i in range(ndim)] for v in range(2**ndim)]
    con1 = np.array(conner, dtype=np.int8)-1
    con2 = np.array(conner, dtype=np.int8)*3-2
    mark[tuple(con1.T)] = mark[tuple(con2.T)]
                      
    erose(mark1d)
    return mark
    
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from skimage.filters import sobel
    from skimage.data import coins
    coins = coins()
    dem = sobel(coins)
    markers = np.zeros_like(coins, dtype=np.uint16)
    markers[coins < 30] = 1
    markers[coins > 150] = 2
    plt.imshow(markers)
    plt.show()
    watershed(dem, markers)
    plt.imshow(markers)
    plt.show()
    '''
    from scipy.misc import imread
    import matplotlib.pyplot as plt
    from time import time
    
    dem = imread('ice.png')
    mark = imread('mark.png')
Code Example #41
# ax.imshow(fill_coins, cmap=plt.cm.gray)
# ax.set_title('filling the holes')
# ax.axis('off')

from skimage import morphology

coins_cleaned = morphology.remove_small_objects(fill_coins, 21)

# fig, ax = plt.subplots(figsize=(4, 3))
# ax.imshow(coins_cleaned, cmap=plt.cm.gray)
# ax.set_title('removing small objects')
# ax.axis('off')

from skimage.filters import sobel

elevation_map = sobel(coins)

# fig, ax = plt.subplots(figsize=(4, 3))
# ax.imshow(elevation_map, cmap=plt.cm.gray)
# ax.set_title('elevation map')
# ax.axis('off')

markers = np.zeros_like(coins)
markers[coins < 30] = 1
markers[coins > 150] = 2

# fig, ax = plt.subplots(figsize=(4, 3))
# ax.imshow(markers, cmap=plt.cm.nipy_spectral)
# ax.set_title('markers')
# ax.axis('off')
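The excerpt above is cut off before the watershed step (fill_coins is defined earlier in the original scikit-image tutorial); a self-contained sketch of the sobel-elevation-map plus watershed segmentation it is building toward, using the same marker thresholds:

import numpy as np
from scipy import ndimage as ndi
from skimage import data
from skimage.filters import sobel
from skimage.segmentation import watershed  # older releases expose this as skimage.morphology.watershed

coins = data.coins()
elevation_map = sobel(coins)                # edge magnitude used as the elevation map
markers = np.zeros_like(coins)
markers[coins < 30] = 1                     # background seeds
markers[coins > 150] = 2                    # coin seeds
segmentation = watershed(elevation_map, markers)
segmentation = ndi.binary_fill_holes(segmentation - 1)   # 1/2 labels -> boolean coin mask
labeled_coins, n_coins = ndi.label(segmentation)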
Code Example #42
 # tophat edges
 print("Black tophat edge detection")
 tophat = morph.black_tophat(GRAY, selem=morph.selem.disk(1))
 tophat = tophat < np.percentile(tophat, tophat_th)
 tophat = morph.remove_small_holes(tophat, area_threshold=cutoff, connectivity=2)
 foo = func.featAND_fast(MASK, tophat)
 MASK = np.logical_and(foo, MASK)
 # canny edges
 print("Canny edge detection")
 canny = feat.canny(GRAY, sigma=canny_sig)
 canny = np.invert(canny)
 foo = func.featAND_fast(MASK, canny)
 MASK = np.logical_and(foo, MASK)
 # sobel edges
 print("Sobel edge detection")
 sobel = filt.sobel(GRAY)
 sobel = sobel < np.percentile(sobel, sobel_th)
 sobel = morph.remove_small_holes(sobel, area_threshold=cutoff, connectivity=2)
 sobel = morph.thin(np.invert(sobel))
 sobel = np.invert(sobel)
 foo = func.featAND_fast(MASK, sobel)
 MASK = np.logical_and(foo, MASK)
 
 # find the remaining pixels in the mask
 idx = np.where(MASK == True)
 
 # skip if there's only a small number of pixels left
 # as this will lead to errors if the number of k-means clusters
 # becomes greater than the number of pixels
 if len(idx[0]) < 100:
     print("\nEmpty window, skipping\n")
Code Example #43
def get_segmented_image(array, marker_lower, marker_upper):
    markers = np.zeros_like(array)
    markers[array < marker_lower] = 1
    markers[array > marker_upper] = 2
    elevation_map = sobel(array)
    return watershed(elevation_map, markers)
Code Example #44
def GSfilter(image, sigma, mode):
    """Combine a Sobel and a Gaussian filter"""
    return filters.sobel(filters.gaussian(image, sigma=sigma, mode=mode))
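A hypothetical call to GSfilter (the parameter values are assumptions, and the module is assumed to import skimage.filters as filters):

from skimage import data
edges = GSfilter(data.camera(), sigma=2, mode='reflect')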
Code Example #45
 def setup(self):
     try:
         filters.sobel(np.ones((8, 8, 8)))
     except ValueError:
         raise NotImplementedError("3d sobel unavailable")
     self.image3d = data.binary_blobs(length=256, n_dim=3).astype(float)
Code Example #46
def extract_binary_masks_blob(
        A,
        neuron_radius: float,
        dims: Tuple[int, ...],
        num_std_threshold: int = 1,
        minCircularity: float = 0.5,
        minInertiaRatio: float = 0.2,
        minConvexity: float = .8) -> Tuple[np.array, np.array, np.array]:
    """
    Function to extract masks from data. It will also perform a preliminary selection of good masks based on criteria like shape and size.
    Args:
        A: scipy.sparse matrix
            contains the components as output from the CNMF algorithm
        neuron_radius: float
            neuronal radius employed in the CNMF settings (gSiz)
        num_std_threshold: int
            number of times above iqr/1.349 (std estimator) the median to be considered as threshold for the component
        minCircularity: float
            parameter from cv2.SimpleBlobDetector
        minInertiaRatio: float
            parameter from cv2.SimpleBlobDetector
        minConvexity: float
            parameter from cv2.SimpleBlobDetector
    Returns:
        masks: np.array
        pos_examples:
        neg_examples:
    """
    params = cv2.SimpleBlobDetector_Params()
    params.minCircularity = minCircularity
    params.minInertiaRatio = minInertiaRatio
    params.minConvexity = minConvexity

    # Change thresholds
    params.blobColor = 255

    params.minThreshold = 0
    params.maxThreshold = 255
    params.thresholdStep = 3

    params.minArea = np.pi * ((neuron_radius * .75)**2)

    params.filterByColor = True
    params.filterByArea = True
    params.filterByCircularity = True
    params.filterByConvexity = True
    params.filterByInertia = True

    detector = cv2.SimpleBlobDetector_create(params)

    masks_ws = []
    pos_examples = []
    neg_examples = []

    for count, comp in enumerate(A.tocsc()[:].T):
        logging.debug(count)
        comp_d = np.array(comp.todense())
        gray_image = np.reshape(comp_d, dims, order='F')
        gray_image = (gray_image - np.min(gray_image)) / \
            (np.max(gray_image) - np.min(gray_image)) * 255
        gray_image = gray_image.astype(np.uint8)

        # segment using watershed
        markers = np.zeros_like(gray_image)
        elevation_map = sobel(gray_image)
        thr_1 = np.percentile(gray_image[gray_image > 0], 50)
        iqr = np.diff(np.percentile(gray_image[gray_image > 0], (25, 75)))
        thr_2 = thr_1 + num_std_threshold * iqr / 1.35
        markers[gray_image < thr_1] = 1
        markers[gray_image > thr_2] = 2
        edges = watershed(elevation_map, markers) - 1
        # only keep largest object
        label_objects, _ = scipy.ndimage.label(edges)
        sizes = np.bincount(label_objects.ravel())

        if len(sizes) > 1:
            idx_largest = np.argmax(sizes[1:])
            edges = (label_objects == (1 + idx_largest))
            edges = scipy.ndimage.binary_fill_holes(edges)
        else:
            logging.warning('empty component')
            edges = np.zeros_like(edges)

        masks_ws.append(edges)
        keypoints = detector.detect((edges * 200.).astype(np.uint8))

        if len(keypoints) > 0:
            pos_examples.append(count)
        else:
            neg_examples.append(count)

    return np.array(masks_ws), np.array(pos_examples), np.array(neg_examples)
Code Example #47
File: test_edges.py Project: ChiLi90/LifetimeFit
def test_sobel_mask():
    """Sobel on a masked array should be zero."""
    result = filters.sobel(np.random.uniform(size=(10, 10)),
                           np.zeros((10, 10), dtype=bool))
    assert (np.all(result == 0))
Code Example #48
def sobel_watershed_method(img, op="disc", veins=False, test=False):
    if op == "disc" or op == "cup":
        to_plot = []
        img_red = img[:, :, 0]

        if test:
            to_plot.append(("Red Channel", img_red))

        img_red = skimage.util.img_as_ubyte(img_red)

        img_red = skimage.filters.gaussian(img_red, 0.1)

        if test:
            to_plot.append(("Gaussian Filter", img_red))

        img_red = enhance_contrast(img_red, disk(6))

        if test:
            to_plot.append(("Enhace Contrast", img_red))

        elevation_map = sobel(img_red)

        if test:
            to_plot.append(("gradientes", elevation_map))

        markers = np.zeros_like(img_red)

        s2 = f.target_set_mean(img_red, 8.5)

        markers[img_red < 150] = 1
        markers[img_red > s2] = 2

        seg_img = segmentation.watershed(elevation_map, markers)

        mask = (seg_img - 1) > 0

        if test:
            to_plot.append(("Sobel + WaterShed", seg_img))

        mask = binary_opening(mask, disk(2))
        mask = skimage.morphology.remove_small_objects(mask, 400)

        if test:
            to_plot.append(("Removing small objects", th.apply(img_red, mask)))

        mask = binary_closing(mask, disk(6))

        if test:
            to_plot.append(("Closing Region", th.apply(img[:, :, 0], mask)))

        mask = skimage.morphology.remove_small_objects(mask, 1700)

        if test:
            to_plot.append(
                ("Removing Big Objects", th.apply(img[:, :, 0], mask)))

        mask = binary_closing(mask, disk(6))
        mask = msr.closest_prop(img[:, :, 0], mask)

        if test:
            to_plot.append(
                ("Removing non brighter region", th.apply(img[:, :, 0], mask)))

        mask = binary_dilation(mask, disk(3))
        mask = binary_closing(mask, disk(12))

        if test:
            to_plot.append(
                ("Dilate result region", th.apply(img[:, :, 0], mask)))

        props = msr.props(img_red, mask)[0]

        minr, minc, maxr, maxc = props.bbox
        img_cut = img[minr:maxr, minc:maxc]

        if test:
            vi.plot_multy(to_plot, 3, 4, 'Sobel Watershed')

    if op == "cup":
        minr, minc, maxr, maxc = props.bbox
        img_green = img[minr:maxr, minc:maxc, 1]

        to_plot = []
        columns = 1

        if test:
            to_plot.append(("green channel", img_green))

        if not veins:
            columns = 4
            v_mask, _ = segment_veins(img_green, test)

            img_aux = closing(img_green, disk(6))
            img_aux = dilation(img_aux, disk(6))
            img_aux2 = dilation(img_green, disk(3))

            if test:
                to_plot.append(("closed + dilated", img_aux))
                to_plot.append(("dilated", img_aux2))

            img_v_closed = th.apply(img_aux, v_mask)
            img_t_dilated = th.apply(img_aux2, v_mask == False)

            if test:
                to_plot.append(("veins part", img_v_closed))
                to_plot.append(("target part", img_t_dilated))

            img_green = img_v_closed + img_t_dilated

            if test:
                to_plot.append(("without veins", img_green))

        img_green = dilation(img_green, disk(6))
        img_green = enhance_contrast(img_green, disk(10))

        elevation_map = sobel(img_green)
        markers = np.zeros_like(img_green)

        s2 = f.target_set_mean(img_green, 8.5)

        markers[img_green < 150] = 1
        markers[img_green > s2] = 2

        seg_img = segmentation.watershed(elevation_map, markers)

        mask = (seg_img - 1) > 0

        if test:
            to_plot.append(("P-tile", th.apply(img_green, mask)))

        mask1 = mask
        mask = np.zeros(img[:, :, 1].shape)
        mask[minr:maxr, minc:maxc] = mask1

        if test:
            vi.plot_multy(to_plot, 2, columns, 'cup')

    return (mask, img_cut, props)
Code Example #49
plt.show()

# Clear Borders

# Imadjust

#image_imadjust = Imadjust.imadjust(img)
#image_contrast = mahotas.stretch(img)
image_imadjust = exposure.rescale_intensity(img)
#cv2.imshow( "imadjust", image_imadjust );
print("imadjust")
plt.imshow(image_imadjust)
plt.show()

# Gradient Sobel
sobel = filters.sobel(image_imadjust)
#cv2.imshow( "gradient (sobel)", sobel );
print("gradient (sobel)")
plt.imshow(sobel)
plt.show()

# opening-by-reconstruction

erosion = cv2.erode(image_imadjust, disk(1))
Image_by_reconstruction_opening = reconstruction(erosion, image_imadjust)
Image_by_reconstruction_opening_uint8 = Image_by_reconstruction_opening.astype(
    np.uint8)
#cv2.imshow( "erosion", Image_by_reconstruction_opening_uint8 );
print("erosion")
plt.imshow(Image_by_reconstruction_opening_uint8)
plt.show()
Code Example #50
 def contrast_tenengrad(self):
     sobel_img = sobel(self.cv2_img_bw)**2
     feature_value = np.sqrt(
         np.sum(sobel_img)) / self.cv2_img_bw.size * 10000
     self.extracted_features.update({'contrast_tenengrad': feature_value})
Code Example #51
def show_pred_mask(num):
    image_rgb = misc.imread('resized_data/val/' + image_list[num]) / 255.
    image_all_mask = misc.imread('train_all_angle_masks_resized/' +
                                 image_list[num].split('_')[0] + '.jpg',
                                 flatten=True) / 255.
    image_bw = misc.imread('resized_data/val/' + image_list[num],
                           flatten=True) / 255.
    angle_id = int(image_list[num].split('.')[0].split('_')[1])
    min_mask = misc.imread(
        'min_and_max_masks/' + 'min_' + str(int(angle_id) - 1) + '.jpg',
        flatten=True) / 255.
    max_mask = misc.imread(
        'min_and_max_masks/' + 'max_' + str(int(angle_id) - 1) + '.jpg',
        flatten=True) / 255.
    X = np.empty((1, 320, 480, 12), dtype=np.float32)
    X[0, ..., :3] = image_rgb
    X[0, ..., 3] = image_all_mask
    X[0, ..., 4] = min_mask
    X[0, ..., 5] = max_mask
    high_contrast = filters.rank.enhance_contrast(image_bw, np.ones((10, 10)))
    high_contrast = filters.sobel(high_contrast)
    high_contrast = canny(high_contrast)
    high_contrast = ndi.binary_closing(high_contrast,
                                       structure=np.ones((5, 5)))
    high_contrast = ndi.binary_fill_holes(high_contrast, np.ones((3, 3)))
    X[0, ..., 6] = high_contrast
    normal_contrast = filters.sobel(image_bw)
    normal_contrast = canny(normal_contrast)
    normal_contrast = ndi.binary_closing(normal_contrast,
                                         structure=np.ones((5, 5)))
    X[0, ..., 7] = ndi.binary_fill_holes(normal_contrast, np.ones((3, 3)))
    threshold_otsu_val = filters.threshold_otsu(image_bw)
    threshold_otsu = image_bw > threshold_otsu_val
    X[0, ..., 8] = threshold_otsu
    image_index = np.where(image_bw >= 0)
    df = pd.DataFrame()
    df['l1_dist_y'] = abs((image_index[0] - 159.5) / 159.5)
    df['l1_dist_x'] = abs((image_index[1] - 239.5) / 239.5)
    df['l2_dist'] = np.sqrt((df['l1_dist_x'])**2 +
                            (df['l1_dist_y'])**2) / np.sqrt(2)
    X[0, ..., 9] = df.l2_dist.reshape((320, 480))
    X[0, ..., 10] = df.l1_dist_x.reshape((320, 480))
    X[0, ..., 11] = df.l1_dist_y.reshape((320, 480))
    pred_mask = model.predict(X).reshape((320, 480))
    pred_mask[pred_mask >= 0.5] = 1.
    pred_mask[pred_mask < 0.5] = 0.
    pred_mask = ndi.binary_closing(pred_mask, np.ones((1, 1))).astype(int)
    pred_mask = ndi.binary_fill_holes(pred_mask, np.ones((10, 10))).astype(int)
    pred_mask = morphology.binary_opening(pred_mask, np.ones((10, 10)))
    act_mask = misc.imread(
        'train_masks_resized/' + image_list[num].split('.')[0] + '_mask.jpg',
        flatten=True) / 255.
    z = skm.confusion_matrix(
        act_mask.astype(int).flatten(),
        pred_mask.astype(int).flatten())
    dice_coeff = 2 * (z[1][1]) / float(2 * z[1][1] + z[0][1] + z[1][0])
    print('Dice Coeff: ' + str(dice_coeff))
    fig = plt.figure(figsize=(13, 13))
    ax1 = fig.add_subplot(121)
    io.imshow(act_mask - pred_mask)
    ax2 = fig.add_subplot(221)
    io.imshow(image_rgb)
    plt.show()
Code Example #52
    count = count_src + count_dst
    return {
        'count': count,
        'weight': (count_src * weight_src + count_dst * weight_dst)/count
    }


def merge_boundary(graph, src, dst):
    """Call back called before merging 2 nodes.

    In this case we don't need to do any computation here.
    """
    pass

img = data.coffee()
edges = filters.sobel(color.rgb2gray(img))
labels = segmentation.slic(img, compactness=30, n_segments=400)
g = graph.rag_boundary(labels, edges)

graph.show_rag(labels, g, img)
plt.title('Initial RAG')

labels2 = graph.merge_hierarchical(labels, g, thresh=0.08, rag_copy=False,
                                   in_place_merge=True,
                                   merge_func=merge_boundary,
                                   weight_func=weight_boundary)

graph.show_rag(labels, g, img)
plt.title('RAG after hierarchical merging')

plt.figure()
Code Example #53
File: analysis.py Project: cartemic/DetResearch
def run(image_path,
        fft_pass,
        delta_px,
        delta_mm,
        bg_subtract=True,
        to_keep=None,
        return_plot_outputs=False):
    """
    todo: clean up this documentation

    Parameters
    ----------
    image_path
    fft_pass : list or tuple or np.array
        list of:
            [0] - angular reject band
                reject base angle +/- angular reject band
            [1] - safe radius
                radius about center where band reject is ignored
    Returns
    -------

    """
    # read in image
    img_base = load_image(image_path)
    if bg_subtract:
        img_base -= gaussian_filter(img_base, 30)
    img_base = image.grayscale(img_base)

    # get power spectral density
    psd = image.calc_psd(img_base)

    # get x,y positions and angles for each pixel
    xc, yc = image.get_center(psd)
    psd_x, psd_y = image.get_xy(psd)
    psd_x_img = image.ax_to_img_coords(psd_x, xc)
    psd_y_img = -image.ax_to_img_coords(psd_y, yc)
    rad = image.get_radius(psd_x_img, psd_y_img)
    ang = image.get_angle(psd_x_img, psd_y_img)

    # build and apply FFT mask
    reject_band = 20
    reject_mask = image.get_reject_mask(ang, rad, [0, 90, 180, 270],
                                        reject_band, fft_pass[1])

    fft = np.fft.fftshift(np.fft.fft2(img_base))
    psd_masked = image.grayscale(reject_mask * psd)  # reject only
    r_max = 250  # cut down on useless calcs by only looking at middle of psd
    best_angle_0 = image.find_best_angle(
        psd_masked[xc - r_max:xc + r_max, yc - r_max:yc + r_max],
        (reject_band, 90 - reject_band))
    best_angle_1 = image.find_best_angle(
        psd_masked[xc - r_max:xc + r_max, yc - r_max:yc + r_max],
        (90 + reject_band, 180 - reject_band))
    pass_mask = image.get_pass_mask(
        ang, rad,
        [best_angle_0, best_angle_1, 180 + best_angle_1, 180 + best_angle_0],
        fft_pass[0], fft_pass[1])
    psd_masked = image.grayscale(pass_mask * psd)  # pass only
    filtered = image.grayscale(
        np.real(np.fft.ifft2(np.fft.ifftshift(pass_mask * fft))), )

    # apply sobel filter
    edges = image.grayscale(sobel(filtered))

    # get edge detected PSD and recalculate best angles
    psd_final = image.calc_psd(edges)
    best_angle_0 = image.find_best_angle(
        psd_final[xc - r_max:xc + r_max, yc - r_max:yc + r_max],
        (reject_band, 90 - reject_band))
    best_angle_1 = image.find_best_angle(
        psd_final[xc - r_max:xc + r_max, yc - r_max:yc + r_max],
        (90 + reject_band, 180 - reject_band))

    # perform radial intensity scan
    radius_0, int_radius_0 = image.get_radial_intensity(
        psd_final,
        best_angle_0,
    )
    radius_1, int_radius_1 = image.get_radial_intensity(
        psd_final,
        best_angle_1,
    )

    # find peaks
    dist_mask_0 = (radius_0 > 0)
    idx_pks_0 = argrelmax(int_radius_0[dist_mask_0])[0]
    dist_mask_1 = (radius_1 > 0)
    idx_pks_1 = argrelmax(int_radius_1[dist_mask_1])[0]

    # collect, filter and rescale measurements
    df_cells_0 = get_measurements_from_radial_scan(
        radius_0[dist_mask_0][idx_pks_0],
        int_radius_0[dist_mask_0][idx_pks_0],
        delta_px,
        delta_mm,
        best_angle_0,
        psd_final.shape[0],
        to_keep=to_keep,
        save_original=True)
    df_cells_0["Theta"] = best_angle_0
    df_cells_1 = get_measurements_from_radial_scan(
        radius_1[dist_mask_1][idx_pks_1],
        int_radius_1[dist_mask_1][idx_pks_1],
        delta_px,
        delta_mm,
        best_angle_1,
        psd_final.shape[0],
        to_keep=to_keep,
        save_original=True)
    df_cells_1["Theta"] = best_angle_1
    df_cells = pd.concat((df_cells_0, df_cells_1)).reset_index(drop=True)
    df_cells["Relative Energy"] = rescale_energy(df_cells["Intensity"])

    if return_plot_outputs:
        out = [
            df_cells.sort_values(["Relative Energy"],
                                 ascending=False).reset_index(drop=True),
            dict(
                image_filtering=[
                    img_base,
                    psd,
                    psd_masked,
                    filtered,
                    edges,
                    psd_final,
                ],
                measurements=[(radius_0[dist_mask_0], radius_1[dist_mask_1]),
                              (int_radius_0[dist_mask_0],
                               int_radius_1[dist_mask_1]), df_cells, to_keep],
            )
        ]

    else:
        out = df_cells.sort_values(["Relative Energy"],
                                   ascending=False).reset_index(drop=True)

    return out
Code Example #54
from skimage import data, io, filters
from skimage import img_as_ubyte

for x in range(1, 4000):
	#print("Processing image" + str(x))
	image = io.imread("./images_jpg/"+ str(x) + ".jpg")
	# ... or any other NumPy array!
	edges = img_as_ubyte(filters.sobel(image[:,:,0]))
	#io.imshow(edges)
	io.imsave(("./result/"+ str(x)+".jpg"), edges)
	
Code Example #55
 def process_IN_CREATE(self,event):
     f = open(r"/home/wangxinhua/level1/Level1rev04/json.txt",'r')
     para = json.load(f)
     f.close()
     f = open(r'/home/wangxinhua/flag.txt','r')
     path = f.readline()
     f.close()
     path = path+'/HA'#"/home/wangxinhua/20190518/HA"
     redrive = para['redrive']#"/home/wangxinhua/nvst"
     darked_path = para['darked_path']
     rcxsize = int(para['rcxsize'])
     rcysize = int(para['rcysize'])
     corstart = re.findall('\d+',para['corstart'])
     corstart = [int(i) for i in corstart]
     corsize = re.findall('\d+',para['corsize'])
     corsize = [int(i) for i in corsize]
     sobel = int(para['sobel'])
     only_align_no_luckyimage = int(para['only_align_no_luckyimage'])
     redrive = para['redrive']
     only_align_no_luckyimage_path = para['only_align_no_luckyimage_path']
     pfstart = re.findall('\d+',para['pfstart'])
     pfstart = [int(i) for i in pfstart]
     pfsize = re.findall('\d+',para['pfsize'])
     pfsize = [int(i) for i in pfsize]
     lucky_align_path = para['lucky_align_path']
     win=xyy.win_gpu(int(pfsize[0]),int(pfsize[1]),0.5,winsty='hann')     # ---- window function
     diameter = float(para['diameter'])
     wavelen = float(para['wavelen'])
     pixsca = float(para['pixsca'])
     fsp = float(para['fsp'])
     srstx = int(para['srstx'])
     srsty = int(para['srsty'])
     srxsize = int(para['srxsize'])
     srysize = int(para['srysize'])
     postprocess_flag = int(para['postprocess_flag'])
     srsize = int(para['srsize'])
     winsr=xyy.win_gpu(srsize,srsize, 0.5, winsty='hann')
     diaratio = float(para['diaratio'])
     start_r0 = float(para['start_r0'])
     step_r0 = float(para['step_r0'])
     maxfre=wavelen*10.0**(-10.0)/(2.0*diameter*pixsca)*(180.0*3600.0/np.pi)
     filename = para['filename']
     sitfdata=cp.array(fits.getdata(filename),'<f4')
     gussf=xyy.gaussf2d_gpu(rcxsize,rcysize,1.5)
     infrq=(pfsize[0]//2)*0.05/maxfre
     otfrq=(pfsize[0]//2)*0.10/maxfre
     datapath, flatpath, darkpath = xyy.path_paser(path)
     new_path = event.pathname
     if_has_next_folder = new_path[:-6]
     a = 1
     while a:
         time_new = [int(i) for i in os.listdir(if_has_next_folder)]
         if np.any(int(new_path[-6:]) < np.array(time_new)):
             # the folder is full (a newer folder already exists)
             # assumed placeholder: take the dark/flat frames from darkpath/flatpath
             # (how these calibration frames are actually built is not shown here)
             dark = xyy.readfits(os.path.join(darkpath, os.listdir(darkpath)[0]))[0]
             flat = xyy.readfits(os.path.join(flatpath, os.listdir(flatpath)[0]))[0]
             datafits = os.listdir(new_path)
             a = 0
             numb = len(datafits)
             cubedata = cp.empty([numb,rcxsize,rcysize],dtype='float32')
             t = 0
             for i in datafits:
                 data = xyy.readfits(os.path.join(new_path,i))[0]
                 cubedata[t,:,:] = cp.array((data-dark)/(flat-dark)*np.max(flat-dark),dtype='<f4')[0:rcxsize,0:rcysize]
                 t += 1
             ini = cubedata[0,:,:]
             initmp = ini[corstart[0]:corstart[0]+corsize[0],corstart[1]:corstart[1]+corsize[1]]
             #initmp_gpu = cp.asarray(initmp) 
             print('basefile:' + os.path.join(new_path, datafits[0]))
             if sobel == 1:
                 initmp = filters.sobel(filters.gaussian(initmp,5.0))
         
             t = 0
             #align 
             head = fits.getheader(os.path.join(new_path, datafits[0]))
             for j in range(1,numb):
                 data = cubedata[j,:,:]
                 datatmp = data[corstart[0]:corstart[0]+corsize[0],corstart[1]:corstart[1]+corsize[1]]
                 
                 if sobel == 1:
                     datatmp = filters.sobel(filters.gaussian(datatmp,5.0))
                 #datatmp_gpu = cp.asarray(datatmp)
                 cc,corr = xyy.corrmaxloc_gpu(initmp,datatmp)
             
                 tmp = xyy.imgshift_gpu(data,[-cc[0],-cc[1]])  # aligned (shifted) frame
             
                 if only_align_no_luckyimage == 1:
                     # no frame selection, stack directly
                     print('align-only mode (no frame selection)')
                     ini += tmp
                 else:
                     #print('frame-selection + alignment mode')
                     #100,1024,1028
                 
                     cubedata[j,:,:] = tmp[0:rcxsize,0:rcysize]
         
         
             cubepf=cubedata[:,pfstart[0]:pfstart[0]+pfsize[0],pfstart[1]:pfstart[1]+pfsize[1]]
             cubemean=cp.mean(cubepf, axis=0)
             psdcube = cp.empty([numb,pfsize[0],pfsize[1]], dtype=cp.float32) 
                     
             for nn in range(numb):
                 tmp=cubepf[nn,:,:].copy()
                 meantmp=cp.mean(tmp)
                 tmp=(tmp-meantmp)*win+meantmp
                 psd=cp.abs(cp.fft.fftshift(cp.fft.fft2(tmp)))**2
                 psd=(psd/psd[pfsize[0]//2,pfsize[1]//2]).astype(cp.float32)
                 psdcube[nn,:,:]=psd   
             psdmean=cp.mean(psdcube, axis=0)
             psdcube=psdcube/psdmean
             [Y,X]=cp.meshgrid(cp.arange(pfsize[1]),cp.arange(pfsize[0])) 
             dist=((X-pfsize[0]//2)**2.0+(Y-pfsize[1]//2)**2.0)**0.5
             ring=cp.where((dist>=infrq)&(dist<=otfrq), 1.0, 0.0).astype(cp.float32)
             psdcube=psdcube*ring
             ringcube=cp.mean(cp.mean(psdcube, axis=1),axis=1)
             index0=cp.argsort(ringcube)[::-1]
                 #---------------------------------------------------------------------------------------
                 #--------------------------------  take the top ** frames after sorting, correlation-align again, and stack
                 #################
                     
             #cube = cp.asnumpy(cube)
             #index0 = cp.asnumpy(index0)
                 #################
                     
                 #cubesort0=cube.copy()[index0][0:int(fsp*numb),:,:]
             cubesort0=cubedata.copy()[index0][0:int(fsp*numb),:,:]
                 ########################
             #cubesort0 = cp.array(cubesort0)
             #cube = cp.array(cube,dtype='<f4')
                 ########################
                     
             ini=cp.mean(cubesort0, axis=0).astype(cp.float32)
             initmp=ini[corstart[0]:corstart[0]+corsize[0],corstart[1]:corstart[1]+corsize[1]]
             if sobel==1:
                 initmp=filters.sobel(filters.gaussian(cp.asnumpy(initmp),5.0))      
               
                     
                     # ----------------------   align
             for nn in range(cubesort0.shape[0]):                        
                 data=cubesort0[nn,:,:].copy()
                 datatmp=data[corstart[0]:corstart[0]+corsize[0],corstart[1]:corstart[1]+corsize[1]]
                 if sobel==1:
                     datatmp=filters.sobel(filters.gaussian(cp.asnumpy(datatmp),5.0))
                                   
                         #datatmp_gpu=cp.asarray(datatmp)
                 cc,corr=xyy.corrmaxloc_gpu(initmp, datatmp)
                         #cc,corr = xyy.corrmaxloc(initmp,datatmp)
                         ####cc,corr=xyy.corrmaxloc(initmp, datatmp)
                         
                 tmp=xyy.imgshift_gpu(data,[-cc[0],-cc[1]])
                 cubesort0[nn,:,:]=tmp
                     #print(tmp)
         
             averg=cp.mean(cubesort0, axis=0).astype(cp.float32)  # stack
                   
             
             if only_align_no_luckyimage == 1:
                 averg = ini / numb  # mean of the numb frames accumulated above
         #----------------------------    frame selection (1: compute power spectra, 2: ring integration, 3: sort)
         
         #.................................................
             aligned_path = '/home/wangxinhua/Desktop/align'+'/'.join(path.split('/')[path.split('/').index('Desktop')+1:])+'/aligned'
             try:
                 print('location of aligned:'+path+os.path.splitdrive(aligned_path)[1])
             except Exception as e:
                 print('location of aligned:'+aligned_path)
             if only_align_no_luckyimage == 1:
                 try:
                     os.mkdir(path+os.path.splitdrive(aligned_path)[1])
                 except Exception as e:
                     print('warning: ' + aligned_path + ' already exists')
              
                 xyy.writefits(path+os.path.splitdrive(aligned_path)[1]+'/'+'aligned.fits',cp.asnumpy(averg))
             
             else:
                 try:
                     os.mkdir(path+os.path.splitdrive(aligned_path)[1])
                 except Exception as e:
                     #print(path+aligned_path+'existed')
                     xyy.mkdir(aligned_path)
             
                 xyy.writefits(aligned_path+'/'+'aligned.fits',cp.asnumpy(averg))
             
             # deconvolution
             if postprocess_flag == 1:
                 cubesr=cubedata[:,srstx:srstx+srxsize,srsty:srsty+srysize]
             
                 try:
                     r0,index=xyy.cubesrdevr0_gpu(cubesr,srsize,winsr,sitfdata,diameter,diaratio,maxfre,0.00,0.06,start_r0,step_r0)
                 except Exception as e:
                     #print(cube)
                     print(cubesr)
                     sys.exit()
                 sitf=xyy.GetSitf_gpu(sitfdata,maxfre,rcxsize,index)
      
                 img=xyy.ImgPSDdeconv_gpu(averg,sitf)
                 
                 head['CODE2'] = r0
                 
                 result=xyy.ImgFilted_gpu(img,gussf)
                 
                 result=result/np.median(cp.asnumpy(result))*np.median(cp.asnumpy(averg))
                 try:
                     fitsname = redrive+os.path.splitdrive(aligned_path)[1]+'/'+'post_aligned.fits'
                     xyy.mkdir(redrive+os.path.splitdrive(aligned_path)[1])
                 except Exception as e:
                     xyy.mkdir(os.path.join(redrive,i,'aligned'))
                     fitsname = os.path.join(redrive,i,'aligned','post_aligned.fits')
                 xyy.writefits(fitsname,cp.asnumpy(result).astype(np.float32),head)
                 #plt.imshow(result)
                 # print('align is over')
         else:
             a = 1
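The snippet above packs dark/flat correction, correlation alignment, power-spectrum frame selection and deconvolution into one handler. As a reading aid, here is a stand-alone numpy sketch of just the frame-selection ("lucky imaging") idea: rank frames by the energy of their normalized power spectra inside a mid-frequency ring. The window size, ring radii and random test data are illustrative only.

import numpy as np

rng = np.random.default_rng(0)
frames = rng.normal(size=(20, 128, 128))            # stand-in for cubepf

n = frames.shape[1]
win = np.outer(np.hanning(n), np.hanning(n))        # 2-D Hann window
psd_cube = np.empty_like(frames)
for k, frame in enumerate(frames):
    tmp = (frame - frame.mean()) * win + frame.mean()
    psd = np.abs(np.fft.fftshift(np.fft.fft2(tmp))) ** 2
    psd_cube[k] = psd / psd[n // 2, n // 2]         # normalize by the DC term

psd_cube /= psd_cube.mean(axis=0)                   # relative to the mean PSD
y, x = np.meshgrid(np.arange(n), np.arange(n))
dist = np.hypot(x - n // 2, y - n // 2)
ring = (dist >= 5) & (dist <= 15)                   # mid-frequency annulus
score = psd_cube[:, ring].mean(axis=1)
best = np.argsort(score)[::-1]                      # sharpest frames first
print(best[:5])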
コード例 #56
0
#from skimage import io
from skimage import io  #, data, filters #data, io, filters

#import scikit.image.io
#import scipy #.image.io
import cv2

# loop over the image URLs
#for url in urls:
# download the image using scikit-image
#print "downloading %s" % (url)
url = r"http://twenkid.com/img/saitut-se-risuva.gif"
image = io.imread(url)
cv2.imshow("Incorrect", image)
cv2.imshow("Correct", cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

from skimage import data, filters

image = data.coins()
# ... or any other NumPy array!
edges = filters.sobel(image)
io.imshow(edges)
io.show()

cv2.waitKey(0)
コード例 #57
0
def make_merge_plot(mask, input_data):
    # outline the mask with a Sobel filter and overlay the boundaries
    # on the inverted, max-normalized input image
    borders = sobel(mask)
    borders[borders > 0] = 1
    merge_plot = 1 - input_data/np.max(input_data) + borders * 0.3
    return merge_plot
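A hedged usage sketch for the helper above: mask and input_data just need matching shapes, so skimage sample data stands in for the project's real arrays, and the imports are the ones the function itself relies on.

import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.filters import sobel, threshold_otsu

input_data = data.coins().astype(float)
mask = (input_data > threshold_otsu(input_data)).astype(float)

plt.imshow(make_merge_plot(mask, input_data), cmap="gray")
plt.show()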
コード例 #58
0
 def time_sobel(self):
     filters.sobel(self.image)
コード例 #59
0
    if magnification == 7 or magnification == 20:
        markers[photo_grey_nobg <= 110] = 1
        markers[photo_grey_nobg > 110] = 0
    if magnification == 50:
        markers[photo_grey_nobg <= 95] = 1
        markers[photo_grey_nobg > 95] = 0
    if magnification == 115:
        markers[photo_grey_nobg <= 95] = 1
        markers[photo_grey_nobg > 95] = 0
    
    markers_fill = ndi.morphology.binary_fill_holes(markers)
    labeled_particles, _ = ndi.label(markers_fill)

    #Create a mask of insides of the particles, identify edges from sobel filter and mask out the inside of particles
    #Need to mask out center of particles so edges are not detected on the interior of the particle (only want to detect particles whose outside edges are sharp).
    particle_erode = morphology.erosion(markers_fill, selem = morphology.disk(1))
    elevation_map = sobel(photo_grey_nobg)
    elevation_map[particle_erode==True]=0

    ##Identify sharpest edges from the sobel filter##
    #Also need to change the threshold of what is considered "sharp" as magnification increases, because it is more difficult to get crisp images as you zoom in (especially on a rocking ship)
    edges = np.zeros_like(photo_grey_nobg)
    if magnification == 7 or magnification == 20:
        edges[elevation_map > 10] = 255
    if magnification == 50:
        edges[elevation_map > 8] = 255
    if magnification == 115:
        edges[elevation_map > 7] = 255
    labeled_edges , _ = ndi.label(edges)

    ##Identify only in-focus particles by particle indexes that overlap with edge indexes##
    infocus_object_img = np.zeros_like(labeled_particles)
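A simplified, self-contained sketch of the in-focus/edge-sharpness idea above, using skimage sample data in place of photo_grey_nobg and a single illustrative threshold instead of the per-magnification values.

import numpy as np
from scipy import ndimage as ndi
from skimage import data, morphology
from skimage.filters import sobel, threshold_otsu

photo = data.coins().astype(float)

markers = photo > threshold_otsu(photo)            # rough particle mask
markers_fill = ndi.binary_fill_holes(markers)
labeled_particles, _ = ndi.label(markers_fill)

# mask out the particle interiors so only the outer edges contribute
particle_erode = morphology.erosion(markers_fill, morphology.disk(1))
elevation_map = sobel(photo)
elevation_map[particle_erode] = 0

edges = np.zeros_like(photo)
edges[elevation_map > 10] = 255                    # illustrative "sharp enough" cut
labeled_edges, _ = ndi.label(edges)
print(labeled_edges.max(), "edge regions found")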
コード例 #60
0
def transform_segmap(imgs, segmap, sigma=2, truncate=4.0):
    """
    imgs : list of images in different bands as numpy arrays
    segmap : the segmap from which labels can be drawn
    sigma : passed as sigma to scipy.ndimage.filters.gaussian_filter
    truncate : passed as truncate to scipy.ndimage.filters.gaussian_filter

    Uses a Gaussian to blur the input images and broaden the passed-in segmap
    """
    union_segmap = np.zeros_like(segmap)

    # create blurred segmaps
    for img in imgs:
        img = gaussian_filter(_cap_at_1(img), sigma=sigma, truncate=truncate)
        img_map = sobel(img)
        y, x = np.histogram(img, bins=100)

        split_idx = _get_split_idx(y)
        markers = np.zeros_like(img)
        markers[img < x[split_idx]] = 1
        markers[img > x[split_idx + 1]] = 2

        new_segmap = morphology.watershed(img_map, markers)

        union_segmap = _cap_at_1(union_segmap + (new_segmap - 1))

    # merge and label with passed in segmap
    dim1 = segmap.shape[0]
    dim2 = segmap.shape[1]

    for i in range(dim1):
        for j in range(dim2):
            # if the pixel has a value in the given segmap then give it that value
            if segmap[i, j] > 0:
                union_segmap[i, j] = segmap[i, j]
            # if our new segmap has a value that isn't in the given segmap then
            # find the closest label and assign it
            elif union_segmap[i, j] > 0:
                # NAIVE IMPLEMENTATION FIND A BETTER ALGORITHM!!!!!
                coords = None
                min_dist = hypot(dim1, dim2)
                for k in range(dim1):
                    for l in range(dim2):
                        if segmap[k, l] > 0 and hypot(i - k, j - l) < min_dist:
                            coords = (k, l)
                            min_dist = hypot(i - k, j - l)

                union_segmap[i, j] = segmap[coords[0], coords[1]]

    # TODO confirm that this logic will always be true
    img_id = segmap[dim1 // 2, dim2 // 2]

    # TODO integrate this into the loop above
    for i in range(dim1):
        vals = np.unique(union_segmap[i, :])
        # are there two sources in the row and is one of them the central source
        if len(vals) > 2 and img_id in np.unique(union_segmap[i, :]):
            for j in range(dim2):
                if union_segmap[i, j] > 0 and union_segmap[i, j] != img_id:
                    _replace_overlapping_sources((i, j), union_segmap, img_id)

    return union_segmap
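The marker/watershed core of transform_segmap, reduced to a stand-alone sketch on skimage sample data: the project's _cap_at_1 and _get_split_idx helpers are replaced here by a plain division by 255 and an Otsu threshold, so this only approximates the original marker choice (watershed is imported from skimage.segmentation, where current scikit-image versions keep it).

import numpy as np
from scipy.ndimage import gaussian_filter
from skimage import data
from skimage.filters import sobel, threshold_otsu
from skimage.segmentation import watershed

img = data.coins() / 255.0
img = gaussian_filter(img, sigma=2, truncate=4.0)
img_map = sobel(img)                                # elevation map for the watershed

t = threshold_otsu(img)
markers = np.zeros(img.shape, dtype=int)
markers[img < t - 0.05] = 1                         # confident background
markers[img > t + 0.05] = 2                         # confident foreground

new_segmap = watershed(img_map, markers)
union_segmap = new_segmap - 1                       # 0 = background, 1 = source
print(union_segmap.sum(), "foreground pixels")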