Example 1
import numpy as np
from scipy.ndimage import binary_fill_holes as imfill


def get_polygon_confidence(poly, proba_matrix, class_idx):
    # Rasterize the polygon's exterior vertices onto a mask the size of the probability map.
    poly_coordinates_x, poly_coordinates_y = poly.exterior.coords.xy
    poly_coordinates_x = np.asarray(poly_coordinates_x).astype(int)
    poly_coordinates_y = np.asarray(poly_coordinates_y).astype(int)
    mask = np.zeros((proba_matrix.shape[2], proba_matrix.shape[3]), dtype=bool)
    mask[poly_coordinates_y, poly_coordinates_x] = True
    # Average the class probabilities over the hole-filled polygon interior.
    img_proba = proba_matrix[0, class_idx, :, :]
    filled = imfill(mask)
    total_proba = img_proba[filled].mean()
    return total_proba
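A minimal usage sketch with made-up data. It assumes poly is a shapely Polygon whose exterior vertices densely trace the region boundary in pixel (x, y) coordinates (as produced by typical mask-to-polygon vectorization), so the rasterized vertices form a closed contour that binary_fill_holes can fill:

import numpy as np
from shapely.geometry import Polygon

proba = np.random.rand(1, 3, 128, 128)   # made-up (batch, classes, H, W) probabilities

# Square whose boundary is sampled at every pixel, so the rasterized outline is closed
side = list(range(20, 80))
ring = ([(x, 20) for x in side] + [(79, y) for y in side]
        + [(x, 79) for x in reversed(side)] + [(20, y) for y in reversed(side)])
poly = Polygon(ring)

score = get_polygon_confidence(poly, proba, class_idx=1)
print(score)   # mean class-1 probability inside the square, ~0.5 for random data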
Example 2
import cv2
import numpy as np
from scipy.ndimage import binary_fill_holes as imfill


def area_mask(img, polygon, neg_polygon=[]):
    # Return the pixels of img covered by the (hole-filled) positive polygon(s),
    # after carving out any negative polygon(s).
    mask = np.zeros(img.shape[:2], dtype=np.uint8)

    # Positive polygon(s): rasterize with value 1
    if isinstance(polygon, list):
        for pol in polygon:
            mask = cv2.fillConvexPoly(mask, pol, 1)
    else:
        mask = cv2.fillConvexPoly(mask, polygon, 1)

    # Negative polygon(s): set back to 0
    if isinstance(neg_polygon, list):
        for pol in neg_polygon:
            mask = cv2.fillConvexPoly(mask, pol, 0)
    else:
        mask = cv2.fillConvexPoly(mask, neg_polygon, 0)

    mask = mask.astype(bool)

    # Fill any holes in the mask and return the selected pixels
    out = img[imfill(mask)]
    return out
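A minimal usage sketch with made-up data: cv2.fillConvexPoly expects integer vertex coordinates, so a convex polygon can be passed as an (N, 2) int32 array.

import numpy as np

img = np.random.randint(0, 255, (128, 128, 3), dtype=np.uint8)   # made-up image
tri = np.array([[10, 10], [100, 20], [50, 90]], dtype=np.int32)  # convex polygon vertices
pixels = area_mask(img, tri)   # (n_pixels, 3) array of pixel values inside the triangle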
Example 3
    def CreateSolidVol(self, obj=None, vRes=96, buf=4):
        """
        Searches for extant .voxverts files in <LibDir>/Objects/VOL_Files/, and from them creates
        3D, filled object mask matrices.
        Saves this voxelized version of an object as a .vol file in the <LibDir>/Objects/VOL_Files/ directory.

        Cannot be called from inside Blender, since it relies on numpy.

        Volume for the voxelized object mask is vRes+buf (usually 96+4=100) to allow for a couple voxels' worth
        of "wiggle room" for imprecise scalings of objects (not all will be exactly 10 units - that part
        of object creation is manual and can be difficult to get exactly right).

        Voxelizations are used to create shape skeletons in subsequent processing.

        Since the voxelized mesh surface of an object qualifies as meta-data about the object,
        this function might be expected to be a method of the RenderOptions class. However, it
        isn't directly used by any models (yet); thus it has been kept in a separate place, alongside
        the data about real-world size, number of mesh vertices, etc.

        """
        # Imports
        import re, os
        from scipy.ndimage import binary_fill_holes as imfill  # Fills holes in multi-dim images

        if not obj:
            obj = self.objects
        for o in obj:
            # Check for existence of .verts file:
            ff = '%s_%s.%dx%dx%d.verts' % (
                o['semantic_category'][0].capitalize(), o['name'], vRes + buf,
                vRes + buf, vRes + buf * 2)
            fNm = os.path.join(Settings['Paths']['LibDir'], 'Objects',
                               'VOL_Files', ff)
            if not os.path.exists(fNm):
                if verbosity_level > 3:
                    print('Could not find .verts file for %s' % o['name'])
                    print('(Searched for %s)' % fNm)
                continue
            # Get voxelized vert list
            with open(fNm, 'r') as fid:
                Pt = fid.readlines()
            vL = np.array([[float(x) for x in k.split(', ')] for k in Pt])
            # Get dimensions
            dim = [len(np.unique(vL.T[i])) for i in range(3)]
            # Create blank matrix
            z = np.zeros((vRes + buf, vRes + buf, vRes + buf * 2), dtype=bool)
            # Normalize matrix to indices for volume
            vLn = vL / (10. /
                        vRes) - .5 + buf / 2.  # .5 is a half-voxel shift down
            vLn.T[0:2] += vRes / 2.  # Move X, Y to center
            vLn.T[2] += buf / 2.  # Move Z up (off floor) by "buf"/2 again
            # Check for closeness of values to rounded values
            S = np.sqrt(np.sum((np.round(vLn) - vLn)**2)) / len(vLn.flatten())
            if S > .001:
                raise Exception(
                    'Your voxelized coordinates do not round to whole number indices!'
                )
            # Index into volume
            idx = np.round(vLn).astype(int)
            z[tuple(idx.T)] = True
            # Fill holes w/ python
            # May need fancier strel (structure element - 2nd argument) for some objects
            hh = imfill(z)
            # Trim?? for more efficient computation?
            # ?
            # Save volume in binary format for pfSkel (or other) code:
            PF = o['fname']
            fDir = os.path.split(PF)[0]
            Cat = re.search('(?<=Category_)[^_^.]*', PF).group()
            Res = '%dx%dx%d' % (vRes + buf, vRes + buf, vRes + buf + buf)
            fName = os.path.join(fDir, 'VOL_Files',
                                 Cat + '_' + o['name'] + '.' + Res + '.vol')
            # Write to binary file
            print('Saving %s' % fName)
            with open(fName, 'wb') as fid:
                hh.T.tofile(fid)  # Transpose to put it in column-major form...
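The key step above is binary_fill_holes, which turns a voxelized surface shell into a solid volume. A small self-contained illustration on a synthetic hollow cube:

import numpy as np
from scipy.ndimage import binary_fill_holes as imfill

shell = np.zeros((20, 20, 20), dtype=bool)
shell[5:15, 5:15, 5:15] = True
shell[6:14, 6:14, 6:14] = False   # hollow out the cube, leaving only its surface
solid = imfill(shell)
assert solid[10, 10, 10]          # the interior voxels are filled in the result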
Example 4
	def CreateSolidVol(self, obj=None, vRes=96, buf=4):
		"""
		Searches for extant .voxverts files in <LibDir>/Objects/VOL_Files/, and from them creates
		3D, filled object mask matrices.
		Saves this voxelized version of an object as a .vol file in the <LibDir>/Objects/VOL_Files/ directory.

		Cannot be called from inside Blender, since it relies on numpy.

		Volume for the voxelized object mask is vRes+buf (usually 96+4=100) to allow for a couple voxels' worth
		of "wiggle room" for imprecise scalings of objects (not all will be exactly 10 units - that part
		of object creation is manual and can be difficult to get exactly right).

		Voxelizations are used to create shape skeletons in subsequent processing.

		Since the voxelized mesh surface of an object qualifies as meta-data about the object,
		this function might be expected to be a method of the RenderOptions class. However, it
		isn't directly used by any models (yet); thus it has been kept in a separate place, alongside
		the data about real-world size, number of mesh vertices, etc.

		"""
		# Imports
		import re, os
		from scipy.ndimage import binary_fill_holes as imfill # Fills holes in multi-dim images
		
		if not obj:
			obj = self.objects
		for o in obj:
			# Check for existence of .verts file:
			ff = '%s_%s.%dx%dx%d.verts'%(o['semantic_category'][0].capitalize(), o['name'], vRes+buf, vRes+buf, vRes+buf*2)
			fNm = os.path.join(bvp.Settings['Paths']['LibDir'], 'Objects', 'VOL_Files', ff)
			if not os.path.exists(fNm):
				if bvp.Verbosity_Level>3:
					print('Could not find .verts file for %s'%o['name'])
					print('(Searched for %s)'%fNm)
				continue
			# Get voxelized vert list
			with open(fNm, 'r') as fid:
				Pt = fid.readlines()
			vL = bvp.np.array([[float(x) for x in k.split(', ')] for k in Pt])
			# Get dimensions 
			dim = [len(bvp.np.unique(vL.T[i])) for i in range(3)]
			# Create blank matrix
			z = bvp.np.zeros((vRes+buf, vRes+buf, vRes+buf*2), dtype=bool)
			# Normalize matrix to indices for volume
			vLn = vL/(10./vRes) -.5 + buf/2. # .5 is a half-voxel shift down
			vLn.T[0:2]+= vRes/2. # Move X, Y to center
			vLn.T[2] += buf/2. # Move Z up (off floor) by "buf"/2 again
			# Check for closeness of values to rounded values
			S = bvp.np.sqrt(bvp.np.sum((bvp.np.round(vLn)-vLn)**2))/len(vLn.flatten())
			if S>.001:
				raise Exception('Your voxelized coordinates do not round to whole number indices!')
			# Index into volume
			idx = bvp.np.round(vLn).astype(int)
			z[tuple(idx.T)] = True
			# Fill holes w/ python 
			# May need fancier strel (structure element - 2nd argumnet) for some objects
			hh = imfill(z)
			# Trim?? for more efficient computation? 
			# ?
			# Save volume in binary format for pfSkel (or other) code:
			PF = o['fname']
			fDir = os.path.split(PF)[0]
			Cat = re.search('(?<=Category_)[^_^.]*', PF).group()
			Res = '%dx%dx%d'%(vRes+buf, vRes+buf, vRes+buf+buf)
			fName = os.path.join(fDir, 'VOL_Files', Cat+'_'+o['name']+'.'+Res+'.vol')
			# Write to binary file
			print('Saving %s'%fName)
			with open(fName, 'wb') as fid:
				hh.T.tofile(fid) # Transpose to put it in column-major form...
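Since the .vol file is written as the raw boolean bytes of hh.T in C order, it can presumably be read back by reversing that layout; a sketch under that assumption (file name and dimensions are hypothetical):

import numpy as np

vRes, buf = 96, 4
vol = np.fromfile('Category_Name.100x100x104.vol', dtype=bool)   # hypothetical path
vol = vol.reshape((vRes + buf * 2, vRes + buf, vRes + buf)).T
# vol now has shape (vRes + buf, vRes + buf, vRes + buf * 2), matching hh above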
Example 5
# Third-party imports used below; the remaining helpers (bdry_extract_3,
# get_samples_1, sc_compute, hist_cost_2, bookenstain, dist2, hungarian) are
# assumed to be project-local modules importable in this file.
import numpy as np
import matplotlib.pyplot as plt
from math import atan2, log
from numpy import asarray, expand_dims, size, zeros
from scipy.interpolate import griddata
from scipy.ndimage import binary_fill_holes as imfill
from sklearn.preprocessing import Binarizer


def warping(V1, V2):
    ################# parameter setting ###################

    display_flag = True
    affine_start_flag = True
    polarity_flag = True
    nsamp = 100
    eps_dum = 0.25
    ndum_frac = 0.25
    mean_dist_global = []
    ori_weight = 0.1
    nbins_theta = 12
    nbins_r = 5
    r_inner = 0.125
    r_outer = 2
    tan_eps = 1.0
    n_iter = 6
    beta_init = 1
    r = 1
    w = 4

    ################## image loading #######################

    #V1_orig = plt.imread('/Users/liujin/Desktop/mask_0.jpeg') #print(V1_orig.shape) = (128, 128)
    #V2_orig = plt.imread('/Users/liujin/Desktop/mask_4.jpeg') #print(V1_orig.dtype) = unit8

    V1 = V1.squeeze()  #print(V1.shape) = (128, 128)
    V2 = V2.squeeze()  #print(V1.dtype) = uint8
    print(V1.shape)

    binarizer1 = Binarizer(threshold=0.5).fit(V1)
    V1 = binarizer1.transform(
        V1)  #print(V1.shape) = (128, 128) #print(V1.dtype) = uint8
    binarizer2 = Binarizer(threshold=0.5).fit(V2)
    V2 = binarizer2.transform(V2)

    V1 = imfill(V1)
    V2 = imfill(V2)

    V1 = expand_dims(
        asarray(V1),
        axis=2)  #print(V1.shape) = (128, 128, 1) #print(V1.dtype) = uint8
    V2 = expand_dims(asarray(V2), axis=2)

    V1 = V1.astype(
        float)  #print(V1.shape) = (128, 128, 1) #print(v1.dtype) = float64
    V2 = V2.astype(float)

    N1, N2, _ = V1.shape
    print("N1 is {}".format(N1))

    ################# edge detection ########################

    x2, y2, t2 = bdry_extract_3(V2)
    nsamp2 = len(x2)
    if nsamp2 >= nsamp:
        x2, y2, t2 = get_samples_1(x2, y2, t2, nsamp)
    else:
        print("error: shape #2 does not have enough samples")
    Y = np.concatenate((x2, y2), axis=1)

    x1, y1, t1 = bdry_extract_3(V1)
    nsamp1 = len(x1)
    if nsamp1 >= nsamp:
        x1, y1, t1 = get_samples_1(x1, y1, t1, nsamp)
    else:
        print("error: shape #1 does not have enough samples")
    X = np.concatenate((x1, y1), axis=1)

    # plt.plot(x2, y2,'r+')
    # axes = plt.gca()
    # axes.set_xlim(0,100)
    # axes.set_ylim(128,0)
    # plt.show()

    # plt.plot(x1, y1,'r+')
    # axes = plt.gca()
    # axes.set_xlim(0,100)
    # axes.set_ylim(128,0)
    # plt.show()

    ##################### up to here, x1 is horizontal, y1 is vertical #####################

    ################ compute correspondence ##################
    Xk = X
    tk = t1
    k = 1  # iteration counter
    signal = True

    ndum = np.round(ndum_frac * nsamp).astype(int)  #print(ndum) # = 25

    out_vec_1 = np.zeros((1, nsamp))
    out_vec_2 = np.zeros((1, nsamp))

    while signal:

        BH1, mean_dist_1 = sc_compute(Xk.T, zeros(
            (1, nsamp)), mean_dist_global, nbins_theta, nbins_r, r_inner,
                                      r_outer, out_vec_1)
        BH2, mean_dist_2 = sc_compute(Y.T, zeros(
            (1, nsamp)), mean_dist_global, nbins_theta, nbins_r, r_inner,
                                      r_outer, out_vec_2)

        # from_mat=sio.loadmat("/Users/liujin/Desktop/hist_cost.mat")
        # BH1 = from_mat['BH1']
        # BH2 = from_mat['BH2']
        # mean_dist_1 = from_mat['mean_dist_1']
        # mean_dist_2 = from_mat['mean_dist_2']
        # t1 = from_mat["t1"]
        # t2 = from_mat["t2"]
        # tk = from_mat["tk"]

        if affine_start_flag:
            if k == 1:
                lambda_o = 1000
            else:
                lambda_o = beta_init * r**(k - 2)
        else:
            lambda_o = beta_init * r**(k - 1)

        beta_k = (mean_dist_2**2) * lambda_o
        #print("beta_k is {}".format(beta_k))
        costmat_shape = hist_cost_2(BH1, BH2)
        #print("costmat_shape is {}".format(costmat_shape))

        ######################################################################
        theta_diff = np.tile(tk, (1, nsamp)) - np.tile(t2.T, (nsamp, 1))
        #print("theta_diff is {}".format(theta_diff))

        if polarity_flag:
            costmat_theta = 0.5 * (1 - np.cos(theta_diff))
        else:
            costmat_theta = 0.5 * (1 - np.cos(2 * theta_diff))

        costmat = (1 - ori_weight) * costmat_shape + ori_weight * costmat_theta

        #print("costmat is {}".format(costmat))

        #######################################################################

        nptsd = nsamp + ndum
        costmat2 = eps_dum * np.ones((nptsd, nptsd))
        costmat2[:nsamp, :nsamp] = costmat
        #print("costmat2 is {}".format(costmat2))

        #######################################################################

        # m = Munkres()
        # cvec=m.compute(costmat2)
        # ## my processing to take out index
        # cvec = np.asarray(cvec)
        # print("cvec is {}".format(cvec))
        # cvec = cvec[np.newaxis, :, 1]

        #m = munkres.Munkres()
        #indexes = m.compute(costmat2.tolist())

        # from_mat=sio.loadmat("/Users/liujin/Desktop/costmat2.mat")
        # costmat2 = from_mat['costmat2']
        indexes = hungarian.lap(costmat2)
        indexes = np.asarray(indexes)
        #print(indexes.shape)
        cvec = indexes[np.newaxis, 1, :]
        #print("cvec is {}".format(cvec))
        #print("cvec shape is {}".format(cvec.shape))

        # from_mat=sio.loadmat("/Users/liujin/Desktop/cvec.mat")
        # cvec = from_mat['cvec'] -1

        # #print("cvec is {}".format(cvec))

        # nptsd = from_mat["nptsd"]
        # nptsd = int(nptsd)
        # #print("nptsd is {}".format(nptsd))

        # Xk = from_mat["Xk"]
        # #print("Xk is {}".format(Xk))
        # X = from_mat["X"]
        # #print("X is {}".format(X))
        # Y = from_mat["Y"]

        a = np.sort(cvec)
        cvec2 = np.argsort(cvec)
        #print("cvec2 is {}".format(cvec2))

        out_vec_1 = cvec2[0, :nsamp] > nsamp
        #print("out_cvec_1 is {}".format(out_vec_1))
        out_vec_2 = cvec[0, :nsamp] > nsamp
        #print("out_cvec_2 is {}".format(out_vec_2))

        X2 = np.nan * np.ones((nptsd, 2))
        X2[:nsamp, :] = Xk
        X2 = X2[cvec[:].squeeze(), :]
        #print("X2 is {}".format(X2))
        X2b = np.nan * np.ones((nptsd, 2))
        X2b[:nsamp, :] = X
        X2b = X2b[cvec[:].squeeze(), :]
        #print("X2b is {}".format(X2b))  ## attention
        Y2 = np.nan * np.ones((nptsd, 2))
        Y2[:nsamp, :] = Y

        #print("Y2 is {}".format(Y2))
        #print("X2b is {}".format(X2b))
        #print("Y is {}".format(Y))

        ind_good = np.nonzero(np.logical_not(np.isnan(X2b[:nsamp, 1])))
        n_good = size(np.asarray(ind_good))
        #print("n_good is {}".format(n_good))
        X3b = X2b[ind_good, :].squeeze()
        Y3 = Y2[ind_good, :].squeeze()

        #print("X3b is {}".format(X3b))
        #print("Y3 is {}".format(Y3))

        # ########## ##################################################
        # # plt.plot(X2[:,0], X2[:,1],'r+')
        # # axes = plt.gca()
        # # axes.set_xlim(0,100)
        # # axes.set_ylim(128,0)
        # # plt.show()

        # # plt.plot(Y2[:,0], Y2[:,1],'r+')
        # # axes = plt.gca()
        # # axes.set_xlim(0,100)
        # # axes.set_ylim(128,0)
        # # plt.show()

        # plt.plot(X3b[:,0], X3b[:,1],'r+')
        # axes = plt.gca()
        # axes.set_xlim(0,100)
        # axes.set_ylim(128,0)
        # plt.show()

        # plt.plot(Y3[:,0], Y3[:,1],'r+')
        # axes = plt.gca()
        # axes.set_xlim(0,100)
        # axes.set_ylim(128,0)
        # plt.show()

        # from_mat=sio.loadmat("/Users/liujin/Desktop/book.mat")
        # X3b = from_mat['X3b']
        # Y3 = from_mat['Y3']
        # beta_k = from_mat['beta_k']

        cx, cy, E = bookenstain(X3b, Y3, beta_k)

        #print("cx is {}".format(cx))
        #print("cy is {}".format(cy))
        #print("E is {}".format(E))

        ########################### bookenstain is the same ####################

        # calculate affine cost

        A = np.concatenate(
            (cx[n_good + 1:n_good + 3, :], cy[n_good + 1:n_good + 3, :]),
            axis=1)
        #print("A is {}".format(A))
        _, s, _ = np.linalg.svd(A)
        #print("s is {}".format(s))
        aff_cost = log(s[0] / s[1])
        #print(aff_cost)

        # calculate shape context cost
        a1 = np.min(costmat, axis=0, keepdims=True)
        a2 = np.min(costmat, axis=1, keepdims=True)
        input_lj = np.asarray([np.nanmean(a1), np.nanmean(a2)])
        sc_cost = np.max(input_lj)

        # warp each coordinate
        fx_aff = np.dot(cx[n_good:n_good + 3].T,
                        np.concatenate((np.ones((1, nsamp)), X.T), axis=0))
        d2 = dist2(X3b, X)
        d2[d2 <= 0] = 0
        U = np.multiply(d2, np.log(d2 + np.finfo(float).eps))
        fx_wrp = np.dot(cx[:n_good].T, U)
        fx = fx_aff + fx_wrp

        fy_aff = np.dot(cy[n_good:n_good + 3].T,
                        np.concatenate((np.ones((1, nsamp)), X.T), axis=0))
        fy_wrp = np.dot(cy[:n_good].T, U)
        fy = fy_aff + fy_wrp

        Z = np.concatenate((fx, fy), axis=0)
        Z = Z.T

        # apply to tangent
        Xtan = X + np.dot(tan_eps,
                          np.concatenate((np.cos(t1), np.sin(t1)), axis=1))
        fx_aff = np.dot(cx[n_good:n_good + 3].T,
                        np.concatenate((np.ones((1, nsamp)), Xtan.T), axis=0))
        d2 = dist2(X3b, Xtan)
        d2[d2 <= 0] = 0

        U = np.multiply(d2, np.log(d2 + np.finfo(float).eps))
        fx_wrp = np.dot(cx[:n_good].T, U)
        fx = fx_aff + fx_wrp

        # Affine + warp terms for the tangent y-coordinates (uses cy, as in the
        # non-tangent warp above)
        fy_aff = np.dot(cy[n_good:n_good + 3].T,
                        np.concatenate((np.ones((1, nsamp)), Xtan.T), axis=0))
        fy_wrp = np.dot(cy[:n_good].T, U)
        fy = fy_aff + fy_wrp

        Ztan = np.concatenate((fx, fy), axis=0)
        Ztan = Ztan.T

        len_lj = Ztan.shape[0]
        tk = np.zeros((len_lj, 1))
        for i in range(len_lj):
            tk[i] = atan2(Ztan[i, 1] - Z[i, 1], Ztan[i, 0] - Z[i, 0])

        Xk = Z

        if k == n_iter:
            signal = False
        else:
            k = k + 1

    # ########################   image warp    ######################################

    x, y = np.mgrid[0:N2, 0:N1]

    x = x.reshape(-1, 1)
    #print("x is {}".format(x))
    y = y.reshape(-1, 1)
    #print("y is {}".format(y))
    M = np.size(x)
    fx_aff = np.dot(cx[n_good:n_good + 3].T,
                    np.concatenate((np.ones((1, M)), x.T, y.T), axis=0))
    d2 = dist2(X3b, np.concatenate((x, y), axis=1))
    fx_wrp = np.dot(cx[:n_good].T,
                    np.multiply(d2, np.log(d2 + np.finfo(float).eps)))
    fx = fx_aff + fx_wrp

    #print("fx is {}".format(fx))

    fy_aff = np.dot(cy[n_good:n_good + 3].T,
                    np.concatenate((np.ones((1, M)), x.T, y.T), axis=0))
    fy_wrp = np.dot(cy[:n_good].T,
                    np.multiply(d2, np.log(d2 + np.finfo(float).eps)))
    fy = fy_aff + fy_wrp

    grid_x, grid_y = np.meshgrid(np.arange(0, N2, 1), np.arange(0, N1, 1))

    fx = np.asarray(fx)
    fy = np.asarray(fy)

    V1m = griddata((fx.T.squeeze(), fy.T.squeeze()),
                   V1[y, x], (grid_x, grid_y),
                   method='nearest')
    V1m = V1m.squeeze()
    V1m[np.isnan(V1m)] = 0

    binarizer = Binarizer(threshold=0.5).fit(V1m)
    V1m = binarizer.transform(V1m)

    plt.imshow(V1m.squeeze())
    plt.show()
    # fz=find(isnan(V1w)); V1w(fz)=0;
    return V1m
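The mask preprocessing at the top of warping (threshold, then fill holes) can be exercised on its own; a minimal sketch with made-up data, using sklearn's Binarizer and SciPy's binary_fill_holes as in the code above:

import numpy as np
from sklearn.preprocessing import Binarizer
from scipy.ndimage import binary_fill_holes as imfill

mask = np.random.rand(128, 128)                       # stand-in for a predicted soft mask
mask = Binarizer(threshold=0.5).fit_transform(mask)   # hard 0/1 mask
mask = imfill(mask.astype(bool)).astype(float)        # fill interior holes
print(mask.shape, mask.dtype)                         # (128, 128) float64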
Example 6
    def keyPressEventFunc(self, event):
        # This function defines controls for the coronal window (up/down a slice),
        # as well as fast-key shortcuts for power users.

        key = event.key

        if key == 'up': #scroll up a slice
            if self.z2 + 1 <= self.maxSlice:
                self.z2 = self.z2 + 1
                self.ui.lineEdit.setText(str(self.z2))
                self.img2 = self.img_data2[:,:,self.z2]
                self.z = self.z2
                self.imshowFunc()  

        if key == 'down': #scroll down a slice
            if self.z2 - 1 >= 0:
                self.z2 = self.z2 - 1
                self.ui.lineEdit.setText(str(self.z2))
                self.img2 = self.img_data2[:,:,self.z2]
                self.z = self.z2
                self.imshowFunc()  
        
        if key in ('1', '2', '3', '4', '5', '6', '7', '8', '9'): #tap a digit key to set the brush size to that value
            self.ui.brushSize.setValue(float(key))

        if key == 'u': #tap 'u' to undo last segmentation edit
            self.redoHoldSeg = self.segImg.copy()
            self.segImg = self.holdLastSeg.copy()
            self.imshowFunc() 

        if key == 'r': # tap 'r' to redo an undo
            self.segImg = self.redoHoldSeg.copy()
            self.imshowFunc()

        if key == 'v': #click 'v' to recalculate and see volume
            self.volumeDisplayFunc()

        if key == 's': # toggle with 's' the overlay tool
            if self.ui.overlaySeg.checkState()==2:
                self.ui.overlaySeg.setCheckState(0)
            else:
                self.ui.overlaySeg.setCheckState(2)
            self.imshowFunc() 

        if key == 'd': #toggle with 'd' the drawing tool
            if self.ui.scribble.checkState()==2:
                self.ui.scribble.setCheckState(0)
            else:
                self.ui.scribble.setCheckState(2)

        if key == 'w': #toggle with 'w' the watershed edge detection
            if self.ui.freehand.checkState()==2:
                self.ui.freehand.setCheckState(0)
            else:
                self.ui.freehand.setCheckState(2)

        if key == 'g': #toggle with 'g' the auto gray leveling
            if self.ui.autogray.checkState()==2:
                self.ui.autogray.setCheckState(0)
                self.autoGrayFlag = 0
            else:
                self.ui.autogray.setCheckState(2)
                self.autoGrayFlag = 1

        if key == 'q': #with q keep only two largest objects (or if only two objects keep only largest)

            self.holdLastSeg = self.segImg.copy() #save previous state in case need to undo

            #first determine if region is 0's or 1's
            row = int(event.ydata)
            col = int(event.xdata) 
            regVal = self.segImg[row,col,self.z]

            mgac = self.segImg[:,:,self.z].copy()

            labelSlice,numFeatures = label(mgac)

            minVal = 0

            for i in range(0,numFeatures):
                sumLabel = np.sum(labelSlice==(i+1))
                if sumLabel > minVal:
                    maxlabel = i + 1
                    minVal = sumLabel
            minVal = 0
            for i in range(0,numFeatures):
                sumLabel = np.sum(labelSlice==(i+1))
                if sumLabel > minVal and i+1 != maxlabel:
                    max2label = i + 1
                    minVal = sumLabel


            labelPointsVal = labelSlice[row,col]

            outputFilledImage = self.segImg[:,:,self.z].copy() * 0
            outputFilledImage[labelSlice==maxlabel] = 1

            if numFeatures > 2:
                outputFilledImage[labelSlice==max2label] = 1


            self.segImg[:,:,self.z] = outputFilledImage > 0 #this will be final after flood fill.
            self.segImg[0,0,self.z] = 0 #need at least one zero value for overlay (matplotlib overlay bug???)
            self.imshowFunc() 

            (row,col,dep) = self.img_data2.shape
            self.overlayImgAX = np.zeros((row,col))

        if key == 'e': #with 'e' fill in any empty enclosed regions.
            self.holdLastSeg = self.segImg.copy() 
            self.segImg[:,:,self.z] = imfill(self.segImg[:,:,self.z])
            self.imshowFunc()
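The 'q' and 'e' handlers above reduce a binary slice to its largest connected components and fill enclosed holes; a rough self-contained sketch of those operations on synthetic data, taking label and imfill from scipy.ndimage:

import numpy as np
from scipy.ndimage import label, binary_fill_holes as imfill

rng = np.random.default_rng(0)
seg = rng.random((64, 64)) > 0.7                  # synthetic binary slice
labels, n = label(seg)
if n > 0:
    sizes = np.bincount(labels.ravel())[1:]       # component sizes for labels 1..n
    order = np.argsort(sizes) + 1                 # labels sorted by size, ascending
    keep = order[-2:] if n > 2 else order[-1:]    # two largest, or just the largest
    seg = np.isin(labels, keep)
seg = imfill(seg)                                 # fill any enclosed holes ('e' key)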