Example #1
    def get_normal(self, heightmap, log=False, opener="viewer", **kwargs):
        self.set_normal([heightmap, [], []], **kwargs)
        normals = self.read(level="points")

        normals_x = normals["nx"].to_numpy().reshape(self._shape)
        normals_y = normals["ny"].to_numpy().reshape(self._shape)
        normals_z = normals["nz"].to_numpy().reshape(self._shape)

        self._logger(get_array_info(normals_x))
        self._logger(get_array_info(normals_y))
        self._logger(get_array_info(normals_z))

        # normals_x = normals_x*(-1.)+1.

        normals_x = imgtools.project_data_to_img(normals_x, limits=[-1., 1.])
        normals_y = imgtools.project_data_to_img(normals_y, limits=[-1., 1.])

        normals_z = normals_z * (-1.) + 1.
        normals_z = -np.log(
            np.where(normals_z > 0., normals_z,
                     np.min(normals_z[normals_z > 0.])))
        self._logger(get_array_info(normals_z))
        normals_z = imgtools.project_data_to_img(normals_z, limits=[.0, 10.])

        self._logger(get_array_info(normals_x))
        self._logger(get_array_info(normals_y))
        self._logger(get_array_info(normals_z))

        self.add_height(heightmap, show=True)
        self.add_color(np.stack([normals_x, normals_y, normals_z], axis=2))
        self.remove_normal()

        self.write()
        self._opener(opener, self._path)
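The nx/ny components are projected from [-1, 1] into an image range, while nz is inverted and log-compressed before projection. As a hint at what that projection step does, here is a minimal, hypothetical sketch; the exact behaviour of imgtools.project_data_to_img in rsvis is an assumption (treated here as clipping plus linear min-max rescaling).

import numpy as np

def project_to_limits(data, limits=(-1.0, 1.0)):
    # Assumption: clip to the given limits and rescale linearly to [0, 1];
    # this only approximates what imgtools.project_data_to_img does.
    lo, hi = limits
    data = np.clip(data.astype(np.float32), lo, hi)
    return (data - lo) / (hi - lo)

# e.g. map the x component of a normal map from [-1, 1] into [0, 1]
normals_x = np.random.uniform(-1.0, 1.0, size=(4, 4)).astype(np.float32)
print(project_to_limits(normals_x, limits=(-1.0, 1.0)))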
    def shadow_orientation(self, event=None):
        labelimg = imgtools.get_mask_image(
            self.get_obj().get_img_from_label("{label}"),
            index=self.get_obj().get_class(value=False))

        # define the structuring element and apply the opening operation
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (75, 75))
        labelimg = cv2.morphologyEx(labelimg, cv2.MORPH_OPEN, kernel)

        # get the currently displayed image
        grayimg = imgtools.gray_image(self.get_obj().get_img(show=True))
        grayimg = cv2.medianBlur(grayimg, 7)
        grayimg_label = grayimg * labelimg

        # get settings of combobox and fields
        param = self._csbox_threshold.get_dict()
        thresh = param["Thresh"]
        # use a plain binary threshold if a manual value is given, otherwise add Otsu
        method = (cv2.THRESH_BINARY if param["Thresh"]
                  else cv2.THRESH_BINARY + cv2.THRESH_OTSU)

        # estimate the threshold from the masked (non-zero) pixels only,
        # then binarize the whole masked image with the returned value
        ret, dst = cv2.threshold(grayimg_label[grayimg_label != 0], thresh,
                                 255, method)
        dst = np.where(grayimg_label < ret, 0, 1)
        labelimg_skel = skeletonize(labelimg.astype(np.uint8))

        shdwimg = imgtools.gray_image(
            np.stack([
                imgtools.project_data_to_img(
                    labelimg, dtype=np.uint8, factor=255),
                np.zeros(dst.shape, dtype=np.uint8),
                imgtools.project_data_to_img(dst, dtype=np.uint8, factor=255)
            ],
                     axis=2))
        # overlay the skeleton and remap the remaining gray values: background
        # becomes 105, label-only pixels (gray value 29) become 0
        shdwimg = np.where(labelimg_skel > 0, 255, shdwimg)
        shdwimg = np.where(shdwimg == 0, 105, shdwimg)
        shdwimg = np.where(shdwimg == 29, 0, shdwimg)

        dispimg = self.get_obj().get_img(show=True)
        dispimg[:, :, 0] = np.where(labelimg_skel > 0, 0, dispimg[:, :, 0])
        dispimg[:, :, 1] = np.where(labelimg_skel > 0, 255, dispimg[:, :, 1])
        dispimg[:, :, 2] = np.where(labelimg_skel > 0, 0, dispimg[:, :, 2])
        # mark positions whose 3x3 neighborhood contains three distinct gray values
        for r in range(1, shdwimg.shape[0] - 1):
            for c in range(1, shdwimg.shape[1] - 1):
                if len(np.unique(shdwimg[r - 1:r + 2, c - 1:c + 2])) == 3:
                    cv2.circle(dispimg, (c, r), 3, 255, 2)

        tw.TopWindow(self,
                     title="Segmentation",
                     dtype="img",
                     value=[grayimg, shdwimg, grayimg_label, dispimg])
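The thresholding step above estimates an Otsu threshold from the labelled pixels only and then applies it to the whole masked image. A self-contained sketch of that pattern, with synthetic arrays standing in for the GUI data:

import cv2
import numpy as np

# synthetic grayscale image and label mask as stand-ins for the GUI data
gray = np.random.randint(0, 256, (200, 200), dtype=np.uint8)
mask = np.zeros_like(gray)
mask[50:150, 50:150] = 1

gray_label = gray * mask
# estimate the Otsu threshold from the non-zero (labelled) pixels only ...
ret, _ = cv2.threshold(gray_label[gray_label != 0], 0, 255,
                       cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# ... and binarize the masked image with that threshold
shadow = np.where(gray_label < ret, 0, 1)
print("Otsu threshold:", ret)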
Example #3
    def gradient_image(self):
        """Calculate the horizontal and vertical gradients of the currently displayed image
        """

        # https://www.learnopencv.com/histogram-of-oriented-gradients/
        
        # get settings of combobox and fields 
        param = self._csbox_blur.get_dict()
        kernel_size = param["Kernel Size"]

        if (kernel_size % 2) == 0 or kernel_size > 31:
            raise ValueError("Kernel size must be odd and not larger than 31.")
        
        # get the currently displayed image
        img = imgtools.project_data_to_img(imgtools.gray_image(self.get_obj().get_img(show=True)))

        # calculate gradient
        gradient_x = cv2.Sobel(img, cv2.CV_32F, 1, 0, ksize=kernel_size)
        gradient_y = cv2.Sobel(img, cv2.CV_32F, 0, 1, ksize=kernel_size)
        
        # calculate gradient magnitude and direction (in degrees)
        magnitude, angle = cv2.cartToPolar(gradient_x, gradient_y, angleInDegrees=True)

        # set image in canvas and update histogram
        # self.get_obj().set_img(magnitude, clear_mask=False)
        # self.set_img()

        # open a topwindow with gradient images
        tw.TopWindow(self, title="Gradient Image", dtype="img", value=[img, magnitude, gradient_x, gradient_y])
Example #4
    def compute_dimage(self, event=None):
        """Compute the difference image of the currently images in 'd_image'
        """
        
        # continue only if at least two images are available
        if len(self._dimage) < 2:
            raise IndexError("There are not enough images available to compute the difference.")

        # take the two most recently added images from '_dimage'
        img_a = self._dimage[-1]
        img_b = self._dimage[-2]

        # compute the absolute difference of the grayscale images
        img = np.absolute(imgtools.gray_image(img_a.astype(np.float32)) - imgtools.gray_image(img_b.astype(np.float32)))

        # check whether the difference image is empty
        if np.sum(img) == 0:
            raise ValueError("Sum of differences is zero.")

        img = imgtools.project_data_to_img(img)

        # set image in canvas and update histogram
        self.get_obj().set_img(img, clear_mask=False)
        self.set_img()

        # open a topwindow with images used for building the difference
        tw.TopWindow(self, title="Difference of images", dtype="img", value=[img, img_a, img_b])
Example #5
    def get_edges(self):

        # get settings of combobox and fields
        param = self._csbox_edges.get_dict()

        # get the currently displayed image
        img = imgtools.project_data_to_img(imgtools.gray_image(
            self.get_obj().get_img(show=True)),
                                           dtype=np.uint8,
                                           factor=255)

        aperture_size = param["Aperture Size"]
        if (aperture_size % 2) == 0 or aperture_size < 3 or aperture_size > 7:
            raise ValueError("Aperture size should be odd between 3 and 7.")

        edges = cv2.Canny(img,
                          param["Threshold I"],
                          param["Threshold II"],
                          apertureSize=aperture_size)

        # set image in canvas and update histogram
        # self.get_obj().set_img(edges, clear_mask=False)
        # self.set_img()

        # open a topwindow with the edges of the currently displayed image computed via canny
        tw.TopWindow(self, title="Edges", dtype="img", value=[img, edges])
Example #6
    def get_normal_img(self, heightmap, bins=None, log=False, **kwargs):
        self.set_normal([heightmap, [], []], **kwargs)
        normalimg = self.read(level="points")["nz"].to_numpy().reshape(
            self._shape)

        self._logger(get_array_info(normalimg))

        limits = [.0, 1.]
        if log:
            normalimg = normalimg * (-1.) + 1.
            normalimg = -np.log(
                np.where(normalimg > 0., normalimg,
                         np.min(normalimg[normalimg > 0.])))
            limits = [.0, 10.]

        normalimg = imgtools.project_data_to_img(normalimg, limits=limits)
        if bins is not None:
            normalimg_binned = np.zeros(normalimg.shape, dtype=np.float32)
            array = list(np.arange(1.0 / bins, 1., 1.0 / bins))
            limit = 0.
            for i in array:
                normalimg_binned += np.where(
                    np.logical_and(normalimg > limit, normalimg <= i), limit,
                    0.)
                limit = i
            normalimg = normalimg_binned + np.where(normalimg > limit, limit,
                                                    0.)

        return normalimg
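The binning loop maps every value of the projected image to the lower edge of its bin. A shorter sketch of (roughly) the same quantisation using vectorised NumPy; behaviour at the exact bin boundaries may differ slightly from the loop above:

import numpy as np

bins = 5
img = np.random.rand(8, 8).astype(np.float32)   # projected normal image in [0, 1]

# quantize each value to the lower edge of its bin
binned = np.floor(np.clip(img, 0.0, 1.0 - 1e-6) * bins) / bins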
    def set_threshold_img(self, event=None):
        """Set a threshold via input of windows's slider and display as a mask
        """

        # set a threshold via input of windows's slider and display as a mask
        ret, dst = self.set_threshold()

        # set image in canvas and update histogram
        self.get_obj().set_img(imgtools.project_data_to_img(dst,
                                                            dtype=np.uint8,
                                                            factor=255),
                               clear_mask=True)
        self.set_img()
    def get_normal_img(self, heightmap, bins=None, log=True, ntype="z", llimits=list(), **kwargs):
        self.set_normal([heightmap, [], []], **kwargs)

        # print(self.read(level="points"))
        # select the scalar field to be read from the point cloud
        if ntype == "z":
            scalar = "nz"
            limits = [.0, 1.]
        else:
            scalar = "scalar_Dip_direction_(degrees)"
            log = False
            limits = list()

        normalimg = self.read(level="points")[scalar].to_numpy().reshape(self._shape)

        self._logger(get_array_info(normalimg))

        normalimg = imgtools.project_data_to_img(normalimg)

        if log:
            normalimg = normalimg * (-1.) + 1.
            normalimg = -np.log(np.where(normalimg > 0., normalimg, np.min(normalimg[normalimg > 0.])))
            limits = llimits

        normalimg = imgtools.project_data_to_img(normalimg, limits=limits)
        if bins is not None:
            normalimg_binned = np.zeros(normalimg.shape, dtype=np.float32)
            array = list(np.arange(1.0 / bins, 1., 1.0 / bins))
            limit = 0.
            for i in array:
                normalimg_binned += np.where(np.logical_and(normalimg > limit, normalimg <= i), limit, 0.)
                limit = i
            normalimg = normalimg_binned + np.where(normalimg > limit, limit, 0.)

        return normalimg
    def add_height(self, heightmap, factor=1.0, show=False, **kwargs):
        if not len(heightmap):
            return

        self._num_points = heightmap.shape[0]*heightmap.shape[1]
        self._shape = heightmap.shape[0:2]
        heightmap = imgtools.expand_image_dim(heightmap.astype(np.float32))

        if show:
            heightmap = imgtools.project_data_to_img(
                heightmap, dtype=np.float32, factor=1.0) * 150 / np.float32(factor)

        grid = np.indices(self._shape, dtype=np.float32)

        self._height.update({
            "x": grid[0, ...].reshape(self._num_points).T,
            "y": grid[1, ...].reshape(self._num_points).T,
            "z": heightmap[..., 0].reshape(self._num_points).T
        })
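add_height turns a heightmap into per-point x/y/z columns by flattening the pixel grid; a self-contained sketch of that step:

import numpy as np

heightmap = np.random.rand(4, 3).astype(np.float32)   # synthetic height values
num_points = heightmap.shape[0] * heightmap.shape[1]

# pixel coordinates as two index planes, flattened to per-point columns
grid = np.indices(heightmap.shape, dtype=np.float32)
points = {
    "x": grid[0].reshape(num_points),
    "y": grid[1].reshape(num_points),
    "z": heightmap.reshape(num_points),
}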
def run(files,
        label,
        param_in,
        param_out=dict(),
        param_classes=list(),
        param_exp=list(),
        param_show=dict()):

    rsvis.utils.logger.Logger().get_logformat(
        "Start RSExp with the following parameters:",
        param_label=label,
        param_in=param_in,
        param_out=param_out,
        param_classes=param_classes,
        param_show=param_show)

    #   settings ------------------------------------------------------------
    # -----------------------------------------------------------------------
    param_label = [c["label"] for c in param_classes]
    param_color = [c["color"] for c in param_classes]
    rsio = rsvis.utils.rsioobject.RSIOObject(files,
                                             label,
                                             param_in,
                                             param_out,
                                             param_show,
                                             label=param_label,
                                             color=param_color)

    # #   set the input / output logger
    # logger =  rsvis.utils.logger.Logger(logger=lambda log: self._textbox.insert("1.0", "{}\n".format(log)))
    # data.logger = self._logger
    # opener = opener.GeneralOpener(logger=logger)

    # rsio self._data = data
    images_in = rsio.get_img_in()
    images_out = rsio.get_img_out(img_name="image")
    images_outs = rsio.get_img_out(img_name="gradient")
    images_log_out = rsio.get_param_out(**param_out["log"])
    # images_outs = rsio.get_img_out(img_name="attempt")
    # images_out = gu.PathCreator(**self._param_out)

    bbb = pathlib.Path(param_out["image"]["path_dir"])
    for i in images_in:
        for j in param_exp:
            img_list = list()
            img_lists = list()
            for k in range(j["iter"]):

                # pick the parameter values for the current iteration
                blubb = dict()
                for e, f in j["param"].items():
                    blubb[e] = f[k]

                new_shape = (math.ceil(i[0].data.shape[0] * blubb["factor"]),
                             math.ceil(i[0].data.shape[1] * blubb["factor"]))
                img = cv2.resize(i[0].data, (new_shape[1], new_shape[0]),
                                 interpolation=cv2.INTER_CUBIC)

                # # Thresholding
                # img = cv2.bilateralFilter(img, blubb["filter"][0], blubb["filter"][1], blubb["filter"][2])
                # grayimg = imgtools.gray_image(img)
                # _ , dst = cv2.threshold(grayimg, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

                # GRADIENTS
                grayimg = imgtools.gray_image(img)
                gradient_x = cv2.Sobel(grayimg,
                                       cv2.CV_32F,
                                       1,
                                       0,
                                       ksize=blubb["ksize"])
                gradient_y = cv2.Sobel(grayimg,
                                       cv2.CV_32F,
                                       0,
                                       1,
                                       ksize=blubb["ksize"])

                # calculate gradient magnitude and direction (in degrees)
                dst, angle = cv2.cartToPolar(gradient_x,
                                             gradient_y,
                                             angleInDegrees=True)

                # invert and log-compress the normalized gradient magnitude
                dst = imgtools.project_data_to_img(dst) * (-1.0) + 1.0
                dst = np.where(dst > 0., dst, np.min(dst[dst > 0.]))
                dst = imgtools.project_data_to_img(np.log(dst)) * (-1.0) + 1.0
                dst = np.stack([dst, dst, dst], axis=2)
                img_lists.append(dst)
                _, _, dst = rsvis.utils.imgseg.segmentation_slic(
                    dst, n_segments=blubb["slic-0"],
                    slic_zero=True)  #,boundaries="find")

                # _, _, dst = rsvis.utils.imgseg.segmentation_norm(dst, n_segments=blubb["slic-0"], slic_zero=True) #,boundaries="find")

                # # denoise image
                # # img = imgtools.gray_image(img)
                # # denoised = rank.median(img, disk(2))
                # denoised = imgtools.project_data_to_img(dst, dtype=np.uint8, factor=255)
                # # find continuous region (low gradient -
                # # where less than 10 for this image) --> markers
                # # disk(5) is used here to get a more smooth image
                # markers = rank.gradient(denoised, disk(5)) < 16
                # markers = ndi.label(markers)[0]

                # # local gradient (disk(2) is used to keep edges thin)
                # gradient = rank.gradient(denoised, disk(2))

                # # process the watershed
                # dst = watershed(gradient, markers)
                # dst = imgtools.project_data_to_img(dst, dtype=np.uint8, factor=255)

                img_list.append(dst)
                path_dir = str(bbb / "{}-{}".format(j["name"], k))
                images_out(i[0].path, img_list[-1], path_dir=path_dir)
                images_outs(i[0].path, img_lists[-1], path_dir=path_dir)
                images_log_out(i[0].path, blubb, path_dir=path_dir)
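The per-experiment resizing uses a scale factor and cv2.resize, which expects the target size as (width, height); a minimal sketch with a synthetic image:

import math

import cv2
import numpy as np

img = np.random.randint(0, 256, (100, 150, 3), dtype=np.uint8)   # synthetic RGB image
factor = 0.5

# scale the image by `factor`; note the (width, height) order expected by cv2.resize
new_shape = (math.ceil(img.shape[0] * factor), math.ceil(img.shape[1] * factor))
resized = cv2.resize(img, (new_shape[1], new_shape[0]), interpolation=cv2.INTER_CUBIC)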
Example #11
def run(files,
        label,
        param_in,
        param_out=dict(),
        param_classes=list(),
        param_exp=list(),
        param_show=dict()):

    rsvis.utils.logger.Logger().get_logformat(
        "Start RSExp with the following parameters:",
        param_label=label,
        param_in=param_in,
        param_out=param_out,
        param_classes=param_classes,
        param_show=param_show)

    #   settings ------------------------------------------------------------
    # -----------------------------------------------------------------------
    param_label = [c["label"] for c in param_classes]
    param_color = [c["color"] for c in param_classes]
    rsio = rsvis.utils.rsioobject.RSIOObject(files,
                                             label,
                                             param_in,
                                             param_out,
                                             param_show,
                                             label=param_label,
                                             color=param_color)

    # #   set the input / output logger
    # logger =  rsvis.utils.logger.Logger(logger=lambda log: self._textbox.insert("1.0", "{}\n".format(log)))
    # data.logger = self._logger
    # opener = opener.GeneralOpener(logger=logger)

    # rsio self._data = data
    images_in = rsio.get_img_in()
    images_out = rsio.get_img_out(img_name="image")
    images_log_out = rsio.get_param_out(**param_out["log"])
    # images_outs = rsio.get_img_out(img_name="attempt")
    # images_out = gu.PathCreator(**self._param_out)

    str_basis_path = pathlib.Path(param_out["image"]["path_dir"])
    for img_container in images_in:
        for exp in param_exp:
            # dst_list = list()
            # con = None
            for idx_exp in range(exp["iter"]):

                param = dict()
                for key, item in exp["param"].items():
                    param[key] = item[idx_exp]

                img = img_container[0].data
                if "filter" in exp.keys():
                    img = cv2.bilateralFilter(img, **exp["filter"])

                if exp["name"] == "felz":
                    param["min_size"] = int((img.shape[0] + img.shape[1]) / 4)
                    _, _, dst = rsvis.utils.imgseg.segmentation_felzenswalb(
                        img, **param)  #,boundaries="find")
                elif exp["name"] == "slic":
                    _, _, dst = rsvis.utils.imgseg.segmentation_slic(
                        img, **param)  #,boundaries="find")
                elif exp["name"] == "kmeans":
                    _, _, dst = rsvis.utils.imgseg.segmentation_kmeans_color(
                        img, **param)  #,boundaries="find")
                elif exp["name"] == "slic-0":
                    _, _, dst = rsvis.utils.imgseg.segmentation_slic(
                        img, **param, slic_zero=True)  #,boundaries="find")
                elif exp["name"] == "norm":
                    _, _, dst = rsvis.utils.imgseg.segmentation_norm(
                        img, **param, slic_zero=True)  #,boundaries="find")
                dst = imgtools.project_data_to_img(
                    dst, dtype=np.uint8,
                    factor=255)  # if boundaries without find

                # dst = imgtools.get_distance_transform(dst, label=1)
                # dst = dst*-1. + 1.

                # if con is None:
                #     con = dst
                # else:
                #     con += dst
                # dst_list = dst

                path_dir = str(str_basis_path /
                               "{}-{}".format(exp["name"], idx_exp))
                images_out(img_container[0].path, dst, path_dir=path_dir)
                images_log_out(img_container[0].path, param, path_dir=path_dir)
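Each experiment in param_exp carries one value per iteration for every parameter, and iteration k simply picks index k; a sketch of that expansion with a made-up experiment entry:

# hypothetical experiment entry in the style of param_exp
exp = {"name": "slic", "iter": 3,
       "param": {"n_segments": [100, 200, 400], "compactness": [1.0, 5.0, 10.0]}}

for idx_exp in range(exp["iter"]):
    # one parameter set per iteration
    param = {key: item[idx_exp] for key, item in exp["param"].items()}
    print(exp["name"], idx_exp, param)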
    def mtarsi_segmentation(self, show=True, **kwargs):
        # get the currently displayed image
        img = self.get_obj().get_img()

        param = gu.update_dict(self._csbox_seg_mt.get_dict(),
                               self._csbox_blur.get_dict())

        # define image list for visualization
        img_list = [img]
        cluster_list = list()

        new_shape = (math.ceil(img.shape[0] * param["factor"]),
                     math.ceil(img.shape[1] * param["factor"]))
        img = cv2.resize(img, (new_shape[1], new_shape[0]),
                         interpolation=cv2.INTER_CUBIC)

        for i in range(param["n_filters"]):
            img = cv2.bilateralFilter(img, param["Diameter"],
                                      param["Sigma Color"],
                                      param["Sigma Space"])

        img_list.append(img)

        slic = rsvis.utils.imgseg.segmentation_slic(
            img,
            **gu.update_dict(
                self._csbox_slic.get_dict(),
                {"n_segments": (int(img.shape[0] + img.shape[1]) * 4)}),
            **self._csbox_bound.get_dict(),
            slic_zero=True)
        img_list.append(slic[1])

        cluster = rsvis.utils.imgseg.segmentation_kmeans_color(
            img, **self._csbox_kmeans.get_dict(),
            **self._csbox_bound.get_dict())
        print(np.unique(cluster[0]))
        cluster_list.append(cluster[0])
        img_list.append(
            imgtools.project_data_to_img(cluster[0],
                                         dtype=np.uint8,
                                         factor=255))

        param_cluster = self._csbox_kmeans.get_dict()
        param_cluster["non_pos"] = 1
        cluster = rsvis.utils.imgseg.segmentation_kmeans_color_pos(
            img, **param_cluster, **self._csbox_bound.get_dict())
        print(np.unique(cluster[0]))
        cluster_list.append(cluster[0])
        img_list.append(
            imgtools.project_data_to_img(cluster[0],
                                         dtype=np.uint8,
                                         factor=255))

        param_cluster["non_pos"] = 0
        cluster = rsvis.utils.imgseg.segmentation_kmeans_color_pos(
            img, **param_cluster, **self._csbox_bound.get_dict())
        print(np.unique(cluster[0]))
        cluster_list.append(cluster[0])
        img_list.append(
            imgtools.project_data_to_img(cluster[0],
                                         dtype=np.uint8,
                                         factor=255))

        n_clusters = param_cluster["n_clusters"]

        img_new = np.zeros(img.shape[:2], dtype=np.uint8)

        clstr_img_src = cluster_list[0]
        clstr_img_dst = cluster_list[1]
        clstr_img_est = cluster_list[2]

        clstr_cmp_src_dst = np.zeros((n_clusters, 2))
        for clstr in range(n_clusters):
            # clstr_cmp = clstr_img_dst[clstr_img_src==clstr]
            # print(clstr_cmp.shape, np.unique(clstr_cmp))

            # print(clstr_img_src.shape,np.unique(clstr_img_src))
            # print(clstr_img_dst.shape,np.unique(clstr_img_dst))

            # print(clstr)
            clstr_src_dst = clstr_img_dst[(clstr_img_src == clstr).astype(bool)]
            # bbbb = np.where(clstr_img_src==clstr, clstr_img_dst, -1).astype(np.int8)

            # clstr_hist = np.histogram(bbbb, bins=range(0,n_clusters+1))
            # clstr_hist_norm = clstr_hist[0]/np.sum(clstr_hist[0])
            # print(clstr_hist_norm)
            clstr_hist = np.histogram(clstr_src_dst,
                                      bins=range(0, n_clusters + 1))
            clstr_hist_norm = clstr_hist[0] / np.sum(clstr_hist[0])
            print(clstr_hist_norm)
            hist_max = np.amax(clstr_hist_norm)
            print(hist_max)
            hist_max_idx = int(np.argmax(clstr_hist_norm))
            print("Max: {}, Idx-Max: {}, Stats: {}".format(
                hist_max, hist_max_idx, np.std(clstr_hist_norm)))
            clstr_cmp_src_dst[clstr, 0] = hist_max_idx

            cc = False
            if np.std(clstr_hist_norm) > 0.3:
                cc = True

            clstr_dst_src = clstr_img_src[clstr_img_dst == hist_max_idx]
            # bbbb = np.where(clstr_img_dst==hist_max_idx, clstr_img_src, -1).astype(np.int8)

            # clstr_hist = np.histogram(bbbb, bins=range(0,n_clusters+1))
            # clstr_hist_norm = clstr_hist[0]/np.sum(clstr_hist[0])
            # print(clstr_hist_norm)
            clstr_hist = np.histogram(clstr_dst_src,
                                      bins=range(0, n_clusters + 1))
            clstr_hist_norm = clstr_hist[0] / np.sum(clstr_hist[0])
            print(clstr_hist_norm)
            hist_max = np.amax(clstr_hist_norm)
            print(hist_max)
            hist_max_idx = int(np.argmax(clstr_hist_norm))
            print("Max: {}, Idx-Max: {}, Stats: {}".format(
                hist_max, hist_max_idx, np.std(clstr_hist_norm)))
            if clstr == hist_max_idx and cc:
                print("src and dst clusters match, cross-checking with est")
                clstr_src_dst = clstr_img_est[(clstr_img_src == clstr).astype(bool)]
                clstr_hist = np.histogram(clstr_src_dst,
                                          bins=range(0, n_clusters + 1))
                clstr_hist_norm = clstr_hist[0] / np.sum(clstr_hist[0])
                # print(clstr_hist_norm)
                hist_max = np.amax(clstr_hist_norm)
                # print(hist_max)
                hist_max_idx = int(np.argmax(clstr_hist_norm))
                # print("Max: {}, Idx-Max: {}, Stats: {}".format(hist_max, hist_max_idx, np.std(clstr_hist_norm)))
                clstr_cmp_src_est = hist_max_idx

                cc = False
                if np.std(clstr_hist_norm) > 0.3:
                    cc = True

                clstr_dst_src = clstr_img_src[clstr_img_est == hist_max_idx]
                clstr_hist = np.histogram(clstr_dst_src,
                                          bins=range(0, n_clusters + 1))
                clstr_hist_norm = clstr_hist[0] / np.sum(clstr_hist[0])
                # print(clstr_hist_norm)
                hist_max = np.amax(clstr_hist_norm)
                # print(hist_max)
                hist_max_idx = int(np.argmax(clstr_hist_norm))

                if clstr == hist_max_idx and cc:
                    print("mutual cluster match confirmed")
                    img_new += np.where(clstr_img_src == clstr, 1,
                                        0).astype(np.uint8)

        print(clstr_cmp_src_dst)

        img_list.append(img_new)

        # # # # seg_map_slic, seg_map_color_slic, seg_map_bound_slic = rsvis.utils.imgseg.segmentation_slic(img, **gu.update_dict(self._csbox_slic.get_dict(), {"n_segments":(int(img.shape[0]+img.shape[1])/2)}), **self._csbox_bound.get_dict(), slic_zero=True)

        # # # # seg_map_kmeans, seg_map_bound_kmeans = rsvis.utils.imgseg.segmentation_kmeans_color(img, **self._csbox_kmeans.get_dict(), **self._csbox_bound.get_dict())

        # # # # # define image list for visualization
        # # # # img_list.extend([seg_map_bound_kmeans, seg_map_bound_slic])
        # # # # seg_map_bound_kmeans = imgtools.project_data_to_img(seg_map_bound_kmeans,dtype=np.uint8, factor=255)
        # # # # seg_map_kmeans = seg_map_kmeans.astype(np.uint8)
        # # # # num = np.unique(seg_map_kmeans)
        # # # # seg_map_kmeans_new= np.zeros(seg_map_kmeans.shape, dtype=np.uint8)
        # # # # for i in np.unique(seg_map_slic):
        # # # #     mask = np.where(seg_map_slic==i, 1, 0).astype(np.ubyte)
        # # # #     hist = cv2.calcHist([seg_map_kmeans], [0], mask, [len(num)], [0,len(num)])
        # # # #     hist_max_index = np.where(hist == np.amax(hist))
        # # # #     if len(hist_max_index[0]) > 1:
        # # # #         hist_max_index = hist_max_index[0]
        # # # #     seg_map_kmeans_new += mask.astype(np.uint8)[:,:,np.newaxis]*np.uint8(hist_max_index[0])
        # # # # seg_map_kmeans_new_map = imgtools.project_data_to_img(seg_map_kmeans_new, factor=255, dtype=np.uint8)
        # # # # img_list.extend([seg_map_kmeans_new_map])

        # # # # hist = cv2.calcHist([seg_map_kmeans_new], [0], None, [len(num)], [0,len(num)])
        # # # # hist_max_count = np.where(hist == np.amax(hist))
        # # # # hist_list=[hist_max_count[0]]
        # # # # for i in num:
        # # # #     hist[i] = np.mean(img[np.concatenate([seg_map_kmeans_new]*3, axis=2)==i])
        # # # # hist_min_bright = np.where(hist == np.amin(hist))
        # # # # hist_list.append(hist_min_bright[0])
        # # # # print(hist_list)
        # # # # aba = [i for i in num if i not in hist_list]
        # # # # # allocate mask, background and foreground model
        # # # # seg_map_kmeans_new = seg_map_kmeans_new.astype(np.uint8)
        # # # # mask = np.zeros(img.shape[:2],dtype=np.uint8)
        # # # # mask += np.uint8(2)*np.where(np.squeeze(seg_map_kmeans_new, axis=2)==np.uint8(hist_max_count),1,0).astype(np.uint8)
        # # # # mask += np.uint8(3)*np.where(np.squeeze(seg_map_kmeans_new, axis=2)==np.uint8(hist_min_bright[0]),1,0).astype(np.uint8)
        # # # # mask += np.uint8(3)*np.where(np.squeeze(seg_map_kmeans_new, axis=2)==np.uint8(aba[0]),1,0).astype(np.uint8)
        # # # # bgdModel = np.zeros((1,65),np.float64)
        # # # # fgdModel = np.zeros((1,65),np.float64)
        # # # # w,h,d = img.shape
        # # # # roi = (0,0,w,h)
        # # # # # this modifies mask

        # # # # seg_map_slic, seg_map_color_slic, seg_map_bound_slic = rsvis.utils.imgseg.segmentation_slic(img, **gu.update_dict(self._csbox_slic.get_dict(), {"n_segments":(int(img.shape[0]+img.shape[1])*4)}), **self._csbox_bound.get_dict(), slic_zero=True)

        # # # # img_list.extend([imgtools.project_data_to_img(mask,factor=255, dtype=np.uint8)])
        # # # # # cv2.grabCut(cv2.bilateralFilter(img, d=5, sigmaColor=100, sigmaSpace=500), mask, roi, bgdModel, fgdModel, 10, mode=cv2.GC_INIT_WITH_MASK)

        # # # # cv2.grabCut(img, mask, roi, bgdModel, fgdModel, 40, mode=cv2.GC_INIT_WITH_MASK)

        # # # # # If mask==2 or mask== 1, mask2 get 0, other wise it gets 1 as 'uint8' type.
        # # # # seg_map = np.where((mask==2)|(mask==0), 0, 1).astype('bool')
        # # # # seg_map = img*seg_map[:,:,np.newaxis]
        # # # # img_list.extend([seg_map])
        # # # # seg_map = np.where((mask==2)|(mask==0), 1, 0).astype('bool')
        # # # # seg_map = img*seg_map[:,:,np.newaxis]
        # # # # img_list.extend([seg_map])

        # open a topwindow with the segmentation results of the currently displayed image
        if show:
            self._img_tw = tw.TopWindow(self,
                                        title="Segmentation",
                                        dtype="img",
                                        value=img_list)
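The cluster-matching logic above repeatedly looks up, for a given source cluster, the dominant co-occurring cluster in a second label map via a normalised histogram; a compact sketch of that core step with synthetic label maps:

import numpy as np

n_clusters = 3
src = np.random.randint(0, n_clusters, (50, 50))   # synthetic cluster label maps
dst = np.random.randint(0, n_clusters, (50, 50))

for clstr in range(n_clusters):
    # histogram of dst labels over the pixels of src cluster `clstr` ...
    hist, _ = np.histogram(dst[src == clstr], bins=range(0, n_clusters + 1))
    hist_norm = hist / np.sum(hist)
    # ... the dominant dst cluster is the match candidate for `clstr`
    best = int(np.argmax(hist_norm))
    print(clstr, "->", best, "share:", hist_norm[best])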