Example #1
def label_nuclei(binary, min_size):
    '''Label, watershed and remove small objects'''

    distance = medial_axis(binary, return_distance=True)[1]

    distance_blurred = gaussian_filter(distance, 5)

    local_maxi = peak_local_max(distance_blurred, indices=False, labels=binary, min_distance=30)

    markers = measure_label(local_maxi)

#    markers[~binary] = -1

#    labels_rw = segmentation.random_walker(binary, markers)

#    labels_rw[labels_rw == -1] = 0

#    labels_rw = segmentation.relabel_sequential(labels_rw)

    labels_ws = watershed(-distance, markers, mask=binary)

    labels_large = remove_small_objects(labels_ws, min_size)

    labels_clean_border = clear_border(labels_large)

    labels_from_one = relabel_sequential(labels_clean_border)

#    plt.imshow(ndimage.morphology.binary_dilation(markers))
#    plt.show()

    return labels_from_one[0]
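Example #2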
def Label(Image):
    '''
    Labels each connected region in Image with a different number
    '''
    Labelled_Image = measure.label(Image, background=0)
    Labelled_Image,a,b = segmentation.relabel_sequential(Labelled_Image, offset=1)
    return(Labelled_Image)
Example #3
def CCLabels(image, max_size=5000):
    image = BinaryDilation(image)
    image = invertimage(image)
    labelimage = label(image)
    labelimage = ndi.maximum_filter(labelimage, size=4)
    labelclean = remove_big_objects(labelimage, max_size=max_size)
    nonormimg, forward_map, inverse_map = relabel_sequential(labelclean)

    return nonormimg
Example #4
def WatershedLabels(image):
    image = BinaryDilation(image)
    image = invertimage(image)
    labelimage = label(image)
    labelimage = filters.maximum_filter(labelimage, 4)

    nonormimg, forward_map, inverse_map = relabel_sequential(labelimage)

    return nonormimg
Example #5
def test_relabel_sequential_offset1():
    ar = np.array([1, 1, 5, 5, 8, 99, 42])
    ar_relab, fw, inv = relabel_sequential(ar)
    ar_relab_ref = np.array([1, 1, 2, 2, 3, 5, 4])
    assert_array_equal(ar_relab, ar_relab_ref)
    fw_ref = np.zeros(100, int)
    fw_ref[1] = 1; fw_ref[5] = 2; fw_ref[8] = 3; fw_ref[42] = 4; fw_ref[99] = 5
    assert_array_equal(fw, fw_ref)
    inv_ref = np.array([0,  1,  5,  8, 42, 99])
    assert_array_equal(inv, inv_ref)
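For reference, a minimal stand-alone sketch (not taken from any of the repositories listed here) of the three values returned by relabel_sequential; in recent scikit-image releases the forward and inverse maps are ArrayMap objects that can be indexed like arrays.

import numpy as np
from skimage.segmentation import relabel_sequential

labels = np.array([0, 7, 7, 42, 99])
relabeled, fw, inv = relabel_sequential(labels, offset=1)

print(relabeled)       # [0 1 1 2 3]
print(fw[42], fw[99])  # 2 3  -- original label -> new label
print(inv[1], inv[3])  # 7 99 -- new label -> original label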
Example #6
def test():
    arr = np.load("../data/watershed.npy")
    arr = segmentation.relabel_sequential(arr)[0]
    t = time.time()

    cProfile.runctx("g = graph.construct_rag(arr)", globals(), locals(),
                    "const.prof")

    cProfile.runctx("g = g.random_merge(10)", globals(), locals(),
                    "merge.prof")
Example #7
def test_relabel_sequential_dtype():
    ar = np.array([1, 1, 5, 5, 8, 99, 42, 0], dtype=float)
    ar_relab, fw, inv = relabel_sequential(ar, offset=5)
    ar_relab_ref = np.array([5, 5, 6, 6, 7, 9, 8, 0])
    assert_array_equal(ar_relab, ar_relab_ref)
    fw_ref = np.zeros(100, int)
    fw_ref[1] = 5; fw_ref[5] = 6; fw_ref[8] = 7; fw_ref[42] = 8; fw_ref[99] = 9
    assert_array_equal(fw, fw_ref)
    inv_ref = np.array([0, 0, 0, 0, 0, 1,  5,  8, 42, 99])
    assert_array_equal(inv, inv_ref)
Example #8
def test_relabel_sequential_offset1():
    ar = np.array([1, 1, 5, 5, 8, 99, 42])
    ar_relab, fw, inv = relabel_sequential(ar)
    ar_relab_ref = np.array([1, 1, 2, 2, 3, 5, 4])
    assert_array_equal(ar_relab, ar_relab_ref)
    fw_ref = np.zeros(100, int)
    fw_ref[1] = 1; fw_ref[5] = 2; fw_ref[8] = 3; fw_ref[42] = 4; fw_ref[99] = 5
    assert_array_equal(fw, fw_ref)
    inv_ref = np.array([0,  1,  5,  8, 42, 99])
    assert_array_equal(inv, inv_ref)
Example #9
def test_relabel_sequential_dtype():
    ar = np.array([1, 1, 5, 5, 8, 99, 42, 0], dtype=float)
    ar_relab, fw, inv = relabel_sequential(ar, offset=5)
    ar_relab_ref = np.array([5, 5, 6, 6, 7, 9, 8, 0])
    assert_array_equal(ar_relab, ar_relab_ref)
    fw_ref = np.zeros(100, int)
    fw_ref[1] = 5; fw_ref[5] = 6; fw_ref[8] = 7; fw_ref[42] = 8; fw_ref[99] = 9
    assert_array_equal(fw, fw_ref)
    inv_ref = np.array([0, 0, 0, 0, 0, 1,  5,  8, 42, 99])
    assert_array_equal(inv, inv_ref)
Example #10
    def _estimate_atlas_weights(self, images, params):

        imgs_vec = np.nan_to_num(np.array([im.ravel() for im in images]))

        atlas = self.__perform_linear_combination(imgs_vec, params)
        # atlas = self._estim_atlas_as_unique_sum(atlas_ptns)
        atlas = segmentation.relabel_sequential(atlas)[0]

        weights = [weights_image_atlas_overlap_major(img, atlas) for img in self._images]
        weights = np.array(weights)

        return atlas, weights, None
Example #11
def removeLargeObjects(label_image, max_size=750):
    relabeled = label_image.copy()

    num_labels = relabeled.max() + 1
    for label_index in range(1, num_labels):
        matching_pixels = label_image == label_index
        num_matching = matching_pixels.sum()
        if num_matching > max_size:
            relabeled[matching_pixels] = 0

    relabeled, __, __ = segmentation.relabel_sequential(relabeled)
    return relabeled
Example #12
def reindex_labels(ds):
    """
    Make the cell labels sequential. Modifies the *labels* variable in place.

    Parameters
    ----------
    ds : (S, T, ..., Y, X) Dataset
    """
    for s in range(ds.dims['S']):
        for t in range(ds.dims['T']):
            ds['labels'][s, t] = relabel_sequential(ds['labels'][s,
                                                                 t].values)[0]
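A hypothetical usage sketch for reindex_labels() above, assuming xarray is available; the tiny Dataset below is illustrative only.

import numpy as np
import xarray as xr

frame = np.array([[0, 4], [9, 4]])
ds = xr.Dataset({'labels': (('S', 'T', 'Y', 'X'), frame[None, None])})
reindex_labels(ds)
print(ds['labels'].values[0, 0])   # [[0 1]
                                   #  [2 1]]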
Example #13
def sorted_vi_components(s1, s2, ignore1=[0], ignore2=[0], compress=False):
    """Return lists of the most entropic segments in s1|s2 and s2|s1.

    Parameters
    ----------
    s1, s2 : np.ndarray of int
        Segmentations to be compared. Usually, `s1` will be a candidate
        segmentation and `s2` will be the ground truth or target segmentation.
    ignore1, ignore2 : list of int, optional
        Labels in these lists are ignored in computing the VI. 0-labels are
        ignored by default; pass empty lists to use all labels.
    compress : bool, optional
        The 'compress' flag performs a remapping of the labels before doing
        the VI computation, resulting in memory savings when many labels are
        not used in the volume. (For example, if you have just two labels, 1
        and 1,000,000, 'compress=False' will give a vector of length
        1,000,000, whereas with 'compress=True' it will have just size 2.)

    Returns
    -------
    ii1 : np.ndarray of int
        The labels in `s2` having the most entropy. If `s1` is the automatic
        segmentation, these are the worst false merges.
    h1g2 : np.ndarray of float
        The conditional entropy corresponding to the labels in `ii1`.
    ii2 : np.ndarray of int (seg)
        The labels in `s1` having the most entropy. These correspond to the
        worst false splits.
    h2g1 : np.ndarray of float
        The conditional entropy corresponding to the labels in `ii2`.
    """
    if compress:
        s1, forw1, back1 = relabel_sequential(s1)
        s2, forw2, back2 = relabel_sequential(s2)
    _, _, _, h1g2, h2g1, _, _ = vi_tables(s1, s2, ignore1, ignore2)
    i1 = (-h1g2).argsort()
    i2 = (-h2g1).argsort()
    ii1 = back1[i1] if compress else i1
    ii2 = back2[i2] if compress else i2
    return ii1, h1g2[i1], ii2, h2g1[i2]
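The memory point made in the 'compress' note of the docstring above can be seen with a tiny, hypothetical sketch (not part of any listed repository): relabel_sequential packs sparse label values into a dense range and returns an inverse map back to the originals.

import numpy as np
from skimage.segmentation import relabel_sequential

sparse = np.array([1, 1000000])          # only two labels, but a huge value range
packed, fw, inv = relabel_sequential(sparse)
print(packed)                            # [1 2]
print(inv[1], inv[2])                    # 1 1000000 -- inverse map to the original labels

Example #14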
def main(dict_paths, nb_jobs=NB_THREADS, relabel=True):
    """ main evaluation

    :param {str: str} dict_paths:
    :param int nb_jobs: number of threads running in parallel
    :param bool relabel: whether to relabel the segmentation sequentially
    """
    logging.info('running...')
    if not os.path.isdir(dict_paths['output']):
        assert os.path.isdir(os.path.dirname(dict_paths['output'])), \
            'missing folder: %s' % dict_paths['output']
        os.mkdir(dict_paths['output'])

    name = os.path.basename(os.path.dirname(dict_paths['segm']))
    list_dirs = [dict_paths['annot'], dict_paths['segm']]
    if dict_paths['image'] != '':
        list_dirs.append(dict_paths['image'])
    df_paths = tl_data.find_files_match_names_across_dirs(list_dirs)
    path_csv = os.path.join(dict_paths['output'], NAME_CVS_PER_IMAGE % name)
    df_paths.to_csv(path_csv)

    annots, _ = tl_data.load_images_list(df_paths['path_1'].values.tolist())
    segms, names = tl_data.load_images_list(df_paths['path_2'].values.tolist())
    logging.info('loaded %i annots and %i segms', len(annots), len(segms))

    if relabel:
        annots = [relabel_sequential(annot)[0] for annot in annots]
        segms = list(map(wrapper_relabel_segm, zip(annots, segms)))

    path_csv = os.path.join(dict_paths['output'], NAME_CVS_PER_IMAGE % name)
    logging.debug('export to "%s"', path_csv)
    df_stat = seg_clf.compute_stat_per_image(segms, annots, names, nb_jobs)
    df_stat.to_csv(path_csv)

    path_csv = os.path.join(dict_paths['output'], NAME_CVS_OVERALL % name)
    logging.debug('export to "%s"', path_csv)
    df_desc = df_stat.describe()
    logging.info(df_desc.T[['count', 'mean', 'std']])
    df_desc.to_csv(path_csv)

    path_visu = os.path.join(dict_paths['output'], '%s__visual' % name)
    if not os.path.isdir(path_visu):
        os.mkdir(path_visu)
    # for idx, row in df_paths.iterrows():
    #     export_visual(row, path_visu)
    wrapper_visual = partial(export_visual, path_out=path_visu)
    iterate = tl_expt.WrapExecuteSequence(
        wrapper_visual, (row for idx, row in df_paths.iterrows()),
        nb_jobs=nb_jobs)
    list(iterate)

    logging.info('DONE')
Example #15
def _create_label_data():
    y_true, _ = random_shapes(image_shape=(200, 200),
                              min_shapes=20,
                              max_shapes=30,
                              min_size=10,
                              multichannel=False)
    y_true[y_true == 255] = 0
    y_true, _, _ = relabel_sequential(y_true)

    y_pred = np.zeros_like(y_true)
    y_pred[3:, 3:] = y_true[:-3, :-3]

    return y_true, y_pred
Example #16
def WatershedImageMarker(image, binary_fill, kernel_sizeX, kernel_sizeY, kernel_sizeZ=None, minsize=10):

    if kernel_sizeZ is not None:
        kernel_size = kernel_sizeX, kernel_sizeY, kernel_sizeZ
    else:
        kernel_size = kernel_sizeX, kernel_sizeY

    local_maxi = peak_local_max(image, indices=False, footprint=np.ones(kernel_size), labels=binary_fill)
    markers = ndi.label(local_maxi)[0]
    labels = watershed(-image, markers, mask=binary_fill)
    nonormimg = remove_small_objects(labels, min_size=minsize, connectivity=8, in_place=False)
    nonormimg, forward_map, inverse_map = relabel_sequential(nonormimg)
    return nonormimg
Example #17
    def processSegments(self, dump_path):
        smap, fwd, inv = relabel_sequential(self.segment_map2, offset=1)
        segs = ndimage.find_objects(smap)

        dir_name = os.path.split(self.image_src)[1]
        dir_name = os.path.splitext(dir_name)[0]

        dir_path = dump_path + dir_name
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)

        seg_path = dir_path + "/segments/"
        if not os.path.exists(seg_path):
            os.makedirs(seg_path)

        feature_file = dir_path + "/feature.list"
        saliency_file = dir_path + "/saliency.list"

        fp = open(saliency_file, 'w')
        fp1 = open(feature_file, 'w')

        self.__plot_maps(dir_path)

        for i in xrange(NUM_OF_SALIENT_OBJECTS):
            segment_id = self.saliency_list[i][1]

            fp.write("%0.6f\n" % self.saliency_list[i][0])

            new_seg_id = fwd[segment_id]
            segment_img = self.image[segs[new_seg_id - 1]]
            segment_copy = np.copy(segment_img)

            fig, ax = plt.subplots(1, 2)
            fig.set_size_inches(8, 3, forward=True)
            plt.subplots_adjust(0.05, 0.05, 0.95, 0.95, 0.05, 0.05)

            self.__dump_segment_features(segment_copy, fp1)

            mask = smap[segs[new_seg_id - 1]]
            idx = (mask != new_seg_id)
            segment_copy[idx] = 255, 255, 255

            ax[0].imshow(segment_img)
            ax[0].set_title("Image")
            ax[1].imshow(segment_copy)
            ax[1].set_title("Object")
            file_name = seg_path + str(i + 1) + ".png"
            plt.savefig(file_name)

        fp.close()
        fp1.close()
Example #18
    def __init__(self,
                 image,
                 seg_type=1,
                 min_dist=0.50,
                 compactness=10.0,
                 n_segments=50,
                 sigma=1.0,
                 num_of_salient_objects=30):
        self.timer = time.time()
        self.image = image
        self.min_dist = min_dist
        self.seg_type = seg_type
        self.num_of_salient_objects = num_of_salient_objects

        self.__set_timer("superpixel clustering")
        self.slic_map, segments = slic(self.image,
                                       compactness=int(compactness),
                                       n_segments=n_segments,
                                       sigma=sigma,
                                       max_iter=10)
        print "Number of SLIC segments = ", len(np.unique(self.slic_map))
        self.__print_timer("superpixel clustering")

        self.__set_timer("merging superpixels")
        # self.smap = self.slic_map
        self.slic_map = self.__segmentation(segments)
        self.__print_timer("merging superpixel")

        # print self.slic_map
        # self.slic_map, fwd, inv = relabel_sequential(self.smap)
        # print self.slic_map

        self.__set_timer("connected components")
        dummy_map = morphology.label(self.slic_map, neighbors=4)
        # dummy_map = measure.label(self.slic_map, neigbours=4)
        num_of_segments = len(np.unique(dummy_map))
        # pixel_count = self.__find_pixel_count(dummy_map, num_of_segments)
        print "Number of connected components = ", num_of_segments
        self.__print_timer("connected components")

        self.__set_timer("deleting small segments")
        self.min_num_pixel = self.__find_minimum_pixel_count(dummy_map)
        morphology.remove_small_objects(dummy_map,
                                        self.min_num_pixel,
                                        connectivity=4,
                                        in_place=True)
        self.__print_timer("deleting small segments")

        self.smap, self.fwd, self.inv = relabel_sequential(dummy_map, offset=1)
        print "Total number of segments remaining = ", len(np.unique(
            self.smap))
Example #19
def test_relabel_sequential_offset5_with0():
    ar = np.array([1, 1, 5, 5, 8, 99, 42, 0])
    ar_relab, fw, inv = relabel_sequential(ar, offset=5)
    ar_relab_ref = np.array([5, 5, 6, 6, 7, 9, 8, 0])
    assert_array_equal(ar_relab, ar_relab_ref)
    fw_ref = np.zeros(100, int)
    fw_ref[1] = 5
    fw_ref[5] = 6
    fw_ref[8] = 7
    fw_ref[42] = 8
    fw_ref[99] = 9
    assert_array_equal(fw, fw_ref)
    inv_ref = np.array([0, 0, 0, 0, 0, 1,  5,  8, 42, 99])
    assert_array_equal(inv, inv_ref)
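Example #20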
def segmentation(vol):
    """
    Make segmentation (unet + watershed)
    Input:
        vol: a specific volume
    Global variables:
        center_coordinates: center coordinates of segmented cells by watershed
        image_cell_bg: the cell/background regions obtained by unet.
        layer_num: number of the layers in the 3D image
        segmentation_auto: individual cells segmented by watershed
        image_gcn: raw image / 65536
    """
    global center_coordinates, image_cell_bg, layer_num, segmentation_auto, image_gcn
    
    # read raw 3D image of a specific volume
    t = time.time()
    image_raw=[]
    layer_num = z_siz
    for z in range(1,layer_num+1):
        path = raw_image_path+files_name%(vol,z)
        image_raw.append(cv2.imread(path, -1))
    
    # pre-processing: local contrast normalization
    image_norm = np.array(image_raw)
    image_gcn = image_norm.copy()/65536.0 # this intensity is used to correct tracking results
    image_gcn = image_gcn.transpose(1,2,0)
    background_pixels = np.where(image_norm < np.median(image_norm))
    image_norm = image_norm - np.median(image_norm)
    image_norm[background_pixels] = 0
    image_norm = lcn_gpu(image_norm, noise_level, img3d_siz=(x_siz,y_siz,z_siz))
    image_norm = image_norm.reshape(1,z_siz,x_siz,y_siz,1)
    image_norm = image_norm.transpose(0,2,3,1,4)
    elapsed = time.time() - t
    print('pre-processing took %.1f s'%elapsed)
    
    # predict cell-like regions using 3D U-net
    t = time.time()
    image_cell_bg = unet3_prediction(image_norm,unet_model,shrink=shrink)
    
    # segment connected cell-like regions using watershed
    [image_watershed2d_wo_border, border]=watershed_2d(image_cell_bg[0,:,:,:,0],z_range=z_siz, min_distance=7)
    image_watershed3d_wo_border, image_watershed3d_wi_border, _, _ = watershed_3d(image_watershed2d_wo_border,
                    samplingrate=[1,1,z_xy_resolution_ratio], method="min_size", 
                    min_size=min_size, neuron_num=0, min_distance=1)
    segmentation_auto, fw, inv = relabel_sequential(image_watershed3d_wi_border)
    
    # calculate coordinates of the centers of each segmented cell
    center_coordinates = snm.center_of_mass(segmentation_auto>0,segmentation_auto, range(1, segmentation_auto.max()+1))
    elapsed = time.time() - t
    print('segmentation took %.1f s'%elapsed)
Example #21
def test_ncut_stable_subgraph():
    """ Test to catch an error thrown when subgraph has all equal edges. """

    img = np.zeros((100, 100, 3), dtype='uint8')

    labels = np.zeros((100, 100), dtype='uint8')
    labels[:50, :50] = 1
    labels[:50, 50:] = 2

    rag = graph.rag_mean_color(img, labels, mode='similarity')
    new_labels = graph.cut_normalized(labels, rag, in_place=False)
    new_labels, _, _ = segmentation.relabel_sequential(new_labels)

    assert new_labels.max() == 0
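Example #22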
def test_ncut_stable_subgraph():
    """ Test to catch an error thrown when subgraph has all equal edges. """

    img = np.zeros((100, 100, 3), dtype='uint8')

    labels = np.zeros((100, 100), dtype='uint8')
    labels[:50, :50] = 1
    labels[:50, 50:] = 2

    rag = graph.rag_mean_color(img, labels, mode='similarity')
    new_labels = graph.cut_normalized(labels, rag, in_place=False)
    new_labels, _, _ = segmentation.relabel_sequential(new_labels)

    assert new_labels.max() == 0
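Example #23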
def SiveArea(Image, smallest=0, largest=1E9):
    '''
    Removes connected regions smaller than smallest and larger than largest in
    Image.
    '''
    Image = np.copy(Image)
    BadRegions = []
    for region in measure.regionprops(Image):
        if region.area > largest or region.area < smallest:
            BadRegions.append(region.label)
    for i in BadRegions:
        Image[Image == i] = 0
    Image, a, b = segmentation.relabel_sequential(Image)
    return(Image)
Example #24
    def segment(self, image):
        """Return a segment-labelled image.
        Labels are from 1..N where N is the resulting number of superpixels found.  One-based
        labelling is done because the `skimage.regionprops` function ignores 0-labels.
        Return [labelled_pixels, N]
        """
        # SuperPixel segment and label from 1..N
        #-seg_map = vl_slic(img_data, /*regionSize:*/sp_size, /*Regularizer:*/0.1, 'MinRegionSize', 10) ;
        lbld_pxls, _, lbls = relabel_sequential(
            slic(
                im2single(image),  #-compactness=0.1,
                multichannel=True,
                convert2lab=True,
                slic_zero=True))
        return lbld_pxls, len(lbls)
Example #25
def test_cut_normalized():

    img = np.zeros((100, 100, 3), dtype='uint8')
    img[:50, :50] = 255, 255, 255
    img[:50, 50:] = 254, 254, 254
    img[50:, :50] = 2, 2, 2
    img[50:, 50:] = 1, 1, 1

    labels = np.zeros((100, 100), dtype='uint8')
    labels[:50, :50] = 0
    labels[:50, 50:] = 1
    labels[50:, :50] = 2
    labels[50:, 50:] = 3

    rag = graph.rag_mean_color(img, labels, mode='similarity')

    new_labels = graph.cut_normalized(labels, rag, in_place=False)
    new_labels, _, _ = segmentation.relabel_sequential(new_labels)
    # Two labels
    assert new_labels.max() == 1

    new_labels = graph.cut_normalized(labels, rag)
    new_labels, _, _ = segmentation.relabel_sequential(new_labels)
    assert new_labels.max() == 1
Example #26
    def decode_instance(cls, pic, class_id=None):
        pic = np.array(pic, copy=False)

        instance_map = np.zeros((pic.shape[0], pic.shape[1]), dtype=np.uint8)

        # contains the class of each instance, but will set the class of "unlabeled instances/groups" to bg
        class_map = np.zeros((pic.shape[0], pic.shape[1]), dtype=np.uint8)

        if class_id is not None:
            mask = np.logical_and(pic >= class_id * 1000, pic <
                                  (class_id + 1) * 1000)
            if mask.sum() > 0:
                ids, _, _ = relabel_sequential(pic[mask])
                instance_map[mask] = ids
                class_map[mask] = 1
        else:
            for i, c in enumerate(cls.class_ids):
                mask = np.logical_and(pic >= c * 1000, pic < (c + 1) * 1000)
                if mask.sum() > 0:
                    ids, _, _ = relabel_sequential(pic[mask])
                    instance_map[mask] = ids + np.amax(instance_map)
                    class_map[mask] = i + 1

        return Image.fromarray(instance_map), Image.fromarray(class_map)
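Example #27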
def test_cut_normalized():

    img = np.zeros((100, 100, 3), dtype='uint8')
    img[:50, :50] = 255, 255, 255
    img[:50, 50:] = 254, 254, 254
    img[50:, :50] = 2, 2, 2
    img[50:, 50:] = 1, 1, 1

    labels = np.zeros((100, 100), dtype='uint8')
    labels[:50, :50] = 0
    labels[:50, 50:] = 1
    labels[50:, :50] = 2
    labels[50:, 50:] = 3

    rag = graph.rag_mean_color(img, labels, mode='similarity')

    new_labels = graph.cut_normalized(labels, rag, in_place=False)
    new_labels, _, _ = segmentation.relabel_sequential(new_labels)
    # Two labels
    assert new_labels.max() == 1

    new_labels = graph.cut_normalized(labels, rag)
    new_labels, _, _ = segmentation.relabel_sequential(new_labels)
    assert new_labels.max() == 1
Example #28
def test_relabel_sequential_already_sequential(offset, with0,
                                               input_starts_at_offset):
    if with0:
        ar = np.array([1, 3, 0, 2, 5, 4])
    else:
        ar = np.array([1, 3, 2, 5, 4])
    if input_starts_at_offset:
        ar[ar > 0] += offset - 1
    ar_relab, fw, inv = relabel_sequential(ar, offset=offset)
    _check_maps(ar, ar_relab, fw, inv)
    if input_starts_at_offset:
        ar_relab_ref = ar
    else:
        ar_relab_ref = np.where(ar > 0, ar + offset - 1, 0)
    assert_array_equal(ar_relab, ar_relab_ref)
Example #29
def test_relabel_sequential_dtype():
    ar = np.array([1, 1, 5, 5, 8, 99, 42, 0], dtype=np.uint8)
    ar_relab, fw, inv = relabel_sequential(ar, offset=5)
    _check_maps(ar.astype(int), ar_relab, fw, inv)
    ar_relab_ref = np.array([5, 5, 6, 6, 7, 9, 8, 0])
    assert_array_equal(ar_relab, ar_relab_ref)
    fw_ref = np.zeros(100, int)
    fw_ref[1] = 5
    fw_ref[5] = 6
    fw_ref[8] = 7
    fw_ref[42] = 8
    fw_ref[99] = 9
    assert_array_equal(fw, fw_ref)
    inv_ref = np.array([0, 0, 0, 0, 0, 1, 5, 8, 42, 99])
    assert_array_equal(inv, inv_ref)
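Example #30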
def export_visual(name,
                  annot,
                  segm,
                  img,
                  path_out,
                  drop_labels,
                  segm_alpha=1.):
    """ given visualisation of segmented image and annotation

    :param dict df_row:
    :param str path_out: path to the visualisation directory
    :param [int] drop_labels: whether skip some labels
    """
    # relabel for simpler visualisations of class differences
    if np.sum(annot < 0) > 0:
        annot[annot < 0] = -1
        _, lut, _ = relabel_sequential(annot + 1)
        lut = fill_lut(lut, segm, offset=1)
        annot = lut[annot.astype(int) + 1] - 1
        segm = lut[segm.astype(int) + 1] - 1
    else:
        annot, lut, _ = relabel_sequential(annot)
        lut = fill_lut(lut, segm, offset=0)
        segm = lut[segm.astype(int)]

    # normalise alpha in range (0, 1)
    segm_alpha = tl_visu.norm_aplha(segm_alpha)

    fig = tl_visu.figure_overlap_annot_segm_image(annot,
                                                  segm,
                                                  img,
                                                  drop_labels=drop_labels,
                                                  segm_alpha=segm_alpha)
    logging.debug('>> exporting -> %s', name)
    fig.savefig(os.path.join(path_out, '%s.png' % name))
    plt.close(fig)
Example #31
def watershed_image(image, size, targetdir, Label, Filename, Xcalibration, Time_unit):
    distance = ndi.distance_transform_edt(image)

    plt.imshow(distance)
    plt.title('Distance transform')
    plt.show()

    local_maxi = peak_local_max(distance, indices=False, footprint=np.ones((1, 1)),
                                labels=image)
    markers = ndi.label(local_maxi)[0]
    labels = watershed(-distance, markers, mask=image)

    nonormimg = remove_small_objects(labels, min_size=size, connectivity=4, in_place=False)
    nonormimg, forward_map, inverse_map = relabel_sequential(nonormimg)
    labels = nonormimg

    plt.imshow(labels)
    plt.title('Watershed labels')
    plt.show()
    print('Doing Hough in', np.unique(labels), 'labels')
    Velocity = []
    Images = []
    Besty0 = []
    Besty1 = []
    # loop over the unique labels returned by the watershed algorithm
    for label in np.unique(labels):

        if label == 0:
            continue

        mask = np.zeros(image.shape, dtype="uint8")
        mask[labels == label] = 1

        h, theta, d = hough_line(mask)
        img, besty0, besty1, velocity = show_hough_linetransform(mask, h, theta, d, Xcalibration,
                                                                 Time_unit, targetdir, Filename[0])

        if np.abs(velocity) > 1.0E-5:
            Velocity.append(velocity)
            Images.append(img)
            Besty0.append(besty0)
            Besty1.append(besty1)
    return Velocity, Images, Besty0, Besty1
Example #32
def clean_up_labels(labels,
                    fill_holes=True,
                    radius=None,
                    size_threshold=None,
                    keep_largest=False,
                    spacing=1,
                    processes=None,
                    sequential=False):
    '''Cleans up labels, one at a time.
    
    Fill holes (slice wise), binary opening if radius provided
    and removes small disconnected components.
    
    Optionally only processes/keeps the indices passed in arguments'''

    locs = find_objects(labels)
    # expand boundary by one px
    locs = [
        tuple(slice(max(0, s.start - 1), s.stop + 1)
              for s in loc) if loc else None for loc in locs
    ]

    clean_labels = np.zeros_like(labels)

    # create generator for cleaning label masks and multiprocess
    cleanup_inputs = ((labels[loc] == l, fill_holes, radius, size_threshold,
                       keep_largest, spacing) if loc else None
                      for l, loc in enumerate(locs, start=1))

    # increase chunksize for large number of items
    if processes is None:
        processes = os.cpu_count(
        )  # default value in Pool, but needed for chunksize
    chunksize = max(1, len(locs) // (processes * 10))

    with Pool(processes=processes) as pool:
        # ~for l, (loc, mask) in enumerate(zip(tqdm(locs), pool.imap(_pooled_clean_up_mask, cleanup_inputs, chunksize)), start=1):
        for l, (loc, mask) in enumerate(zip(
                locs,
                pool.imap(_pooled_clean_up_mask, cleanup_inputs, chunksize)),
                                        start=1):
            if loc:
                clean_labels[loc][mask] = l

    if sequential:
        clean_labels = relabel_sequential(clean_labels)[0]

    return clean_labels
Example #33
    def walker_label(self):
        seeds = self._erode_images()
        seed_union = np.zeros_like(seeds[0])
        orig_intersection = np.zeros_like(self.segmentation_list[0])
        for i in range(len(self.segmentations)):
            seed_union = seed_union + seeds[i]
            orig_intersection = orig_intersection + self.segmentation_list[i]
        labels = np.where(seed_union > 0, 1, 0)
        labels = label(labels)
        labels, _, _ = relabel_sequential(labels, offset=2)
        labels[orig_intersection == 0] = 1
        data = np.dstack(
            [segmentation for segmentation in self.segmentation_list])
        final = random_walker(data, labels, beta=self.beta, mode='bf')
        final[final == 1] = 0
        return label(final)
Example #34
    def _individualize(mask, topology, threshold):

        if topology is None:
            topology = -ndi.morphology.distance_transform_edt(mask)

        peak_idx = peak_local_max(-topology, min_distance, threshold_abs=threshold)
        peak_mask = np.zeros_like(mask, dtype=bool)
        peak_mask[tuple(peak_idx.T)] = True

        labelled_peaks = label(peak_mask)
        mask = watershed(topology, labelled_peaks, mask=mask, connectivity=connectivity)
        mask = relabel_sequential(mask)[0]
        if min_area is None:
            return mask, peak_mask
        else:
            return _reindex_labels(mask, min_area, inplace=None)[0], peak_mask
Example #35
def UNETPrediction3D(image, model, n_tiles, axis):

    Segmented = model.predict(image, axis, n_tiles=n_tiles)

    try:
        thresh = threshold_otsu(Segmented)
        Binary = Segmented > thresh
    except:
        Binary = Segmented > 0
    #Postprocessing steps
    Filled = binary_fill_holes(Binary)
    Finalimage = label(Filled)
    Finalimage = fill_label_holes(Finalimage)
    Finalimage = relabel_sequential(Finalimage)[0]

    return Finalimage
Example #36
def test():
    arr = np.load("../data/watershed.npy")
    arr = segmentation.relabel_sequential(arr)[0]
    t = time.time()

    cProfile.runctx(
        "g = graph.construct_rag(arr)",
        globals(),
        locals(),
        "const.prof")

    cProfile.runctx(
        "g = g.random_merge(10)",
        globals(),
        locals(),
        "merge.prof")
def make_predictions_dominant_v2(
    damage_probs: np.ndarray, min_size=32, assign_dominant=True, max_building_area=4096, min_solidity=0.9
):
    """
    Combines floodfill and dominant postprocessing
    :param damage_probs:
    :param min_size:
    :param assign_dominant:
    :param max_building_area:
    :param min_solidity:
    :return:
    """
    loc_pred = np.stack((damage_probs[0, ...], np.sum(damage_probs[1:, ...], axis=0)))
    loc_cls = np.argmax(loc_pred, axis=0)

    # After we have 'fixed' localization predictions, we must zero-out probabilities for damage probs
    damage_probs = damage_probs.copy()
    damage_probs[0, loc_cls > 0] = 0
    dmg_cls = np.argmax(damage_probs, axis=0)

    buildings = label(loc_cls)

    if min_size is not None:
        # If there are any objects at all
        if buildings.max() > 0:
            buildings = remove_small_objects(buildings, min_size=min_size)
            buildings, _, _ = relabel_sequential(buildings)
            loc_cls = buildings > 0
            dmg_cls[~loc_cls] = 0

    if assign_dominant:
        building_props = regionprops(buildings)
        classes = list(range(1, 5))
        for index, region in enumerate(building_props):
            region_label, area, solidity = region["label"], region["area"], region["solidity"]

            region_mask = buildings == region_label

            if area < max_building_area and solidity > min_solidity:
                label_counts = [np.sum(dmg_cls[region_mask] == cls_index) for cls_index in classes]
                max_label = np.argmax(label_counts) + 1
                dmg_cls[region_mask] = max_label

            # print(region_label, area, solidity)

    dmg_cls[dmg_cls == 0] = 1  # Fill remaining with damage type 1 (no damage)
    return loc_cls.astype(np.uint8), dmg_cls.astype(np.uint8)
Example #38
def prune(labelled, rel_threshold=0.0, abs_threshold=0, default=0):
    """
    Prunes area of a labelled image by thresholding area
    :param labelled: Labelled image
    :param rel_threshold: Threshold for area of a labelled group relative to the largest label area
    :param abs_threshold: Absolute threshold for area of a labelled group
    :param default: Label to send areas below threshold to
    :return:
    """
    labels = np.array(list(set(labelled[labelled > 0])))
    label_freq = np.array([np.sum(labelled == l) for l in labels])
    valid_labels = labels[(label_freq >= abs_threshold)
                          & (label_freq >= rel_threshold * label_freq.max())]
    kept = np.logical_or.reduce([labelled == label for label in valid_labels])
    offset = -default if default < 0 else 0
    labelled = np.where(kept, labelled, default) + offset
    return relabel_sequential(labelled)[0] - offset
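A hypothetical usage sketch for prune() above (illustration only, not from the original repository): regions smaller than 5 px, or smaller than 10% of the largest region, are sent to the default label before the sequential relabelling.

import numpy as np

labelled = np.zeros((64, 64), dtype=int)
labelled[5:30, 5:30] = 3        # large region, kept
labelled[40:42, 40:42] = 17     # tiny region, pruned away
pruned = prune(labelled, rel_threshold=0.1, abs_threshold=5)
print(np.unique(pruned))        # [0 1] -- only the large region survives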
Example #39
def compute_spot_stats(image, target, directory):
    if image.dtype != np.uint8:
        channel_mins = image.min(axis=0).min(axis=0)[np.newaxis, np.newaxis, :]
        channel_maxs = image.max(axis=0).max(axis=0)[np.newaxis, np.newaxis, :]
        image = ((image.astype(float) - channel_mins) * 255 /
                 (channel_maxs - channel_mins)).astype(np.uint8)
    v = viewer.ImageViewer(image)
    v += CentroPlugin()
    overlay = v.show()[0][0]
    overlay = seg.relabel_sequential(overlay)[0]
    mask = (overlay == 1)
    objects = nd.label(mask)[0]
    property_names = (['size', 'mean'] +
                      ['quantile-%i' % i for i in [5, 25, 50, 75, 95]])
    props = [np.concatenate(([prop.area, prop.mean_intensity], prop.quantiles))
             for prop in measure.regionprops(objects, intensity_image=target)]
    props = np.array(props)
    fout_txt = os.path.join(directory, 'measure.txt')
    np.savetxt(fout_txt, props, fmt='%.2f', delimiter='\t',
               header='\t'.join(property_names))
    fout_im = os.path.join(directory, 'mask.png')
    mh.imsave(fout_im, 64 * overlay.astype(np.uint8))
Example #40
  def segment_image(self, image, unit='f', windowsize=120, windowoffset=60, n_segments=None, compactness=0.75, margin=6, segment_size_pixels=16.0):
    #print image.shape
    (h, w, b) = image.shape

    if n_segments is None:
      n_segments = int((h / segment_size_pixels) * (w / segment_size_pixels))
      print "Using %d segments" % n_segments

    if unit == 'f':
      return ([image], [''])
    elif unit == 'w':
      subunits = []
      sublabels = []
      for i in np.arange(0, h-windowsize, windowoffset):
        for j in np.arange(0, w-windowsize, windowoffset):
          subframe = image[i:i+windowsize, j:j+windowsize, :]
          subunits.append(subframe)
          sublabel = '[%d-%d, %d-%d]' % (i, i+windowsize-1, j, j+windowsize-1)
          sublabels.append(sublabel)
          #pylab.clf()
          #pylab.imshow(subframe)
          #pylab.show()
          #raw_input()
      return (subunits, sublabels)
    elif unit == 's':

      segmentation = seg.slic(image, n_segments=n_segments, compactness=compactness, enforce_connectivity=True, convert2lab=False)
      segflat = segmentation.flatten()

      # Remove segments bordering the image or close to borders
      for i in sorted(list(set(segflat))):
        segm = np.equal(segmentation, i)
        xs = segm.sum(1)
        ys = segm.sum(0)
        if any(xs[:margin] > 0) or any(xs[-margin:] > 0) or any(ys[:margin] > 0) or any(ys[-margin:] > 0):
          segmentation[segm] = 0

      (segmentation, _, _) = seg.relabel_sequential(segmentation)

      segflat = segmentation.flatten()
      print "Number of segments should be: ", n_segments
      n_segments_now = len(list(set(segflat)))
      print "Number of segments actually is: ", n_segments_now
      subunits = []
      sublabels = []
      
      for i in sorted(list(set(segflat))):
        if i == 0: continue
        subunits.append((image, segmentation, i))
        sublabels.append('~' + str(i))
        
      # newim = np.zeros([image.shape[0], image.shape[1], 3])
      # newim[...,0] = image[...,0]
      # newim[...,1] = image[...,0]
      # newim[...,2] = image[...,0]
      # pylab.imshow(seg.mark_boundaries(newim, segmentation))
      # pylab.savefig('segmentation.png')
      # exit()
      # pylab.show()
      # raw_input()
        
      return (subunits, sublabels)
    else:
      print "Error: unit %s not supported" % unit
      print "Accepted units are:"
      print "  f: full image"
      print "  w: window regions within image"
      print "  s: image segments"
      exit()
Example #41
if len(sys.argv) != 4:
    print("Usage: python {} IMAGE ANNO OUTPUT".format(sys.argv[0]))
    print("")
    print("IMAGE and ANNO are inputs and OUTPUT is where the result should be written.")
    sys.exit(1)

fn_im = sys.argv[1]
fn_anno = sys.argv[2]
fn_output = sys.argv[3]

##################################
### Read images and annotation ###
##################################
img = cv2.imread(fn_im)
labels, _, _ = relabel_sequential(cv2.imread(fn_anno, 0))

# Compute the number of classes in the label image
M = len(set(labels.flat))

###########################
### Setup the CRF model ###
###########################
use_2d = False
if use_2d:
    # Example using the DenseCRF2D code
    d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], M)

    # get unary potentials (neg log probability)
    U = compute_unary(labels, M)
    d.setUnaryEnergy(U)
Example #42

time2 = datetime.now()
collapse = time2 - time1
print('total processing time:')
print(collapse.total_seconds())
print ('here is node info')
print(g.edges())
print(g.nodes())
print('edge num')
print(len(g.edges()))
print('graph node num')
print(len(g.nodes()))
numpy.savetxt('fh_label.txt', region_fh, fmt='%1i')
numpy.savetxt('slic_label.txt', segments_slic, fmt='%1i')
relab, fw, inv = relabel_sequential(segments_slic)
numpy.savetxt('relabel_slic.txt', relab, fmt='%1i')

file_g = open('g.obj', 'w')
file_g_final = open('g_final.obj', 'w')
file_fh=open('fh.obj','w')
file_num=open('num.obj','w')
pickle.dump(g, file_g)
pickle.dump(region_fh,file_fh)
pickle.dump(fhNum,file_num)
file_final=open('final.obj','w')
file_map = open('map.obj', 'w')


'''
test
Example #43
print '\nLOAD DATA'

# load the raw data
hdf = h5py.File(srcfile,'r')
size = hdf['image'].shape
raw = np.zeros(size, dtype=np.uint8)
hdf['image'].read_direct(raw)
img_train = raw.transpose((2,1,0)).astype(np.uint8,copy=True)   # change to C-order, contiguous, uint8 for gala

# load the supervoxels (watershed)
hdf = h5py.File(srcfile,'r')
assert( all([x==y for x,y in zip(size, hdf['ws'].shape)]) )
raw = np.zeros(size, dtype=np.uint32)
hdf['ws'].read_direct(raw)
ws_train = raw.transpose((2,1,0)).astype(np.int32,copy=True)    # change to C-order, contiguous, int32 for gala
ws_train, fw, inv = relabel_sequential(ws_train)    # gala needs sequential labels
assert( (ws_train > 0).all() )                      # no background in watershed

# load the ground truth
hdf = h5py.File(srcfile,'r')
assert( all([x==y for x,y in zip(size, hdf['gt'].shape)]) )
raw = np.zeros(size, dtype=np.uint16)
hdf['gt'].read_direct(raw)
gt_train = raw.transpose((2,1,0)).astype(np.int32,copy=True)    # change to C-order, contiguous, int32 for gala
gt_train, fw, inv = relabel_sequential(gt_train)    # gala needs sequential labels

# load the probabilities (intracellular space)
hdf = h5py.File(srcfile,'r')
assert( all([x==y for x,y in zip(size, hdf['pr'].shape)]) )
raw = np.zeros(size, dtype=np.float32)
hdf['pr'].read_direct(raw)
Example #44
"""
Usage: python dense_inference.py image annotations output

Adapted from the original C++ example: densecrf/examples/dense_inference.cpp
http://www.philkr.net/home/densecrf Version 2.2
"""

import numpy as np
import cv2
import densecrf as dcrf
from skimage.segmentation import relabel_sequential
import sys

img = cv2.imread(sys.argv[1], 1)
labels = relabel_sequential(cv2.imread(sys.argv[2], 0))[0].flatten()
output = sys.argv[3]

M = labels.max() + 1  # number of labels

# Setup the CRF model
d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], M)

# Certainty that the ground truth is correct
GT_PROB = 0.5

# Simple classifier that is 50% certain that the annotation is correct
u_energy = -np.log(1.0 / M)
n_energy = -np.log((1.0 - GT_PROB) / (M - 1))
p_energy = -np.log(GT_PROB)
Example #45
import sys

import cv2
import pydensecrf.densecrf as dcrf
import matplotlib.pylab as plt
from skimage.segmentation import relabel_sequential

from pydensecrf.utils import compute_unary, create_pairwise_bilateral, \
    create_pairwise_gaussian

fn_im = sys.argv[1]
fn_anno = sys.argv[2]

##################################
### Read images and annotation ###
##################################
img = cv2.imread(fn_im)
labels = relabel_sequential(cv2.imread(fn_anno, 0))[0].flatten()
M = 21 # 21 Classes to match the C++ example

###########################
### Setup the CRF model ###
###########################
use_2d = False
if use_2d:
    # Example using the DenseCRF2D code
    d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], M)

    # get unary potentials (neg log probability)
    U = compute_unary(labels, M)
    d.setUnaryEnergy(U)

    # This adds the color-independent term, features are the locations only.
Example #46
def segment_basic(img, name, output_dir='output', temp_dir='temp', save_figs=False):
    """Segment most preprocessed image and return basic stats for detected regions/cells"""

    step = 1 # Counter variable for step to make sure all intermediate outputs are in order

    if save_figs:
        tiff.imsave(joinpath(temp_dir, ''.join([name, '_', str(step),  '_orig.tif'])), img)

    # Cropping image will make everything after this faster
    #   Make sure this is appropriate for every frame
    print("Cropping...")
    step += 1
    lx, ly, lz = img.shape
    mask = np.zeros((lx, ly))
    img = img[900:lx-100, 900:ly-400, :] # Debugging: replace this later with user-specified bounds

    if save_figs:
        # also save original image with box showing kept region
        tiff.imsave(joinpath(temp_dir, ''.join([name, '_', str(step),  '_cropped.tif'])), img)

    # Convert RGB image to greyscale and convert back to 8-bit uint
    # Ignore precision loss warning
    print("Converting to greyscale...")
    step += 1
    img = img_as_ubyte(rgb2gray(img))

    if save_figs:
        tiff.imsave(joinpath(temp_dir, ''.join([name, '_', str(step),  '_greyscale.tif'])), img)

    # Threshold using custom threshold
    #   There's artifacts in the images (should fix the image segmentation output so this doesn't happen) which need to be fixed.
    #   Set a custom, very low threshold instead of trying to automatically calculate one.
    #   Need to maintain connectivity w/o introducing little dots
    #   Also need to be careful not to join together cells that are close together - such as right after division
    #       This seemed to do OK on the examples, though
    # Alternatively use adaptive thresholding
    print('Thresholding...')
    step += 1
    THRESHOLD = 20  # setting > 20 breaks connectivity
    img = img_as_ubyte(img > THRESHOLD)

    if save_figs:
        tiff.imsave(joinpath(temp_dir, ''.join([name, '_', str(step),  '_thresholded.tif'])), img)

    # Label regions
    #   http://scikit-image.org/docs/dev/auto_examples/plot_label.html
    #   http://scikit-image.org/docs/dev/user_guide/tutorial_segmentation.html
    #   https://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.ndimage.measurements.label.html
    #   http://scikit-image.org/docs/dev/auto_examples/segmentation/plot_join_segmentations.html#example-segmentation-plot-join-segmentations-py
    #   http://scikit-image.org/docs/dev/auto_examples/segmentation/plot_watershed.html#example-segmentation-plot-watershed-py
    # http://cmm.ensmp.fr/~beucher/wtshed.html
    # Note/TODO: Will probably need watershed and more fancy methods for the real segmentation
    print("Labeling regions...")
    step += 1

    label_img, n_labels = label(img, connectivity=2, return_num=True)
    regions = regionprops(label_img)

    # Keep the labeled regions bigger than some cutoff area
    #   The larger regions are cells, the smaller regions are noise
    REGION_AREA_CUTOFF = 10  # px
    kept_regions = []
    kept_labels = []
    for region in regions:
        if region.area > REGION_AREA_CUTOFF:
            kept_regions.append(region)
            kept_labels.append(region.label)

    # label_img_cleaned = label_img
    label_img[np.in1d(label_img, kept_labels, invert=True).reshape(label_img.shape)] = 0  # Make a mask of all positions not in kept_labels
    label_img, _, _ = relabel_sequential(label_img)

    regions = regionprops(label_img)

    # Get image with only the cells
    img = img_as_ubyte(label_img > 0)

    # Display regions with unique colors
    label_img_overlay = label2rgb(label_img, image=img)

    if save_figs:
        tiff.imsave(joinpath(temp_dir, ''.join([name, '_', str(step), '_labeled.tif'])),
                    img_as_ubyte(img))
        tiff.imsave(joinpath(temp_dir, ''.join([name, '_', str(step), '_labeled_overlay.tif'])),
                    img_as_ubyte(label_img_overlay))
        # TODO: output additional fig with number of region overlaid - use matplotlib or similar

    # Return just the minimum stats needed for cell tracking
    cells = []
    for region in regions:
        cell = {'label': region.label,
                'centroid': region.centroid,
                'area': region.area}
        cells.append(cell)

    return cells
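Example #47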
def evaluate_images(inputPath):
    """
    Filters images and analyses them.
    """
    start = default_timer()
    resultDir = inputPath+"/"+_NAME_OF_CREATED_DIRECTORY
    if not path_file.isdir(resultDir):
        mkdir(resultDir)
    resultDir += "/"
    outputString = []
    outputNumbers = []
    for i, imageName in enumerate(listdir(inputPath)):
        
        # read image
        pathName = path_file.join(inputPath, imageName)
        image = cv2.imread(pathName)
        if image is None:
            continue
        print "\nImage:", imageName
        name = ".".join(imageName.split(".")[:-1])
        outputNumbers.append(imageName)
        
        print "Filter in progress...",
        dilated = filterImage(image)
        print "done!"
        
        # segmentation with watershed
        print "Detecting particles...",
        connectedParticles, segmented, maxima = segmentationize(dilated)
        newlabels, fw, inv = relabel_sequential(segmented, offset=10)
        particleNum = len(fw)
        print "done!"
        
        # indicate discovered particles
        integratedMax = newlabels.copy()
        maxima1 = ndimage.binary_dilation(maxima, iterations=6).astype(maxima.dtype)
        integratedMax[maxima1] = (newlabels.max() + 50)
        Shift = (integratedMax != 0)
        integratedMax[Shift] += 20
        
        binary = integratedMax > 0
        
        plt.imsave(resultDir+name+"_"+_NAME_OF_PARTICLES_IMAGE, integratedMax, cmap=plt.cm.spectral)
        saveEdges(binary, resultDir+name)
        
        # evaluate the particles
        fs, so, particleArea, particleFcirc = analyseParticles(connectedParticles, binary, newlabels, particleNum)
        diameter = ( particleArea * (4. / np.pi)) ** 0.5   # estimate diameter
        
        # evaluate the clusters
        print "Detecting clusters...",
        clusterImage, clusterData, clusterNum = analyseClusters(binary, newlabels)
        plt.imsave(resultDir+name+"_"+_NAME_OF_CLUSTER_IMAGE, clusterImage, cmap=plt.cm.spectral)
        print "done!"
        
        # histograms
        print "Create histograms...",
        histoData = getHistoData(diameter, particleArea, clusterData, particleFcirc)
        shapeData, numbersText = saveHistos(histoData, resultDir, name)
        outputNumbers.append(numbersText)
        print "done!"
        
        # information for the text file
        meanData = getMeanData(diameter, clusterData, particleFcirc)
        text = getText(imageName, particleNum, clusterNum, so, fs, meanData, shapeData)
        outputString.append(text)
    
    # write data into text file
    file = open(resultDir+_NAME_OF_CREATED_TEXTFILE+".txt", "w")
    print >> file, "\n\n".join(outputString)
    file.close()
    file2 = open(resultDir+_NAME_OF_CREATED_TEXTFILE2+".txt", "w")
    print >> file2, "\n\n".join(outputNumbers)
    file2.close()
    print "Time:", default_timer() - start
def evaluate_images(inputPath):
    """
    Filters images and analyses them.
    """
    start = default_timer()
    resultDir = inputPath+"/"+_NAME_OF_CREATED_DIRECTORY
    if not path_file.isdir(resultDir):
        mkdir(resultDir)
    resultDir += "/"
    imageData = []
    for imageName in listdir(inputPath):
        
        # read image
        pathName = path_file.join(inputPath, imageName)
        image = cv2.imread(pathName)
        if image is None:
            continue
        print "\nImage:", imageName
        name = ".".join(imageName.split(".")[:-1])
        
        print "Filter in progress...",
        dilated = filterImage(image)
        print "done!"
        
        # segmentation with watershed
        print "Detecting particles...",
        connectedParticles, segmented, maxima = segmentationize(dilated)
        newlabels, fw, inv = relabel_sequential(segmented, offset=10)
        particleNum = len(fw)
        print "done!"
        
        # indicate discovered particles
        integratedMax = newlabels.copy()
        maxima1 = ndimage.binary_dilation(maxima, iterations=6).astype(maxima.dtype)
        integratedMax[maxima1] = (newlabels.max() + 50)
        Shift = (integratedMax != 0)
        integratedMax[Shift] += 20
        
        binary = integratedMax > 0
        
        # evaluate the particles
        fs, so, particleArea, particleFcirc = analyseParticles(connectedParticles, binary, newlabels, particleNum)
        diameter = ( particleArea * (4. / np.pi)) ** 0.5   # estimate diameter
        
        # evaluate the clusters
        print "Detecting clusters...",
        clusterImage, clusterData, clusterNum = analyseClusters(binary, newlabels)
        print "done!"
        
        # histograms
        print "Create histograms...",
        histoData = getHistoData(diameter, particleArea, clusterData, particleFcirc)
        histoData.append(name)
        imageData.append(histoData)
        print "done!"
        
        if len(imageData) == 2:
            saveHistos(imageData, resultDir)
            break
    
    print "Time:", default_timer() - start