Example #1
    def post_process(self):
        '''
        Normalize the buffered energy, build a low-energy (silence) mask,
        smooth it morphologically, and keep the silent segments whose shape
        is close enough to one of the start-silence prototypes.
        '''
        # Normalize energy
        self.energy = self._buffer['energy'][:]
        if self.energy.max():
            self.energy = self.energy / self.energy.max()

        silences = [1 if e < self.max_energy else 0 for e in self.energy]
        step = float(self.input_stepsize) / float(self.samplerate())

        models_dir = os.path.join(timeside.__path__[0], 'analyzer',
                                  'trained_models')
        prototype1_file = os.path.join(models_dir,
                                       'irit_noise_startSilences_proto1.dat')
        prototype2_file = os.path.join(models_dir,
                                       'irit_noise_startSilences_proto2.dat')

        prototype = numpy.load(prototype1_file)
        prototype2 = numpy.load(prototype2_file)

        # Smoothing to remove small segments in either direction
        struct = [1] * len(prototype)
        silences = binary_closing(silences, struct)
        silences = binary_opening(silences, struct)

        seg = [0, -1, silences[0]]
        silencesList = []
        for i, v in enumerate(silences):
            if not (v == seg[2]):
                seg[1] = i
                silencesList.append(tuple(seg))
                seg = [i, -1, v]
        seg[1] = i
        silencesList.append(tuple(seg))
        selected_segs = []
        candidates = []

        for s in silencesList:
            if s[2] == 1:
                shape = numpy.array(self.energy[s[0]:s[1]])

                d1, _ = computeDist2(prototype, shape)
                d2, _ = computeDist2(prototype2, shape)
                dist = min([d1, d2])

                candidates.append((s[0], s[1], dist))
                if dist < self.threshold:
                    selected_segs.append(s)

        label = {0: 'Start', 1: 'Session'}

        segs = self.new_result(data_mode='label', time_mode='segment')
        segs.id_metadata.id += '.' + 'segments'
        segs.id_metadata.name += ' ' + 'Segments'
        segs.data_object.label_metadata.label = label
        segs.data_object.label = [s[2] for s in selected_segs]
        segs.data_object.time = [(float(s[0]) * step) for s in selected_segs]
        segs.data_object.duration = [(float(s[1] - s[0]) * step)
                                     for s in selected_segs]
        self.add_result(segs)
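
The grouping loop above turns the smoothed 0/1 silence sequence into (start, end, value) runs before prototype matching. A minimal standalone sketch of that run-length grouping (hypothetical helper, no TimeSide dependencies):

def runs(values):
    """Yield (start, end, value) for each run of equal values; end is exclusive."""
    start = 0
    for i in range(1, len(values)):
        if values[i] != values[start]:
            yield start, i, values[start]
            start = i
    if len(values):
        yield start, len(values), values[start]

# list(runs([1, 1, 0, 0, 0, 1])) -> [(0, 2, 1), (2, 5, 0), (5, 6, 1)]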
Example #2
import os
from glob import glob
from os.path import basename, exists, join, normpath

import nibabel as nib
import numpy as np
from scipy.ndimage import binary_closing
from skimage import io

INPUT_DIR = '/projects/neuro/iSeg-2017/Testing'
OUTPUT_DIR = '/projects/neuro/iSeg-2017/Testing'


images = glob(join(INPUT_DIR,'*img'))
for image in images:
    print(image)
    image_array = io.imread(image,plugin='simpleitk')
    subject = basename(normpath(image))
    subject_name = subject.split(sep='-')[1]
    modality_name = subject.split(sep='-')[2][:-4]


    if not exists(join(OUTPUT_DIR, subject_name)):
        os.makedirs(join(OUTPUT_DIR, subject_name))

    if modality_name == 'T1':
        mask = np.ones_like(image_array)
        mask[np.where(image_array < 90)] = 0

        struct_element_size = (20, 20, 20)
        mask_augmented = np.pad(mask, [(21, 21), (21, 21), (21, 21)], 'constant', constant_values=(0, 0))
        mask_augmented = binary_closing(mask_augmented, structure=np.ones(struct_element_size, dtype=bool)).astype(int)
        img = nib.Nifti1Image(mask_augmented[21:-21, 21:-21, 21:-21], np.ones((4,4)))
        nib.save(img, join(OUTPUT_DIR, subject_name, 'mask.nii.gz'))

    img = nib.Nifti1Image(image_array, np.ones((4,4)))
    nib.save(img, join(OUTPUT_DIR, subject_name, modality_name + '.nii.gz'))
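
Padding the volume with a zero border wider than the structuring element before binary_closing, then cropping the pad off, is what keeps the closing above from leaking across the image boundary. A minimal sketch of that pad/close/crop pattern (function name and defaults are illustrative):

import numpy as np
from scipy.ndimage import binary_closing

def close_with_padding(mask, struct_size=20, pad=21):
    """Binary closing with a zero pad wider than the structuring element."""
    padded = np.pad(mask, pad, mode='constant', constant_values=0)
    struct = np.ones((struct_size,) * mask.ndim, dtype=bool)
    closed = binary_closing(padded, structure=struct)
    crop = (slice(pad, -pad),) * mask.ndim
    return closed[crop].astype(np.uint8)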

Example #3
    def split_by_class_index(self,
                             i,
                             sigma=2,
                             threshold_split=0.25,
                             expand_mask=1,
                             minimum_pixels=1):
        """
        If class i contains multiple non-contiguous segments in real space, divide these
        regions into distinct classes.

        Algorithm is as described in the docstring for self.split.

        Args:
            i (int): index of the class to split
            sigma (float): std of gaussian kernel used to smooth the class images before
                thresholding and splitting.
            threshold_split (float): used to threshold the class image to create a binary
                mask.
            expand_mask (int): number of pixels by which to expand the mask before
                separating into contiguous regions.
            minimum_pixels (int): if, after splitting, a potential new class contains
                fewer than this number of pixels, ignore it
        """
        assert isinstance(i, (int, np.integer))
        assert isinstance(expand_mask, (int, np.integer))
        assert isinstance(minimum_pixels, (int, np.integer))
        W_next = np.zeros((self.N_feat, 1))
        H_next = np.zeros((1, self.N_meas))

        # Get the class in real space
        class_image = self.get_class_image(i)

        # Turn into a binary mask
        class_image = gaussian_filter(class_image, sigma)
        mask = class_image > (np.max(class_image) * threshold_split)
        mask = binary_opening(mask, iterations=1)
        mask = binary_closing(mask, iterations=1)
        mask = binary_dilation(mask, iterations=expand_mask)

        # Get connected regions
        labels, nlabels = label(mask,
                                background=0,
                                return_num=True,
                                connectivity=2)

        # Add each region to the new W and H matrices
        for j in range(nlabels):
            mask = (labels == (j + 1))
            mask = binary_erosion(mask, iterations=expand_mask)

            if np.sum(mask) >= minimum_pixels:

                # Leave the Bragg peak weightings the same
                W_next = np.hstack((W_next, self.W[:, i, np.newaxis]))

                # Use the existing real space pixel weightings
                h_i = np.zeros(self.N_meas)
                h_i[mask.ravel()] = self.H[i, :][mask.ravel()]
                H_next = np.vstack((H_next, h_i[np.newaxis, :]))

        W_prev = np.delete(self.W, i, axis=1)
        H_prev = np.delete(self.H, i, axis=0)
        self.W_next = np.concatenate((W_next[:, 1:], W_prev), axis=1)
        self.H_next = np.concatenate((H_next[1:, :], H_prev), axis=0)
        self.N_c_next = self.W_next.shape[1]

        return
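
A condensed sketch of the splitting recipe documented above: smooth the class image, threshold it, clean the mask with opening and closing, expand it, and label the contiguous regions (scipy.ndimage for the morphology, skimage.measure.label for the components, as in the example):

import numpy as np
from scipy.ndimage import (gaussian_filter, binary_opening, binary_closing,
                           binary_dilation)
from skimage.measure import label

def split_regions(class_image, sigma=2, threshold_split=0.25, expand_mask=1):
    """Return one boolean mask per contiguous region of a smoothed class image."""
    smoothed = gaussian_filter(class_image, sigma)
    mask = smoothed > smoothed.max() * threshold_split
    mask = binary_closing(binary_opening(mask, iterations=1), iterations=1)
    mask = binary_dilation(mask, iterations=expand_mask)
    labels, nlabels = label(mask, background=0, return_num=True, connectivity=2)
    return [labels == j + 1 for j in range(nlabels)]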
Example #4
def mark_orders(
    im,
    min_cluster=None,
    min_width=None,
    filter_size=None,
    noise=None,
    opower=4,
    border_width=None,
    degree_before_merge=2,
    regularization=0,
    closing_shape=(5, 5),
    opening_shape=(2, 2),
    plot=False,
    manual=True,
    auto_merge_threshold=0.9,
    merge_min_threshold=0.1,
    sigma=2,
):
    """ Identify and trace orders

    Parameters
    ----------
    im : array[nrow, ncol]
        order definition image
    min_cluster : int, optional
        minimum cluster size in pixels (default: 500)
    filter_size : int, optional
        size of the running filter (default: 120)
    noise : float, optional
        noise to filter out (default: 8)
    opower : int, optional
        polynomial degree of the order fit (default: 4)
    border_width : int, optional
        number of pixels at the bottom and top borders of the image to ignore for order tracing (default: 5)
    plot : bool, optional
        whether to plot the final order fits (default: False)
    manual : bool, optional
        whether to manually select clusters to merge (strongly recommended) (default: True)

    Returns
    -------
    orders : array[nord, opower+1]
        order tracing coefficients (in numpy order, i.e. largest exponent first)
    """

    # Convert to signed integer, to avoid underflow problems
    im = np.asanyarray(im)
    im = im.astype(int)

    if filter_size is None:
        col = im[:, im.shape[0] // 2]
        col = median_filter(col, 5)
        threshold = np.percentile(col, 90)
        npeaks = find_peaks(col, height=threshold)[0].size
        filter_size = im.shape[0] // npeaks
        logger.info("Median filter size, estimated: %i", filter_size)
    elif filter_size <= 0:
        raise ValueError(f"Expected filter size > 0, but got {filter_size}")

    if border_width is None:
        # find width of orders, based on central column
        col = im[:, im.shape[0] // 2]
        col = median_filter(col, 5)
        idx = np.argmax(col)
        width = peak_widths(col, [idx])[0][0]
        border_width = int(np.ceil(width))
        logger.info("Image border width, estimated: %i", border_width)
    elif border_width < 0:
        raise ValueError(f"Expected border width > 0, but got {border_width}")

    if min_cluster is None:
        min_cluster = im.shape[1] // 4
        logger.info("Minimum cluster size, estimated: %i", min_cluster)
    elif not np.isscalar(min_cluster):
        raise TypeError(
            f"Expected scalar minimum cluster size, but got {min_cluster}")

    if min_width is None:
        min_width = 0.25
    if min_width == 0:
        pass
    elif isinstance(min_width, (float, np.floating)):
        min_width = int(min_width * im.shape[0])
        logger.info("Minimum order width, estimated: %i", min_width)

    # blur image along columns, and use the median + blurred + noise as threshold
    blurred = gaussian_filter1d(im, filter_size, axis=0)

    if noise is None:
        tmp = np.abs(blurred.flatten())
        noise = np.percentile(tmp, 5)
        logger.info("Background noise, estimated: %f", noise)
    elif not np.isscalar(noise):
        raise TypeError(f"Expected scalar noise level, but got {noise}")

    threshold = np.ma.median(blurred - im, axis=0)
    mask = im > blurred + noise + np.abs(threshold)
    # remove borders
    if border_width != 0:
        mask[:border_width, :] = mask[-border_width:, :] = False
        mask[:, :border_width] = mask[:, -border_width:] = False
    # remove masked areas with no clusters
    mask = np.ma.filled(mask, fill_value=False)
    # close gaps between clusters
    struct = np.full(closing_shape, 1)
    mask = morphology.binary_closing(mask, struct, border_value=1)
    # remove small lonely clusters
    struct = np.full(opening_shape, 1)
    # struct = morphology.generate_binary_structure(2, 1)
    mask = morphology.binary_opening(mask, struct)

    # label clusters
    clusters, _ = label(mask)

    # remove small clusters
    sizes = np.bincount(clusters.ravel())
    mask_sizes = sizes > min_cluster
    mask_sizes[0] = True  # This is the background, which we don't need to remove
    for i in np.arange(len(sizes))[~mask_sizes]:
        clusters[clusters == i] = 0

    # Reorganize x, y, clusters into a more convenient "pythonic" format
    # x, y become dictionaries, with an entry for each order
    # n is just a list of all orders (ignore cluster == 0)
    n = np.unique(clusters)
    n = n[n != 0]
    x = {i: np.where(clusters == c)[0] for i, c in enumerate(n)}
    y = {i: np.where(clusters == c)[1] for i, c in enumerate(n)}

    def best_fit_degree(x, y):
        L1 = np.sum((np.polyval(np.polyfit(y, x, 1), y) - x)**2)
        L2 = np.sum((np.polyval(np.polyfit(y, x, 2), y) - x)**2)

        # aic1 = 2 + 2 * np.log(L1) + 4 / (x.size - 2)
        # aic2 = 4 + 2 * np.log(L2) + 12 / (x.size - 3)

        if L1 < L2:
            return 1
        else:
            return 2

    if sigma > 0:
        degree = {i: best_fit_degree(x[i], y[i]) for i in x.keys()}
        bias = {i: np.polyfit(y[i], x[i], deg=degree[i])[-1] for i in x.keys()}
        n = list(x.keys())
        yt = np.concatenate([y[i] for i in n])
        xt = np.concatenate([x[i] - bias[i] for i in n])
        coef = np.polyfit(yt, xt, deg=degree_before_merge)

        res = np.polyval(coef, yt)
        cutoff = sigma * (res - xt).std()

        # DEBUG plot
        # uy = np.unique(yt)
        # mask = np.abs(res - xt) > cutoff
        # plt.plot(yt, xt, ".")
        # plt.plot(yt[mask], xt[mask], "r.")
        # plt.plot(uy, np.polyval(coef, uy))
        # plt.show()
        #

        m = {
            i: np.abs(np.polyval(coef, y[i]) - (x[i] - bias[i])) < cutoff
            for i in x.keys()
        }

        k = max(x.keys()) + 1
        for i in range(1, k):
            new_img = np.zeros(im.shape, dtype=int)
            new_img[x[i][~m[i]], y[i][~m[i]]] = 1
            clusters, _ = label(new_img)

            x[i] = x[i][m[i]]
            y[i] = y[i][m[i]]
            if len(x[i]) == 0:
                del x[i], y[i]

            nnew = np.max(clusters)
            if nnew != 0:
                xidx, yidx = np.indices(im.shape)
                for j in range(1, nnew + 1):
                    xn = xidx[clusters == j]
                    yn = yidx[clusters == j]
                    if xn.size >= min_cluster:
                        x[k] = xn
                        y[k] = yn
                        k += 1
                # plt.imshow(clusters, origin="lower")
                # plt.show()

    if plot:  #pragma: no cover
        plt.title("Identified clusters")
        plt.xlabel("x [pixel]")
        plt.ylabel("y [pixel]")
        clusters = np.ma.zeros(im.shape, dtype=int)
        for i in x.keys():
            clusters[x[i], y[i]] = i + 1
        clusters[clusters == 0] = np.ma.masked

        plt.imshow(clusters, origin="lower", cmap="prism")
        plt.show()

    # Merge clusters, if there are even any possible mergers left
    x, y, n = merge_clusters(
        im,
        x,
        y,
        n,
        manual=manual,
        deg=degree_before_merge,
        auto_merge_threshold=auto_merge_threshold,
        merge_min_threshold=merge_min_threshold,
    )

    if min_width > 0:
        sizes = {k: v.max() - v.min() for k, v in y.items()}
        mask_sizes = {k: v > min_width for k, v in sizes.items()}
        for k, v in mask_sizes.items():
            if not v:
                del x[k]
                del y[k]
        n = x.keys()

    orders = fit_polynomials_to_clusters(x, y, n, opower)

    # sort orders from bottom to top, using relative position

    def compare(i, j):
        _, xi, i_left, i_right = i
        _, xj, j_left, j_right = j

        if i_right < j_left or j_right < i_left:
            return xi.mean() - xj.mean()

        left = max(i_left, j_left)
        right = min(i_right, j_right)

        return xi[left:right].mean() - xj[left:right].mean()

    xp = np.arange(im.shape[1])
    keys = [(c, np.polyval(orders[c], xp), y[c].min(), y[c].max())
            for c in x.keys()]
    keys = sorted(keys, key=cmp_to_key(compare))
    key = [k[0] for k in keys]

    n = np.arange(len(n), dtype=int)
    x = {c: x[key[c]] for c in n}
    y = {c: y[key[c]] for c in n}
    orders = np.array([orders[key[c]] for c in n])

    column_range = np.array([[np.min(y[i]), np.max(y[i]) + 1] for i in n])

    if plot:  #pragma: no cover
        plot_orders(im, x, y, n, orders, column_range)

    return orders, column_range
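
The cluster-detection core of mark_orders can be read on its own: threshold against a blurred background, tidy the mask with closing and opening, label it, and drop clusters below min_cluster. A rough, hedged sketch of just that part (parameter values are the defaults quoted in the docstring):

import numpy as np
from scipy.ndimage import gaussian_filter1d, label, binary_closing, binary_opening

def find_clusters(im, filter_size=120, noise=8.0, min_cluster=500,
                  closing_shape=(5, 5), opening_shape=(2, 2)):
    """Label bright clusters that stand out above a column-blurred background."""
    blurred = gaussian_filter1d(im.astype(int), filter_size, axis=0)
    mask = im > blurred + noise
    mask = binary_closing(mask, np.ones(closing_shape), border_value=1)
    mask = binary_opening(mask, np.ones(opening_shape))
    clusters, _ = label(mask)
    sizes = np.bincount(clusters.ravel())
    for small in np.nonzero(sizes < min_cluster)[0]:
        if small != 0:                 # label 0 is the background, keep it
            clusters[clusters == small] = 0
    return clusters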
Example #5
    def lungs_segmentation(self, lungs_threshold=-360):
        seg_prub = np.array(self.data3d <= lungs_threshold)
        seg_prub = morphology.binary_closing(
            seg_prub,
            iterations=self.iteration()).astype(self.segmentation.dtype)
        seg_prub = morphology.binary_opening(seg_prub, iterations=5)
        counts, labeled_seg = self.volume_count(seg_prub)
        #self.segmentation = seg_prub
        #for x in np.nditer(labeled_seg, op_flags=['readwrite']):
        #    if x[...] != 0:
        #        counts[x[...]] = counts[x[...]] + 1
        #index = np.argmax(counts)  # background
        #counts[index] = 0
        index = np.argmax(counts)  # one lung or both lungs
        velikost1 = counts[index]
        counts[index] = 0
        index2 = np.argmax(counts)  # the other lung, or something else
        velikost2 = counts[index2]
        if (1.0 - self.maximal_lung_diff) <= float(velikost2) / velikost1:
            print("lungs separated")
        else:
            print("lungs not separated")
            pocet = 0
            seg_prub = np.array(self.data3d <= lungs_threshold)
            seg_prub = morphology.binary_closing(
                seg_prub,
                iterations=self.iteration()).astype(self.segmentation.dtype)
            seg_prub = morphology.binary_opening(seg_prub, iterations=5)

            while not (1.0 -
                       self.maximal_lung_diff) <= float(velikost2) / velikost1:
                seg_prub = morphology.binary_erosion(seg_prub, iterations=1)
                counts, labeled_seg = self.volume_count(seg_prub)
                index = np.argmax(counts)  # one lung or both lungs
                velikost1 = counts[index]
                counts[index] = 0
                index2 = np.argmax(counts)  # the other lung, or something else
                velikost2 = counts[index2]
                pocet = pocet + 1
            seg_prub = morphology.binary_dilation(self.segmentation,
                                                  iterations=pocet).astype(
                                                      self.segmentation.dtype)
        #self.segmentation = self.segmentation + np.array(labeled_seg==index).astype(np.int8)*self.slab['lungs']
        #self.segmentation = self.segmentation + np.array(labeled_seg==index2).astype(np.int8)*self.slab['lungs']
        plice1 = np.array(labeled_seg == index)
        z, x, y = np.nonzero(plice1)
        m1 = np.max(y)
        if m1 < (self.segmentation.shape[1] / 2):
            self.segmentation = self.segmentation + np.array(
                labeled_seg == index).astype(np.int8) * self.slab['llung']
            self.segmentation = self.segmentation + np.array(
                labeled_seg == index2).astype(np.int8) * self.slab['rlung']
        else:
            self.segmentation = self.segmentation + np.array(
                labeled_seg == index).astype(np.int8) * self.slab['rlung']
            self.segmentation = self.segmentation + np.array(
                labeled_seg == index2).astype(np.int8) * self.slab['llung']
        self.orientation()
        if self.smer == 1:
            self.segmentation[self.segmentation == self.slab['llung']] = 3
            self.segmentation[self.segmentation ==
                              self.slab['rlung']] = self.slab['llung']
            self.segmentation[self.segmentation == 3] = self.slab['rlung']
        pass
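
A more compact, hedged sketch of the lung-selection idea above: threshold the CT volume for air, clean the mask with closing and opening, then keep the two largest connected components (iteration counts here are placeholders):

import numpy as np
from scipy.ndimage import binary_closing, binary_opening, label

def two_largest_components(data3d, lungs_threshold=-360):
    """Boolean masks of the two largest air-filled components in a CT volume."""
    air = data3d <= lungs_threshold
    air = binary_opening(binary_closing(air, iterations=3), iterations=5)
    labeled, _ = label(air)
    counts = np.bincount(labeled.ravel())
    counts[0] = 0                              # ignore the background label
    first = int(np.argmax(counts))
    counts[first] = 0
    second = int(np.argmax(counts))
    return labeled == first, labeled == second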
Example #6
def calculator_border_len(grid) -> int:
    n = len(grid)
    m = len(grid[0])
    border = [0 for i in range(m)]
    height = 0

    length = 0
    # search border
    for j in range(m):
        for i in reversed(range(n)):
            if grid[i][j] == 0:
                continue
            border[j] = i
            break
    # calculate length of simple line (ignore hole)
    for j in range(m - 1):
        # take the square root of the height step so convex roughness is weighted differently
        height += int(math.sqrt(abs(border[j] - border[j + 1])) * 5)
    length = height + len(border)
    print(length)
    #find area of holes
    ext = np.zeros(m, dtype=int)
    extent = copy.deepcopy(grid)

    # extent = np.append(extent, [ext], axis=0 )
    k = [
        (1, 1, 1, 1, 1),
        (1, 1, 1, 1, 1),
        (1, 1, 0, 1, 1),
        (1, 1, 1, 1, 1),
        (1, 1, 1, 1, 1),
    ]
    print("before: ", extent)
    extent = binary_closing(extent, structure=k).astype(int)
    print("after: ", extent)
    lable, empty = find_empty_hole(extent)
    S = [0] * (empty - 1)
    for index in range(1, empty):
        for j in range(m):
            for i in range(n):
                if index == lable[i][j]:
                    S[index - 1] += 1
    print(S)
    parameter = calculate_parameter_boundary(grid)
    print("parameter: ", parameter)
    coe_para_area = int(parameter / (np.amax(S)) * 100)
    print(coe_para_area)

    empty_hole = (empty - 1) * 50

    #calculate roughness
    grid_separate_hole = copy.deepcopy(grid)

    # for j in range(m):
    #     for i in reversed(range(n)):
    #         if grid_separate_hole[(i,j)] == 0:
    #             grid_separate_hole[(i,j)] = 1
    #             continue
    #         if grid_separate_hole [(i,j)] != 0:
    #             break

    # lab,ncom = find_empty_hole(grid_separate_hole)
    #
    # roughness = ncom*30
    #
    # length += roughness
    # print(length)

    length += empty_hole + coe_para_area

    return length
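
The hole-area step above can also be written with binary_fill_holes instead of the document's find_empty_hole helper; a hedged sketch (the 5x5 block here stands in for the ring-shaped kernel used above):

import numpy as np
from scipy.ndimage import binary_closing, binary_fill_holes, label

def hole_areas(grid):
    """Areas (in cells) of the enclosed holes in a binary grid, after closing."""
    closed = binary_closing(np.asarray(grid, dtype=bool),
                            structure=np.ones((5, 5), dtype=bool))
    holes = binary_fill_holes(closed) & ~closed    # cells enclosed by the shape
    labeled, n_holes = label(holes)
    return [int(np.sum(labeled == i)) for i in range(1, n_holes + 1)]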
Example #7
#Create the masks
mask = np.zeros(shape, dtype=np.int32)
minmed = np.percentile(med_image, args.minmed)
maxmed = np.percentile(med_image, args.maxmed)
miniqr = np.percentile(iqr_image, args.miniqr)
maxiqr = np.percentile(iqr_image, args.maxiqr)

log.info("writing mask bits")
#Set the BAD flag based on thresholds
mask[(med_image > maxmed) | (med_image < minmed) | (iqr_image > maxiqr) |
     (iqr_image < miniqr)] |= ccdmask.BAD

#Close incompletely blocked regions
closed_mask = binary_closing(mask,
                             iterations=args.closeiter,
                             structure=np.ones([2, 2]).astype(
                                 np.int32))  #returns a boolean array
mask[closed_mask] |= ccdmask.BAD

#Block entire columns above a certain threshold per amplifier
bad_pix = (mask > 0)
bad_pix_upper = bad_pix[0:bad_pix.shape[0] // 2, :]
bad_pix_lower = bad_pix[bad_pix.shape[0] // 2:bad_pix.shape[0], :]
bad_frac_upper = np.sum(bad_pix_upper, axis=0) / (bad_pix.shape[0] // 2)
bad_frac_lower = np.sum(bad_pix_lower, axis=0) / (bad_pix.shape[0] // 2)
bad_cols_upper = np.where(bad_frac_upper >= args.colfrac)[0]
bad_cols_lower = np.where(bad_frac_lower >= args.colfrac)[0]
mask[0:bad_pix.shape[0] // 2, bad_cols_upper] |= ccdmask.BAD
mask[bad_pix.shape[0] // 2:bad_pix.shape[0], bad_cols_lower] |= ccdmask.BAD
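
The per-amplifier column test above marks a whole half-column as BAD when its bad-pixel fraction reaches colfrac. The same idea as a small helper (names are illustrative, not the desispec API):

import numpy as np

def flag_bad_columns(mask, bad_flag, colfrac=0.4):
    """OR bad_flag into every pixel of a half-column whose bad fraction >= colfrac."""
    half = mask.shape[0] // 2
    for rows in (slice(0, half), slice(half, mask.shape[0])):
        block = mask[rows]                          # view into mask
        bad_frac = np.mean(block > 0, axis=0)
        block[:, bad_frac >= colfrac] |= bad_flag   # writes through to mask
    return mask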
Example #8
def main(args):
    # Configure argument parser
    parser = argparse.ArgumentParser(
        description='Segment buildings by comparing a DSM to a DTM')
    parser.add_argument("source_dsm",
                        help="Digital surface model (DSM) image file name")
    parser.add_argument("source_dtm",
                        help="Digital terrain model (DTM) image file name")
    parser.add_argument("--input-ndvi",
                        help="Optional Normalized Difference Vegetation "
                        "Index image file")
    parser.add_argument("--ndsm", help="Write out the normalized DSM image")
    parser.add_argument('--road-vector-shapefile-dir',
                        help='Path to road vector shapefile directory')
    parser.add_argument('--road-vector-shapefile-prefix',
                        help='Prefix for road vector shapefile')
    parser.add_argument('--road-vector', help='Path to road vector file')
    # XXX this is not ideal
    parser.add_argument('--road-rasterized',
                        help='Path to save rasterized road image')
    parser.add_argument('--road-rasterized-bridge',
                        help='Path to save rasterized bridge image')
    parser.add_argument("-d",
                        "--debug",
                        action="store_true",
                        help="Enable debug output and visualization")
    parser.add_argument("destination_mask", help="Building mask output image")
    args = parser.parse_args(args)

    # For now assume the input DSM and DTM are in the same resolution,
    # aligned, and in the same coordinates.  Later we can warp the DTM
    # to the DSM, if needed.

    # open the DSM
    dsm_file = gdal_open(args.source_dsm)
    dsm_band = dsm_file.GetRasterBand(1)
    dsm = dsm_band.ReadAsArray()
    dsm_nodata_value = dsm_band.GetNoDataValue()
    print("DSM raster shape {}".format(dsm.shape))

    # open the DTM
    dtm_file = gdal_open(args.source_dtm)
    dtm_band = dtm_file.GetRasterBand(1)
    dtm = dtm_band.ReadAsArray()
    print("DTM raster shape {}".format(dtm.shape))

    # Compute the normalized DSM by subtracting the terrain
    ndsm = dsm - dtm

    # consider any point above 2m as possible buildings
    mask = ndsm > 2
    # Use any point above 4m as a high confidence seed point
    seeds = ndsm > 4

    # if requested, write out the normalized DSM
    if args.ndsm:
        ndsm[dsm == dsm_nodata_value] = dsm_nodata_value
        save_ndsm(ndsm, dsm_file, args.ndsm)

    # if an NDVI image was specified, use it to filter
    if args.input_ndvi:
        # Load normalized difference vegetation index (NDVI) file
        ndvi_file = gdal_open(args.input_ndvi)
        ndvi_band = ndvi_file.GetRasterBand(1)
        ndvi = ndvi_band.ReadAsArray()

        # remove building candidates with high vegetation likelihood
        mask[ndvi > 0.2] = False
        # reduce seeds to areas with high confidence non-vegetation
        seeds[ndvi > 0.1] = False

    use_roads = (args.road_vector or args.road_vector_shapefile_dir
                 or args.road_vector_shapefile_prefix or args.road_rasterized
                 or args.road_rasterized_bridge)
    if use_roads:
        use_shapefile_dir_with_prefix = (args.road_vector_shapefile_dir
                                         and args.road_vector_shapefile_prefix)
        if not ((args.road_vector or use_shapefile_dir_with_prefix)
                and args.road_rasterized and args.road_rasterized_bridge):
            raise RuntimeError(
                "All road path arguments must be provided if any is provided")

        if args.road_vector and use_shapefile_dir_with_prefix:
            raise RuntimeError("Should specify EITHER --road-vector OR \
both --road-vector-shapefile-dir AND --road-vector-shapefile-prefix")

        if use_shapefile_dir_with_prefix:
            input_road_vector = os.path.join(
                args.road_vector_shapefile_dir,
                "{}.shx".format(args.road_vector_shapefile_prefix))
        else:
            input_road_vector = args.road_vector

        # The dilation is intended to create semi-realistic widths
        roads = rasterize_file_dilated_line(
            input_road_vector,
            dsm_file,
            args.road_rasterized,
            numpy.ones((3, 3)),
            dilation_iterations=20,
        )
        road_bridges = rasterize_file_dilated_line(
            input_road_vector,
            dsm_file,
            args.road_rasterized_bridge,
            numpy.ones((3, 3)),
            dilation_iterations=20,
            query=ELEVATED_ROADS_QUERY,
        )

        # Remove building candidates that overlap with a road
        mask[roads] = False
        seeds[roads] = False

    # use morphology to clean up the mask
    mask = morphology.binary_opening(mask, numpy.ones((3, 3)), iterations=1)
    mask = morphology.binary_closing(mask, numpy.ones((3, 3)), iterations=1)
    # use morphology to clean up the seeds
    seeds = morphology.binary_opening(seeds, numpy.ones((3, 3)), iterations=1)
    seeds = morphology.binary_closing(seeds, numpy.ones((3, 3)), iterations=1)

    # compute connected components on the seeds
    label_img = ndm.label(seeds)[0]
    # compute the size of each connected component
    labels, counts = numpy.unique(label_img, return_counts=True)
    # filter seed connected components to keep only large areas
    to_remove = numpy.extract(counts < 500, labels)
    print("Removing {} small connected components".format(len(to_remove)))
    seeds[numpy.isin(label_img, to_remove)] = False

    # visualize initial seeds if in debug mode
    if args.debug:
        cv2.imshow(
            'seeds',
            mask.astype(numpy.uint8) * 127 + seeds.astype(numpy.uint8) * 127)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    # label the larger mask image
    label_img = ndm.label(mask)[0]
    # extract the unique labels that match the seeds
    selected = numpy.unique(numpy.extract(seeds, label_img))
    # filter out very oblong objects
    subselected = []
    for i in selected:
        dim_large, dim_small = estimate_object_scale(label_img == i)
        if dim_large / dim_small < 6:
            subselected.append(i)

    print("Keeping {} connected components".format(len(subselected)))

    # keep only the mask components selected above
    good_mask = numpy.isin(label_img, subselected)

    # a final mask cleanup
    good_mask = morphology.binary_closing(good_mask,
                                          numpy.ones((3, 3)),
                                          iterations=3)

    # visualize final mask if in debug mode
    if args.debug:
        cv2.imshow('mask', good_mask.astype(numpy.uint8) * 255)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    # convert the mask to label map with value 2 (background),
    # 6 (building), and 17 (elevated roadway)
    cls = numpy.full(good_mask.shape, 2)
    cls[good_mask] = 6
    if use_roads:
        cls[road_bridges] = 17

    # create the mask image
    print("Create destination mask of size:({}, {}) ...".format(
        dsm_file.RasterXSize, dsm_file.RasterYSize))
    gdal_save(cls,
              dsm_file,
              args.destination_mask,
              gdal.GDT_Byte,
              options=['COMPRESS=DEFLATE'])
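
The seed-and-grow selection in the middle of this example (label the looser mask, then keep only the components that overlap a high-confidence seed) is a reusable pattern; a minimal sketch:

import numpy as np
from scipy import ndimage as ndm

def grow_from_seeds(mask, seeds):
    """Keep the connected components of mask that contain at least one seed pixel."""
    label_img = ndm.label(mask)[0]
    selected = np.unique(np.extract(seeds, label_img))
    selected = selected[selected != 0]          # drop the background label
    return np.isin(label_img, selected)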
Example #9
def thin(image,c=1):
    if c>0: image = binary_closing(image,iterations=c)
    image = array(image,'B')
    image = BA(image)
    iulib.thin(image)
    return array(N(image),'B')
Example #10
    def imClose(self, img=None, structElementSize=1, iterations=1):
        print('Image closing...')
        se = self.structElement(structElementSize)
        return binary_closing(img, se, iterations=iterations)
Example #11
def display_images(xval,yval,preds,savedir, saveVectorIm=False, save=False,  preds_logits= [], uncertainty=[]):
    
    sz = np.shape(preds)
    N_images = np.shape(preds)[0]
    N_classes = np.shape(preds)[3]
    
    if N_classes == 2:
        ColorMap = [[0,0,0],
                    [255,0,0]]
    else:
        ColorMap = [[0,0,0],
                    [0,0,255],
                    [0,255,0],
                    [255,128,0],
                    [255,0,0]]
    
    ColorMap = np.array(ColorMap)
    
    contours = np.zeros((N_images,sz[1],sz[2]))
    # Process data
    for im in range(N_images):
        pred = preds[im,:,:,:]
        
        seg = np.argmax(pred,axis=2)
        mask = ColorMap[seg]
        
        if N_classes == 2:
            unhealty_merged = seg[:,:]>=1
        else:
            unhealty_merged = seg[:,:]>=2
            
            
        unhealty_merged = morphology.binary_closing(unhealty_merged,structure=disk(7))
        
        contour = measure.find_contours(unhealty_merged,0.5)
        
        for cont in contour:
                cont = np.array(cont, np.int32)
                contours[im,cont[:,0],cont[:,1]] = 1
           
            
        if np.array(uncertainty).size:
            plot_masks_and_contours_per_im(xval, yval, im, mask, contour, contours[im], ColorMap, uncertainty[im])        
        else:
            plot_masks_and_contours_per_im(xval, yval, im, mask, contour, contours[im], ColorMap, uncertainty)        


                 
        #Save first image from the val set every epoch       
        
        if save == True:          
            if np.array(uncertainty).size:
                plt.savefig(savedir+'/results_MCdropout_{}.png'.format(im), dpi=100, bbox_inches='tight')    
                if saveVectorIm:
                    plt.savefig(savedir+'/results_MCdropout_{}.svg'.format(im), dpi=100, bbox_inches='tight')
                plt.close()      
            else:
                plt.savefig(savedir+'/results_{}.png'.format(im), dpi=100, bbox_inches='tight')    
                if saveVectorIm:
                    plt.savefig(savedir+'/results_{}.svg'.format(im), dpi=100, bbox_inches='tight')
                plt.close()
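
A stripped-down sketch of the contour step above: merge the 'unhealthy' classes, close the merged mask with a disk, and rasterize the skimage contours back into an image (plotting is left out; disk comes from skimage.morphology as in the example):

import numpy as np
from scipy.ndimage import binary_closing
from skimage.morphology import disk
from skimage import measure

def contour_image(seg, min_class=2, radius=7):
    """Binary contour image of all classes >= min_class, after closing with a disk."""
    merged = binary_closing(seg >= min_class, structure=disk(radius))
    out = np.zeros(seg.shape)
    for cont in measure.find_contours(merged, 0.5):
        cont = np.array(cont, np.int32)
        out[cont[:, 0], cont[:, 1]] = 1
    return out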
Example #12
import random

import numpy as np
from PIL import Image
from scipy.ndimage import binary_closing

# 'I' is assumed to be an RGB image loaded earlier as a numpy array
Brown = np.zeros(3)
Brown[0] = np.median(I[:, :, 0])
Brown[1] = np.median(I[:, :, 1])
Brown[2] = np.median(I[:, :, 2])

# TODO(danvk): does removing the np.sqrt have an effect on performance?
B = (np.sqrt(((I - Brown)**2).sum(2) / 3) < 20)


def showBinaryArray(b, title=None):
    im = Image.fromarray(255 * np.uint8(b))
    im.show(title=title)


#showBinaryArray(B)
# this kills small features and introduces an 11px black border on every side
B = binary_closing(B, structure=np.ones((11, 11)))
showBinaryArray(B)
#
#sys.exit(0)


def randomWhitePixel(ary):
    h, w = ary.shape
    while True:
        x, y = int(random.uniform(0, w)), int(random.uniform(0, h))
        x = min(x, w - 1)
        y = min(y, h - 1)
        if ary[y][x]:
            return x, y

Example #13
def closing_ratio(seg, n_iters):
    """Ratio of foreground size after vs. before binary closing with n_iters iterations."""
    seg_closed = binary_closing(seg, iterations=n_iters)
    m1 = float(seg.sum())
    m2 = float(seg_closed.sum())
    return m2 / m1
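
An illustrative call (not from the original source): a solid 5x5 block with a single missing pixel, which one closing pass fills in, so the ratio comes out as 25/24:

import numpy as np

seg = np.zeros((7, 7), dtype=bool)
seg[1:6, 1:6] = True      # solid 5x5 block away from the border
seg[3, 3] = False         # single-pixel hole

print(closing_ratio(seg, n_iters=1))   # 25/24 ~= 1.042: closing filled the hole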
Example #14
a.set_title('Original')
plt.imshow(img, cmap=plt.cm.gray)

# Create disk structuring element
r = 15
y, x = np.ogrid[-r:r + 1, -r:r + 1]
mask = x**2 + y**2 <= r**2
mask = mask.astype(int)

a = fig.add_subplot(2, 3, 2)
a.set_title('Disk')
plt.imshow(mask, cmap=plt.cm.gray)

a = fig.add_subplot(2, 3, 3)
a.set_title('Closed')
img_closing = morph.binary_closing(img, structure=mask)
plt.imshow(img_closing, cmap=plt.cm.gray)

a = fig.add_subplot(2, 3, 4)
a.set_title('Opened')
img_opening = morph.binary_opening(img, structure=mask)
plt.imshow(img_opening, cmap=plt.cm.gray)

a = fig.add_subplot(2, 3, 5)
a.set_title('Eroded')
img_eroded = morph.binary_erosion(img, structure=mask)
plt.imshow(img_eroded, cmap=plt.cm.gray)

a = fig.add_subplot(2, 3, 6)
a.set_title('Dilated')
img_dilated = morph.binary_dilation(img, structure=mask)
plt.imshow(img_dilated, cmap=plt.cm.gray)
Example #15
    def segment(self,
                fill_holes=False,
                edt_sampling=(3, 1, 1),
                edt_smooth=[1, 3, 3]):
        """Segment objects within the image according to attributes provided.

        Yields: a PexSegmentObj containing segmented objects as well as all
            images generated during segmentation (for post-hoc analysis) as
            well as relevant values, e.g. numbers and names of segmented
            particles. See PexSegmentObj documentation for more details.
        """
        starttime = time.time()  # begin timing
        f_directory = os.getcwd()
        # list of PexSegmentObj attributes to pass to pandas for csv data import
        pdout = []
        if self.filename != '':
            print('reading ' + self.filename)
            raw_img = io.imread(self.filename)
        elif self.src_data is not None:
            raw_img = self.src_data
        print('raw image imported.')
        if self.seg_method == 'pre-thresholded':
            gaussian_img = raw_img
        else:
            # gaussian filter
            print('performing gaussian filtering...')
            gaussian_img = gaussian_filter(raw_img,
                                           [self.g_z, self.g_xy, self.g_xy])
            print('Image smoothed.')
        print('preprocessing complete.')
        ## SEGMENTATION BY THRESHOLDING THE GAUSSIAN ##
        if self.seg_method == 'threshold':
            # binary thresholding and cleanup
            print('thresholding...')
            threshold_img = np.copy(gaussian_img)
            if self.mode == 'threshold':
                print('mode = threshold.')
                # make binary image
                threshold_img[threshold_img < self.threshold] = 0
                threshold_img[threshold_img > 0] = 1
                print('thresholding complete.')
                if fill_holes:
                    print('filling holes in objects.')
                    for i in range(0, threshold_img.shape[0]):
                        threshold_img[i, :, :] = binary_fill_holes(
                            threshold_img[i, :, :])
            elif self.mode == 'bg_scaled':
                print('mode = background-scaled.')
                self.thresholds = {}
                threshold_img = np.zeros(shape=raw_img.shape)
                for i in self.cells.obj_nums:
                    if i == 0:
                        pass
                    else:
                        print('thresholding cell ' + str(i))
                        # get median for the cell
                        cell_median = np.median(
                            gaussian_img[self.cells.final_cells == i])
                        # generate the thresholded binary mask for each cell
                        threshold_img[np.logical_and(
                            self.cells.final_cells == i,
                            gaussian_img > cell_median + self.bg_diff)] = 1
                        self.thresholds[i] = cell_median + self.bg_diff  # store val
                print('thresholding complete.')
            else:
                raise ValueError(
                    'mode parameter must be bg_scaled or threshold.')
            # distance and maxima transformation to find objects
            # next two steps assume 100x objective and 0.2 um slices
            print('generating distance map...')
            dist_map = distance_transform_edt(threshold_img,
                                              sampling=edt_sampling)
            print('distance map complete.')
            print('smoothing distance map...')
            # smooth the distance map
            smooth_dist = gaussian_filter(dist_map, edt_smooth)
            print('distance map smoothed.')
            print('identifying maxima...')
            # find local maxima in the smoothed distance map
            # these will be the watershed seeds
            max_strel = generate_binary_structure(3, 2)
            maxima = maximum_filter(smooth_dist,
                                    footprint=max_strel) == smooth_dist
            # clean up background and edges
            bgrd_3d = smooth_dist == 0
            eroded_bgrd = binary_erosion(bgrd_3d,
                                         structure=max_strel,
                                         border_value=1)
            maxima = np.logical_xor(maxima, eroded_bgrd)
            print('maxima identified.')
            # watershed segmentation
            labs = self.watershed_labels(maxima)
            print('watershedding...')
            peroxisomes = watershed(-smooth_dist, labs, mask=threshold_img)
            print('watershedding complete.')
            if self.mode == 'bg_scaled':
                # find cell boundaries and define objects that are on the
                # edges, then assign segmented objects to parent cells
                edge_struct = generate_binary_structure(3, 1)
                self.c_edges = {}
                print('finding edges of cells...')
                for i in self.cells.obj_nums:
                    self.c_edges[i] = np.logical_xor(
                        self.cells.final_cells == i,
                        binary_erosion(self.cells.final_cells == i,
                                       edge_struct))
                print('cell edges found.')
                self.primary_objs = [
                    x for x in np.unique(peroxisomes) if x != 0
                ]
                self.parent = {}
                self.obj_edges = {}
                self.on_edge = {}
                pex_mask = peroxisomes != 0
                for obj in self.primary_objs:
                    self.parent[obj] = self.cells.final_cells[labs == obj][0]
                    obj_mask = peroxisomes == obj
                    obj_edge = np.logical_xor(
                        obj_mask, binary_erosion(obj_mask, edge_struct))
                    self.obj_edges[obj] = obj_edge
                    # test if the object's edge and its cell's edge overlap
                    if np.any(
                            np.logical_and(obj_edge,
                                           self.c_edges[self.parent[obj]])):
                        self.on_edge[obj] = True
                        print('object on the edge: ' + str(obj))
                        print('parent cell: ' + str(self.parent[obj]))
                        new_obj = obj_mask
                        search_obj = obj_mask
                        tester = 0
                        iteration = 1
                        while tester == 0:
                            # TODO: FIX THIS BLOCK OF CODE! GETTING STUCK WITHIN
                            # IT! NOT SURE HOW MANY ITERATIONS ITS DOING, OR FOR
                            # HOW MANY DIFFERENT PEROXISOMES.
                            new_px = binary_dilation(search_obj, edge_struct)
                            new_px[np.logical_or(new_obj, pex_mask)] = False
                            print('iteration: ' + str(iteration))
                            # print('new pixels for iteration ' + str(iteration) + \
                            #      ': ')
                            # print(np.nonzero(new_px))
                            if np.any(gaussian_img[new_px] > self.thresholds[
                                    self.parent[obj]]):
                                to_add = np.logical_and(
                                    new_px, gaussian_img >
                                    self.thresholds[self.parent[obj]])
                                new_obj = np.logical_or(new_obj, to_add)
                                #    print('object pixels after iteration '
                                #          + str(iteration) + ': ')
                                #    print(np.nonzero(new_obj))
                                search_obj = to_add  # only search from new pixels
                            else:
                                peroxisomes[new_obj] = obj
                                tester = 1
                            iteration = iteration + 1
                    else:
                        self.on_edge[obj] = False
        elif self.seg_method == 'canny':
            ## EDGE-DETECTION BASED SEGMENTATION ##
            threshold_img = np.empty_like(gaussian_img)
            edge_img = np.empty_like(gaussian_img)
            c_strel = generate_binary_structure(2, 1)
            # perform canny edge detection on each slice s
            for s in range(0, gaussian_img.shape[0]):
                if self.mode == 'absolute':
                    c = canny(gaussian_img[s, :, :],
                              sigma=0,
                              low_threshold=self.low_threshold,
                              high_threshold=self.high_threshold)
                elif self.mode == 'scaled':
                    c = canny(gaussian_img[s, :, :],
                              sigma=0,
                              low_threshold=self.low_threshold,
                              high_threshold=self.high_threshold,
                              use_quantiles=True)
                # clean up object edges that have gaps
                c = binary_closing(c, c_strel)
                edge_img[s, :, :] = np.copy(c)
                # fill holes to generate binary mask of objects
                c = binary_fill_holes(c)
                c = binary_opening(c, c_strel)  # eliminate incomplete lines
                threshold_img[s, :, :] = c
            print('generating distance map...')
            dist_map = distance_transform_edt(threshold_img,
                                              sampling=(3, 1, 1))
            print('distance map complete.')
            print('smoothing distance map...')
            smooth_dist = gaussian_filter(dist_map, [1, 2, 2])
            print('distance map smoothed.')
            print('identifying maxima...')
            max_strel = generate_binary_structure(3, 2)
            # identify local maxima (these will be the seed points for
            # watershed segmentation)
            maxima = maximum_filter(smooth_dist,
                                    footprint=max_strel) == smooth_dist
            # clean up background and edges
            bgrd_3d = smooth_dist == 0
            eroded_bgrd = binary_erosion(bgrd_3d,
                                         structure=max_strel,
                                         border_value=1)
            maxima = np.logical_xor(maxima, eroded_bgrd)
            print('maxima identified.')
            # watershed segmentation
            labs = self.watershed_labels(maxima)
            print('watershedding...')
            peroxisomes = watershed(-smooth_dist, labs, mask=threshold_img)
            print('watershedding complete.')
            if hasattr(self, 'cells'):
                # assign segmented objects to cells if a CellSegmentObj was
                # included
                self.primary_objs = [x for x in np.unique(peroxisomes) \
                                     if x != 0]
                self.parent = {}
                for obj in self.primary_objs:
                    o_parent = self.cells.final_cells[labs == obj][0]
                    if o_parent == 0:
                        self.primary_objs.remove(obj)
                    else:
                        self.parent[obj] = o_parent
        elif self.seg_method == 'pre-thresholded':
            threshold_img = np.copy(gaussian_img)
            if fill_holes:
                print('filling holes in objects.')
                for i in range(0, threshold_img.shape[0]):
                    threshold_img[i, :, :] = binary_fill_holes(
                        threshold_img[i, :, :])
                print('holes filled.')
            dist_map = distance_transform_edt(threshold_img,
                                              sampling=edt_sampling)
            print('distance map complete.')
            print('smoothing distance map...')
            # smooth the distance map
            smooth_dist = gaussian_filter(dist_map, edt_smooth)
            print('distance map smoothed.')
            print('identifying maxima...')
            # find local maxima in the smoothed distance map
            # these will be the watershed seeds
            max_strel = generate_binary_structure(3, 2)
            maxima = maximum_filter(smooth_dist,
                                    footprint=max_strel) == smooth_dist
            # clean up background and edges
            bgrd_3d = smooth_dist == 0
            eroded_bgrd = binary_erosion(bgrd_3d,
                                         structure=max_strel,
                                         border_value=1)
            maxima = np.logical_xor(maxima, eroded_bgrd)
            print('maxima identified.')
            # watershed segmentation
            labs = self.watershed_labels(maxima)
            print('watershedding...')
            peroxisomes = watershed(-smooth_dist, labs, mask=threshold_img)
            print('watershedding complete.')
        # Sometimes the watershedding algorithm inaccurately separates objects
        # on different Z-slices. The next section merges objects with
        # significant overlap
        for s in range(1, peroxisomes.shape[0]):
            cslice = peroxisomes[s, :, :]
            lslice = peroxisomes[s - 1, :, :]
            for obj in np.unique(cslice)[np.unique(cslice) != 0]:
                lslice_vals, cts = np.unique(lslice[cslice == obj],
                                             return_counts=True)
                lslice_vals = lslice_vals.tolist()
                cts = cts.tolist()
                ordered_by_ct = sorted(zip(lslice_vals, cts),
                                       key=itemgetter(1))
                if ordered_by_ct[-1][0] == 0 or ordered_by_ct[-1][0] == obj:
                    continue
                else:
                    # if more than 50% of the pixels in the slice below obj come
                    # from another object, reassign obj to that object's label
                    if float(ordered_by_ct[-1][1]) / cslice[cslice == obj].size > 0.5:
                        peroxisomes[s, :, :][cslice == obj] = ordered_by_ct[-1][0]
        obj_nums, volumes = np.unique(peroxisomes, return_counts=True)
        volumes = dict(zip(obj_nums.astype('uint16'), volumes))
        # remove the background
        del volumes[0]
        obj_nums = obj_nums.astype('uint16').tolist()
        obj_nums.remove(0)
        # generate dict of relevant parameters to pass to PexSegmentObj
        mode_params = {}
        if hasattr(self, 'parent'):
            pdout.append('parent')
            mode_params['parent'] = self.parent
        if self.seg_method == 'canny':
            mode_params['high_threshold'] = self.high_threshold
            mode_params['low_threshold'] = self.low_threshold
            mode_params['edges'] = edge_img
            pdout.append('volumes')
        if self.seg_method == 'threshold':
            if self.mode == 'threshold':
                mode_params['threshold'] = self.threshold
                pdout.append('volumes')
            elif self.mode == 'bg_scaled':
                mode_params['thresholds'] = self.thresholds
                mode_params['bg_diff'] = self.bg_diff
                mode_params['cells'] = self.cells
                mode_params['cell_edges'] = self.c_edges
                mode_params['cell_nums'] = self.cells.obj_nums
                mode_params['obj_edges'] = self.obj_edges
                mode_params['on_edge'] = self.on_edge
                for x in ['thresholds', 'on_edge', 'parent', 'volumes']:
                    pdout.append(x)
        return PexSegmentObj(f_directory,
                             self.filename,
                             raw_img,
                             gaussian_img,
                             self.seg_method,
                             self.mode,
                             threshold_img,
                             dist_map,
                             smooth_dist,
                             maxima,
                             labs,
                             peroxisomes,
                             obj_nums,
                             volumes,
                             to_pdout=pdout,
                             mode_params=mode_params)
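
The same seed-finding pipeline appears three times in this method: distance transform, smoothing, local-maxima detection, background cleanup, then watershed. A condensed sketch of that shared pattern, assuming scipy.ndimage.label for the seeds and skimage.segmentation.watershed (the original wraps these in self.watershed_labels and watershed):

import numpy as np
from scipy.ndimage import (distance_transform_edt, gaussian_filter, maximum_filter,
                           generate_binary_structure, binary_erosion, label)
from skimage.segmentation import watershed

def watershed_objects(threshold_img, edt_sampling=(3, 1, 1), edt_smooth=(1, 3, 3)):
    """Split a binary 3-D mask into objects seeded at distance-map maxima."""
    dist_map = distance_transform_edt(threshold_img, sampling=edt_sampling)
    smooth_dist = gaussian_filter(dist_map, edt_smooth)
    strel = generate_binary_structure(3, 2)
    maxima = maximum_filter(smooth_dist, footprint=strel) == smooth_dist
    background = binary_erosion(smooth_dist == 0, structure=strel, border_value=1)
    maxima = np.logical_xor(maxima, background)      # drop flat background plateaus
    seeds, _ = label(maxima)
    return watershed(-smooth_dist, seeds, mask=threshold_img)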
Example #16
    def get_background_map(self,
                           percentile=75.0,
                           safety1=10,
                           safety2=10,
                           window_MM=6):
        """
        We first use a local roughness check to determine edges. We do this on the mean image,
        but could easily use the full hypercube if need be.
        """

        # get a mean image
        mean_img = self.U[:, 0].reshape(self.data_shape[0:2])
        mean_img = mean_img - np.mean(mean_img)

        # let's pad the image to avoid FFT periodicity issues
        X, Y = mean_img.shape
        # we need to fill this with some decent values
        fill_sigma = np.std(mean_img)
        fill_mean = np.median(mean_img)
        new_img = np.random.normal(fill_mean, fill_sigma,
                                   (X + 2 * safety1, Y + 2 * safety1))
        # blur it a bit
        new_img = gaussian_filter(new_img, sigma=10)

        # paste the real image in here
        new_img[safety1:safety1 + X, safety1:safety1 + Y] = mean_img
        mean_img = new_img + 0

        # define a windowing function of MM by MM pixels
        MM = window_MM
        kernel = np.zeros(mean_img.shape)
        kernel[0:MM, 0:MM] = 1.0

        # compute a local mean
        FT_img = np.fft.fft2(mean_img)
        FT_kernel = np.fft.fft2(kernel)
        local_mean = np.fft.ifft2(
            FT_img * FT_kernel.conjugate()).real / (MM * MM)

        # compute a local variance
        mean_img_sq = mean_img * mean_img
        FT_img_sq = np.fft.fft2(mean_img_sq)
        local_var = np.fft.ifft2(
            FT_img_sq * FT_kernel.conjugate()).real / (MM * MM)
        local_var = local_var - local_mean * local_mean
        local_sigma = np.sqrt(local_var)

        ##############################################################
        ##           Now we build a rough mask
        ##############################################################
        threshold = np.percentile(local_sigma.flatten(), percentile)
        sel = local_sigma > threshold
        mask = np.zeros(mean_img.shape)
        mask[sel] = 1.0

        # The morphological operators need some room on the sides
        safety2 = 0
        V, W = mask.shape
        nV = V + safety2 * 2
        nW = W + safety2 * 2
        new_mask = np.zeros((nV, nW))
        # place the mask
        new_mask[safety2:safety2 + V, safety2:safety2 + W] = mask

        BM = 5
        structure = np.ones((BM, BM))
        closed = binary_closing(new_mask, structure)

        done = False
        BM = 5
        while not done:
            BM = BM + 1
            structure = np.ones((BM, BM))
            new_closed = binary_closing(closed, structure).astype(int)
            delta = np.sum(np.abs(new_closed.astype(int) - closed.astype(int)))
            if delta == 0:
                done = True
            if BM > 20:
                done = True
            closed = new_closed + 0

        new_mask = closed + 0

        # For some reason, the whole mask is shifted along one axis. This likely has to do with the
        # origin definition of the structuring elements, but let's solve it using an FFT-based shift
        # calculation. The origin option in the scipy morphology toolbox kills my kernel.

        mask = new_mask[safety2:safety2 + V, safety2:safety2 + W]
        ft_mask = np.fft.fft2(mask)
        ft_mean = np.fft.fft2(np.abs(mean_img))
        TF = np.fft.ifft2(ft_mean * ft_mask.conjugate()).real
        dX, dY = np.meshgrid(np.arange(mask.shape[1]),
                             np.arange(mask.shape[0]))
        here = np.argmax(TF)
        dX = dX.flatten()[here]
        dY = dY.flatten()[here]

        # no wild shifts please
        if dX > 10:
            dX = 0
        if dY > 10:
            dY = 0

        mask = np.roll(mask, dX, axis=0)
        mask = np.roll(mask, dY, axis=1)
        # lift out the section of the mask that we are interested in
        mask = mask[safety1:safety1 + X, safety1:safety1 + Y]

        ######################################################
        # Here we build a more fine tuned mask / z_score map
        ######################################################

        # now we use this mask to define the background
        bg_sel = mask.flatten() < 0.5
        background = self.U[bg_sel, :]
        mean_bg = np.mean(background, axis=0)
        var_covar = np.cov((background - mean_bg).transpose())
        inv_vcv = np.linalg.pinv(var_covar)
        t = self.U - mean_bg
        z_scores = []
        for tt in t:
            z = tt.reshape(1, -1)
            z_scores.append(np.sqrt(z.dot(inv_vcv).dot(z.transpose())))
        z_scores = np.array(z_scores).reshape(self.data_shape[0:2])
        return z_scores
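
The FFT block above computes a windowed local mean and variance (Var = E[x^2] - E[x]^2) to build the roughness map. The same map can be sketched more directly with uniform_filter; this replaces the FFT convolution with a box filter and is an alternative rather than the author's route:

import numpy as np
from scipy.ndimage import uniform_filter

def local_sigma(img, window=6):
    """Local standard deviation over a window x window neighbourhood."""
    local_mean = uniform_filter(img, size=window)
    local_mean_sq = uniform_filter(img * img, size=window)
    local_var = np.clip(local_mean_sq - local_mean ** 2, 0.0, None)
    return np.sqrt(local_var)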
Example #17
from pylab import *
from scipy.ndimage import measurements, morphology
from scipy import misc
import numpy

#im = array(Image.open('board.jpeg').convert('L'))
im = misc.lena()  # NOTE: scipy.misc.lena() has been removed from recent SciPy releases
im = 1 * (im < 128)
gray()

subplot(2, 3, 1)
title('original')
imshow(im)

subplot(2, 3, 2)
im_close = morphology.binary_closing(im, ones((5, 5)), iterations=2)
title('closing')
imshow(im_close)

subplot(2, 3, 5)
labels_close, obj_count_close = measurements.label(im_close)
# FIXME: This seems to miss the left half of the face?
imshow(labels_close)
print(obj_count_close, 'objects in the closed image')

subplot(2, 3, 3)
im_open = morphology.binary_opening(im, ones((5, 5)), iterations=2)
title('opening')
imshow(im_open)

subplot(2, 3, 6)
Example #18
def seg_pose(csvpath, dump):
    THRESHOLD_EYE = 0.8
    THRESHOLD_EAR = 0.8
    THRESHOLD_SHOULDER = 0.75
    THRESHOLD_HIP = 0.2
    THRESHOLD_KNEE = 0.1
    MIN_SEGMENT_LENGTH = 30
    EXPAND_SEGMENT_LENGTH = 9
    MED_WND = 9
    MAX_SEGMENTS_PER_HOUR = 8

    # read csv
    df = dict()
    with open(csvpath) as f:
        columns = f.readline().split(',')
        for col in columns:
            df[col] = []
        for line in f:
            for idx, val in enumerate(line.split(',')):
                df[columns[idx]].append(float(val))
        for col in columns:
            df[col] = np.array(df[col])

    t = df['pts']

    if len(t) <= 10:
        print(f'Too few body pose samples, ignored. {csvpath}',
              file=sys.stderr)
        exit(0)

    intra_frame_interval = np.diff(t).mean()

    print((f'        frames: {len(t)}\n'
           f'      duration: {sec_to_time_repr(t.max())}\n'
           f'frame interval: {round(intra_frame_interval, 3)}/s'),
          file=sys.stderr)

    # eye width
    eye2, eye = smooth(np.abs(df['leftEyeX'] - df['rightEyeX']), MED_WND)
    eye_c2, eye_c = smooth((df['leftEye'] + df['rightEye']) / 2, MED_WND)
    mode_eye = find_mode(eye, t)

    # ear width
    ear2, ear = smooth(np.abs(df['leftEarX'] - df['rightEarX']), MED_WND)
    ear_c2, ear_c = smooth((df['leftEar'] + df['rightEar']) / 2, MED_WND)
    mode_ear = find_mode(ear, t)

    # shoulder width
    sld2, sld = smooth(np.abs(df['leftShoulderX'] - df['rightShoulderX']),
                       MED_WND)
    sld_c2, sld_c = smooth((df['leftShoulder'] + df['rightShoulder']) / 2,
                           MED_WND)
    mode_sld = find_mode(sld, t)

    # knee detection confidence
    knee_c2, knee_c = smooth((df['leftKnee'] + df['rightKnee']) / 2, MED_WND)
    mode_knee_c = find_mode(knee_c, t)

    # hip detection confidence
    hip_c2, hip_c = smooth((df['leftHip'] + df['rightHip']) / 2, MED_WND)
    mode_hip_c = find_mode(hip_c, t)

    # score
    decision_eye = mode_eye * THRESHOLD_EYE
    decision_ear = mode_ear * THRESHOLD_EAR
    decision_sld = mode_sld * THRESHOLD_SHOULDER
    decision_hip = (1 - mode_hip_c) * THRESHOLD_HIP + mode_hip_c
    decision_knee = (1 - mode_knee_c) * THRESHOLD_KNEE + mode_knee_c

    weight = np.array([[0.25], [0.35], [0.4], [0.5], [1]])
    feat_score = [
        eye2 < decision_eye,
        ear2 < decision_ear,
        sld2 < decision_sld,
        hip_c2 > decision_hip,
        knee_c2 > decision_knee,
    ]
    score = np.sum(np.multiply(weight, feat_score), axis=0)

    # make decision
    segment_frames = int(round(MIN_SEGMENT_LENGTH / intra_frame_interval))
    expand_frames = int(round(EXPAND_SEGMENT_LENGTH / intra_frame_interval))
    expand_struct = [True] * expand_frames
    filter_struct = [True] * segment_frames

    decision = score > 0.8
    decision = morphology.binary_closing(decision, filter_struct)
    decision = morphology.binary_opening(decision, filter_struct)
    decision = morphology.binary_dilation(decision, expand_struct)

    # scan through decision score, build segment list
    segments = []
    ignored_segments = []
    cur_start = None

    for i in range(decision.shape[0]):
        # mark start position
        if not cur_start and decision[i]:
            cur_start = t[i], i

        # mark end position, do extra checks
        if cur_start and not decision[i]:
            # check duration is reasonable
            # check segment is constructed with sufficient samples
            # detection in samples are dynamic (e.g. not from a static photo)

            start_t, start_i = cur_start
            end_t, end_i = t[i], i
            duration = end_t - start_t
            frames = end_i - start_i

            cur_start = None  # unmark start position for next iteration

            n_actual_samples = end_i - start_i
            n_expected_samples = floor(
                (end_t - start_t) / intra_frame_interval) + 1
            sample_ratio = n_actual_samples / n_expected_samples

            static_duration = duration / frames * np.mean([
                compute_number_of_static_frames(eye[start_i:end_i + 1]),
                compute_number_of_static_frames(ear[start_i:end_i + 1]),
                compute_number_of_static_frames(sld[start_i:end_i + 1]),
            ])

            if duration > 600:
                # most likely misdetection
                # a typical dance should not last more than 10 minutes
                print(
                    f'Ignore segment {round(start_t, 3)} to {round(end_t, 3)}: too long duration, time = {round(duration)} secs',
                    file=sys.stderr)
                ignored_segments.append((start_t, end_t, 'too long', 'T'))
                continue

            if sample_ratio < 0.3:
                # most likely static image
                print(
                    f'Ignore segment {round(start_t, 3)} to {round(end_t, 3)}: too few valid samples, ratio = {round(sample_ratio, 2)}',
                    file=sys.stderr)
                ignored_segments.append((start_t, end_t, 'valid samples', 'S'))
                continue

            if static_duration > 0.8 * duration:
                print(
                    f'Ignore segment {round(start_t, 3)} to {round(end_t, 3)}, too many static frames, static_duration = {round(static_duration, 2)}, ratio = {round(static_duration / duration, 2)}',
                    file=sys.stderr)
                ignored_segments.append((start_t, end_t, 'volatility', 'V'))
                continue

            segments.append((start_t, end_t))

    if len(segments) > (np.max(t) - np.min(t)) / 3600 * MAX_SEGMENTS_PER_HOUR:
        print(f'Too many segments, possibly wrong type. {csvpath}',
              file=sys.stderr)
        print(f'Ignoring all segments for automatic extraction.',
              file=sys.stderr)
        for start_t, end_t in segments:
            print(
                f'    {{"start_t": {round(start_t, 3)}, "end_t": {round(end_t, 3)}}}',
                file=sys.stderr)
        segments = []

    for start_t, end_t in segments:
        print(
            f'{{"start_t": {round(start_t, 3)}, "end_t": {round(end_t, 3)}}}')

    if dump:
        try:
            import matplotlib
            matplotlib.use('Agg')
            matplotlib.rcParams.update({'font.size': 18})

            import matplotlib.pyplot as plt

            fig, ax = plt.subplots(6, 1, figsize=(36, 24), sharex=True)
            fig.suptitle(os.path.basename(csvpath))

            f_eye = plot_yc(ax[0], eye, eye2, eye_c, eye_c2, t, mode_eye,
                            decision_eye, 'eye')
            f_ear = plot_yc(ax[1], ear, ear2, ear_c, ear_c2, t, mode_ear,
                            decision_ear, 'ear')
            f_sld = plot_yc(ax[2], sld, sld2, sld_c, sld_c2, t, mode_sld,
                            decision_sld, 'shoulder')
            f_hip = plot_c(ax[3], hip_c, hip_c2, t, mode_hip_c, decision_hip,
                           'hip')
            f_knee = plot_c(ax[4], knee_c, knee_c2, t, mode_knee_c,
                            decision_knee, 'knee')
            f_decision = plot_c(ax[5], score,
                                decision * score.max() * 1.1, t, None, None,
                                'decision')

            for start_t, end_t, reason, code in ignored_segments:
                m_time = (end_t + start_t) / 2
                ax[5].text(m_time,
                           1.55,
                           code,
                           horizontalalignment='center',
                           verticalalignment='bottom',
                           color='red',
                           fontsize=26)
                ax[5].fill_between([start_t, end_t],
                                   0,
                                   1.5,
                                   color=[1, 0.6, 0.6])

            labels = [sec_to_time_repr(t) for t in ax[5].get_xticks()]
            ax[5].set_xticklabels(labels)
            ax[5].tick_params(axis='x', length=8, width=2, colors='black')
            fig.tight_layout()

            if not dump.endswith('.png'):
                png_path = dump + '.png'
            else:
                png_path = dump

            fig.savefig(png_path,
                        dpi=144,
                        optimize=True,
                        facecolor='w',
                        format='png')
        except:
            print(f'Fail to dump analysis diagram, error:', file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)
            try:
                trace_path = dump + '.png'
                with open(trace_path, 'w') as f:
                    print(f'Fail to dump analysis diagram, error:', file=f)
                    print(traceback.format_exc(), file=f)
            except:
                print(f'Fail to write dump trace, error:', file=sys.stderr)
                print(traceback.format_exc(), file=sys.stderr)
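The decision clean-up in seg_pose (closing to bridge brief gaps, opening to drop brief bursts, dilation to pad segment edges) behaves like this on a toy 1-D signal; the structure lengths here are placeholders for filter_struct / expand_struct:

import numpy as np
from scipy.ndimage import binary_closing, binary_opening, binary_dilation

decision = np.array([0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0], dtype=bool)

bridged = binary_closing(decision, structure=[True] * 3)   # fill gaps of up to two samples
cleaned = binary_opening(bridged, structure=[True] * 3)    # drop runs shorter than three samples
padded = binary_dilation(cleaned, structure=[True] * 3)    # expand each run by one sample per side

print(decision.astype(int))
print(padded.astype(int))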
Example #19
0
def shift_process(img_path, roi_path, out_label):
    image_nii = nib.load(img_path)
    label_nii = nib.load(roi_path)
    image = np.copy(image_nii.get_data())
    label = np.copy(label_nii.get_data())
    reso = image_nii.header['pixdim'][1:4]
    assert image.shape == label.shape, 'Image shape != Label shape'
    print('Image size:', image.shape, 'image reso:', reso)

    # Get threshold for boundary
    thresh_min = threshold_minimum(image[label > 0])
    thresh_otsu = threshold_otsu(image[label > 0])

    region_1 = np.logical_and(image > thresh_otsu, label > 0).astype(np.int8)
    region_2 = np.logical_and(image <= thresh_otsu, label > 0).astype(np.int8)

    new_label = np.zeros_like(label)
    new_label[region_1 > 0] = 2
    new_label[region_2 > 0] = 3

    # slice-wise
    boundaries, slice_indices = get_boundary(new_label, 2)
    print(f'Found {len(slice_indices)} valid slices:', slice_indices)

    margin_label = np.zeros_like(new_label)
    for bound, slice_idx in zip(boundaries, slice_indices):
        new_slice = np.zeros(region_1.shape[:2]).astype(np.int)
        for pt in bound:
            new_slice[pt[0], pt[1]] = 1

        order = 5
        delta_dist = 5  # 5mm
        coords = np.array(bound).transpose()
        z = np.polyfit(coords[0], coords[1], order)
        p = np.poly1d(z)

        # judge direction
        pos_direction = False
        center_idx = len(coords[0]) // 2
        center_pt = (coords[0][center_idx], coords[1][center_idx])
        center_delta = get_delta_xy(center_pt, reso, p, 10)
        center_pt_pos = (center_pt[0] + center_delta[0],
                         center_pt[1] + center_delta[1])
        if intersect(center_pt, center_pt_pos, region_2[..., slice_idx]):
            pos_direction = True
        print('Positive direction:', pos_direction)

        new_line = []
        for x, y in zip(coords[0], coords[1]):
            delta_x_px, delta_y_px = get_delta_xy(
                (x, y), reso, p, delta_dist, k=-1 / p.deriv(1)(center_pt[0]))
            new_pt_pos = (x + delta_x_px, y + delta_y_px)
            new_pt_neg = (x - delta_x_px, y - delta_y_px)
            if pos_direction:
                inter_pts = get_line_segment((x, y), new_pt_pos)
                new_line = new_line + inter_pts
            else:
                inter_pts = get_line_segment((x, y), new_pt_neg)
                new_line = new_line + inter_pts

        for pt in new_line:
            new_slice[pt[0], pt[1]] = out_label + 8
        new_slice = binary_closing(new_slice,
                                   generate_binary_structure(2, 1),
                                   iterations=1).astype(np.int)
        new_slice[new_slice > 0] = out_label + 8
        margin_label[..., slice_idx] = new_slice

    margin_label[new_label == 2] = out_label
    #return margin_label
    nib.save(nib.Nifti1Image(margin_label, image_nii.affine, image_nii.header),
             os.path.join(out_dir, os.path.basename(roi_path)))
Example #20
0
def preprocess(camera, fn, outpath):
    if not os.path.isdir(outpath + fn[-18:-10]):
        os.makedirs(outpath + fn[-18:-10])
    t = datetime.strptime(fn[-18:-4], '%Y%m%d%H%M%S')
    t_prev = t - timedelta(seconds=30)
    t_prev = t_prev.strftime('%Y%m%d%H%M%S')
    fn_prev = fn.replace(fn[-18:-4], t_prev)
    if len(glob.glob(fn_prev)) <= 0:
        return None

    flist = [fn_prev, fn]
    q = deque()
    for f in flist:
        img = image(camera, f)
        ###img object contains four data fields: rgb, red, rbr, and cm
        img.undistort(camera, rgb=True)
        ###undistortion
        if img.rgb is None:
            return None
        q.append(img)

        if len(q) <= 1:
            continue
        ####len(q) is always 2 beyond this point

        r1 = q[-2].red.astype(np.float32)
        r1[r1 <= 0] = np.nan
        r2 = q[-1].red.astype(np.float32)
        r2[r2 <= 0] = np.nan
        err0 = r2 - r1

        dif = np.abs(err0)
        dif = st.rolling_mean2(dif, 20)
        semi_static = (abs(dif) < 10) & (r1 - 127 > 100)
        semi_static = morphology.binary_closing(semi_static, np.ones((10, 10)))
        semi_static = remove_small_objects(semi_static,
                                           min_size=200,
                                           in_place=True)
        q[-1].rgb[semi_static] = 0
        r2[semi_static] = np.nan

        cloud_mask(camera, q[-1], q[-2])
        ###one-layer cloud masking
        if (q[-1].cm is None):
            q.popleft()
            continue
        if (np.sum(
            (q[-1].cm > 0)) < 2e-2 * img.nx * img.ny):  ######cloud free case
            q[-1].layers = 0
        else:
            dilated_cm = morphology.binary_dilation(q[-1].cm, np.ones(
                (15, 15)))
            dilated_cm &= (r2 > 0)
            vy, vx, max_corr = cloud_motion(r1,
                                            r2,
                                            mask1=r1 > 0,
                                            mask2=dilated_cm,
                                            ratio=0.7,
                                            threads=4)
            if np.isnan(vy):
                q[-1].layers = 0
            else:
                q[-1].v += [[vy, vx]]
                q[-1].layers = 1

    #         err = r2-st.shift2(r1,-vx,-vy); err[(r2+st.shift2(r1,-vx,-vy)==0)]=np.nan;
    #
    #         mask2=st.rolling_mean2(np.abs(err)-np.abs(err0),40)<-2
    #         mask2=remove_small_objects(mask2,min_size=300, in_place=True)
    #         mask2=morphology.binary_dilation(mask2,np.ones((15,15)))
    #         mask2 = (~mask2) & (r2>0) & (np.abs(r2-127)<30) & (err>-100) #& (q[-1].cm>0)
    #         if np.sum(mask2 & (q[-1].cm>0))>200e-2*img.nx*img.ny:
    #             vy,vx,max_corr = cloud_motion(r1,r2,mask1=r1>0,mask2=mask2, ratio=0.7, threads=4);
    #             if np.isnan(vy):
    #                 q.popleft();
    #                 continue
    #             vdist = np.sqrt((vy-q[-1].v[-1][0])**2+(vx-q[-1].v[-1][1])**2)
    #             if vdist>=5 and np.abs(vy)+np.abs(vx)>2.5 and vdist>0.3*np.sqrt(q[-1].v[-1][0]**2+q[-1].v[-1][1]**2):
    #                 score1=np.nanmean(np.abs(err[mask2]));
    #                 err2=r2-st.shift2(r1,-vx,-vy); err2[(r2==0) | (st.shift2(r1,-vx,-vy)==0)]=np.nan;
    #                 score2=np.nanmean(np.abs(err2[mask2]));
    #                 if score2<score1:
    #                     q[-1].v += [[vy,vx]]; q[-1].layers=2;
    #                     dif=st.rolling_mean2(np.abs(err)-np.abs(err2),40)>0
    #                     dif=remove_small_objects(dif,min_size=300, in_place=True)
    #                     q[-1].cm[dif & (q[-1].cm>0)]=q[-1].layers;

        q[-1].dump_img(outpath + f[-18:-10] + '/' + f[-23:-4] + '.pkl')
        q.popleft()

    return q[-1]
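The semi-static masking above (small frame-to-frame change in a bright region, cleaned up with closing and small-object removal) can be sketched without the camera-specific helpers; scipy's uniform_filter stands in for st.rolling_mean2, the thresholds mirror the snippet, and the data are synthetic:

import numpy as np
from scipy.ndimage import binary_closing, uniform_filter
from skimage.morphology import remove_small_objects

rng = np.random.default_rng(0)
r1 = rng.normal(240, 5, size=(200, 200)).astype(np.float32)   # previous red channel
r2 = r1 + rng.normal(0, 2, size=r1.shape).astype(np.float32)  # current frame, nearly static
r2[50:90, 50:90] += 60                                        # a region that actually changed

dif = uniform_filter(np.abs(r2 - r1), size=20)                # local mean of |difference|
semi_static = (dif < 10) & (r1 - 127 > 100)                   # small change and bright
semi_static = binary_closing(semi_static, np.ones((10, 10)))
semi_static = remove_small_objects(semi_static, min_size=200)
print(int(semi_static.sum()), 'pixels flagged as semi-static')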
Example #21
0
def triangulation(pts,
                  downsampling=(1, 1, 1),
                  n_closings=0,
                  single_cc=False,
                  decimate_mesh=0,
                  gradient_direction='descent',
                  force_single_cc=False):
    """
    Calculates triangulation of point cloud or dense volume using marching cubes
    by building dense matrix (in case of a point cloud) and applying marching
    cubes.

    Parameters
    ----------
    pts : np.array
        [N, 3] or [N, M, O] (dtype: uint8, bool)
    downsampling : Tuple[int]
        Magnitude of downsampling, e.g. 1, 2, etc., applied to pts along
        each axis
    n_closings : int
        Number of closings applied before mesh generation
    single_cc : bool
        Returns mesh of biggest connected component only
    decimate_mesh : float
        Percentage of mesh size reduction, i.e. 0.1 will leave 90% of the
        vertices
    gradient_direction : str
        defines orientation of triangle indices. '?' is needed for KNOSSOS
         compatibility. TODO: check compatible index orientation, switched to `descent`, 23April2019
    force_single_cc : bool
        If True, performs dilations until only one foreground CC is present
        and then erodes with the same number of iterations to maintain size.

    Returns
    -------
    array, array, array
        indices [M, 3], vertices [N, 3], normals [N, 3]

    """
    if boundaryDistanceTransform is None:
        raise ImportError(
            '"boundaryDistanceTransform" could not be imported from VIGRA. '
            'Please install vigra, see SyConn documentation.')
    assert type(
        downsampling) == tuple, "Downsampling has to be of type 'tuple'"
    assert (pts.ndim == 2 and pts.shape[1] == 3) or pts.ndim == 3, \
        "Point cloud used for mesh generation has wrong shape."
    if pts.ndim == 2:
        if np.max(pts) <= 1:
            msg = "Currently this function only supports point " \
                  "clouds with coordinates >> 1."
            log_proc.error(msg)
            raise ValueError(msg)
        offset = np.min(pts, axis=0)
        pts -= offset
        pts = (pts / downsampling).astype(np.uint32)
        # add zero boundary around object
        margin = n_closings + 5
        pts += margin
        bb = np.max(pts, axis=0) + margin
        volume = np.zeros(bb, dtype=np.float32)
        volume[pts[:, 0], pts[:, 1], pts[:, 2]] = 1
    else:
        volume = pts
        if np.any(np.array(downsampling) != 1):
            # zoom returns a new array; the reciprocal factor shrinks the volume
            volume = ndimage.zoom(volume, 1. / np.array(downsampling), order=0)
        offset = np.array([0, 0, 0])
    if n_closings > 0:
        volume = binary_closing(volume,
                                iterations=n_closings).astype(np.float32)
        if force_single_cc:
            n_dilations = 0
            while True:
                labeled, nb_cc = ndimage.label(volume)
                # log_proc.debug('Forcing single CC, additional dilations {}, num'
                #                'ber connected components: {}'
                #                ''.format(n_dilations, nb_cc))
                if nb_cc == 1:  # does not count background
                    break
                # pad volume to maintain margin at boundary and correct offset
                volume = np.pad(volume, [(1, 1), (1, 1), (1, 1)],
                                mode='constant',
                                constant_values=0)
                offset -= 1
                volume = binary_dilation(volume,
                                         iterations=1).astype(np.float32)
                n_dilations += 1
    else:
        volume = volume.astype(np.float32)
    if single_cc:
        labeled, nb_cc = ndimage.label(volume)
        cnt = Counter(labeled[labeled != 0])
        l, occ = cnt.most_common(1)[0]
        volume = np.array(labeled == l, dtype=np.float32)
    # InterpixelBoundary, OuterBoundary, InnerBoundary
    dt = boundaryDistanceTransform(volume, boundary="InterpixelBoundary")
    dt[volume == 1] *= -1
    volume = gaussianSmoothing(dt, 1)
    if np.sum(volume < 0) == 0 or np.sum(volume > 0) == 0:  # less smoothing
        volume = gaussianSmoothing(dt, 0.5)
    try:
        verts, ind, norm, _ = measure.marching_cubes_lewiner(
            volume, 0, gradient_direction=gradient_direction)
    except Exception as e:
        raise ValueError(e)
    if pts.ndim == 2:  # undo the margin offset added above
        verts -= margin
    verts = np.array(verts) * downsampling + offset
    if decimate_mesh > 0:
        if not __vtk_avail__:
            msg = "vtki not installed. Please install vtki.'" \
                  "pip install vtki'."
            log_proc.error(msg)
            raise ImportError(msg)
        # log_proc.warning("'triangulation': Currently mesh-sparsification"
        #                  " may not preserve volume.")
        # add number of vertices in front of every face (required by vtki)
        ind = np.concatenate(
            [np.ones((len(ind), 1)).astype(np.int64) * 3, ind], axis=1)
        mesh = vtki.PolyData(verts, ind.flatten())
        mesh.decimate(decimate_mesh, volume_preservation=True)
        # remove face sizes again
        ind = mesh.faces.reshape((-1, 4))[:, 1:]
        verts = mesh.points
        mo = MeshObject("", ind, verts)
        # compute normals
        norm = mo.normals.reshape((-1, 3))
    return np.array(ind, dtype=np.int), verts, norm
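The core voxelise-close-march pipeline of triangulation(), stripped of the VIGRA smoothing and vtki decimation, reduces to roughly the following sketch; skimage's marching_cubes stands in for marching_cubes_lewiner, which newer scikit-image versions no longer ship:

import numpy as np
from scipy.ndimage import binary_closing
from skimage import measure

def quick_mesh(pts, n_closings=2):
    """Voxelise an integer [N, 3] point cloud, close small gaps, run marching cubes."""
    pts = np.asarray(pts, dtype=np.int64)
    offset = pts.min(axis=0)
    pts = pts - offset
    margin = n_closings + 5
    volume = np.zeros(pts.max(axis=0) + 2 * margin + 1, dtype=bool)
    volume[tuple((pts + margin).T)] = True
    if n_closings > 0:
        volume = binary_closing(volume, iterations=n_closings)
    verts, faces, normals, _ = measure.marching_cubes(volume.astype(np.float32), level=0.5)
    return verts - margin + offset, faces, normals

# usage: a small random blob of voxel coordinates
rng = np.random.default_rng(1)
cloud = rng.integers(0, 20, size=(500, 3)) + 100
v, f, n = quick_mesh(cloud)
print(v.shape, f.shape)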
Example #22
0
def Volume_Label():
    """
    Creates binary labels from .stl files that are sliced at the height locations
    given by the positions of the slices in the corresponding volume .nii file.
    The binary labels of the selected anatomy are saved to `filenameLabel`.

    Returns:
        None
    """
    setDirVariables()

    ## Options
    # b_display = 0
    # compare_Matlab_Python = 0

    # Output size

    outSize = [128, 128, 128]
    templateSize = str(outSize[0]) + '_' + str(outSize[1]) + '_' + str(
        outSize[2])

    # Sets
    InStlSet = 'stl'
    InDCMmSet = 'reshape_knee_'
    InDCMmSetdicom = 'knee'

    fileTemplate = InDCMmSet + templateSize
    anatomy = 'tibia'  # 'femur', 'patella', 'fibula'
    position = 'prox'  # 'dist', '', ''
    InSurfSet = 'ct_' + anatomy + '_'
    outSet = position + anatomy

    os.chdir(mainInputDataDirectoryLoc)

    fp = open('case.txt', 'r+')
    casePatient = fp.read()
    casePatient = int(casePatient)
    fp.close()
    print('Patient no. {}'.format(casePatient))

    # Read excel file to get patients' codes
    xlsName = os.path.join(mainInputDataDirectoryLoc, 'Case_statistics.xlsx')
    # name = pandas.ExcelFile(xlsName)
    name = xlrd.open_workbook(xlsName)
    sheet = name.sheet_by_index(0)
    rows = sheet.nrows
    study = [sheet.cell_value(i, 0) for i in range(1, rows)]
    patientCode = study[casePatient - 1]

    ## Read volume nii
    mainPatientDirectory = 'Patient{:04d}'.format(casePatient)
    mainInputPatientDirectoryLoc = mainInputDataDirectoryLoc + '/preprocessedData/' + mainPatientDirectory + '/'
    mainInputPatientDirectoryNAS = mainInputDataDirectoryNAS + '/OriginalData/' + patientCode
    mainInputDicomDirectory = mainInputPatientDirectoryNAS + '/dicom/'
    if os.path.isdir(mainInputDicomDirectory + '/ct/'):
        mainInputDicomDirectory = mainInputDicomDirectory + '/ct/' + InDCMmSetdicom + '/'
    else:
        mainInputDicomDirectory = mainInputDicomDirectory + InDCMmSetdicom + '/'

    os.chdir(mainInputPatientDirectoryLoc)

    niiFilename = 'volumeCT_' + fileTemplate + '_{:04d}.nii'.format(
        casePatient)
    VolumeCT = loadNiiVolume(niiFilename, mainInputDicomDirectory)
    ## Normalize
    VolumeCT = normVolumeScan(VolumeCT)

    # Read stl and display
    mainInputStlDirectory = mainInputPatientDirectoryNAS + '/' + InStlSet + '/'
    os.chdir(mainInputStlDirectory)
    filename = InSurfSet + study[casePatient - 1] + '.stl'
    my_mesh1 = trimesh.load(filename)

    os.chdir(mainCodeDirectory)

    # Build  binary volume of the reference surface corresponding to the CT volume
    VolumeSurf = VolumeCT
    VolumeSurf.volumeData = np.zeros(VolumeSurf.volumeData.shape, dtype=int)

    heights = []
    for i in range(VolumeSurf.volumeDim[2]):
        heights.append(
            float(VolumeSurf.volumeOffset[2]) + i * VolumeSurf.voxelSize[2])

    contours = mesh2vol(my_mesh1, heights, VolumeSurf.volumeOffset,
                        VolumeSurf.voxelSize, VolumeSurf.volumeDim)
    indicesX = []
    indicesY = []

    for ip in range(VolumeSurf.volumeDim[2]):

        if contours[ip].shape[0] != 0:

            val = contours[ip][:, 0] - VolumeSurf.volumeOffset[0]
            val = val / (VolumeSurf.volumeDim[0] * VolumeSurf.voxelSize[0])
            val = np.round(val * VolumeSurf.volumeDim[0], 0)
            valX = val.astype(int)

            val = contours[ip][:, 1] - VolumeSurf.volumeOffset[1]
            val = val / (VolumeSurf.volumeDim[1] * VolumeSurf.voxelSize[1])
            val = np.round(val * VolumeSurf.volumeDim[1], 0)
            valY = val.astype(int)

            val_index = np.zeros((valX.shape[0], 2))
            val_index[:, 0] = valX
            val_index[:, 1] = valY
            val_index = np.unique(val_index, axis=0).astype(int)

            indicesX.append(val_index[:, 0])
            indicesY.append(val_index[:, 1])

            for i, j in zip(valY, valX):
                VolumeSurf.volumeData[i - 1, j - 1, ip] = 1
        else:
            indicesX.append([])
            indicesY.append([])

    VolumeSurfLabeled = VolumeSurf
    counter1 = 0
    counter2 = 0
    # fill in the image each contour
    for ip in range(VolumeSurfLabeled.volumeDim[2]):

        if contours[ip].shape[0] != 0:

            non_zero_start = np.count_nonzero(VolumeSurf.volumeData[:, :, ip])

            ######## REGION FILL
            binaryImage = binary_fill_holes(VolumeSurf.volumeData[:, :, ip])
            binaryImage = binaryImage > 1 / 255
            ######### CLOSING
            kernel = np.ones((5, 5), np.uint8)
            binaryImage = binary_closing(binaryImage, kernel)
            ######### FILL HOLES AGAIN
            binaryImage = binary_fill_holes(binaryImage)

            non_zero_end = np.count_nonzero(binaryImage)

            ######### ALTERNATIVE PROCESSING FOR NON CLOSED CONTOURS
            if non_zero_end < non_zero_start * 4:
                strel = disk(2)
                binaryImage = binary_dilation(VolumeSurf.volumeData[:, :, ip],
                                              strel)
                binaryImage = binary_dilation(binaryImage, strel)
                binaryImage = binary_fill_holes(binaryImage)
                binaryImage = binary_erosion(binaryImage, strel)
                binaryImage = binary_erosion(binaryImage, strel)
                counter1 = counter1 + 1

                non_zero_end2 = np.count_nonzero(binaryImage)

                ######### ALTERNATIVE PROCESSING FOR STILL-NON-CLOSED CONTOURS
                if non_zero_end2 < non_zero_start * 4:
                    strel = disk(3)
                    binaryImage = binary_dilation(
                        VolumeSurf.volumeData[:, :, ip], strel)
                    binaryImage = binary_dilation(binaryImage, strel)
                    binaryImage = binary_fill_holes(binaryImage)
                    binaryImage = binary_erosion(binaryImage, strel)
                    binaryImage = binary_erosion(binaryImage, strel)
                    counter2 = counter2 + 1

            VolumeSurfLabeled.volumeData[:, :, ip] = binaryImage

            dMin = VolumeSurfLabeled.volumeData.min()
            D = VolumeSurfLabeled.volumeData + abs(dMin)
            D = D / D.max() * 255

    print('Alternative processing no. 1: {} \n Alternative processing no. 2: {}'.
          format(counter1, counter2))

    ###### PLOT AND SCROLL ACROSS SLICES
    #if b_display == 1:
    #    fig, ax = plt.subplots(1, 1)
    #    tracker = IndexTracker(ax, D)
    #    fig.canvas.mpl_connect('scroll_event', tracker.onscroll)
    #    plt.show()
    #
    #if compare_Matlab_Python == 1:
    #    name_dir = outSet + 'Label'
    #    mainLabelDirectory = os.path.join(mainPatientDirectory, '{}'.format(name_dir))
    #    os.chdir(mainLabelDirectory)
    #    mean_dice = dice_coeff(VolumeSurfLabeled.volumeData, outSet)
    #    print(mean_dice)

    # Make nii file label
    volumeData = VolumeSurfLabeled.volumeData.astype(np.short)
    volumeData = np.transpose(volumeData, [1, 0, 2])
    voxelSize = VolumeSurfLabeled.voxelSize
    volumeOffset = VolumeSurfLabeled.volumeOffset

    affine = np.eye(4)
    niiVolumeLabel = nib.Nifti1Image(volumeData, affine)
    niiVolumeLabel.header.set_slope_inter(VolumeSurfLabeled.rescaleSlope,
                                          VolumeSurfLabeled.rescaleIntercept)
    niiVolumeLabel.header.set_qform(affine, 1)
    niiVolumeLabel.header.set_zooms(voxelSize)
    niiVolumeLabel.header['qoffset_x'] = volumeOffset[0]
    niiVolumeLabel.header['qoffset_y'] = volumeOffset[1]
    niiVolumeLabel.header['qoffset_z'] = volumeOffset[2]

    os.chdir(mainInputPatientDirectoryLoc)

    # Save nii
    filenameLabel = 'volumeLabel_' + outSet + '_' + templateSize + '_{:04d}_py.nii'.format(
        casePatient)
    nib.nifti1.save(niiVolumeLabel, filenameLabel)

    os.chdir(mainCodeDirectory)
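The fill-close-fill pattern used per slice above is the standard trick for rasterised contours that are not quite closed; a toy slice shows why both passes are needed (the 5x5 kernel mirrors the code, the contour itself is synthetic):

import numpy as np
from scipy.ndimage import binary_fill_holes, binary_closing

# toy contour: the outline of a square with a one-pixel break in the left wall
sl = np.zeros((64, 64), dtype=np.uint8)
sl[10, 10:50] = 1
sl[50, 10:50] = 1
sl[10:50, 10] = 1
sl[10:51, 50] = 1
sl[30, 10] = 0                                     # break the contour

filled = binary_fill_holes(sl)                     # interior leaks through the gap, nothing fills
closed = binary_closing(filled, np.ones((5, 5)))   # bridge the gap
solid = binary_fill_holes(closed)                  # now the interior fills
print(int(filled.sum()), '->', int(solid.sum()), 'foreground pixels')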
Example #23
0
def smooth(on, smoothing=(1, 1)):
    on = on | binary_closing(on, structure=np.ones(smoothing[0], dtype="bool"))
    on = on & binary_opening(on, structure=np.ones(smoothing[1], dtype="bool"))
    return on
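With smoothing=(3, 3), the helper above bridges gaps of up to two samples and then drops runs shorter than three; a standalone equivalent on a toy trace:

import numpy as np
from scipy.ndimage import binary_closing, binary_opening

noisy = np.array([0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0], dtype=bool)
on = noisy | binary_closing(noisy, structure=np.ones(3, dtype=bool))
on = on & binary_opening(on, structure=np.ones(3, dtype=bool))
print(on.astype(int))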
Example #24
0
def calculate_edge(features,
                   metadata,
                   centroid_name_y,
                   centroid_name_x,
                   image_size_y=2160,
                   image_size_x=2560,
                   object_name="Nuclei",
                   edge_expansion=200,
                   edge_site_range=1):

    # TODO: Implement a downsample_factor
    # TODO: Add a global option => don't calculate per site, but for the whole
    # well at the same time. Would save computation, but be expensive in RAM
    # Edge detection: Run massive dilation based on centroids of objects
    # Using centroids, because loading masks is slow and cell segmentation is
    # not always available

    # Parameters:
    # How far out is an edge searched for each site? Defaults to 1 => looks at the 3x3 grid around the site

    edge_measurements = pd.DataFrame(
        columns=['mapobject_id', 'DistanceToEdge', 'isEdge'])

    # Function gets the parameters for a whole well, calculates site by site to avoid huge memory usage
    metadata = metadata.assign(well_pos_combined=list(
        zip(metadata['well_pos_y'], metadata['well_pos_x'])))
    existing_sites = list(metadata['well_pos_combined'].unique())

    for site in existing_sites:
        logger.info('Edge detection for site {}'.format(site))
        surrounding_sites = []
        min_y = site[0]
        min_x = site[1]
        max_y = site[0]
        max_x = site[1]
        for y in range(-edge_site_range, edge_site_range + 1):
            for x in range(-edge_site_range, edge_site_range + 1):
                potential_site = (site[0] + y, site[1] + x)
                if potential_site in existing_sites:
                    surrounding_sites.append(potential_site)
                    # Check if the current site is a new min or max in x or y direction
                    if potential_site[0] < min_y:
                        min_y = potential_site[0]
                    if potential_site[1] < min_x:
                        min_x = potential_site[1]
                    if potential_site[0] > max_y:
                        max_y = potential_site[0]
                    if potential_site[1] > max_x:
                        max_x = potential_site[1]

        # Create a binary numpy array
        surrounding_size = ((max_y - min_y + 1) * image_size_y,
                            (max_x - min_x + 1) * image_size_x)
        local_surrounding = np.zeros(surrounding_size, dtype=bool)
        # Go through each image and set the centroids to True
        # Need to calculate the position in the local_surrounding image depending on where an image is relative to the others
        for sub_site in surrounding_sites:
            x_shift = (sub_site[1] - min_x) * image_size_x
            y_shift = (sub_site[0] - min_y) * image_size_y
            subsite_centroids = features.loc[metadata['well_pos_combined'] ==
                                             sub_site]
            for row_index in range(subsite_centroids.shape[0]):
                y_pos = int(
                    subsite_centroids[centroid_name_y].iloc[row_index] +
                    y_shift)
                x_pos = int(
                    subsite_centroids[centroid_name_x].iloc[row_index] +
                    x_shift)
                # print(subsite_centroids[centroid_name_y].iloc[row_index], subsite_centroids[centroid_name_x].iloc[row_index])
                # print(y_pos, x_pos)
                local_surrounding[y_pos, x_pos] = True

        # Do a dilation of the points to fill in holes between cells
        from scipy.ndimage.morphology import binary_dilation, binary_closing, distance_transform_edt
        local_surrounding = binary_dilation(local_surrounding,
                                            iterations=edge_expansion)
        local_surrounding = binary_closing(local_surrounding,
                                           structure=np.ones((15, 15)))

        # Distance transform currently treats edge as 0 => funny biases in dense regions
        local_surrounding = distance_transform_edt(local_surrounding)

        site_centroids = features.loc[metadata['well_pos_combined'] == site]
        site_centroids = site_centroids.assign(DistanceToEdge=0)
        site_centroids = site_centroids.assign(isEdge=0)
        x_shift = (site[1] - min_x) * image_size_x
        y_shift = (site[0] - min_y) * image_size_y
        for row_index in range(site_centroids.shape[0]):
            y_pos = int(site_centroids[centroid_name_y].iloc[row_index] +
                        y_shift)
            x_pos = int(site_centroids[centroid_name_x].iloc[row_index] +
                        x_shift)

            # Write values back to the data frame. For TissueMaps: Write to database here?
            col_index = site_centroids.columns.get_loc('DistanceToEdge')
            site_centroids.iloc[row_index, col_index] = max(
                local_surrounding[y_pos, x_pos] - edge_expansion, 0)

            col_index2 = site_centroids.columns.get_loc('isEdge')
            site_centroids.iloc[row_index, col_index2] = int(
                (local_surrounding[y_pos, x_pos] - edge_expansion) <= 0)

        edge_measurements = edge_measurements.append(
            site_centroids[['mapobject_id', 'DistanceToEdge', 'isEdge']],
            ignore_index=False,
            verify_integrity=False,
            sort=None)

    return edge_measurements
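The edge-detection idea above (seed centroids, dilate heavily, close, then read off a distance transform) in miniature, with made-up coordinates and a small expansion radius:

import numpy as np
from scipy.ndimage import binary_dilation, binary_closing, distance_transform_edt

field = np.zeros((300, 300), dtype=bool)
centroids = [(50, 60), (55, 80), (70, 75), (200, 220), (210, 230)]
for y, x in centroids:
    field[y, x] = True

edge_expansion = 30
tissue = binary_dilation(field, iterations=edge_expansion)     # grow points into a blob
tissue = binary_closing(tissue, structure=np.ones((15, 15)))   # fill gaps between cells
dist = distance_transform_edt(tissue)                          # distance to the blob edge

for y, x in centroids:
    d = max(dist[y, x] - edge_expansion, 0)
    print((y, x), 'distance to edge:', round(float(d), 1), 'is edge:', d <= 0)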
Example #25
0
    def _run_interface(self, runtime):
        if isdefined(self.inputs.intensity_threshold):
            threshold = self.inputs.intensity_threshold
            img = image.math_img('img>{0}'.format(threshold),
                                 img=self.inputs.in_file)
        else:
            img = nibabel.load(self.inputs.in_file)

        lower_cutoff = self.inputs.upper_cutoff - 0.05
        mask_img = masking.compute_epi_mask(
            img,
            lower_cutoff=lower_cutoff,
            upper_cutoff=self.inputs.upper_cutoff,
            connected=self.inputs.connected,
            opening=self.inputs.opening)
        mask_data = mask_img.get_data()
        n_voxels_mask = np.sum(mask_data > 0)

        # Find the optimal lower cutoff
        affine_det = np.abs(np.linalg.det(mask_img.affine[:3, :3]))
        n_voxels_min = int(self.inputs.volume_threshold * .9 / affine_det)
        while (n_voxels_mask < n_voxels_min) and (lower_cutoff >
                                                  self.inputs.lower_cutoff):
            lower_cutoff -= .05
            mask_img = masking.compute_epi_mask(
                img,
                lower_cutoff=lower_cutoff,
                upper_cutoff=self.inputs.upper_cutoff,
                connected=self.inputs.connected,
                opening=self.inputs.opening)
            mask_data = mask_img.get_data()
            n_voxels_mask = np.sum(mask_data > 0)
            if self.inputs.verbose:
                print('volume {0}, lower_cutoff {1}'.format(
                    n_voxels_mask * affine_det, lower_cutoff))

        n_voxels_max = int(self.inputs.volume_threshold * 1.1 / affine_det)
        previous_n_voxels_mask = copy.copy(n_voxels_mask)
        while (n_voxels_mask >
               n_voxels_max) and (previous_n_voxels_mask >=
                                  n_voxels_mask) and (lower_cutoff + 0.01 <
                                                      self.inputs.upper_cutoff):
            lower_cutoff += .01
            mask_img = masking.compute_epi_mask(
                img,
                lower_cutoff=lower_cutoff,
                upper_cutoff=self.inputs.upper_cutoff,
                connected=self.inputs.connected,
                opening=self.inputs.opening)
            mask_data = mask_img.get_data()
            n_voxels_mask = np.sum(mask_data > 0)
            if self.inputs.verbose:
                print('volume {0}, lower_cutoff {1}'.format(
                    n_voxels_mask * affine_det, lower_cutoff))

        else:
            if n_voxels_mask < n_voxels_min:
                lower_cutoff -= .01
                mask_img = masking.compute_epi_mask(
                    img,
                    lower_cutoff=lower_cutoff,
                    upper_cutoff=self.inputs.upper_cutoff,
                    connected=self.inputs.connected,
                    opening=self.inputs.opening)
                mask_data = mask_img.get_data()
                n_voxels_mask = np.sum(mask_data > 0)
                if self.inputs.verbose:
                    print('volume {0}, lower_cutoff {1}'.format(
                        n_voxels_mask * affine_det, lower_cutoff))

        # Find the optimal opening
        n_voxels_max = int(self.inputs.volume_threshold * 1.5 / affine_det)
        opening = 0
        while n_voxels_mask > n_voxels_max and opening < self.inputs.opening:
            opening += 1
            mask_img = masking.compute_epi_mask(
                img,
                lower_cutoff=lower_cutoff,
                upper_cutoff=self.inputs.upper_cutoff,
                connected=self.inputs.connected,
                opening=opening)
            mask_data = mask_img.get_data()
            n_voxels_mask = np.sum(mask_data > 0)
            if self.inputs.verbose:
                print('volume {0}, lower_cutoff {1}, opening {2}'.format(
                    n_voxels_mask * affine_det, lower_cutoff, opening))

        # Find the optimal closing
        iterations = 0
        n_voxels_min = int(self.inputs.volume_threshold * .8 / affine_det)
        while (n_voxels_mask < n_voxels_min) and (iterations <
                                                  self.inputs.closing):
            iterations += 1
            for structure_size in range(1, 4):
                structure = generate_binary_structure(3, structure_size)
                mask_data = binary_closing(
                    mask_data, structure=structure, iterations=iterations)
                n_voxels_mask = np.sum(mask_data > 0)
                if self.inputs.verbose:
                    print('volume {0}, lower_cutoff {1}, opening {2}, closing '
                          '{3}'.format(n_voxels_mask * affine_det,
                                       lower_cutoff, opening, iterations))
                if n_voxels_mask > n_voxels_min:
                    break

        if self.inputs.verbose:
            print('volume {0}, lower_cutoff {1}, opening {2}, closing '
                  '{3}'.format(n_voxels_mask * affine_det, lower_cutoff,
                               opening, iterations))

        # Fill holes
        n_voxels_min = int(self.inputs.volume_threshold / affine_det)
        if n_voxels_mask < n_voxels_min:
            for structure_size in range(1, 4):
                structure = generate_binary_structure(3, structure_size)
                mask_data = binary_fill_holes(
                    mask_data, structure=structure)
                n_voxels_mask = np.sum(mask_data > 0)
                if self.inputs.verbose:
                    print('volume {0}, structure_size {1}'.format(
                        n_voxels_mask * affine_det, structure_size))
                if n_voxels_mask > n_voxels_min:
                    break

        # Dilation if needed
        size = self.inputs.dilation_size
        for n in range(3):
            if n_voxels_mask < n_voxels_min * .9:
                previous_n_voxels_mask = copy.copy(n_voxels_mask)
                mask_data = grey_dilation(mask_data, size=size)
                n_voxels_mask = np.sum(mask_data > 0)
                if n_voxels_mask == previous_n_voxels_mask:
                    size2 = (size[0] + 1, size[1] + 1, size[2] + 1)
                    mask_data = grey_dilation(mask_data, size=size2)
                    n_voxels_mask = np.sum(mask_data > 0)

                if self.inputs.verbose:
                    print('volume {0}, grey dilation {1}'.format(
                        n_voxels_mask * affine_det, n))
            else:
                break

        # Fill holes
        for structure_size in range(1, 4):
            structure = generate_binary_structure(3, structure_size)
            mask_data = binary_fill_holes(
                mask_data, structure=structure)
            n_voxels_mask = np.sum(mask_data > 0)

        if self.inputs.verbose:
            print('final volume {0}'.format(n_voxels_mask * affine_det))

        mask_img = image.new_img_like(mask_img, mask_data, mask_img.affine)
        if isdefined(self.inputs.out_file):
            mask_img.to_filename(os.path.abspath(self.inputs.out_file))
        else:
            mask_img.to_filename(os.path.abspath(
                fname_presuffix(os.path.basename(self.inputs.in_file),
                                suffix='_histo_mask')))
        return runtime
Example #26
0
def get_biggest_component(binary_mask: np.ndarray) -> np.ndarray:
    binary_mask = binary_closing(binary_mask, structure=square(6))
    labels = label(binary_mask)
    largestCC = labels == (np.argmax(np.bincount(labels.flat)[1:]) + 1)
    return largestCC
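A small usage sketch of the same close-label-pick-largest pattern, assuming skimage's label and square (which the snippet appears to use) and a synthetic mask:

import numpy as np
from scipy.ndimage import binary_closing
from skimage.measure import label
from skimage.morphology import square

mask = np.zeros((100, 100), dtype=bool)
mask[10:40, 10:40] = True       # large blob
mask[70:75, 70:75] = True       # small blob

closed = binary_closing(mask, structure=square(6))
labels = label(closed)
largest = labels == (np.argmax(np.bincount(labels.flat)[1:]) + 1)
print(int(largest.sum()), 'pixels in the largest connected component')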
Example #27
0
    def split(self,
              sigma=2,
              threshold_split=0.25,
              expand_mask=1,
              minimum_pixels=1):
        """
        If any classes contain multiple non-contiguous segments in real space, divide
        these regions into distinct classes.

        Algorithm is as follows:
        First, an image of each class is obtained from its scan position weights.
        Then, the image is convolved with a gaussian of std sigma.
        This is then turned into a binary mask, by thresholding with threshold_split.
        Stray pixels are eliminated by performing a one pixel binary opening, then
        binary closing.
        The mask is then expanded by expand_mask pixels.
        Finally, the contiguous regions of the resulting mask are found. These become the
        new class components by scan position.

        The splitting itself involves creating two classes - i.e. adding a column to W
        and a row to H.  The new BP classes (W columns) have exactly the same values as
        the old BP class. The two new scan position classes (H rows) divide up the
        non-zero entries of the old scan position class into two or more non-intersecting
        subsets, each of which becomes its own new class.

        Args:
            sigma (float): std of gaussian kernel used to smooth the class images before
                thresholding and splitting.
            threshold_split (float): used to threshold the class image to create a binary mask.
            expand_mask (int): number of pixels by which to expand the mask before separating
                into contiguous regions.
            minimum_pixels (int): if, after splitting, a potential new class contains fewer than
                this number of pixels, ignore it
        """
        assert isinstance(expand_mask, (int, np.integer))
        assert isinstance(minimum_pixels, (int, np.integer))

        W_next = np.zeros((self.N_feat, 1))
        H_next = np.zeros((1, self.N_meas))
        for i in range(self.N_c):
            # Get the class in real space
            class_image = self.get_class_image(i)

            # Turn into a binary mask
            class_image = gaussian_filter(class_image, sigma)
            mask = class_image > (np.max(class_image) * threshold_split)
            mask = binary_opening(mask, iterations=1)
            mask = binary_closing(mask, iterations=1)
            mask = binary_dilation(mask, iterations=expand_mask)

            # Get connected regions
            labels, nlabels = label(mask,
                                    background=0,
                                    return_num=True,
                                    connectivity=2)

            # Add each region to the new W and H matrices
            for j in range(nlabels):
                mask = (labels == (j + 1))
                mask = binary_erosion(mask, iterations=expand_mask)

                if np.sum(mask) >= minimum_pixels:

                    # Leave the Bragg peak weightings the same
                    W_next = np.hstack((W_next, self.W[:, i, np.newaxis]))

                    # Use the existing real space pixel weightings
                    h_i = np.zeros(self.N_meas)
                    h_i[mask.ravel()] = self.H[i, :][mask.ravel()]
                    H_next = np.vstack((H_next, h_i[np.newaxis, :]))

        self.W_next = W_next[:, 1:]
        self.H_next = H_next[1:, :]
        self.N_c_next = self.W_next.shape[1]

        return
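The mask-building recipe described in the docstring (smooth, threshold, open/close, expand, then label contiguous regions) looks roughly like this on a toy class image; the names and sizes are illustrative only:

import numpy as np
from scipy.ndimage import gaussian_filter, binary_opening, binary_closing, binary_dilation
from skimage.measure import label

# a toy "class image": two separated bright regions plus noise
rng = np.random.default_rng(0)
class_image = rng.random((80, 80)) * 0.1
class_image[10:30, 10:30] += 1.0
class_image[50:70, 45:70] += 0.8

sigma, threshold_split, expand_mask = 2, 0.25, 1

smoothed = gaussian_filter(class_image, sigma)
mask = smoothed > (smoothed.max() * threshold_split)
mask = binary_opening(mask, iterations=1)          # drop stray pixels
mask = binary_closing(mask, iterations=1)          # fill pinholes
mask = binary_dilation(mask, iterations=expand_mask)

labels, nlabels = label(mask, background=0, return_num=True, connectivity=2)
print(nlabels, 'contiguous regions found')         # expect 2 for this toy image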
Example #28
0
def Stroke_closing(img):
    new_img = morphology.binary_closing(img, structure=np.ones((2, 2, 2)))
    return new_img
Example #29
0
def threshold_components_parallel(pars):
    """
       Post-processing of spatial components which includes the following steps

       (i) Median filtering
       (ii) Thresholding
       (iii) Morphological closing of spatial support
       (iv) Extraction of largest connected component ( to remove small unconnected pixel )
       Note: must be called through the function threshold_components.

       Parameters:
        ---------
        [parsed]
       A:      np.ndarray
           2d matrix with spatial components

       dims:   tuple
           dimensions of spatial components

       medw: [optional] tuple
           window of median filter

       thr_method: [optional] string
           Method of thresholding:
               'max' sets to zero pixels that have value less than a fraction of the max value
               'nrg' keeps the pixels that contribute up to a specified fraction of the energy

       maxthr: [optional] scalar
           Threshold of max value

       nrgthr: [optional] scalar
           Threshold of energy

       extract_cc: [optional] bool
           Flag to extract connected components (might want to turn to False for dendritic imaging)

       se: [optional] np.intarray
           Morphological closing structuring element

       ss: [optional] np.intarray
           Binary element for determining connectivity

       Returns:
        -------
           Ath: np.ndarray
               2d matrix with spatial components thresholded
       """

    A_i, i, dims, medw, d, thr_method, se, ss, maxthr, nrgthr, extract_cc = pars
    A_i = A_i.toarray()
    # reshape this one-dimensional column of the 2d components back into 2D
    A_temp = np.reshape(A_i, dims[::-1])
    # we apply a median filter of size medw
    A_temp = median_filter(A_temp, medw)
    if thr_method == 'max':
        BW = (A_temp > maxthr * np.max(A_temp))
    elif thr_method == 'nrg':
        Asor = np.sort(np.squeeze(np.reshape(A_temp, (d, 1))))[::-1]
        temp = np.cumsum(Asor ** 2)
        ff = np.squeeze(np.where(temp < nrgthr * temp[-1]))
        if ff.size > 0:
            ind = ff if ff.ndim == 0 else ff[-1]
            A_temp[A_temp < Asor[ind]] = 0
            BW = (A_temp >= Asor[ind])
        else:
            BW = np.zeros_like(A_temp)
    # we want to remove the components that are valued 0 in this now 1d matrix
    Ath = np.squeeze(np.reshape(A_temp, (d, 1)))
    Ath2 = np.zeros((d))
    # we do that to have a fully closed structure even if the values have been thresholded
    BW = binary_closing(BW.astype(np.int), structure=se)

    # if we have deleted the element
    if BW.max() == 0:
        return Ath2, i
    #
    if extract_cc: # we want to extract the largest connected component ( to remove small unconnected pixel )
        # we extract each feature as independent with the cross structuring element
        labeled_array, num_features = label(BW, structure=ss)
        labeled_array = np.squeeze(np.reshape(labeled_array, (d, 1)))
        nrg = np.zeros((num_features, 1))
        # we extract the energy for each component
        for j in range(num_features):
            nrg[j] = np.sum(Ath[labeled_array == j + 1] ** 2)
        indm = np.argmax(nrg)
        Ath2[labeled_array == indm + 1] = Ath[labeled_array == indm + 1]

    else:
        BW = BW.flatten()
        Ath2[BW] = Ath[BW]

    return Ath2, i
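To make the 'nrg' thresholding rule concrete, a tiny worked example (values invented):

import numpy as np

A_temp = np.array([0.05, 0.9, 0.4, 0.02, 0.7, 0.1])
nrgthr = 0.99

Asor = np.sort(A_temp)[::-1]                      # values sorted by decreasing magnitude
energy = np.cumsum(Asor ** 2)
ff = np.where(energy < nrgthr * energy[-1])[0]    # indices covering < 99% of the energy
cutoff = Asor[ff[-1]] if ff.size else Asor[0]
BW = A_temp >= cutoff                             # keep only the dominant pixels
print(BW.astype(int))                             # -> [0 1 0 0 1 0]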
Example #30
0
def get_our_labels(th):
    mask = morphology.binary_closing(d_min > th, iterations=3)
    return get_labels(mask)