Code example #1
def find_initial_worm(small_image, well_mask):
    # plan here is to find known good worm edges with Canny using a stringent threshold, then
    # relax the threshold in the vicinity of the good edges.
    # back off another pixel from the well edge to avoid gradient from the edge
    shrunk_mask = ndimage.binary_erosion(well_mask, structure=S)
    smoothed, gradient, sobel = canny.prepare_canny(small_image, 2, shrunk_mask)
    local_maxima = canny.canny_local_maxima(gradient, sobel)
    # Calculate stringent and medium-stringent thresholds. The stringent threshold
    # is the 200th-brightest edge pixel and the medium is the 450th-brightest,
    # but neither percentile is allowed to fall below the 94th.
    highp = 100 * (1-200/local_maxima.sum())
    highp = max(highp, 94)
    mediump = 100 * (1-450/local_maxima.sum())
    mediump = max(mediump, 94)
    low_worm, medium_worm, high_worm = numpy.percentile(gradient[local_maxima], [94, mediump, highp])
    stringent_worm = canny.canny_hysteresis(local_maxima, gradient, low_worm, high_worm)
    # Expand out 20 pixels from the stringent worm edges to make our search space
    stringent_area = ndimage.binary_dilation(stringent_worm, mask=well_mask, iterations=20)
    # now use the relaxed threshold but only in the stringent area
    relaxed_worm = canny.canny_hysteresis(local_maxima, gradient, low_worm, medium_worm) & stringent_area
    # join very close-by objects, and remove remaining small objects
    candidate_worm = ndimage.binary_dilation(relaxed_worm, structure=S)
    candidate_worm = ndimage.binary_erosion(candidate_worm)
    candidate_worm = mask.remove_small_area_objects(candidate_worm, 30, structure=S)
    # Now figure out the biggest blob of nearby edges, and call that the worm region
    glommed_candidate = ndimage.binary_dilation(candidate_worm, structure=S, iterations=2)
    glommed_candidate = ndimage.binary_erosion(glommed_candidate, iterations=2)
    # get just outline, not any regions filled-in due to closing
    glommed_candidate ^= ndimage.binary_erosion(glommed_candidate)
    glommed_candidate = mask.get_largest_object(glommed_candidate, structure=S)
    worm_area = ndimage.binary_dilation(glommed_candidate, mask=well_mask, structure=S, iterations=12)
    worm_area = mask.fill_small_radius_holes(worm_area, max_radius=15)
    candidate_edges = relaxed_worm & candidate_worm & worm_area
    return candidate_edges, worm_area
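The comments above describe a two-pass hysteresis strategy: find high-confidence edges first, then accept weaker edges only in their vicinity. A minimal generic sketch of that idea in plain NumPy/SciPy (the gradient array and percentile cut-offs are illustrative stand-ins, not values from this project):

import numpy as np
from scipy import ndimage

grad = np.random.rand(64, 64)               # stand-in for a gradient-magnitude image
stringent = grad > np.percentile(grad, 98)  # high-confidence edge pixels
relaxed = grad > np.percentile(grad, 90)    # permissive edge pixels
search_area = ndimage.binary_dilation(stringent, iterations=20)
edges = relaxed & search_area               # keep weak edges only near strong ones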
Code example #2
	def cell_to_image(self):
	
		# Find x, y coordinate bounds
		x_res = max(self.pix_list, key=itemgetter(0))[0]
		y_res = max(self.pix_list, key=itemgetter(1))[1]

		# Creating labeled_img
		self.cell_img = NP.zeros([x_res+2, y_res+2], dtype=NP.int_)
		
		for (x_pix, y_pix) in self.pix_list:
			self.cell_img[x_pix-1, y_pix-1] = 1

		# Find the pixels that make up the perimeter
		eroded_image = NDI.binary_erosion(self.cell_img)

		eroded_image_open = NDI.binary_opening(eroded_image, structure=NP.ones((3,3)))
		eroded_image_open2 = NDI.binary_erosion(eroded_image_open)

		# self.perim_img = self.cell_img - eroded_image
		self.eroded_img = eroded_image_open - eroded_image_open2
		self.perim_img = self.cell_img - eroded_image

		# Create a list of the coordinates of the pixels (use the center of the pixels)
		perim_image_ind = NP.where(self.perim_img == 1)
		perim_image_coord = NP.array([perim_image_ind[0], perim_image_ind[1]])
		self.perim_coord = NP.transpose(perim_image_coord)

		return
Code example #3
def prepare_roi_from_probtissue(in_file, epi_mask, epi_mask_erosion_mm=0,
                                erosion_mm=0):
    import os
    import nibabel as nb
    import scipy.ndimage as nd

    probability_map_nii = nb.load(in_file)
    probability_map_data = probability_map_nii.get_data()

    # thresholding
    probability_map_data[probability_map_data < 0.95] = 0
    probability_map_data[probability_map_data != 0] = 1

    epi_mask_nii = nb.load(epi_mask)
    epi_mask_data = epi_mask_nii.get_data()
    if epi_mask_erosion_mm:
        iter_n = int(epi_mask_erosion_mm / max(probability_map_nii.header.get_zooms()))
        epi_mask_data = nd.binary_erosion(epi_mask_data,
                                          iterations=iter_n).astype(int)
        eroded_mask_file = os.path.abspath("eroded_mask.nii.gz")
        nb.Nifti1Image(epi_mask_data, epi_mask_nii.affine, epi_mask_nii.header).to_filename(eroded_mask_file)
    else:
        eroded_mask_file = epi_mask
    probability_map_data[epi_mask_data != 1] = 0

    # shrinking
    if erosion_mm:
        iter_n = int(erosion_mm/max(probability_map_nii.header.get_zooms()))
        probability_map_data = nd.binary_erosion(probability_map_data,
                                                 iterations=iter_n).astype(int)


    new_nii = nb.Nifti1Image(probability_map_data, probability_map_nii.affine,
                             probability_map_nii.header)
    new_nii.to_filename("roi.nii.gz")
    return os.path.abspath("roi.nii.gz"), eroded_mask_file
Code example #4
File: binary.py Project: Cadair/scikit-image
def binary_erosion(image, selem=None, out=None):
    """Return fast binary morphological erosion of an image.

    This function returns the same result as greyscale erosion but performs
    faster for binary images.

    Morphological erosion sets a pixel at ``(i,j)`` to the minimum over all
    pixels in the neighborhood centered at ``(i,j)``. Erosion shrinks bright
    regions and enlarges dark regions.

    Parameters
    ----------
    image : ndarray
        Binary input image.
    selem : ndarray, optional
        The neighborhood expressed as a 2-D array of 1's and 0's.
        If None, use cross-shaped structuring element (connectivity=1).
    out : ndarray of bool, optional
        The array to store the result of the morphology. If None is
        passed, a new array will be allocated.

    Returns
    -------
    eroded : ndarray of bool or uint
        The result of the morphological erosion taking values in
        ``[False, True]``.

    """
    if out is None:
        out = np.empty(image.shape, dtype=np.bool)
    ndi.binary_erosion(image, structure=selem, output=out, border_value=True)
    return out
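A minimal usage sketch for this scikit-image function (assuming the square helper for the structuring element): eroding a 3x3 bright square with a 3x3 footprint leaves only the centre pixel.

import numpy as np
from skimage.morphology import binary_erosion, square

img = np.zeros((7, 7), dtype=bool)
img[2:5, 2:5] = True                   # 3x3 bright square
eroded = binary_erosion(img, square(3))
print(eroded.sum())                    # 1 -- only the centre pixel survives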
Code example #5
File: reachability.py Project: benkehoe/python
def get_base_positions_bimanual(rars, joints):
    PR2.SetDOFValues(joints)
    f = np.load("/home/joschu/bulletsim/data/knots/l_reachability.npz")
    
    
    xtorso, ytorso, ztorso = xyz_torso = PR2.GetLink("torso_lift_link").GetTransform()[:3,3] - PR2.GetLink("base_footprint").GetTransform()[:3,3]
        
    xyz_l = rars["xyz_l"]
    xyz_r = rars["xyz_r"]
    
    left_used = (rars["grab_l"] > -1).any()
    right_used = (rars["grab_r"] > -1).any()
    print "left_used: %i, right_used: %i"%(left_used, right_used)
    
    invreachL = f["reachable"][::-1, ::-1, :] #places that can reach the origin
    invreachR = f["reachable"][::-1, :, :] #places that can reach the origin

    invreachL = ndi.binary_erosion(invreachL,np.ones((3,3,3)))
    invreachR = ndi.binary_erosion(invreachR,np.ones((3,3,3)))
    

    xticksir = - f["xticks"][::-1]
    yticksirL = - f["yticks"][::-1]
    yticksirR = f["yticks"]
    zticksir = f["zticks"]
    
    leftbounds = [xminL, xmaxL, yminL, ymaxL] = get_xy_bounds(xyz_l, xticksir, yticksirL) # bounds for torso position array
    rightbounds = [xminR, xmaxR, yminR, ymaxR] = get_xy_bounds(xyz_r, xticksir, yticksirR) 
    
    [xmin, xmax, ymin, ymax] = [min(xminL, xminR), max(xmaxL, xmaxR), min(yminL, yminR), max(ymaxL, ymaxR)]

    if WITH_VIEWER:
        HANDLES.append(ENV.drawlinestrip(points=np.array([[xmin, ymin, 0],
                                                        [xmin, ymax, 0],
                                                        [xmax, ymax, 0],
                                                        [xmax, ymin, 0]]),
                               linewidth=1.0))  

    xticks = np.arange(xmin-DL, xmax+DL, DL) # torso positions
    yticks = np.arange(ymin-DL, ymax+DL, DL)

    collision_cost = 1e9
    left_fail_cost = 1000000. if left_used else 100
    right_fail_cost = 1000000. if right_used else 100
    dist_cost = 1.

    base_costs = np.zeros((len(rars), xticks.size, yticks.size))
    coll_mask = get_collision_mask(xticks, yticks)
    base_costs += collision_cost * coll_mask[None,:,:]
    
    for (i, (xl, yl, zl), (xr, yr, zr)) in zip(xrange(len(rars)), xyz_l, xyz_r):
        zlind = intround(  (zl - ztorso - zticksir[0]) / DL  )
        zrind = intround(  (zr - ztorso - zticksir[0]) / DL  )
                
        base_costs[i] += (shift_and_place_image(invreachL[:,:,zlind], xl, yl, xticksir, yticksirL, xticks, yticks) <= 0) * left_fail_cost + dist_cost
        base_costs[i] += (shift_and_place_image(invreachR[:,:,zrind], xr, yr, xticksir, yticksirR, xticks, yticks) <= 0) * right_fail_cost + dist_cost
            
    xinds_base, yinds_base = get_feasible_path(base_costs).T
    return np.c_[xticks[xinds_base] - xtorso, yticks[yinds_base] - ytorso]
Code example #6
def erosion():
    image_list = get_one_imagefrom_mnist()
    image_array = np.asarray(image_list)
    image = image_array.reshape(28, 28)

    # erode the digit and display the eroded image
    eroded = ndimage.binary_erosion(image).astype(int)
    plt.imshow(eroded, cmap=cm.binary)
    plt.show()
Code example #7
def edge_detect_first(i):
    import numpy as np
    from scipy import ndimage
    ero = ndimage.binary_erosion(i, iterations=2).astype(i.dtype)
    dil = ndimage.binary_dilation(i, iterations=1).astype(i.dtype)
    sep = dil - ero
    # cast to float so that non-edge pixels can be marked with NaN below
    sep = ndimage.binary_erosion(sep, iterations=1).astype(float)
    sep[sep == 0] = np.nan
    return sep
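The dilation-minus-erosion pattern above is the classic morphological gradient. A small standalone sketch (not part of this function) showing that, for a binary image and the same 3x3 structuring element, it matches scipy.ndimage.morphological_gradient:

import numpy as np
from scipy import ndimage

a = np.zeros((10, 10), dtype=int)
a[3:7, 3:7] = 1
selem = np.ones((3, 3))
manual = ndimage.binary_dilation(a, structure=selem) ^ ndimage.binary_erosion(a, structure=selem)
direct = ndimage.morphological_gradient(a, size=(3, 3)).astype(bool)
assert np.array_equal(manual, direct)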
Code example #8
def analyseClusters(binary, newlabels):
    """
    Calculates the sizes and porosities of the clusters.
    """
    
    # dilate particles to find cluster
    dilated = ndimage.binary_dilation(binary, iterations=_DILATIONFACTOR_TO_FIND_CLUSTER)
    labels, num = label(dilated, background=0, return_num=True)
    pxArea = (_CONVERSIONFACTOR_FOR_PIXEL) ** 2
    outputImage = labels.copy()
    clusterAreas = np.zeros(num)
    porosities = np.zeros(num)
    circumference = np.zeros(num)
    fcirc = np.zeros(num)
    particlesPerCluster = np.zeros(num)
    illegalIndex = []
    
    for i in range(num):
        cluster = labels == i
        cluster = ndimage.binary_fill_holes(cluster)
        helper = np.zeros_like(newlabels)
        helper[cluster] = newlabels[cluster]
        newLabel, particleNum = label(helper, background=0, return_num=True)
        particlesPerCluster[i] = particleNum
        particleArea = float(np.sum(binary[cluster].astype(np.int)))
        
        # cluster area and porosity
        outputImage[cluster] = i
        helper = ndimage.binary_erosion(cluster, iterations=_DILATIONFACTOR_TO_FIND_CLUSTER-3, border_value=1)        
        helper = ndimage.binary_erosion(helper, iterations=3, border_value=0)
        fl = float(np.sum(helper[cluster].astype(np.int)))
        clusterAreas[i] = fl * pxArea
        porosity = (fl - particleArea)/ fl
        porosity = porosity if porosity >= 0 else 0.0  # porosity can not be less than 0
        porosities[i] = porosity
        
        # circumference
        new = np.zeros((helper.shape[0],helper.shape[1],3), dtype=np.uint8)
        new[:,:,1] = helper
        gray = cv2.cvtColor(new, cv2.COLOR_RGB2GRAY)
        gray[gray > 0] = 255
        blur = cv2.GaussianBlur(gray,(5,5),0)
        gray = cv2.Canny(blur, 10, 200)
        contours, hierarchy = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        arclength = 0
        for con in contours:
            arclength += cv2.arcLength(con,True)
        circumference[i] = arclength * _CONVERSIONFACTOR_FOR_PIXEL
        fcirc[i] = (4. * np.pi * fl) / arclength**2
        
        if fcirc[i] > 1.0:  # fcirc can not be greater than 1
            illegalIndex.append(i)
    
    fcirc = np.delete(fcirc, illegalIndex)
    clusterData = {'areas':clusterAreas,'circ':circumference,'ppc':particlesPerCluster,'fcirc':fcirc,'porosities':porosities}
    return outputImage, clusterData, num
Code example #9
def test_binary_erosion_noninteger_brute_force_passes_when_true():
    # regression test for gh-9905, gh-9909: ValueError for
    # non integer iterations
    data = numpy.ones([1])

    assert sndi.binary_erosion(
        data, iterations=2, brute_force=1.5
    ) == sndi.binary_erosion(data, iterations=2, brute_force=bool(1.5))
    assert sndi.binary_erosion(
        data, iterations=2, brute_force=0.0
    ) == sndi.binary_erosion(data, iterations=2, brute_force=bool(0.0))
Code example #10
File: volume.py Project: mtrellet/disvis
def erode(volume, iterations, out=None):
    if out is None:
        out = zeros_like(volume)
    if SCIPY:
        binary_erosion(volume.array, iterations=iterations, output=out.array)
    else:
        tmp = volume.array.copy()
        for i in range(iterations):
            binary_erosion(tmp, out.array)
            tmp[:] = out.array[:]

    return out
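The SciPy branch and the manual fallback loop above are meant to be equivalent: binary_erosion with iterations=n simply repeats a one-step erosion n times. A small standalone check of that equivalence on a toy volume (not disvis data):

import numpy as np
from scipy.ndimage import binary_erosion

vol = np.zeros((9, 9, 9), dtype=bool)
vol[1:8, 1:8, 1:8] = True

repeated = binary_erosion(binary_erosion(vol))
direct = binary_erosion(vol, iterations=2)
assert np.array_equal(repeated, direct)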
Code example #11
File: utils.py Project: poldracklab/niworkflows
def _tpm2roi(in_tpm, in_mask, mask_erosion_mm=None, erosion_mm=None,
             mask_erosion_prop=None, erosion_prop=None, pthres=0.95,
             newpath=None):
    """
    Generate a mask from a tissue probability map
    """
    tpm_img = nb.load(in_tpm)
    roi_mask = (tpm_img.get_data() >= pthres).astype(np.uint8)

    eroded_mask_file = None
    erode_in = (mask_erosion_mm is not None and mask_erosion_mm > 0 or
                mask_erosion_prop is not None and mask_erosion_prop < 1)
    if erode_in:
        eroded_mask_file = fname_presuffix(in_mask, suffix='_eroded',
                                           newpath=newpath)
        mask_img = nb.load(in_mask)
        mask_data = mask_img.get_data().astype(np.uint8)
        if mask_erosion_mm:
            iter_n = max(int(mask_erosion_mm / max(mask_img.header.get_zooms())), 1)
            mask_data = nd.binary_erosion(mask_data, iterations=iter_n)
        else:
            orig_vol = np.sum(mask_data > 0)
            while np.sum(mask_data > 0) / orig_vol > mask_erosion_prop:
                mask_data = nd.binary_erosion(mask_data, iterations=1)

        # Store mask
        eroded = nb.Nifti1Image(mask_data, mask_img.affine, mask_img.header)
        eroded.set_data_dtype(np.uint8)
        eroded.to_filename(eroded_mask_file)

        # Mask TPM data (no effect if not eroded)
        roi_mask[~mask_data] = 0

    # shrinking
    erode_out = (erosion_mm is not None and erosion_mm > 0 or
                 erosion_prop is not None and erosion_prop < 1)
    if erode_out:
        if erosion_mm:
            iter_n = max(int(erosion_mm / max(tpm_img.header.get_zooms())), 1)
            roi_mask = nd.binary_erosion(roi_mask, iterations=iter_n)
        else:
            orig_vol = np.sum(roi_mask > 0)
            while np.sum(roi_mask > 0) / orig_vol > erosion_prop:
                roi_mask = nd.binary_erosion(roi_mask, iterations=1)

    # Create image to resample
    roi_fname = fname_presuffix(in_tpm, suffix='_roi', newpath=newpath)
    roi_img = nb.Nifti1Image(roi_mask, tpm_img.affine, tpm_img.header)
    roi_img.set_data_dtype(np.uint8)
    roi_img.to_filename(roi_fname)
    return roi_fname, eroded_mask_file or in_mask
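The mm-to-iterations conversion used in both branches above works out to roughly one voxel of erosion per iteration along the coarsest axis. A tiny sketch with made-up voxel sizes:

zooms = (2.0, 2.0, 2.5)   # hypothetical voxel sizes in mm
mask_erosion_mm = 10.0
iter_n = max(int(mask_erosion_mm / max(zooms)), 1)
print(iter_n)             # 10 mm / 2.5 mm per iteration -> 4 iterations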
Code example #12
File: render_app.py Project: royaljava/PRNet
def get_uv_mask(vertices_vis, triangles, uv_coords, h, w, resolution):
    triangles = triangles.T
    vertices_vis = vertices_vis.astype(np.float32)
    uv_mask = render_texture(uv_coords.T, vertices_vis[np.newaxis, :], triangles, resolution, resolution, 1)
    uv_mask = np.squeeze(uv_mask > 0)
    uv_mask = ndimage.binary_closing(uv_mask)
    uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))  
    uv_mask = ndimage.binary_closing(uv_mask)
    uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))  
    uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))  
    uv_mask = ndimage.binary_erosion(uv_mask, structure = np.ones((4,4)))  
    uv_mask = uv_mask.astype(np.float32)

    return np.squeeze(uv_mask)
Code example #13
def process_blob(cim):
    #cim = ndimage.binary_erosion(cim>0)
    for i in range(4):
        cim = ndimage.binary_erosion(cim>0)
        cim=ndimage.binary_dilation(cim>0)

    filterk = np.ones(Param.process_conv_size);
    cim = ndimage.convolve(cim, filterk, mode='constant', cval=0.0)

    for i in range(Param.num_dilation):
        cim = ndimage.binary_dilation(cim>0)
    for i in range(Param.num_erosion):
        cim = ndimage.binary_erosion(cim>0)
    return cim
Code example #14
File: masking.py Project: jeromedockes/nilearn
def _post_process_mask(mask, affine, opening=2, connected=True,
                       warning_msg=""):
    if opening:
        opening = int(opening)
        mask = ndimage.binary_erosion(mask, iterations=opening)
    mask_any = mask.any()
    if not mask_any:
        warnings.warn("Computed an empty mask. %s" % warning_msg,
            MaskWarning, stacklevel=2)
    if connected and mask_any:
        mask = largest_connected_component(mask)
    if opening:
        mask = ndimage.binary_dilation(mask, iterations=2 * opening)
        mask = ndimage.binary_erosion(mask, iterations=opening)
    return mask, affine
Code example #15
File: masking.py Project: VirgileFritsch/nilearn
def _post_process_mask(mask, affine, opening=2, connected=True, msg=""):
    if opening:
        opening = int(opening)
        mask = ndimage.binary_erosion(mask, iterations=opening)
    mask_any = mask.any()
    if not mask_any:
        warnings.warn("Computed an empty mask. %s" % msg,
            MaskWarning, stacklevel=2)
    if connected and mask_any:
        mask = largest_connected_component(mask)
    if opening:
        mask = ndimage.binary_dilation(mask, iterations=2*opening)
        mask = ndimage.binary_erosion(mask, iterations=opening)
    return Nifti1Image(_utils.as_ndarray(mask, dtype=np.int8),
                       affine)
Code example #16
 def calculate_distance_to_edge(self, workspace):
     m = workspace.measurements
     edge = workspace.image_set.get_image(self.edge_image.value,
                                          must_be_binary=True)
     edge = edge.pixel_data
     objects = workspace.object_set.get_objects(self.object_name.value)
     distance = np.ones(objects.count) * np.sqrt(np.prod(edge.shape)) / 2
     for e in (edge, ~edge):
         d = distance_transform_edt(e)
         for labels, indices in objects.get_labels():
             #
             # A mask of labeled points outside of the edge object
             #
             mask = (labels != 0) & (d != 0)
             dm = d[mask]
             lm = labels[mask]
             #
             # Order by distance, then label, take the first of
             # each label to find the minimum
             #
             order = np.lexsort((dm, lm))
             lm, dm = lm[order], dm[order]
             smallest = np.hstack([[True], lm[:-1] != lm[1:]])
             distance[lm[smallest]-1] = \
                 np.minimum(dm[smallest], distance[lm[smallest]-1])
     m.add_measurement(self.object_name.value,
                       self.edge_feature(),
                       distance)
     if workspace.frame is not None:
         dpicture = workspace.display_data.distances = -np.ones(edge.shape)
         for labels, indices in objects.get_labels():
             dpicture[labels!=0] = distance[labels[labels!=0] - 1]
         workspace.display_data.edge = (
             binary_dilation(edge, structure=np.ones((3,3), bool)) !=
             binary_erosion(edge, structure=np.ones((3,3), bool), border_value=1))
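The final expression marks the edge outline as the pixels where a 3x3 dilation and a 3x3 erosion disagree. A minimal standalone sketch of the same trick on a toy mask:

import numpy as np
from scipy.ndimage import binary_dilation, binary_erosion

edge = np.zeros((9, 9), dtype=bool)
edge[3:6, 3:6] = True
selem = np.ones((3, 3), dtype=bool)
outline = (binary_dilation(edge, structure=selem) !=
           binary_erosion(edge, structure=selem, border_value=1))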
Code example #17
File: reachability.py Project: benkehoe/python
def get_base_positions2(xyzs_world):
    xtorso, ytorso, ztorso = xyz_torso = PR2.GetLink("torso_lift_link").GetTransform()[:3,3] - PR2.GetLink("base_footprint").GetTransform()[:3,3]


    f = np.load("/home/joschu/bulletsim/data/knots/l_reachability.npz")

    invreach = f["reachable"][::-1, ::-1, :] #places that can reach the origin
    invreach = ndi.binary_erosion(invreach, np.ones((3,3,3)))  # 3-D structuring element to match the reachability volume

    xticksir = - f["xticks"][::-1]
    yticksir = - f["yticks"][::-1]
    zticksir = f["zticks"]

    [xmin, xmax, ymin, ymax] = get_xy_bounds(xyzs_world, xticksir, yticksir) # bounds for torso position array

    xticks = np.arange(xmin-DL, xmax+DL, DL) # torso positions
    yticks = np.arange(ymin-DL, ymax+DL, DL)

    base_costs = np.zeros((len(xyzs_world), xticks.size, yticks.size))
    for (i,(x, y, z)) in enumerate(xyzs_world):
        zind = intround(  (z - ztorso) / DL  )
        base_costs[i] = shift_and_place_image(1 + (invreach[:,:,zind]<=0)*1000, x, y, xticksir, yticksir, xticks, yticks)

    xinds_base, yinds_base = get_feasible_path(base_costs).T
    return np.c_[xticks[xinds_base] - xtorso, yticks[yinds_base] - ytorso]
Code example #18
def filterImage(image):
    """
    Filters the given image and returns a binary representation of it.
    """
    
    # otsu to bring out edges
    t_loc_otsu = otsu(image[:, :, 1])
    loc_otsu = np.zeros_like(image, dtype=np.bool)
    loc_otsu[:, :, 1] = image[:, :, 1] <= t_loc_otsu + 5
    image[loc_otsu] = 0
    
    # bring out single particles and smooth the rest
    foot = circarea(8)
    green = rank_filter(image[:,:,1], foot, rank=44)
    nonzero = green > 10
    weak = (green > 20) & (green < green[nonzero].mean())
    green[weak] += 40
    
    # remove pollution
    gray = cv2.medianBlur(green, ksize=13)
    
    # black and white representation of particles and surroundings
    binary = gray < 25
    
    # dilation and erosion
    dilated1 = ndimage.binary_dilation(binary, iterations=6)
    erosed = ndimage.binary_erosion(dilated1, iterations=_EROSIONFACTOR+3)
    dilated = ndimage.binary_dilation(erosed, iterations=_EROSIONFACTOR)
    return dilated
Code example #19
def segmentationize(imageSe):
    """
    Divides coherent forms of an image in smaller groups of type integer.
    """
    
    # create a matrix of distances to the nearest background pixel
    distance = ndimage.distance_transform_edt(imageSe, sampling=3)
    erosed = ndimage.binary_erosion(imageSe, iterations=8).astype(imageSe.dtype)
    distanceE = ndimage.distance_transform_edt(erosed, sampling=3)
    distance += (2 * distanceE)
    labels, num = label(imageSe, background=0, return_num=True)
    sizes_image = ndimage.sum(imageSe, labels, range(num))
    sizes_image = np.sort(sizes_image, axis=None)
    pos = int(0.4 * num)
    areal = int(sizes_image[pos] ** 0.5)
    if areal <= 10:
        areal = 10
    elif (areal % 2) != 0:
        areal += 1
    footer = circarea(areal) # draw circle area
    
    # find the positions of the maxima from the distances
    local_maxi = peak_local_max(distance, indices=False, footprint=footer, labels=imageSe)
    markers = label(local_maxi)
    
    # watershed algorithm starts at the maxima and returns labels of particles
    simplefilter("ignore", FutureWarning)   # avoid warning in watershed method
    labels_ws = watershed(-distance, markers, mask=imageSe)
    simplefilter("default", FutureWarning)
    
    return labels, labels_ws, local_maxi
Code example #20
File: masked.py Project: luukhoavn/menpo
    def erode(self, n_pixels=1):
        r"""
        Returns a copy of this :map:`MaskedImage` in which the mask has been
        shrunk by n pixels along its boundary.

        Parameters
        ----------
        n_pixels : int, optional
            The number of pixels by which we want to shrink the mask along
            its own boundary.

        Returns
        -------
         : :map:`MaskedImage`
            The copy of the masked image in which the mask has been shrunk
            by n pixels along its boundary.
        """
        global binary_erosion
        if binary_erosion is None:
            from scipy.ndimage import binary_erosion  # expensive
        # Erode the edge of the mask in by one pixel
        eroded_mask = binary_erosion(self.mask.mask, iterations=n_pixels)

        image = self.copy()
        image.mask = BooleanImage(eroded_mask)
        return image
Code example #21
File: texseg.py Project: janfrs/kwc-ros-pkg
    def clean_classified_image(self):
        """
        clean the binary image resulting from pixel classification using morphological operators 
        """
        if self.class_image is None:
            self.classify_image()

        bim = self.class_image
        feature_mask = self.features_object.mask_image
        if feature_mask is not None:
            bim = bim & feature_mask

        bim = ni.binary_fill_holes(bim)
        min_gap = 0
        for n in range(min_gap):
            bim = ni.binary_dilation(bim)
            #bim = ni.binary_closing(bim)
        #bim = ni.binary_fill_holes(bim)
        min_radius = 8
        for n in range(min_radius):
            bim = ni.binary_erosion(bim)
            #bim = ni.binary_opening(bim)
        for n in range(min_radius):
            bim = ni.binary_dilation(bim)
        #bim = ni.binary_dilation(bim)
        #bim = ni.binary_erosion(bim)
        self.clean_class_image = bim.astype(np.uint8) * 255
Code example #22
File: ocr.py Project: MMChambers/Geist
def character_seg_erosion(grey_scale_image, max_w_h_ratio=0.85):
    bin_img = grey_scale_image > 0
    labels, num_labels = label(binary_erosion(bin_img > 0))
    for span, mask in _create_spans_and_masks(labels, num_labels):
        char_img = grey_scale_image[:, span[0]:span[1]].copy()
        char_img[mask == False] = 0
        yield char_img
Code example #23
File: anatomical.py Project: oesteban/mriqc
    def _run_interface(self, runtime):

        in_file = nb.load(self.inputs.in_file)
        wm_mask = nb.load(self.inputs.wm_mask).get_data()
        wm_mask[wm_mask < 0.9] = 0
        wm_mask[wm_mask > 0] = 1
        wm_mask = wm_mask.astype(np.uint8)

        if self.inputs.erodemsk:
            # Create a structuring element for the erosion.
            struc = nd.generate_binary_structure(3, 2)
            # Erode the white-matter mask.
            wm_mask = nd.binary_erosion(wm_mask, structure=struc).astype(np.uint8)

        data = in_file.get_data()
        data *= 1000.0 / np.median(data[wm_mask > 0])

        out_file = fname_presuffix(self.inputs.in_file,
                                   suffix='_harmonized', newpath='.')
        in_file.__class__(data, in_file.affine, in_file.header).to_filename(
            out_file)

        self._results['out_file'] = out_file

        return runtime
Code example #24
File: _ndvar.py Project: christianbrodbeck/Eelbrain
def erode(ndvar, dim):
    ax = ndvar.get_axis(dim)
    struct = np.zeros((3,) * ndvar.ndim, bool)
    index = tuple(slice(None) if i == ax else 1 for i in range(ndvar.ndim))
    struct[index] = True
    x = ndimage.binary_erosion(ndvar.x, struct)
    return NDVar(x, ndvar.dims, ndvar.info.copy(), ndvar.name)
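For intuition, the structuring element built above is True only along the chosen axis through the centre of the block, so the erosion acts along that dimension alone. A sketch for a 2-D array eroded along its first axis (assuming ndim == 2 and ax == 0):

import numpy as np

struct = np.zeros((3, 3), bool)
struct[:, 1] = True   # True along axis 0 only; the other axis is untouched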
Code example #25
File: masked.py Project: OlivierML/menpo
    def set_boundary_pixels(self, value=0.0, n_pixels=1):
        r"""
        Returns a copy of this :map:`MaskedImage` for which n pixels along
        its mask boundary have been set to a particular value. This is
        useful in situations where there is absent data in the image which
        can cause, for example, erroneous computations of gradient or features.

        Parameters
        ----------
        value : float or (n_channels, 1) ndarray
        n_pixels : int, optional
            The number of pixels along the mask boundary that will be set
            to ``value``.

        Returns
        -------
         : :map:`MaskedImage`
            The copy of the image for which the n pixels along its mask
            boundary have been set to a particular value.
        """
        global binary_erosion
        if binary_erosion is None:
            from scipy.ndimage import binary_erosion  # expensive
        # Erode the edge of the mask in by one pixel
        eroded_mask = binary_erosion(self.mask.mask, iterations=n_pixels)

        # replace the eroded mask with the diff between the two
        # masks. This is only true in the region we want to nullify.
        np.logical_and(~eroded_mask, self.mask.mask, out=eroded_mask)
        # set all the boundary pixels to a particular value
        self.pixels[..., eroded_mask] = value
Code example #26
File: morphsnakes.py Project: flamholz/guvs
 def step(self):
     """Perform a single step of the morphological snake evolution."""
     # Assign attributes to local variables for convenience.
     u = self._u
     gI = self._data
     dgI = self._ddata
     theta = self._theta
     v = self._v
     
     if u is None:
         raise ValueError, "the levelset is not set (use set_levelset)"
     
     res = np.copy(u)
     
     # Balloon.
     if v > 0:
         aux = binary_dilation(u, self.structure)
     elif v < 0:
         aux = binary_erosion(u, self.structure)
     if v!= 0:
         res[self._threshold_mask_v] = aux[self._threshold_mask_v]
     
     # Image attachment.
     aux = np.zeros_like(res)
     dres = np.gradient(res)
     for el1, el2 in zip(dgI, dres):
         aux += el1*el2
     res[aux > 0] = 1
     res[aux < 0] = 0
     
     # Smoothing.
     for i in xrange(self.smoothing):
         res = curvop(res)
     
     self._u = res
Code example #27
 def __init__(self, XYZ, sigma, n=1):
     self.XYZ = XYZ
     self.sigma = sigma
     if np.isscalar(sigma):
         self.sigma = sigma * (XYZ.max(axis=1) > 1)
     self.n = n
     self.XYZ_vol = np.zeros(XYZ.max(axis=1) + 2, int) - 1
     p = XYZ.shape[1]
     self.XYZ_vol[list(XYZ)] = np.arange(p)
     mask_vol = np.zeros(XYZ.max(axis=1) + 1, int)
     mask_vol[list(XYZ)] += 1
     mask_vol = binary_erosion(mask_vol.squeeze(), iterations=int(round(1.5*self.sigma.max())))
     mask_vol = mask_vol.reshape(XYZ.max(axis=1) + 1).astype(int)
     XYZ_mask = np.array(np.where(mask_vol > 0))
     self.mask = self.XYZ_vol[XYZ_mask[0], XYZ_mask[1], XYZ_mask[2]]
     q = len(self.mask)
     dX, dY, dZ = XYZ.max(axis=1) + 1
     self.U_vol = np.zeros((3, dX, dY, dZ), float)
     self.U_vol[:, XYZ_mask[0], XYZ_mask[1], XYZ_mask[2]] += 1
     self.U_vol = square_gaussian_filter(self.U_vol, [0, self.sigma[0], self.sigma[1], self.sigma[2]], mode='constant')
     self.norm_coeff = 1 / np.sqrt(self.U_vol.max())
     self.U = np.zeros((3, n, q), float)
     self.V = np.zeros((3, n, p), float)
     self.W = np.zeros((3, n, p), int)
     self.I = np.arange(p).reshape(1, p) * np.ones((n, 1), int)
     self.XYZ_min = self.XYZ.min(axis=1).reshape(3, 1) - 1
     self.XYZ_max = self.XYZ.max(axis=1).reshape(3, 1) + 1
Code example #28
def measure_fluorescence(image, worm_mask, well_mask=None):
    if well_mask is not None:
        restricted_mask = ndimage.binary_erosion(well_mask, iterations=15)
        background = polyfit.fit_polynomial(image[::4,::4], mask=restricted_mask[::4,::4], degree=2).astype(numpy.float32)
        background = ndimage.zoom(background, 4)
        background /= background[well_mask].mean()
        background[background <= 0.01] = 1 # we're going to divide by background, so prevent div/0 errors
        image = image.astype(numpy.float32) / background
        image[~well_mask] = 0

    worm_pixels = image[worm_mask]
    low_px_mean, low_px_std = mcd.robust_mean_std(worm_pixels[worm_pixels < worm_pixels.mean()], 0.5)
    expression_thresh = low_px_mean + 2.5*low_px_std
    high_expression_thresh = low_px_mean + 6*low_px_std
    fluo_px = worm_pixels[worm_pixels > expression_thresh]
    high_fluo_px = worm_pixels[worm_pixels > high_expression_thresh]

    area = worm_mask.sum()
    integrated = worm_pixels.sum()
    median, percentile95 = numpy.percentile(worm_pixels, [50, 95])
    expression_area = fluo_px.size
    expression_area_fraction = expression_area / area
    expression_mean = fluo_px.mean()
    high_expression_area = high_fluo_px.size
    high_expression_area_fraction = high_expression_area / area
    high_expression_mean = high_fluo_px.mean()
    high_expression_integrated = high_fluo_px.sum()

    expression_mask = (image > expression_thresh) & worm_mask
    high_expression_mask = (image > high_expression_thresh) & worm_mask

    return data_row(area, integrated, median, percentile95,
     expression_area, expression_area_fraction, expression_mean,
     high_expression_area, high_expression_area_fraction,
     high_expression_mean, high_expression_integrated), (image, background, expression_mask, high_expression_mask)
Code example #29
File: calc.py Project: islenv/openradar
def declutter_experimental(data, ratio):
    """
    For each connected region, set the data to zero if the ratio of the
    region's edge mean to its overall mean exceeds ``ratio``.
    """
    # label
    data_nonzero = data > 0
    lbl, nlbl = ndimage.label(data_nonzero)

    # data
    data_size = ndimage.measurements.sum(data_nonzero, lbl, lbl)
    data_sum = ndimage.measurements.sum(data, lbl, lbl)

    # edge
    edge = np.where(np.logical_and(ndimage.binary_erosion(data_nonzero), data_nonzero), 0, data)
    edge_nonzero = edge > 0
    edge_size = ndimage.measurements.sum(edge_nonzero, lbl, lbl)
    edge_sum = ndimage.measurements.sum(edge, lbl, lbl)

    # ratio
    data_mean = data_sum[data_nonzero] / data_size[data_nonzero]
    edge_mean = edge_sum[data_nonzero] / edge_size[data_nonzero]
    data_ratio = np.zeros(data.shape, data.dtype)
    data_ratio[data_nonzero] = edge_mean / data_mean

    return np.where(data_ratio > ratio, 0, data)
Code example #30
File: landscape_modifier.py Project: yabellini/LecoS
 def InDecPatch(self,which,amount):
     s = ndimage.generate_binary_structure(2,1) # taxi-cab struct
     if which == 0:
         ras = ndimage.binary_dilation(self.cl_array,s,iterations=amount,border_value=0)
     else:
         ras = ndimage.binary_erosion(self.cl_array,s,iterations=amount,border_value=0)
     return(ras)
Code example #31
    def _aux_generator(self, batch_size=16, sample_set='train', datatype = None, depthres = 256, seg_joint_res = 64):
        """ Auxiliary Generator
        Args:
            See Args section in self._generator
        """
        generated_batch = {}
        random.seed(time.time())
        generated_batch['train_img'] = np.zeros((batch_size, 256, 256, 3), dtype=np.float32)
        generated_batch['train_gtseg'] = np.zeros([batch_size, seg_joint_res, seg_joint_res], dtype = np.int8)
        generated_batch['train_gt2dheat'] = np.zeros([batch_size, seg_joint_res, seg_joint_res, self.joints_num], dtype = np.float32)
        generated_batch['train_gtjoints'] = np.zeros((batch_size, 64, 64, self.joints_num * self.Zres_joint), dtype=np.float32)
        generated_batch['train_gtdepthre'] =  np.zeros((batch_size, depthres, depthres), dtype= np.float32)
        generated_batch['train_mask'] = np.zeros([batch_size, depthres, depthres],dtype = np.bool)
        generated_batch['train_2djoints'] = np.zeros([batch_size, 2, self.joints_num ],dtype= np.float32)
        generated_batch['train_3djoints'] = np.zeros([batch_size, 3, self.joints_num ],dtype= np.float32)

        i=0
        if datatype == 'normal_dataset':
            generated_batch['normal_train_img'] = np.zeros((batch_size, self.normalres[0], self.normalres[1], 3), dtype=np.float32)
            generated_batch['normal_train_gtnormal'] = np.zeros([batch_size, self.normalres[0], self.normalres[1], 3],
                                                                dtype=np.float32)
            generated_batch['normal_train_gtdepthre'] = np.zeros((batch_size, self.normalres[0], self.normalres[1]),
                                                                 dtype=np.float32)
            generated_batch['normal_train_mask'] = np.zeros([batch_size, self.normalres[0], self.normalres[1]],
                                                            dtype=np.bool)
            while i < batch_size:
                img_name = self.filelist[self.currentindex]
                type_dir = os.path.join(self.test_dir, img_name.split('/')[-4])#random.sample(getsubfolders(self.train_dir), 1)[0])
                depth_dir = type_dir + '/depth_maps'
                normal_dir = type_dir + '/normals'

                view_type = img_name.split('/')[-2]

                depth_dir = os.path.join(depth_dir, view_type)
                normal_dir = os.path.join(normal_dir, view_type)

                index = img_name[-9:-5]
                depth_name = depth_dir + '/depth_' + index + '.npz'
                normal_name = normal_dir + '/normals_' + index + '.npz'


                bg_name = os.path.join(self.bg_dir, random.sample(os.listdir(self.bg_dir), 1)[0])
                bg_name = os.path.join(bg_name, random.sample(os.listdir(bg_name), 1)[0])

                try:
                    bg_img = io.imread(bg_name)
                except:
                    self.currentindex +=1
                    continue
                bg_img = scipy.misc.imresize(bg_img, [self.normalres[0], self.normalres[1]], interp='bilinear')
                img = io.imread(img_name)
                nmap = np.load(normal_name)['normals']
                dmap = np.load(depth_name)['depth']
                mask = dmap > 1e-4

                generated_mask = np.zeros([self.normalres[0], self.normalres[1]], dtype=np.bool)
                generated_mask[15:239, 15:239] = mask
                generated_batch['normal_train_mask'][i] = generated_mask
                img_pad = np.zeros((self.normalres[0], self.normalres[1], 3), dtype=np.uint8)
                img_pad[15: 239, 15: 239, :] = img.astype(np.float32)
                bg_img[generated_mask] = img_pad[generated_mask]

                # plt.figure()
                # plt.imshow(bg_img, aspect='auto',
                #            cmap=plt.get_cmap('jet'))
                # plt.show()

                bg_img = bg_img.astype(np.float32)
                # color augmentation
                if sample_set == 'train':
                    for j in range(3):
                        bg_img[:, :, j] = np.clip(
                            bg_img[:, :, j].astype(np.float32) / 255 * np.random.uniform(0.6, 1.4), 0.0,
                            1.0)
                else:
                    for j in range(3):
                        bg_img[:, :, j] = np.clip(bg_img[:, :, j].astype(np.float32) / 255, 0.0, 1.0)
                # print('color augmentation done!')

                # whitening rgb image
                meanstd = load_lua(self.meanRgb_dir)
                for j in range(3):
                    bg_img[:, :, j] = bg_img[:, :, j] - meanstd['mean'][j]
                    bg_img[:, :, j] = bg_img[:, :, j] / meanstd['std'][j]
                generated_batch['normal_train_img'][i,:,:,:] = bg_img

                generated_batch['normal_train_gtnormal'][i, 15:239, 15:239, :] = nmap


                if self.show:
                    plt.figure()
                    plt.imshow(generated_batch['normal_train_gtnormal'][i, :, :, 0], aspect='auto', cmap=plt.get_cmap('jet'))
                    plt.show()
                #
                # plt.figure()
                # plt.imshow(generated_batch['normal_train_gtnormal'][i, :, :, 1], aspect='auto', cmap=plt.get_cmap('jet'))
                # plt.show()
                #
                # plt.figure()
                # plt.imshow(generated_batch['normal_train_gtnormal'][i, :, :, 2], aspect='auto', cmap=plt.get_cmap('jet'))
                # plt.show()
                # print(generated_batch['normal_train_mask'].shape)
                # plt.figure()
                # plt.imshow(generated_batch['normal_train_mask'][i, :, :, 0], aspect='auto', cmap=plt.get_cmap('jet'))
                # plt.show()

                generated_batch['normal_train_gtdepthre'][i, 15:239, 15:239] = dmap

                i = i + 1

                self.currentindex+=1
                if(self.currentindex == self.datanum-1):
                    self._reset_filelist(datatype,sample_set)
            return  generated_batch


        if datatype == 'realtest':
            while i < batch_size:
                #name = random.sample(glob.glob(self.test_dir + "/*.jpg"), 1)[0]
                name = self.filelist[self.currentindex]
                testimg = io.imread(name)
                testimg = scipy.misc.imresize(testimg, [self.insize[1], self.insize[1]], interp='bilinear').astype(np.float32)
                meanstd = load_lua(self.meanRgb_dir)
                for j in range(3):
                    testimg[:, :, j] = np.clip(testimg[:, :, j].astype(np.float32) / 255.0, 0.0, 1.0)
                    testimg[:, :, j] = testimg[:, :, j] - meanstd['mean'][j]
                    testimg[:, :, j] = testimg[:, :, j] / meanstd['std'][j]
                generated_batch['train_img'][i] = cv2.resize(testimg, (self.insize[0], self.insize[1]), interpolation=cv2.INTER_NEAREST)
                i += 1
                self.currentindex += 1

                if self.show:
                    plt.figure()
                    plt.imshow(generated_batch['train_img'][0], aspect='auto', cmap=plt.get_cmap('jet'))
                    plt.show()


                if(self.currentindex == self.datanum-1):
                    self._reset_filelist('realtest','test')
            return generated_batch

        while i < batch_size:
            if datatype != 'detail_data' and datatype != 'up-3d':
                name = self.filelist[self.currentindex]
                #name = '/home/sicong/surreal/data/SURREAL/data/cmu/train/run1/ung_91_33/ung_91_33_c0001.mp4'
                cap = cv2.VideoCapture(name)
                length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                frameindex = random.randint(1, length)
                #frameindex = 82
                if(sample_set == 'test'):
                    print('test file: ',name, 'frameindex: ', frameindex)
                cap.set(1, frameindex - 1)
                _, img_full = cap.read()
                try:
                    img_full = cv2.cvtColor(img_full, cv2.COLOR_BGR2RGB)
                except:
                    continue
                bodyinfo = sio.loadmat(name[0:-4] + '_info.mat')

            elif datatype == 'detail_data':
                name = self.filelist[self.currentindex]
                frameindex = name[-12:-8]
                name = '/home/sicong/detail_data/data/3/0235_rgb.png'
                try:
                    img_full = io.imread(name)
                except:
                    self.currentindex += 1
                    continue

            elif datatype == 'up-3d':
                if sample_set == 'train':
                    info_dir = self.train_dir + '/pose_prepared/91/500/up-p91/'
                    seg_dir = self.train_dir + '/segment/up-s31/s31/'
                elif sample_set == 'valid':
                    info_dir = self.valid_dir + '/pose_prepared/91/500/up-p91/'
                    seg_dir = self.valid_dir + '/segment/up-s31/s31/'
                elif sample_set == 'test':
                    info_dir = self.test_dir + '/pose_prepared/91/500/up-p91/'
                    seg_dir = self.test_dir + '/segment/up-s31/s31/'

                name = self.filelist[self.currentindex]
                if(sample_set == 'test'):
                    print('test file: ',name)
                # name = '/media/sicong/a86d93af-1a2e-469b-972c-f819c47cd5ee/datasets/pose_prepared/91/500/up-p91/04877_image.png'
                frameindex = name[-15:-10]

                try:
                    img_full = io.imread(name)
                except:
                    self.currentindex +=1
                    continue
                try:
                    bodyinfo = sio.loadmat(info_dir + frameindex+ '_info.mat')
                except:
                    self.currentindex += 1
                    continue
            if self.show:
                img = Image.fromarray(img_full, 'RGB')
                img.show()

            if datatype != 'detail_data':
                # load 2d joints to determine the bounding box
                # [2 x njoints]
                if datatype != 'up-3d':
                    if bodyinfo is None:
                        self.currentindex += 1
                        continue
                    joints2dfull = bodyinfo['joints2D']
                    if joints2dfull is None:
                        self.currentindex += 1
                        continue
                    if len(joints2dfull.shape) < 3:
                        self.currentindex += 1
                        continue
                    if frameindex - 1 >= joints2dfull.shape[2]:
                        self.currentindex += 1
                        continue
                    joints2d = joints2dfull[:, self.joints_subset, frameindex - 1].astype(np.int64)

                    joints3dfull = bodyinfo['joints3D']
                    if joints3dfull is None:
                        self.currentindex += 1
                        continue
                    if frameindex - 1 >= joints2dfull.shape[2]:
                        self.currentindex += 1
                        continue
                    joints3d = joints3dfull[:, self.joints_subset, frameindex - 1]

                    generated_batch['train_2djoints'][i,:] = joints2d
                    generated_batch['train_3djoints'][i,:] = joints3d

                    depth_full = sio.loadmat(name[0:-4] + '_depth.mat')['depth_' + str(frameindex)]
                elif datatype == 'up-3d':
                    if bodyinfo is None:
                        self.currentindex += 1
                        continue
                    joints2dfull = bodyinfo['joints2D']
                    if joints2dfull is None:
                        self.currentindex += 1
                        continue
                    if len(joints2dfull.shape) < 2:
                        self.currentindex += 1
                        continue
                    joints2d = joints2dfull[:, self.joints_subset].astype(np.int64)
                    joints3dfull = np.transpose(bodyinfo['joints3D'])
                    if joints3dfull is None:
                        self.currentindex += 1
                        continue
                    joints3d = joints3dfull[:, self.joints_subset]

                    depth_full = sio.loadmat(info_dir + frameindex+ '_depth.mat')['depth']

                #set pelvis as the original point
                camLoc = bodyinfo['camLoc'][0]
                if datatype == 'up-3d':
                    # camlocation = camLoc[2]
                    # joints3d[2, :] = camlocation - joints3d[2, :]
                    dPelvis = joints3d[2, 0]
                else:
                    camlocation = camLoc
                    joints3d[0, :] = camlocation - joints3d[0, :]
                    dPelvis = joints3d[0, 0]

                if datatype != 'up-3d':
                    segm_raw = sio.loadmat(name[0:-4] + '_segm.mat')['segm_'+str(frameindex)]

                    segm_full = util.changeSegmIx(segm_raw,
                                                  [2, 12, 9, 2, 13, 10, 2, 14, 11, 2, 14, 11, 2, 2, 2, 1, 6, 3, 7, 4, 8,
                                                   5, 8,
                                                   5]).astype(np.int8)

                else:
                    segm_raw = cv2.imread(seg_dir+ frameindex + '_ann_vis.png')
                    segm_full = util.up3dtosurreal(segm_raw)

                if self.show:
                    plt.figure()
                    plt.imshow(segm_full, aspect='auto', cmap=plt.get_cmap('jet'))
                    plt.show()

                if datatype == 'up-3d':
                    quantized_joints3d, _ = util.quantize(joints3d[2, :], dPelvis, self.stp_joint, self.Zres_joint)
                    quantized_joints3d = quantized_joints3d * -1
                    relative_depth, _ = util.relative_up3d(depth_full, dPelvis, self.stp, self.Zres)  # self.halfrange
                elif datatype != 'detail_data':
                    quantized_joints3d, _ = util.quantize(joints3d[0, :], dPelvis, self.stp_joint, self.Zres_joint)
                    quantized_joints3d = quantized_joints3d * -1
                    relative_depth, _ = util.relative(depth_full,dPelvis, self.stp, self.Zres) #self.halfrange

                # TODO: 1. resize quantized_depth 2. output dense continuous relative depth in util.quantize
                if self.show:
                    plt.figure()
                    plt.imshow(depth_full, aspect='auto', cmap=plt.get_cmap('jet'))
                    plt.show()
                if self.show:
                    plt.figure()
                    plt.imshow(relative_depth, aspect='auto', cmap=plt.get_cmap('jet'))
                    plt.show()
            else:
                depth_full = io.imread(name[0:-8] + '_depth.png')
                depthcount = np.sum(depth_full > 100)
                if depthcount < 100 * 100:
                    self.currentindex += 1
                    continue

            if datatype != 'detail_data':
                # crop, scale
                rot = 0
                scale = util.getScale(joints2d)
                center = util.getCenter(joints2d)
            else:
                # crop, scale
                rot = 0
                scale = util.getScale_detail(depth_full)
                center = util.getCenter_detail(depth_full)

            if (center[0] < 1 or center[1] < 1 or center[1] > img_full.shape[0] or center[0] > img_full.shape[1]):
                self.currentindex +=1
                continue


## for rgb image
            if datatype != 'up-3d' and datatype!= 'detail_data':
                img = util.cropfor3d(img_full, center, scale, rot, self.insize[1], 'bilinear')
            elif datatype == 'detail_data':
                img = util_detail.cropfor3d(img_full, center, scale, rot, self.insize[1], 'bilinear')
            elif datatype == 'up-3d':
                norm_factor = np.array([self.insize[1]/img_full.shape[1], self.insize[1]/img_full.shape[0]], dtype=np.float32)
                img = scipy.misc.imresize(img_full, [self.insize[1], self.insize[1]], interp= 'bilinear')
                badexample = False
                for j in range(joints2d.shape[1]):
                    joints2d_rescaled = np.multiply(joints2d[:,j],norm_factor).astype(np.int64)
                    if joints2d_rescaled[0] < 0 or joints2d_rescaled[0] > 256 or joints2d_rescaled[1] < 0 or joints2d_rescaled[1] > 256:
                        badexample = True
                if badexample:
                    self.currentindex += 1
                    continue

            if img is None:
                self.currentindex+=1
                continue
            if (img.shape[0] == 0 or img.shape[1] == 0):
                self.currentindex+=1
                continue

            if self.show:
                imgnew = Image.fromarray(img, 'RGB')
                imgnew.show()

            # color augmentation
            img_bak = img
            img = img.astype(np.float32)
            if sample_set == 'train':
                for j in range(3):
                    img[:, :, j] = np.clip(img[:, :, j].astype(np.float32) / 255 * np.random.uniform(0.6, 1.4), 0.0,
                                           1.0)
            else:
                for j in range(3):
                    img[:, :, j] = np.clip(img[:, :, j].astype(np.float32) / 255, 0.0, 1.0)
            # print('color augmentation done!')

            # whitening rgb image
            meanstd = load_lua(self.meanRgb_dir)
            for j in range(3):
                img[:, :, j] = img[:, :, j] - meanstd['mean'][j]
                img[:, :, j] = img[:, :, j] / meanstd['std'][j]

            generated_batch['train_img'][i] = img

## for depth
            if datatype == 'detail_data':
                depm_continue = util_detail.cropfor3d(depth_full,center,scale,rot,self.insize[1],'bilinear')

                if self.show:
                    plt.figure()
                    plt.imshow(depth_full, aspect='auto', cmap=plt.get_cmap('jet'))
                    plt.show()

                if self.show:
                    plt.figure()
                    plt.imshow(depm_continue, aspect='auto', cmap=plt.get_cmap('jet'))
                    plt.show()
                mask =depm_continue>100
                depm_continue[depm_continue < 100] = 15 * 1000.0
                final_depth = depm_continue/1000.0
                median_value =np.median(final_depth[final_depth<5])
                final_depth = final_depth - median_value + 0.10
                final_depth[final_depth>5] = 0.60
                generated_batch['train_gtdepthre'][i, :, :] = final_depth

                mask = ndimage.binary_erosion(mask).astype(mask.dtype)
                generated_batch['train_mask'][i,:,:] = mask

            elif datatype == 'up-3d':
                depm_continue = cv2.resize(relative_depth.astype(np.float32), (depthres, depthres), interpolation=cv2.INTER_NEAREST)
                generated_batch['train_gtdepthre'][i, :, :] = depm_continue
                mask = depm_continue<0.59

                mask = ndimage.binary_erosion(mask).astype(mask.dtype)
                generated_batch['train_mask'][i,:,:] = mask

            else:
                depm_continue = util.cropdepth(relative_depth,center,scale,rot,self.insize[1],0.60)
                generated_batch['train_gtdepthre'][i, :, :] = cv2.resize(depm_continue,(depthres, depthres),interpolation=cv2.INTER_NEAREST)
                mask = depm_continue<0.59

                mask = ndimage.binary_erosion(mask).astype(mask.dtype)
                generated_batch['train_mask'][i,:,:] = mask




            if self.show:
                plt.figure()
                plt.imshow(generated_batch['train_gtdepthre'][i, :, :], aspect='auto', cmap=plt.get_cmap('jet'))
                plt.show()

            # if self.show:
            #     plt.figure()
            #     plt.imshow(mask, aspect='auto', cmap=plt.get_cmap('jet'))
            #     plt.show()

## for 2d segmentation

            if datatype == 'up-3d':
                segm = cv2.resize(segm_full, (seg_joint_res, seg_joint_res),
                                                            interpolation=cv2.INTER_NEAREST)
                generated_batch['train_gtseg'][i,:,:] = segm

            elif datatype != 'detail_data':
                segm = util.cropfor3d(segm_full, center, scale, rot, self.insize[1],'nearest')
                generated_batch['train_gtseg'][i,:,:] = cv2.resize(segm, (seg_joint_res, seg_joint_res),
                                                            interpolation=cv2.INTER_NEAREST)
                if self.show:
                    plt.figure()
                    plt.imshow(segm, aspect='auto', cmap=plt.get_cmap('jet'))
                    plt.show()

## for 2d joints

            if datatype != 'detail_data':
                # TODO: create 2d heatmaps
                sigma_2d_inscale = math.floor(2 * self.insize[0]/self.outsize[0])
                out_2d = np.zeros([self.insize[0], self.insize[1], self.joints_num])

                for j in range(self.joints_num):
                    if datatype == 'up-3d':
                        #pt = util.transform(joints2d[:, j], center, scale, 0, self.insize[0], False)
                        pt = np.multiply(joints2d[:, j], norm_factor).astype(np.int64)
                        # print('joints: ', joints2d[:, j], 'pt: ', pt)
                    else:
                        pt = util.transform(joints2d[:, j], center, scale, 0, self.insize[0], False)
                    heat_slice = util.Drawgaussian2D(img,pt,sigma_2d_inscale)
                    # if np.sum(heat_slice) > 1e-2:
                    #     heat_slice /= np.sum(heat_slice)
                    # else:
                    #     heat_slice *= 0
                    #print('heat_slice.shape',heat_slice.shape)

                    out_2d[:, :, j] = heat_slice
                    # if self.show:
                    #     plt.figure()
                    #     plt.imshow(heat_slice, aspect='auto', cmap=plt.get_cmap('jet'))
                    #     plt.show()

                out_2d = cv2.resize(out_2d,(seg_joint_res,seg_joint_res),interpolation=cv2.INTER_NEAREST)
                generated_batch['train_gt2dheat'][i] = out_2d
                if self.show:
                    # img4show = img
                    # for j in range(3):
                    #     img4show[:, :, j] = img4show[:, :, j] - meanstd['mean'][j]
                    #     img4show[:, :, j] = img4show[:, :, j] / meanstd['std'][j]
                    # img4show = img4show * 255.0
                    visualizer.draw2dskeleton(img_bak.astype(np.uint8), out_2d)

            
# for 3d joints

            #print('draw3d---------------------------------------------------')
            if datatype != 'detail_data':
                out = np.zeros([self.outsize[0], self.outsize[1], self.joints_num * self.Zres_joint])
                sigma_2d = 2
                size_z = 2 * math.floor((6* sigma_2d * self.Zres_joint / self.outsize[0] +1) / 2) + 1
                for j in range(self.joints_num):
                    #if joints2d[1,j] >= img_full.shape[0] or joints2d[0,j] >=img_full.shape[1] or joints2d[1,j]<0 or joints2d[0,j]<0:
                        #continue
                    z = quantized_joints3d[j]
                    if datatype == 'up-3d':
                        pt = np.multiply(joints2d[:, j], norm_factor/4).astype(np.int64)
                    else:
                        pt = util.transform(joints2d[:, j], center, scale, 0, self.outsize[0], False)
                    out[:,:,j * self.Zres_joint : (j+1) * self.Zres_joint] = util.Drawguassian3D(out[:,:,j * self.Zres_joint : (j+1) * self.Zres_joint], pt, z , sigma_2d, size_z)

                generated_batch['train_gtjoints'][i] = out
                if self.show:
                    visualizer.draw3dskeleton(self.joints_num,self.Zres_joint,out)
            i = i+1
            self.currentindex +=1
            if(self.currentindex==self.datanum-1):
                self._reset_filelist(datatype,sample_set)


        return generated_batch
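
The mask step above (threshold the continuous depth map, then shrink the valid region by one pixel so boundary pixels are excluded) can be tried in isolation. A minimal sketch, assuming a random placeholder depth map and reusing the 0.59 threshold from the batch code:

import numpy as np
from scipy import ndimage

depth = np.random.rand(64, 64).astype(np.float32)       # placeholder depth map
mask = depth < 0.59                                      # same threshold as above
mask = ndimage.binary_erosion(mask).astype(mask.dtype)   # drop boundary pixels
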
Code example #32
0
            def __call__(self, grid, color, opacity=1.0):

                if grid.dtype == np.bool_:
                    xo, yo, zo = np.where(grid)
                else:
                    xo, yo, zo = grid[:, 0], grid[:, 1], grid[:, 2]
                mlab.points3d(self.voxel_data["x_grid"][xo],
                              self.voxel_data["y_grid"][yo],
                              self.voxel_data["z_grid"][zo],
                              color=color,
                              scale_mode="none",
                              scale_factor=self.voxel_data["vox_size"],
                              mode='cube',
                              opacity=opacity)

        # Make an outline of the air_inside for quicker visualization.
        ero_vis = ndimage.binary_erosion(air_inside)
        air_inside = air_inside & ~ero_vis

        scene = mlab.figure(size=(400, 400))
        scene.scene.background = (0.2, 0.2, 0.2)

        vox_plotter = Voxel_Plotter(voxel_dat)

        vox_plotter(air_inside, (0.6, 0.6, 0.8), 0.1)
        vox_plotter(exit_grid, (1.0, 0.0, 0.0))
        vox_plotter(crew_points, (0.3, 1.0, 0.3))

        mlab.show()
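
The outline trick used above (erode the solid volume, then keep only the voxels removed by the erosion) is a cheap way to get a one-voxel shell for visualization. A minimal sketch on a synthetic cube, assumed purely for illustration:

import numpy as np
from scipy import ndimage

vol = np.zeros((32, 32, 32), dtype=bool)
vol[8:24, 8:24, 8:24] = True
shell = vol & ~ndimage.binary_erosion(vol)   # one-voxel outline of the solid
print(vol.sum(), shell.sum())                # far fewer voxels to render
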
Code example #33
0
File: fmap.py Project: tknapen/fmriprep
    def _run_interface(self, runtime):
        from scipy import ndimage as sim

        fmap_nii = nb.load(self.inputs.in_file)
        data = np.squeeze(fmap_nii.get_data().astype(np.float32))

        # Despike / denoise (no-mask)
        if self.inputs.despike:
            data = _despike2d(data, self.inputs.despike_threshold)

        mask = None
        if isdefined(self.inputs.in_mask):
            masknii = nb.load(self.inputs.in_mask)
            mask = masknii.get_data().astype(np.uint8)

            # Erode mask
            if self.inputs.mask_erode > 0:
                struc = sim.iterate_structure(
                    sim.generate_binary_structure(3, 2), 1)
                mask = sim.binary_erosion(
                    mask, struc,
                    iterations=self.inputs.mask_erode).astype(np.uint8)  # pylint: disable=no-member

        self._results['out_file'] = fname_presuffix(self.inputs.in_file,
                                                    suffix='_enh',
                                                    newpath=runtime.cwd)
        datanii = nb.Nifti1Image(data, fmap_nii.affine, fmap_nii.header)

        if self.inputs.unwrap:
            data = _unwrap(data, self.inputs.in_magnitude, mask)
            self._results['out_unwrapped'] = fname_presuffix(
                self.inputs.in_file, suffix='_unwrap', newpath=runtime.cwd)
            nb.Nifti1Image(data, fmap_nii.affine, fmap_nii.header).to_filename(
                self._results['out_unwrapped'])

        if not self.inputs.bspline_smooth:
            datanii.to_filename(self._results['out_file'])
            return runtime
        else:
            from ..utils import bspline as fbsp
            from statsmodels.robust.scale import mad

            # Fit BSplines (coarse)
            bspobj = fbsp.BSplineFieldmap(datanii,
                                          weights=mask,
                                          njobs=self.inputs.num_threads)
            bspobj.fit()
            smoothed1 = bspobj.get_smoothed()

            # Manipulate the difference map
            diffmap = data - smoothed1.get_data()
            sderror = mad(diffmap[mask > 0])
            LOGGER.info('SD of error after B-Spline fitting is %f', sderror)
            errormask = np.zeros_like(diffmap)
            errormask[np.abs(diffmap) > (10 * sderror)] = 1
            errormask *= mask

            nslices = 0
            try:
                errorslice = np.squeeze(
                    np.argwhere(errormask.sum(0).sum(0) > 0))
                nslices = errorslice[-1] - errorslice[0]
            except IndexError:  # mask is empty, do not refine
                pass

            if nslices > 1:
                diffmapmsk = mask[..., errorslice[0]:errorslice[-1]]
                diffmapnii = nb.Nifti1Image(
                    diffmap[..., errorslice[0]:errorslice[-1]] * diffmapmsk,
                    datanii.affine, datanii.header)

                bspobj2 = fbsp.BSplineFieldmap(diffmapnii,
                                               knots_zooms=[24., 24., 4.],
                                               njobs=self.inputs.num_threads)
                bspobj2.fit()
                smoothed2 = bspobj2.get_smoothed().get_data()

                final = smoothed1.get_data().copy()
                final[..., errorslice[0]:errorslice[-1]] += smoothed2
            else:
                final = smoothed1.get_data()

            nb.Nifti1Image(final, datanii.affine, datanii.header).to_filename(
                self._results['out_file'])

        return runtime
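
A small sketch of the mask-erosion step above in isolation, assuming a synthetic uint8 mask in place of the NIfTI data: the structuring element is a 3-D 18-connected element grown once with iterate_structure, then applied for a fixed number of iterations (mask_erode in the interface).

import numpy as np
from scipy import ndimage as sim

mask = np.zeros((20, 20, 20), dtype=np.uint8)
mask[4:16, 4:16, 4:16] = 1
struc = sim.iterate_structure(sim.generate_binary_structure(3, 2), 1)
eroded = sim.binary_erosion(mask, struc, iterations=2).astype(np.uint8)
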
Code example #34
0
def main(visualize=False,
         learn=False,
         actions=None,
         subjects=None,
         n_frames=220):
    # learn = True
    # learn = False
    if actions is None:
        actions = [2]
    if subjects is None:
        subjects = [2]
    # actions = [1]
    # actions = [1, 2, 3, 4, 5]
    # subjects = [1]
    if 1:
        MHAD = True
        cam = MHADPlayer(base_dir='/Users/colin/Data/BerkeleyMHAD/',
                         kinect=1,
                         actions=actions,
                         subjects=subjects,
                         reps=[1],
                         get_depth=True,
                         get_color=True,
                         get_skeleton=True,
                         fill_images=False)
    else:
        MHAD = False
        cam = KinectPlayer(base_dir='./',
                           device=1,
                           bg_subtraction=True,
                           get_depth=True,
                           get_color=True,
                           get_skeleton=True,
                           fill_images=False)
        bg = Image.open(
            '/Users/colin/Data/JHU_RGBD_Pose/CIRL_Background_A.tif')
        # cam = KinectPlayer(base_dir='./', device=2, bg_subtraction=True, get_depth=True, get_color=True, get_skeleton=True, fill_images=False)
        # bg = Image.open('/Users/colin/Data/JHU_RGBD_Pose/CIRL_Background_B.tif')
        cam.bgSubtraction.backgroundModel = np.array(bg.getdata()).reshape(
            [240, 320]).clip(0, 4500)
    height, width = cam.depthIm.shape
    skel_previous = None

    face_detector = FaceDetector()
    hand_detector = HandDetector(cam.depthIm.shape)
    # curve_detector = CurveDetector(cam.depthIm.shape)

    # Video writer
    # video_writer = cv2.VideoWriter("/Users/colin/Desktop/test.avi", cv2.cv.CV_FOURCC('M','J','P','G'), 15, (320,240))

    # Save Background model
    # im = Image.fromarray(cam.depthIm.astype(np.int32), 'I')
    # im.save("/Users/Colin/Desktop/k2.png")

    # Setup pose database
    append = True
    append = False
    # pose_database = PoseDatabase("PoseDatabase.pkl", learn=learn, search_joints=[0,4,7,10,13], append=append)
    pose_database = PoseDatabase("PoseDatabase.pkl",
                                 learn=learn,
                                 search_joints=[0, 2, 4, 5, 7, 10, 13],
                                 append=append)

    # Setup Tracking
    skel_init, joint_size, constraint_links, features_joints, skel_parts, convert_to_kinect = get_14_joint_properties(
    )
    constraint_values = []
    for c in constraint_links:
        constraint_values += [
            np.linalg.norm(skel_init[c[0]] - skel_init[c[1]], 2)
        ]
    constraint_values = np.array(constraint_values)

    skel_current = None  #skel_init.copy()
    skel_previous = None  #skel_current.copy()

    # Evaluation
    accuracy_all_db = []
    accuracy_all_track = []
    joint_accuracy_db = []
    joint_accuracy_track = []
    # geo_accuracy = []
    # color_accuracy = []
    # lbp_accuracy = []

    frame_count = 0
    frame_rate = 1
    if not MHAD:
        cam.next(350)
    frame_prev = 0
    # try:
    if 1:
        while cam.next(frame_rate):  # and frame_count < n_frames:
            if frame_count - frame_prev > 100:
                print ""
                print "Frame #{0:d}".format(frame_count)
                frame_prev = frame_count

            if not MHAD:
                if len(cam.users) == 0:
                    continue
                else:
                    # cam.users = [np.array(cam.users[0]['jointPositions'].values())]
                    if np.any(cam.users[0][0] == -1):
                        continue
                    cam.users[0][:, 1] *= -1
                    cam.users_uv_msr = [
                        cam.camera_model.world2im(cam.users[0], [240, 320])
                    ]

            # Apply mask to image
            if MHAD:
                mask = cam.get_person(2) > 0
            else:
                mask = cam.get_person() > 0
                if np.all(mask == False):
                    continue

            im_depth = cam.depthIm
            cam.depthIm[cam.depthIm > 3000] = 0
            im_color = cam.colorIm * mask[:, :, None]
            cam.colorIm *= mask[:, :, None]
            pose_truth = cam.users[0]
            pose_truth_uv = cam.users_uv_msr[0]

            # Get bounding box around person
            box = nd.find_objects(mask)[0]
            d = 20
            # Widen box
            box = (slice(np.maximum(box[0].start-d, 0), \
              np.minimum(box[0].stop+d, height-1)), \
                slice(np.maximum(box[1].start-d, 0), \
              np.minimum(box[1].stop+d, width-1)))
            box_corner = [box[0].start, box[1].start]
            ''' ---------- ----------------------------------- --------'''
            ''' ----------- Feature Detector centric approach ---------'''
            ''' ---------- ----------------------------------- --------'''
            ''' ---- Calculate Detectors ---- '''
            # Face detection
            face_detector.run(im_color[box])
            # Skin detection
            hand_markers = hand_detector.run(im_color[box], n_peaks=3)
            # Calculate Geodesic Extrema
            im_pos = cam.camera_model.im2PosIm(
                cam.depthIm * mask)[box] * mask[box][:, :, None]
            geodesic_markers = geodesic_extrema_MPI(im_pos,
                                                    iterations=5,
                                                    visualize=False)
            _, geo_map = geodesic_extrema_MPI(im_pos,
                                              iterations=1,
                                              visualize=True)
            geodesic_markers_pos = im_pos[geodesic_markers[:, 0],
                                          geodesic_markers[:, 1]]

            markers = list(geodesic_markers) + list(
                hand_markers)  #+ list(lop_markers) + curve_markers
            markers = np.array([list(x) for x in markers])
            ''' ---- Database lookup ---- '''
            if 1:
                pts_mean = im_pos[(im_pos != 0)[:, :, 2]].mean(0)
                if learn:
                    # Normalize pose
                    pose_uv = cam.users_uv[0]
                    if np.any(pose_uv == 0):
                        print "skip"
                        frame_count += frame_rate
                        continue
                    pose_database.update(pose_truth - pts_mean)

                else:
                    # Concatenate markers
                    markers = list(geodesic_markers) + hand_markers
                    markers = np.array([list(x) for x in markers])

                    # Normalize pose
                    pts = im_pos[markers[:, 0], markers[:, 1]]
                    pts = np.array([x for x in pts if x[0] != 0])
                    pts -= pts_mean

                    # Get closest pose
                    pose = pose_database.query(pts, knn=5)
                    # embed()
                    for i in range(5):
                        pose_tmp = cam.camera_model.world2im(
                            pose[i] + pts_mean, cam.depthIm.shape)
                        cam.colorIm = display_skeletons(cam.colorIm,
                                                        pose_tmp,
                                                        skel_type='Kinect',
                                                        color=(0, i * 40 + 50,
                                                               0))
                    pose = pose[0]

                    # im_pos -= pts_mean
                    # R,t = IterativeClosestPoint(pose, im_pos.reshape([-1,3])-pts_mean, max_iters=5, min_change=.001, pt_tolerance=10000)
                    # pose = np.dot(R.T, pose.T).T - t
                    # pose = np.dot(R, pose.T).T + t

                    pose += pts_mean
                    pose_uv = cam.camera_model.world2im(
                        pose, cam.depthIm.shape)

                    # print pose
                    surface_map = nd.distance_transform_edt(
                        ~nd.binary_erosion(mask[box]),
                        return_distances=False,
                        return_indices=True)
                    try:
                        pose_uv[:, :2] = surface_map[
                            :, pose_uv[:, 0] - box_corner[0],
                            pose_uv[:, 1] - box_corner[1]].T + [box_corner[0], box_corner[1]]
                    except:
                        pass
                    pose = cam.camera_model.im2world(pose_uv,
                                                     cam.depthIm.shape)
                    # print pose
            ''' ---- Tracker ---- '''
            # surface_map = nd.distance_transform_edt(-mask[box], return_distances=False, return_indices=True)
            # surface_map = nd.distance_transform_edt(im_pos[:,:,2]==0, return_distances=False, return_indices=True)

            if skel_previous is None:
                # if 1:
                skel_previous = pose.copy()
                skel_current = pose.copy()
                skel_previous_uv = pose_uv.copy()
                skel_current_uv = pose_uv.copy()

            for _ in range(1):

                # ---- (Step 1A) Find feature coordespondences ----
                try:
                    skel_previous_uv[:, :2] = surface_map[
                        :, skel_previous_uv[:, 0] - box_corner[0],
                        skel_previous_uv[:, 1] - box_corner[1]].T + [box_corner[0], box_corner[1]]
                except:
                    pass
                skel_current = cam.camera_model.im2world(
                    skel_previous_uv, cam.depthIm.shape)

                # Alternative method: use kdtree
                ## Calc euclidian distance between each pixel and all joints
                px_corr = np.zeros(
                    [im_pos.shape[0], im_pos.shape[1],
                     len(skel_current)])
                # for i,s in enumerate(pose):
                # for i,s in enumerate(skel_current):
                # px_corr[:,:,i] = np.sqrt(np.sum((im_pos - s)**2, -1))# / joint_size[i]**2
                # for i,s in enumerate(pose_uv):

                # Geodesics
                for i, s in enumerate(skel_previous_uv):
                    ''' Problem: need to constrain pose_uv to mask '''
                    _, geo_map = geodesic_extrema_MPI(
                        im_pos, [s[0] - box_corner[0], s[1] - box_corner[1]],
                        iterations=1,
                        visualize=True)
                    px_corr[:, :, i] = geo_map
                    subplot(2, 7, i + 1)
                    # imshow(geo_map, vmin=0, vmax=2000)
                    # axis('off')
                    px_corr[geo_map == 0, i] = 9999
                cv2.imshow('gMap', (px_corr.argmin(-1) + 1) / 15. * mask[box])
                ## Handle occlusions by argmax'ing over set of skel parts
                # visible_configurations = list(it.product([0,1], repeat=5))[1:]
                visible_configurations = [
                    # [0,1,1,1,1],
                    # [1,0,0,0,0],
                    [1, 1, 1, 1, 1]
                ]
                px_visibility_label = np.zeros([
                    im_pos.shape[0], im_pos.shape[1],
                    len(visible_configurations)
                ],
                                               dtype=np.uint8)
                visible_scores = np.ones(len(visible_configurations)) * np.inf
                # Try each occlusion configuration set
                for i, v in enumerate(visible_configurations):
                    visible_joints = list(
                        it.chain.from_iterable(skel_parts[np.array(v) > 0]))
                    px_visibility_label[:, :, i] = np.argmin(
                        px_corr[:, :, visible_joints],
                        -1)  #.reshape([im_pos.shape[0], im_pos.shape[1]])
                    visible_scores[i] = np.min(px_corr[:, :, visible_joints],
                                               -1).sum()
                # Choose best occlusion configuration
                occlusion_index = np.argmin(visible_scores)
                occlusion_configuration = visible_configurations[
                    occlusion_index]
                occlusion_set = list(
                    it.chain.from_iterable(skel_parts[np.array(
                        visible_configurations[occlusion_index]) > 0]))
                # Choose label for pixels based on occlusion configuration
                px_label = px_visibility_label[:, :,
                                               occlusion_index] * mask[box]
                px_label_flat = px_visibility_label[:, :, occlusion_index][
                    mask[box]].flatten()

                visible_joints = [
                    1 if x in occlusion_set else 0 for x in range(len(pose))
                ]
                # print visible_joints

                # Project distance to joint's radius
                px_joint_displacement = im_pos[
                    mask[box]] - skel_current[px_label_flat]
                px_joint_magnitude = np.sqrt(
                    np.sum(px_joint_displacement**2, -1))
                joint_mesh_pos = skel_current[px_label_flat] + px_joint_displacement * (
                    joint_size[px_label_flat] / px_joint_magnitude)[:, None]
                px_joint_displacement = joint_mesh_pos - im_pos[mask[box]]
                # Ensure pts aren't too far away
                px_joint_displacement[np.abs(px_joint_displacement) > 500] = 0
                # embed()
                if 0:
                    x = im_pos.copy() * 0
                    x[mask[box]] = joint_mesh_pos

                    for i in range(3):
                        subplot(1, 4, i + 1)
                        imshow(x[:, :, i])
                        axis('off')
                    subplot(1, 4, 4)
                    imshow((px_label + 1) * mask[box])

                # Calc the correspondance change in position for each joint
                correspondence_displacement = np.zeros([len(skel_current), 3])
                ii = 0
                for i, _ in enumerate(skel_current):
                    if i in occlusion_set:
                        labels = px_label_flat == i
                        correspondence_displacement[i] = np.sum(
                            px_joint_displacement[px_label_flat == ii], 0
                        ) / np.sum(
                            px_joint_displacement[px_label_flat == ii] != 0)
                        ii += 1
                correspondence_displacement = np.nan_to_num(
                    correspondence_displacement)
                # print correspondence_displacement
                # Viz correspondences
                if 0:
                    x = im_pos.copy() * 0
                    x[mask[box]] = px_joint_displacement

                    for i in range(3):
                        subplot(1, 4, i + 1)
                        imshow(x[:, :, i])
                        axis('off')
                    subplot(1, 4, 4)
                    imshow((px_label + 1) * mask[box])
                    # embed()
                    # for j in range(3):
                    # 	for i in range(14):
                    # 		subplot(3,14,j*14+i+1)
                    # 		imshow(x[:,:,j]*((px_label==i)*mask[box]))
                    # 		axis('off')
                    show()

                # ---- (Step 2) Update pose state, x ----
                lambda_p = .0
                lambda_c = 1.
                skel_prev_difference = (skel_current - skel_previous)
                # print skel_prev_difference
                skel_current = skel_previous \
                    + lambda_p  * skel_prev_difference \
                    - lambda_c  * correspondence_displacement#\

                # ---- (Step 3) Add constraints ----
                if 1:
                    # A: Link lengths / geometry
                    # skel_current = link_length_constraints(skel_current, constraint_links, constraint_values, alpha=.5)
                    skel_current = geometry_constraints(skel_current,
                                                        joint_size,
                                                        alpha=0.5)
                    skel_current = collision_constraints(
                        skel_current, constraint_links)

                    skel_img_box = (cam.camera_model.world2im(
                        skel_current, cam.depthIm.shape) -
                                    [box[0].start, box[1].start, 0]
                                    )  #/mask_interval
                    skel_img_box = skel_img_box.clip([0, 0, 0], [
                        box[0].stop - box[0].start - 1,
                        box[1].stop - box[1].start - 1, 9999
                    ])
                    # skel_img_box = skel_img_box.clip([0,0,0], [cam.depthIm.shape[0]-1, cam.depthIm.shape[1]-1, 9999])
                    # B: Ray-cast constraints
                    # embed()
                    skel_current, skel_current_uv = ray_cast_constraints(
                        skel_current, skel_img_box, im_pos, surface_map,
                        joint_size)
                    # skel_img_box -= [box[0].start, box[1].start, 0]

                    # # Map back from mask to image
                    # try:
                    # 	skel_current_uv[:,:2] = surface_map[:, skel_img_box[:,0], skel_img_box[:,1]].T# + [box_corner[0], box_corner[1]]
                    # except:
                    # 	pass
                    prob = link_length_probability(skel_current,
                                                   constraint_links,
                                                   constraint_values, 100)
                    # print "Prob:", np.mean(prob), np.min(prob), prob
                    print frame_count
                    thresh = .05
                    if np.min(prob) < thresh:  # and frame_count > 1:
                        print 'Resetting pose'
                        for c in constraint_links[prob < thresh]:
                            for cc in c:
                                skel_current_uv[c] = pose_uv[c] - [
                                    box[0].start, box[1].start, 0
                                ]
                                skel_current[c] = pose[c]
                        # skel_current_uv = pose_uv.copy() - [box[0].start, box[1].start, 0]
                        # skel_current = pose.copy()

                    skel_current_uv = skel_current_uv + [
                        box[0].start, box[1].start, 0
                    ]
                    skel_current = cam.camera_model.im2world(
                        skel_current_uv, cam.depthIm.shape)
                else:
                    skel_current_uv = (cam.camera_model.world2im(
                        skel_current, cam.depthIm.shape))
                    # skel_img_box = skel_img_box.clip([0,0,0], [cam.depthIm.shape[0]-1, cam.depthIm.shape[1]-1, 9999])

            # Update for next round
            skel_previous = skel_current.copy()
            skel_previous_uv = skel_current_uv.copy()
            ''' ---- Accuracy ---- '''
            # embed()
            if 1 and not learn:
                # pose_truth = cam.users[0]
                error_db = pose_truth - pose
                error_track = pose_truth - skel_current
                # print "Error", error
                error_l2_db = np.sqrt(np.sum(error_db**2, 1))
                error_l2_track = np.sqrt(np.sum(error_track**2, 1))
                joint_accuracy_db += [error_l2_db]
                joint_accuracy_track += [error_l2_track]
                accuracy_db = np.sum(error_l2_db < 150) / 14.
                accuracy_track = np.sum(error_l2_track < 150) / 14.
                print "Current db:", accuracy_db, error_l2_db.mean()
                print "Current track:", accuracy_track, error_l2_track.mean()
                print ""
                accuracy_all_db += [accuracy_db]
                accuracy_all_track += [accuracy_track]
                # print "Running avg:", np.mean(accuracy_all)
                # print "Joint avg (per-joint):", np.mean(joint_accuracy_all, -1)
                # print "Joint avg (overall):", np.mean(joint_accuracy_all)
            ''' --- Visualization --- '''

            display_markers(cam.colorIm,
                            hand_markers[:2],
                            box,
                            color=(0, 250, 0))
            if len(hand_markers) > 2:
                display_markers(cam.colorIm, [hand_markers[2]],
                                box,
                                color=(0, 200, 0))
            display_markers(cam.colorIm,
                            geodesic_markers,
                            box,
                            color=(200, 0, 0))
            # display_markers(cam.colorIm, curve_markers, box, color=(0,100,100))
            # display_markers(cam.colorIm, lop_markers, box, color=(0,0,200))

            cam.colorIm = display_skeletons(cam.colorIm,
                                            pose_truth_uv,
                                            skel_type='Kinect',
                                            color=(0, 255, 0))
            cam.colorIm = display_skeletons(cam.colorIm,
                                            pose_uv,
                                            skel_type='Kinect')
            cam.colorIm = display_skeletons(cam.colorIm,
                                            skel_current_uv,
                                            skel_type='Kinect',
                                            color=(0, 0, 255))
            # cam.visualize(color=True, depth=False)
            cam.visualize(color=True, depth=True)

            # embed()
            # ------------------------------------------------------------

            # video_writer.write((geo_clf_map/float(geo_clf_map.max())*255.).astype(np.uint8))
            # video_writer.write(cam.colorIm[:,:,[2,1,0]])

            frame_count += frame_rate
    # except:
    # pass

    print "-- Results for subject {:d} action {:d}".format(
        subjects[0], actions[0])
    print "Running avg (db):", np.mean(accuracy_all_db)
    print "Running avg (track):", np.mean(accuracy_all_track)
    print "Joint avg (overall db):", np.mean(joint_accuracy_db)
    print "Joint avg (overall track):", np.mean(joint_accuracy_track)
    # print 'Done'

    embed()
    return
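
The surface-snapping used above relies on the index map returned by the Euclidean distance transform: for every pixel it gives the coordinates of the nearest mask pixel, so off-mask joint estimates can be pulled back onto the body. A minimal sketch with a synthetic 2-D mask (the point coordinates are arbitrary):

import numpy as np
from scipy import ndimage as nd

mask = np.zeros((50, 50), dtype=bool)
mask[10:30, 15:35] = True
# Indices of the nearest mask pixel for every position in the image.
surface_map = nd.distance_transform_edt(~mask, return_distances=False,
                                        return_indices=True)
pt = np.array([5, 40])                   # a point that lies off the mask
nearest = surface_map[:, pt[0], pt[1]]   # (row, col) of the closest mask pixel
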
Code example #35
0
File: option.py Project: hughplay/TVR
 def _erosion(self, mat, r):
     if r > 0:
         struct = self.get_circular_mask(r)
         mat = ndimage.binary_erosion(mat, struct, border_value=1)
     return mat
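
get_circular_mask is project-specific; a hypothetical disc-shaped structuring element and the same border_value=1 call could look like the sketch below. border_value=1 treats everything outside the array as foreground, so the erosion only eats into interior boundaries, not the image border.

import numpy as np
from scipy import ndimage

def circular_mask(r):
    # hypothetical stand-in for get_circular_mask: a disc of radius r
    y, x = np.ogrid[-r:r + 1, -r:r + 1]
    return x**2 + y**2 <= r**2

mat = np.ones((40, 40), dtype=bool)
mat[20:, :] = False
eroded = ndimage.binary_erosion(mat, circular_mask(3), border_value=1)
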
Code example #36
0
def make_golden_standard_and_mask(path, sc=1, zoom=False):
    # %%
    start = time.time()
    preprocess_data(path, 'good', sc, redo=False)
    proc_path = path + 'processed_data/'

    if sc == 1:
        g = np.load(proc_path + 'g_good.npy')
    else:
        g = np.load(proc_path + 'g_good_sc' + str(sc) + '.npy')
    g = np.transpose(g, (2, 0, 1))
    gc.collect()
    # %%
    meta = load_meta(path + 'good/', sc)
    vox = g.shape[0]
    dpixsize = meta['pix_size']
    s2d = meta['s2d']
    s2o = meta['s2o']
    o2d = meta['o2d']
    if zoom:
        magn = 1
    else:
        magn = s2o / s2d
    vox_size = dpixsize * magn
    minsize = -vox * vox_size / 2
    maxsize = vox * vox_size / 2
    vol_geom = astra.create_vol_geom(vox, vox, vox, minsize, maxsize, minsize,
                                     maxsize, minsize, maxsize)

    ang = np.shape(g)[1]
    angles = np.linspace(np.pi / ang, (2 + 1 / ang) * np.pi, ang, False)
    proj_geom = astra.create_proj_geom('cone', dpixsize, dpixsize,
                                       np.shape(g)[0],
                                       np.shape(g)[2], angles, s2o, o2d)

    # %%
    # Create projection data from this
    #proj_id, proj_data = astra.create_sino3d_gpu(cube, proj_geom, vol_geom)
    proj_id = astra.data3d.create('-proj3d', proj_geom, g)
    #proj_id = astra.data3d.create('-proj3d', proj_geom, vol_geom)
    projector_id = astra.create_projector('cuda3d', proj_geom, vol_geom)
    # %%

    # Create a data object for the reconstruction
    rec_id = astra.data3d.create('-vol', vol_geom)

    # Set up the parameters for a reconstruction algorithm using the GPU
    #W = astra.OpTomo(proj_id)
    astra.plugin.register(astra.plugins.SIRTPlugin)
    cfg = astra.astra_dict('SIRT-PLUGIN')
    #    cfg = astra.astra_dict('FDK_CUDA')
    cfg['ReconstructionDataId'] = rec_id
    cfg['ProjectionDataId'] = proj_id
    cfg['ProjectorId'] = projector_id
    cfg['option'] = {}
    cfg['option']['MinConstraint'] = 0

    # Create the algorithm object from the configuration structure
    alg_id = astra.algorithm.create(cfg)

    # SIRT
    astra.algorithm.run(alg_id, 300)

    # Get the result
    rec = astra.data3d.get(rec_id)
    rec = np.transpose(rec, (2, 1, 0))
    save = np.zeros((3, vox, vox))
    save[0, :, :], save[1, :, :] = rec[:, :, vox // 2], rec[:, vox // 2, :]
    save[2, :, :] = rec[vox // 2, :, :]
    np.save(proc_path + 'rec_ax_SIRT300', save)
    # %%
    if sc == 1:
        np.save(proc_path + 'rec_SIRT300', rec)
    else:
        np.save(proc_path + 'rec_SIRT300_sc' + str(sc), rec)
    end = time.time()
    print((end - start), 'Finished SIRT 300 reconstruction')
    ## Clean up. Note that GPU memory is tied up in the algorithm object,
    ## and main RAM in the data objects.
    astra.algorithm.delete(alg_id)
    astra.data3d.delete(rec_id)
    astra.data3d.delete(proj_id)
    astra.projector3d.delete(projector_id)

    # %%
    rec *= (rec > 0.03)
    edge = vox // 32
    edge_t = np.zeros(np.shape(rec))
    edge_t[edge:-edge, edge:-edge, edge:-edge] = 1
    rec *= edge_t
    del edge_t
    save[0, :, :], save[1, :, :] = rec[:, :, vox // 2], rec[:, vox // 2, :]
    save[2, :, :] = rec[vox // 2, :, :]
    np.save(proc_path + 'GT_ax', save)
    diter = int(1.5 * 2**(np.log2(vox) - 5))
    it = 5
    mask = sp.binary_erosion(rec, iterations=it)
    mask = sp.binary_dilation(mask, iterations=diter + it)
    save[0, :, :], save[1, :, :] = mask[:, :, vox // 2], mask[:, vox // 2, :]
    save[2, :, :] = mask[vox // 2, :, :]
    np.save(proc_path + 'mask_ax', save)

    # %%
    if sc == 1:
        np.save(proc_path + 'ground_truth.npy', rec)
        np.save(proc_path + 'mask.npy', mask)

    else:
        np.save(proc_path + 'ground_truth_sc' + str(sc) + '.npy', rec)
        np.save(proc_path + 'mask_sc' + str(sc) + '.npy', mask)
    t3 = time.time()
    print(t3 - end, 'Finished computing mask and ground truth')
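
The mask built above is a conservative envelope around the thresholded reconstruction: erode to strip small speckle, then dilate further than the erosion removed. A toy version on a synthetic volume (sizes and iteration counts are placeholders):

import numpy as np
import scipy.ndimage as sp

rec = np.zeros((64, 64, 64), dtype=bool)
rec[16:48, 16:48, 16:48] = True
it, diter = 5, 6
mask = sp.binary_erosion(rec, iterations=it)            # remove small speckle
mask = sp.binary_dilation(mask, iterations=diter + it)  # grow past the original edge
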
Code example #37
0
    mybet.inputs.in_file = dpath + 'img_sm.nii.gz'
    mybet.inputs.out_file = dpath + 'img_bet.nii.gz'
    mybet.inputs.frac = 0.01
    mybet.inputs.robust = True
    mybet.inputs.output_type = "NIFTI_GZ"
    result = mybet.run()

    # step 6
    img_bet = nib.load(dpath + 'img_bet.nii.gz')
    img_bet_pixels = img_bet.get_data().astype(np.int16)
    img_bet_bin = cv2.threshold(img_bet_pixels, 0, 1, cv2.THRESH_BINARY)[1]
    img_bet_mask = np.transpose(img_bet_bin, (2, 0, 1))
    for i in range(len(img_bet_mask)):
        img_bet_mask[i] = scipy.ndimage.morphology.binary_fill_holes(img_bet_mask[i])
    img_bet_mask = np.transpose(img_bet_mask, (1, 2, 0))
    img_final_msk = ndimage.binary_erosion(img_bet_mask).astype(img_bet_mask.dtype)

    # step 7
    img_final_msk = largest_cc(img_final_msk)
    img_final_msk = np.transpose(img_final_msk, (2, 0, 1))
    for i in range(len(img_final_msk)):
        img_final_msk[i] = scipy.ndimage.morphology.binary_fill_holes(img_final_msk[i])
    img_final_msk = np.transpose(img_final_msk, (1, 2, 0))

    # step 8
    img_preprocessed = img_pixels * img_final_msk
    save_img = nib.Nifti1Image(img_preprocessed, affine=img.affine, header = img_hdr)
    nib.save(save_img, cur_dir + '/Brain' + basename[5:])
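
Steps 6-7 above combine slice-wise hole filling with a single 3-D erosion of the brain mask; a compact sketch on a synthetic volume (axis choice and threshold are illustrative):

import numpy as np
from scipy import ndimage

vol = np.random.rand(32, 32, 16) > 0.4
filled = np.stack([ndimage.binary_fill_holes(vol[:, :, k])
                   for k in range(vol.shape[2])], axis=2)
eroded = ndimage.binary_erosion(filled).astype(np.uint8)
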


Code example #38
0
def detect_corpus_callosum(tracks,
                           plane=91,
                           ysize=217,
                           zsize=181,
                           width=1.0,
                           use_atlas=0,
                           use_preselected_tracks=0,
                           ball_radius=5):
    """ Detect corpus callosum in a mni registered dataset of shape
    (181,217,181)
    
    Parameters:
    ----------------
    tracks: sequence 
            of tracks
    
    Returns:
    ----------
    cc_indices: sequence
            with the indices of the corpus_callosum tracks
    
    left_indices: sequence
            with the indices of the rest of the brain
       
    """

    cc = []

    #for every track
    for (i, t) in enumerate(tracks):

        #for every index of any point in the track
        for pi in range(len(t) - 1):

            #if track segment is cutting the plane (assuming the plane is at the x-axis X=plane)
            if (t[pi][0] <= plane
                    and t[pi + 1][0] >= plane) or (t[pi + 1][0] <= plane
                                                   and t[pi][0] >= plane):

                v = t[pi + 1] - t[pi]
                k = (plane - t[pi][0]) / v[0]

                hit = k * v + t[pi]

                #report the index of the track and the point of intersection with the plane
                cc.append((i, hit))

    #indices
    cc_i = [c[0] for c in cc]

    print 'Number of tracks cutting plane Before', len(cc_i)

    #hit points
    cc_p = np.array([c[1] for c in cc])

    # p_neighb=len(cc_p)*[0]

    # cnt=0
    #imaging processing from now on

    im = np.zeros((ysize, zsize))
    im2 = np.zeros((ysize, zsize))

    im_track = {}

    cnt = 0
    for p in cc_p:

        p1 = int(round(p[1]))
        p2 = int(round(p[2]))

        im[p1, p2] = 1
        im2[p1, p2] = im2[p1, p2] + 1

        try:
            im_track[(p1, p2)] = im_track[(p1, p2)] + [cc_i[cnt]]
        except:
            im_track[(p1, p2)] = [cc_i[cnt]]

        cnt += 1

    #create a cross structure
    cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])

    im = (255 * im).astype('uint8')
    im2 = (np.interp(im2, [0, im2.max()], [0, 255])).astype('uint8')

    #erosion
    img = nd.binary_erosion(im, structure=cross)

    #and another one erosion
    #img=nd.binary_erosion(img,structure=cross)
    #im2g=nd.grey_erosion(im2,structure=cross)
    #im2g2=nd.grey_erosion(im2g,structure=cross)

    indg2 = np.where(im2 == im2.max())
    p1max = indg2[0][0]
    p2max = indg2[1][0]

    #label objects
    imgl = nd.label(img)
    # no_labels=imgl[1]
    imgl = imgl[0]

    #find the biggest objects the second biggest should be the cc the biggest should be the background
    """
    find_big=np.zeros(no_labels)
    
    for i in range(no_labels):
        
        ind=np.where(imgl==i)
        find_big[i]=len(ind[0])
        
    print find_big
    
    find_bigi=np.argsort(find_big)
    """
    cc_label = imgl[p1max, p2max]

    imgl2 = np.zeros((ysize, zsize))

    #cc is found and copied to a new image here
    #imgl2[imgl==int(find_bigi[-2])]=1
    imgl2[imgl == int(cc_label)] = 1

    imgl2 = imgl2.astype('uint8')

    #now do another dilation to recover some cc shape from the previous erosion
    imgl2d = nd.binary_dilation(imgl2, structure=cross)
    #and another one
    #imgl2d=nd.binary_dilation(imgl2d,structure=cross)

    imgl2d = imgl2d.astype('uint8')

    #get the tracks back
    cc_indices = []
    indcc = np.where(imgl2d > 0)
    for i in range(len(indcc[0])):
        p1 = indcc[0][i]
        p2 = indcc[1][i]
        cc_indices = cc_indices + im_track[(p1, p2)]

    print 'After', len(cc_indices)

    #export also the rest of the brain
    indices = range(len(tracks))
    left = set(indices).difference(set(cc_indices))
    left_indices = [l for l in left]

    #return im,im2,imgl2d,cc_indices,left_indices
    return cc_indices, left_indices
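
The 2-D cleanup above (cross-shaped erosion, connected-component labelling, then keeping the blob that contains the densest hit pixel) can be reproduced on a toy image; the seed coordinates below are arbitrary:

import numpy as np
from scipy import ndimage as nd

cross = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
im = np.zeros((60, 60), dtype=bool)
im[10:30, 10:30] = True
im[45, 45] = True                               # isolated speckle, removed by erosion
eroded = nd.binary_erosion(im, structure=cross)
labels, _ = nd.label(eroded)
blob = labels == labels[20, 20]                 # component containing the seed pixel
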
Code example #39
0
    def S2_PSF_optimization(self):
        self.h, self.v = mtile_cal(self.lat, self.lon)
        m = mgrs.MGRS()
        #mg_coor = m.toMGRS(self.lat, self.lon, MGRSPrecision=4)
        #self.place = self.S2_fname
        #self.Hfiles = glob.glob(directory +'l_data/LC8%03d%03d%d*LGN00_sr_band1.tif'%(self.path, self.row, self.year))
        if len(self.S2_fname) == 5:
            self.Hfile = os.getcwd()+'/s_data/%s/%s/%s/%d/%d/%d/0/'%(self.S2_fname[:2], self.S2_fname[2],\
                                                                     self.S2_fname[3:5], self.year, self.S2_month, self.S2_day)
        else:
            self.Hfile = os.getcwd()+'/s_data/%s/%s/%s/%d/%d/%d/0/'%(self.S2_fname[:1], self.S2_fname[1],\
                                                                     self.S2_fname[2:4], self.year, self.S2_month, self.S2_day)
        #Lfile = glob.glob('m_data/MCD43A1.A%d%03d.h%02dv%02d.006.*.hdf'%(year,doy,h,v))[0]
        #self.doy = datetime .datetime(self.year, self.month, self.day).timetuple().tm_yday
        self.Lfile = glob.glob('m_data/MCD43A1.A%d%03d.h%02dv%02d.006.*.hdf' %
                               (self.year, self.S2_doy, self.h, self.v))[0]

        if glob.glob(self.Hfile + 'cloud.tif') == []:
            cl = classification(fhead=self.Hfile,
                                bands=(2, 3, 4, 8, 11, 12, 13),
                                bounds=None)
            cl.Get_cm_p()
            self.cloud = cl.cm.copy()
            tifffile.imsave(self.Hfile + 'cloud.tif', self.cloud.astype(int))
            self.H_data = np.repeat(np.repeat(cl.b12, 2, axis=1), 2, axis=0)
            del cl
        else:
            b12 = gdal.Open(self.Hfile + 'B12.jp2').ReadAsArray() * 0.0001
            self.H_data = np.repeat(np.repeat(b12, 2, axis=1), 2, axis=0)
            self.cloud = tifffile.imread(self.Hfile + 'cloud.tif').astype(bool)
        cloud_cover = 1. * self.cloud.sum() / self.cloud.size
        if cloud_cover > 0.2:
            print 'Too much cloud, cloud proportion: %.03f !!' % cloud_cover
            return []
        else:
            mete = readxml('%smetadata.xml' % self.Hfile)
            self.sza = np.zeros(7)
            self.sza[:] = mete['mSz']
            self.saa = self.sza.copy()
            self.saa[:] = mete['mSa']
            try:
                self.vza = (mete['mVz'])[[1, 2, 3, 7, 11, 12, 8], ]
                self.vaa = (mete['mVa'])[[1, 2, 3, 7, 11, 12, 8], ]
            except:
                self.vza = np.repeat(np.nanmean(mete['mVz']), 7)
                self.vaa = np.repeat(np.nanmean(mete['mVa']), 7)

            ll, ul, lr, ur = mg.toWgs(u'%s0000000000'%self.S2_fname), mg.toWgs(u'%s0000099999'%self.S2_fname),\
            mg.toWgs(u'%s9999900000'%self.S2_fname), mg.toWgs(u'%s9999999999'%self.S2_fname)

            dic = {
                'LL_LAT': ll[0],
                'LL_LON': ll[1],
                'LR_LAT': lr[0],
                'LR_LON': lr[1],
                'UL_LAT': ul[0],
                'UL_LON': ul[1],
                'UR_LAT': ur[0],
                'UR_LON': ur[1]
            }
            corners = 10000, 10000

            #self.L_inds, self.H_inds = get_coords(self.lat,self.lon)
            self.L_inds, self.H_inds = MSL_geo_trans(self.lat, self.lon, dic,
                                                     corners)

            self.Lx, self.Ly = self.L_inds
            self.Hx, self.Hy = self.H_inds

            angles = (self.sza[-2], self.vza[-2], (self.vaa - self.saa)[-2])

            self.brdf, self.qa = get_brdf_six(self.Lfile,
                                              angles=angles,
                                              bands=(7, ),
                                              Linds=self.L_inds)
            self.brdf, self.qa = self.brdf.flatten(), self.qa.flatten()

            struct = ndimage.generate_binary_structure(2, 2)
            dia_cloud = ndimage.binary_dilation(self.cloud,
                                                structure=struct,
                                                iterations=60).astype(
                                                    self.cloud.dtype)

            mask = ~(self.H_data <= 0).astype('bool')
            small_mask = ndimage.binary_erosion(mask,
                                                structure=struct,
                                                iterations=60).astype(
                                                    mask.dtype)
            self.val_mask = (~dia_cloud) & small_mask

            self.L_data = np.zeros(self.brdf.shape[0])
            self.L_data[:] = np.nan
            self.L_data[self.qa == 0] = self.brdf[self.qa == 0]
            #args = s, self.L_data,

            avker = np.ones((120, 120))
            navker = avker / avker.sum()
            self.s = signal.fftconvolve(self.H_data, navker, mode='same')
            self.s[~self.val_mask] = np.nan

            min_val = [-100, -100]
            max_val = [100, 100]

            ps, distributions = create_training_set(['xs', 'ys'],
                                                    min_val,
                                                    max_val,
                                                    n_train=50)
            solved = parmap(self.op1, ps, nprocs=10)
            paras, costs = np.array([i[0] for i in solved
                                     ]), np.array([i[1] for i in solved])
            xs, ys = paras[costs == costs.min()][0]

            if costs.min() < 0.1:
                min_val = [5, 5, -15, xs - 5, ys - 5]
                max_val = [100, 100, 15, xs + 5, ys + 5]

                self.bounds = ([5, 100], [5, 100], [-15, 15],
                               [xs - 5, xs + 5], [ys - 5, ys + 5])

                ps, distributions = create_training_set(
                    ['xstd', 'ystd', 'ang', 'xs', 'ys'],
                    min_val,
                    max_val,
                    n_train=50)
                print 'Start solving...'

                self.solved = parmap(self.op, ps, nprocs=10)

                print self.solved
                return self.solved, self.brdf, self.qa
            else:
                print 'Cost is too large, please check!', xs, ys, costs.min()
                return []
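
The valid-pixel mask above combines a dilated cloud mask with an eroded data mask, so pixels near clouds and pixels near the image edge or no-data regions are both excluded; a reduced sketch (array sizes and iteration counts are illustrative):

import numpy as np
from scipy import ndimage

struct = ndimage.generate_binary_structure(2, 2)
cloud = np.zeros((100, 100), dtype=bool)
cloud[40:45, 40:45] = True
data = np.random.rand(100, 100)
mask = ~(data <= 0)
dia_cloud = ndimage.binary_dilation(cloud, structure=struct, iterations=10)
small_mask = ndimage.binary_erosion(mask, structure=struct, iterations=10)
val_mask = (~dia_cloud) & small_mask
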
Code example #40
0
    def L8_PSF_optimization(self):
        self.h, self.v = mtile_cal(self.lat, self.lon)
        #pr=get_wrs(self.lat, self.lon)
        #self.path, self.row = pr[0]['path'],pr[0]['row']
        #self.Hfiles = glob.glob(directory +'l_data/LC8%03d%03d%d*LGN00_sr_band1.tif'%(self.path, self.row, self.year))
        self.Hfile = directory + 'l_data/%s_toa_' % (self.L8_fname)
        #Lfile = glob.glob('m_data/MCD43A1.A%d%03d.h%02dv%02d.006.*.hdf'%(year,doy,h,v))[0]
        self.Lfile = glob.glob('m_data/MCD43A1.A%d%03d.h%02dv%02d.006.*.hdf' %
                               (self.year, self.L8_doy, self.h, self.v))[0]

        if self.read_meta(self.Hfile, self.path, self.row) == []:
            print 'Too much cloud!!'
            return []
        else:
            self.sza, self.saa, self.vza, self.vaa, dic, corners = self.read_meta(
                self.Hfile, self.path, self.row)
            self.L_inds, self.H_inds = MSL_geo_trans(self.lat, self.lon, dic,
                                                     corners)
            self.Lx, self.Ly = self.L_inds
            self.Hx, self.Hy = self.H_inds

            tems = np.zeros((3, 6))
            tems[0, :] = self.sza
            tems[1, :] = self.vza
            tems[2, :] = self.vaa - self.saa
            angles = (tems[0][-1], tems[1][-1], tems[2][-1])

            self.brdf, self.qa = get_brdf_six(self.Lfile,
                                              angles=angles,
                                              bands=(7, ),
                                              Linds=self.L_inds)
            self.brdf, self.qa = self.brdf.flatten(), self.qa.flatten()

            cloud = gdal.Open(self.Hfile[:-5] + '_cfmask.tif').ReadAsArray()
            cl_mask = cloud == 4  # cloud pixels; strictest way is to set the clear pixels with cloud==0
            struct = ndimage.generate_binary_structure(2, 2)
            dia_cloud = ndimage.binary_dilation(cl_mask,
                                                structure=struct,
                                                iterations=20).astype(
                                                    cl_mask.dtype)

            self.H_data = gdal.Open(self.Hfile +
                                    'band%d.tif' % 7).ReadAsArray() * 0.0001
            mask = ~(self.H_data < 0).astype('bool')
            small_mask = ndimage.binary_erosion(mask,
                                                structure=struct,
                                                iterations=20).astype(
                                                    mask.dtype)
            self.val_mask = (~dia_cloud) & small_mask

            self.L_data = np.zeros(self.brdf.shape[0])
            self.L_data[:] = np.nan
            self.L_data[self.qa == 0] = self.brdf[self.qa == 0]
            #args = s, self.L_data,

            avker = np.ones((40, 40))
            navker = avker / avker.sum()
            self.s = signal.fftconvolve(self.H_data, navker, mode='same')
            self.s[~self.val_mask] = np.nan

            min_val = [-40, -40]
            max_val = [40, 40]

            ps, distributions = create_training_set(['xs', 'ys'],
                                                    min_val,
                                                    max_val,
                                                    n_train=50)
            solved = parmap(self.op1, ps, nprocs=10)
            paras, costs = np.array([i[0] for i in solved
                                     ]), np.array([i[1] for i in solved])
            xs, ys = paras[costs == costs.min()][0]

            if costs.min() < 0.1:
                min_val = [5, 5, -15, xs - 5, ys - 5]
                max_val = [100, 100, 15, xs + 5, ys + 5]
                self.bounds = ([5, 100], [5, 100], [-15, 15],
                               [xs - 5, xs + 5], [ys - 5, ys + 5])
                ps, distributions = create_training_set(
                    ['xstd', 'ystd', 'ang', 'xs', 'ys'],
                    min_val,
                    max_val,
                    n_train=50)
                print 'Start solving...'
                self.solved = parmap(self.op, ps, nprocs=10)
                print self.solved
                return self.solved, self.brdf, self.qa
            else:
                print 'Cost is too large, please check!', xs, ys, costs.min()
                return []
Code example #41
0
	return SD
					
if __name__== "__main__":

	img=misc.imread('result.png')
	img2=misc.imread('threshold.png')
	plt.title('Processed image')
	plt.imshow(img)
	plt.show()
	#plt.title('threshold')
	#plt.imshow(img2,cmap=cm.Greys_r)
	#plt.show()
	dilimg=ndimage.binary_dilation(img2)
	for x in range(25):
		dilimg=ndimage.binary_dilation(dilimg)
	erimg=ndimage.binary_erosion(dilimg)
	for x in range(25):
		erimg=ndimage.binary_erosion(erimg)
	plt.title('Closed image')
	plt.imshow(erimg,cmap=cm.Greys_r)
	plt.show()
	a,A=Area(erimg)
	#print(a)
	#print(A)
	Asymmetry=((a/A)*100)/10
	if Asymmetry<0 :
		Asymmetry=Asymmetry*(-1)
	print('Asymmetry')
	print(Asymmetry)

	#BORDER
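
The loops above amount to a morphological closing (26 dilations followed by 26 erosions with the default 4-connected structuring element); this should match a single binary_closing call, sketched here on random noise:

import numpy as np
from scipy import ndimage

img2 = np.random.rand(80, 80) > 0.7
closed = ndimage.binary_dilation(img2, iterations=26)
closed = ndimage.binary_erosion(closed, iterations=26)
# should be equivalent to: ndimage.binary_closing(img2, iterations=26)
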
Code example #42
0
File: mainapp.py Project: zrdail/chestdetect
def processMatrix(mtx,dim):

    #-------------------#
    #       LPF         #
    #-------------------#

    original = mtx

    # Apply Large Gaussian Filter To Cropped Image
    #gmtx = ndimage.gaussian_filter(mtx, (2,2,2), order=0)
    gmtx = mtx
    gmtx = (gmtx > gmtx.mean())
    gmtx = gmtx.astype('uint8')
    gmtx = gmtx * 255
    print gmtx.shape

    #------------------------#
    #        DENOISING       #
    #------------------------#
    erodim = 4 # erosion dimension
    cr = 3 # custom radius for the structuring element

    if dim == 512:
        erodim = 4
        cr = 3
    elif dim == 712:
        erodim = 5
        cr = 5

    gmtx_eroded = ndimage.binary_erosion(gmtx, structure=custom_cross(erodim)).astype(gmtx.dtype)
    #gmtx_eroded = ndimage.binary_erosion(gmtx, structure=myball).astype(gmtx.dtype)
    gmtx_eroded = gmtx_eroded.astype('uint8')
    eroded = gmtx_eroded * 255

    #gmtx_eroded = gmtx
    #eroded = gmtx_eroded
    
    #---------------------#
    #       SKIMAGE       #
    #---------------------#
    markers, nummarks = ndimage.label(gmtx_eroded)
    bins = np.bincount(markers.flatten())
    bins[0] = 0
    cwmark = np.argmax(bins)
    for zdim in xrange(markers.shape[2]):
        for col in xrange(markers.shape[1]):
            first = np.argmax(markers[:,col,zdim] == cwmark)
            last = markers.shape[1] - 1 - np.argmax(markers[::-1,col,zdim] == cwmark)
            markers[0:first,col,zdim] = cwmark
            markers[last:,col,zdim] = cwmark

    #markers = markers.astype('uint8')
    markers[markers == cwmark] = 0 # in markers image, 0 is background and 1 is the largest blob (i.e. chest wall & mediastinum)
    markers = markers > 0
    markers = markers.astype('uint8')

    myelem = custom_square(cr)
    opened = ndimage.morphology.binary_opening(markers, myelem)
    opened = opened.astype('uint8')

    markers, nummarks = ndimage.label(opened)
    opened = opened * 255

    bins = np.bincount(markers.flatten())

    for i in range(1, nummarks+1):
        if bins[i] > 10:
            com = ndimage.measurements.center_of_mass(markers == i)
            print com
            tmpimg_orig = np.array(original[:,:,int(com[2])])
            tmpimg_open = np.array(opened[:,:,int(com[2])])
            cv2.circle(tmpimg_orig,(int(com[1]),int(com[0])),50,[255,255,255],10)            
            cv2.circle(tmpimg_open,(int(com[1]),int(com[0])),50,[255,255,255],10)            
            original[:,:,com[2]] = tmpimg_orig
            opened[:,:,com[2]] = tmpimg_open

    return original, eroded, markers, opened
Code example #43
0
def canny(image,
          sigma=1.,
          low_threshold=None,
          high_threshold=None,
          mask=None,
          use_quantiles=False):
    pl = Plot("canny_test")
    pl.append(image, "default")
    """Edge filter an image using the Canny algorithm.
    Parameters
    -----------
    image : 2D array
        Greyscale input image to detect edges on; can be of any dtype.
    sigma : float
        Standard deviation of the Gaussian filter.
    low_threshold : float
        Lower bound for hysteresis thresholding (linking edges).
        If None, low_threshold is set to 10% of dtype's max.
    high_threshold : float
        Upper bound for hysteresis thresholding (linking edges).
        If None, high_threshold is set to 20% of dtype's max.
    mask : array, dtype=bool, optional
        Mask to limit the application of Canny to a certain area.
    use_quantiles : bool, optional
        If True then treat low_threshold and high_threshold as quantiles of the
        edge magnitude image, rather than absolute edge magnitude values. If True
        then the thresholds must be in the range [0, 1].
    Returns
    -------
    output : 2D array (image)
        The binary edge map.
    See also
    --------
    skimage.sobel
    Notes
    -----
    The steps of the algorithm are as follows:
    * Smooth the image using a Gaussian with ``sigma`` width.
    * Apply the horizontal and vertical Sobel operators to get the gradients
      within the image. The edge strength is the norm of the gradient.
    * Thin potential edges to 1-pixel wide curves. First, find the normal
      to the edge at each point. This is done by looking at the
      signs and the relative magnitude of the X-Sobel and Y-Sobel
      to sort the points into 4 categories: horizontal, vertical,
      diagonal and antidiagonal. Then look in the normal and reverse
      directions to see if the values in either of those directions are
      greater than the point in question. Use interpolation to get a mix of
      points instead of picking the one that's the closest to the normal.
    * Perform a hysteresis thresholding: first label all points above the
      high threshold as edges. Then recursively label any point above the
      low threshold that is 8-connected to a labeled point as an edge.
    References
    -----------
    .. [1] Canny, J., A Computational Approach To Edge Detection, IEEE Trans.
           Pattern Analysis and Machine Intelligence, 8:679-714, 1986
    .. [2] William Green's Canny tutorial
           http://dasl.mem.drexel.edu/alumni/bGreen/www.pages.drexel.edu/_weg22/can_tut.html
    Examples
    --------
    >>> from skimage import feature
    >>> # Generate noisy image of a square
    >>> im = np.zeros((256, 256))
    >>> im[64:-64, 64:-64] = 1
    >>> im += 0.2 * np.random.rand(*im.shape)
    >>> # First trial with the Canny filter, with the default smoothing
    >>> edges1 = feature.canny(im)
    >>> # Increase the smoothing for better results
    >>> edges2 = feature.canny(im, sigma=3)
    """

    #
    # The steps involved:
    #
    # * Smooth using the Gaussian with sigma above.
    #
    # * Apply the horizontal and vertical Sobel operators to get the gradients
    #   within the image. The edge strength is the sum of the magnitudes
    #   of the gradients in each direction.
    #
    # * Find the normal to the edge at each point using the arctangent of the
    #   ratio of the Y sobel over the X sobel - pragmatically, we can
    #   look at the signs of X and Y and the relative magnitude of X vs Y
    #   to sort the points into 4 categories: horizontal, vertical,
    #   diagonal and antidiagonal.
    #
    # * Look in the normal and reverse directions to see if the values
    #   in either of those directions are greater than the point in question.
    #   Use interpolation to get a mix of points instead of picking the one
    #   that's the closest to the normal.
    #
    # * Label all points above the high threshold as edges.
    # * Recursively label any point above the low threshold that is 8-connected
    #   to a labeled point as an edge.
    #
    # Regarding masks, any point touching a masked point will have a gradient
    # that is "infected" by the masked point, so it's enough to erode the
    # mask by one and then mask the output. We also mask out the border points
    # because who knows what lies beyond the edge of the image?
    #
    assert_nD(image, 2)

    if low_threshold is None:
        low_threshold = 0.1 * dtype_limits(image, clip_negative=False)[1]

    if high_threshold is None:
        high_threshold = 0.2 * dtype_limits(image, clip_negative=False)[1]

    if mask is None:
        mask = np.ones(image.shape, dtype=bool)

    def fsmooth(x):
        return gaussian_filter(x, sigma, mode='constant')

    smoothed = smooth_with_function_and_mask(image, fsmooth, mask)
    jsobel = ndi.sobel(smoothed, axis=1)
    isobel = ndi.sobel(smoothed, axis=0)
    abs_isobel = np.abs(isobel)
    abs_jsobel = np.abs(jsobel)
    magnitude = np.hypot(
        isobel, jsobel
    )  #  Given the “legs” of a right triangle, return its hypotenuse.

    pl.append(abs_isobel, "jsobel")
    pl.append(abs_isobel, "isobel")
    pl.append(magnitude, "magitude")
    #
    # Make the eroded mask. Setting the border value to zero will wipe
    # out the image edges for us.
    #
    s = generate_binary_structure(2, 2)
    eroded_mask = binary_erosion(mask, s, border_value=0)
    eroded_mask = eroded_mask & (magnitude > 0)
    #
    #--------- Find local maxima --------------
    #
    # Assign each point to have a normal of 0-45 degrees, 45-90 degrees,
    # 90-135 degrees and 135-180 degrees.
    #
    local_maxima = np.zeros(image.shape, bool)
    #----- 0 to 45 degrees ------
    pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
    pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    # Get the magnitudes shifted left to make a matrix of the points to the
    # right of pts. Similarly, shift left and down to get the points to the
    # top right of pts.
    c1 = magnitude[1:, :][pts[:-1, :]]
    c2 = magnitude[1:, 1:][pts[:-1, :-1]]
    m = magnitude[pts]
    w = abs_jsobel[pts] / abs_isobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[:-1, :][pts[1:, :]]
    c2 = magnitude[:-1, :-1][pts[1:, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    pl.append(local_maxima, "local_maxima: 0 to 45")
    pl.append(pts, "pts")
    #----- 45 to 90 degrees ------
    # Mix diagonal and vertical
    #
    pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
    pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1 = magnitude[:, 1:][pts[:, :-1]]
    c2 = magnitude[1:, 1:][pts[:-1, :-1]]
    m = magnitude[pts]
    w = abs_isobel[pts] / abs_jsobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[:, :-1][pts[:, 1:]]
    c2 = magnitude[:-1, :-1][pts[1:, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    pl.append(local_maxima, "local_maxima: 45 to 90")
    pl.append(pts, "pts")
    #----- 90 to 135 degrees ------
    # Mix anti-diagonal and vertical
    #
    pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
    pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1a = magnitude[:, 1:][pts[:, :-1]]
    c2a = magnitude[:-1, 1:][pts[1:, :-1]]
    m = magnitude[pts]
    w = abs_isobel[pts] / abs_jsobel[pts]
    c_plus = c2a * w + c1a * (1.0 - w) <= m
    c1 = magnitude[:, :-1][pts[:, 1:]]
    c2 = magnitude[1:, :-1][pts[:-1, 1:]]
    c_minus = c2 * w + c1 * (1.0 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    pl.append(local_maxima, "local_maxima: 90 to 135")
    pl.append(pts, "pts")
    #----- 135 to 180 degrees ------
    # Mix anti-diagonal and anti-horizontal
    #
    pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
    pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
    pts = pts_plus | pts_minus
    pts = eroded_mask & pts
    c1 = magnitude[:-1, :][pts[1:, :]]
    c2 = magnitude[:-1, 1:][pts[1:, :-1]]
    m = magnitude[pts]
    w = abs_jsobel[pts] / abs_isobel[pts]
    c_plus = c2 * w + c1 * (1 - w) <= m
    c1 = magnitude[1:, :][pts[:-1, :]]
    c2 = magnitude[1:, :-1][pts[:-1, 1:]]
    c_minus = c2 * w + c1 * (1 - w) <= m
    local_maxima[pts] = c_plus & c_minus
    pl.append(local_maxima, "local_maxima: 135-180")
    pl.append(pts, "pts")

    #
    #---- If use_quantiles is set then calculate the thresholds to use
    #
    if use_quantiles:
        if high_threshold > 1.0 or low_threshold > 1.0:
            raise ValueError("Quantile thresholds must not be > 1.0")
        if high_threshold < 0.0 or low_threshold < 0.0:
            raise ValueError("Quantile thresholds must not be < 0.0")

        high_threshold = np.percentile(magnitude, 100.0 * high_threshold)
        low_threshold = np.percentile(magnitude, 100.0 * low_threshold)

    #
    #---- Create two masks at the two thresholds.
    #
    high_mask = local_maxima & (magnitude >= high_threshold)
    low_mask = local_maxima & (magnitude >= low_threshold)
    pl.append(high_mask, "high_mask")
    pl.append(low_mask, "low_mask")
    #
    # Segment the low-mask, then only keep low-segments that have
    # some high_mask component in them
    #
    strel = np.ones((3, 3), bool)
    labels, count = label(low_mask, strel)
    if count == 0:
        return low_mask

    sums = (np.array(ndi.sum(high_mask, labels,
                             np.arange(count, dtype=np.int32) + 1),
                     copy=False,
                     ndmin=1))
    good_label = np.zeros((count + 1, ), bool)
    good_label[1:] = sums > 0
    output_mask = good_label[labels]
    pl.append(output_mask, "output")
    pl.show()
    return output_mask
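
# A minimal, self-contained sketch (added for illustration, not part of the original
# example): the hysteresis step above in isolation. Label the 8-connected components of
# the low-threshold mask, keep only components that contain at least one high-threshold
# pixel, and use fancy indexing with good_label[labels] to lift the per-label decision
# back to a per-pixel mask. The arrays below are invented toy data.
import numpy as np
from scipy import ndimage as ndi

toy_low_mask = np.array([[1, 1, 0, 0, 1],
                         [0, 1, 0, 0, 1],
                         [0, 0, 0, 0, 1]], dtype=bool)
toy_high_mask = np.zeros_like(toy_low_mask)
toy_high_mask[0, 0] = True  # only the left component touches a strong edge

strel = np.ones((3, 3), bool)                        # 8-connectivity
labels, count = ndi.label(toy_low_mask, strel)       # label low-threshold components
sums = np.atleast_1d(ndi.sum(toy_high_mask, labels, np.arange(count) + 1))
good_label = np.zeros(count + 1, bool)
good_label[1:] = sums > 0                            # component i survives iff it overlaps the high mask
hysteresis_mask = good_label[labels]                 # per-pixel result via fancy indexing
print(hysteresis_mask.astype(int))                   # left component kept, right component dropped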
Code example #44
0
    mask_predicted = mask_predicted.detach().cpu().numpy()[0, :, :, :]

    mask_split = split_nuclei(mask_predicted > 0.5, minimal_nuclei_size, h,
                              sphere, min_dist)

    mask_label_dilated = balloon(mask_split, sphere)

    factor = np.array(original_img_size) / np.array(mask_label_dilated.shape)
    mask_final = zoom(mask_label_dilated, factor, order=0)

    mask_eroded = np.zeros(mask.shape, dtype=bool)
    for nuclei_value in range(1, 5):

        mask_current = mask == nuclei_value

        mask_current = binary_erosion(binary_closing(mask_current, sphere),
                                      sphere)

        mask_eroded[mask_current] = True

    mask_eroded = remove_small_objects(mask_eroded, minimal_nuclei_size)

    mask = balloon(mask_eroded, sphere)

    factor = np.array(original_img_size) / np.array(mask.shape)
    mask = zoom(mask, factor, order=0)

    plt.imshow(
        np.concatenate((np.max(mask_final, axis=2), np.max(mask, axis=2)),
                       axis=1))
    plt.show()
Code example #45
0
im = ndimage.gaussian_filter(im, sigma=l / (4. * n))

mask = (im > im.mean()).astype(float)

img = mask + 0.3 * np.random.randn(*mask.shape)

binary_img = img > 0.5

## opening and closing
# Remove small white regions
open_img = ndimage.binary_opening(binary_img)
# Remove small black holes
close_img = ndimage.binary_closing(open_img)

## erosion and propagation
eroded_img = ndimage.binary_erosion(binary_img)
reconstruct_img = ndimage.binary_propagation(eroded_img, mask=binary_img)

tmp = np.logical_not(reconstruct_img)
eroded_tmp = ndimage.binary_erosion(tmp)
reconstruct_final = np.logical_not(
    ndimage.binary_propagation(eroded_tmp, mask=tmp))

print(np.abs(mask - close_img).mean())

print(np.abs(mask - reconstruct_final).mean())

plt.figure(figsize=(12, 3))

l = 128
Code example #46
0
def morphological_geodesic_active_contour(gimage,
                                          num_iter,
                                          init_level_set='disk',
                                          smoothing=1,
                                          threshold='auto',
                                          balloon=0,
                                          iter_callback=lambda x: None):
    """Morphological Geodesic Active Contours (MorphGAC).

    Geodesic active contours implemented with morphological operators. It can
    be used to segment objects with visible but noisy, cluttered, broken
    borders.

    Parameters
    ----------
    gimage : (M, N) or (L, M, N) array
        Preprocessed image or volume to be segmented. This is very rarely the
        original image. Instead, this is usually a preprocessed version of the
        original image that enhances and highlights the borders (or other
        structures) of the object to segment.
        `morphological_geodesic_active_contour` will try to stop the contour
        evolution in areas where `gimage` is small. See
        `morphsnakes.inverse_gaussian_gradient` as an example function to
        perform this preprocessing. Note that the quality of
        `morphological_geodesic_active_contour` might greatly depend on this
        preprocessing.
    num_iter : uint
        Number of iterations to run.
    init_level_set : str, (M, N) array, or (L, M, N) array
        Initial level set. If an array is given, it will be binarized and used
        as the initial level set. If a string is given, it defines the method
        to generate a reasonable initial level set with the shape of the
        `image`. Accepted values are 'checkerboard' and 'disk'. See the
        documentation of `checkerboard_level_set` and `disk_level_set`
        respectively for details about how these level sets are created.
    smoothing : uint, optional
        Number of times the smoothing operator is applied per iteration.
        Reasonable values are around 1-4. Larger values lead to smoother
        segmentations.
    threshold : float, optional
        Areas of the image with a value smaller than this threshold will be
        considered borders. The evolution of the contour will stop in these
        areas.
    balloon : float, optional
        Balloon force to guide the contour in non-informative areas of the
        image, i.e., areas where the gradient of the image is too small to push
        the contour towards a border. A negative value will shrink the contour,
        while a positive value will expand the contour in these areas. Setting
        this to zero will disable the balloon force.
    iter_callback : function, optional
        If given, this function is called once per iteration with the current
        level set as the only argument. This is useful for debugging or for
        plotting intermediate results during the evolution.

    Returns
    -------
    out : (M, N) or (L, M, N) array
        Final segmentation (i.e., the final level set)

    See Also
    --------
    inverse_gaussian_gradient, disk_level_set, checkerboard_level_set

    Notes
    -----
    This is a version of the Geodesic Active Contours (GAC) algorithm that uses
    morphological operators instead of solving partial differential equations
    (PDEs) for the evolution of the contour. The set of morphological operators
    used in this algorithm are proved to be infinitesimally equivalent to the
    GAC PDEs (see [1]_). However, morphological operators do not suffer
    from the numerical stability issues typically found in PDEs (e.g., it is
    not necessary to find the right time step for the evolution), and are
    computationally faster.

    The algorithm and its theoretical derivation are described in [1]_.

    References
    ----------
    .. [1] A Morphological Approach to Curvature-based Evolution of Curves and
           Surfaces, Pablo Márquez-Neila, Luis Baumela, Luis Álvarez. In IEEE
           Transactions on Pattern Analysis and Machine Intelligence (PAMI),
           2014, :DOI:`10.1109/TPAMI.2013.106`
    """

    image = gimage
    init_level_set = _init_level_set(init_level_set, image.shape)

    _check_input(image, init_level_set)

    if threshold == 'auto':
        threshold = np.percentile(image, 40)

    structure = np.ones((3, ) * len(image.shape), dtype=np.int8)
    dimage = np.gradient(image)
    # threshold_mask = image > threshold
    if balloon != 0:
        threshold_mask_balloon = image > threshold / np.abs(balloon)

    u = np.int8(init_level_set > 0)

    iter_callback(u)

    for _ in range(num_iter):

        # Balloon
        if balloon > 0:
            aux = ndi.binary_dilation(u, structure)
        elif balloon < 0:
            aux = ndi.binary_erosion(u, structure)
        if balloon != 0:
            u[threshold_mask_balloon] = aux[threshold_mask_balloon]

        # Image attachment
        aux = np.zeros_like(image)
        du = np.gradient(u)
        for el1, el2 in zip(dimage, du):
            aux += el1 * el2
        u[aux > 0] = 1
        u[aux < 0] = 0

        # Smoothing
        for _ in range(smoothing):
            u = _curvop(u)

        iter_callback(u)

    return u
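
# A hedged usage sketch (added for illustration, not from the original source): segment a
# bright disk in a noisy synthetic image. The hand-rolled inverse-gradient map below stands
# in for the `inverse_gaussian_gradient` preprocessing mentioned in the docstring (small
# near edges, close to 1 in flat regions). It assumes the module's helpers referenced above
# (`_init_level_set` with the 'disk' option, `_check_input`, `_curvop`) are available, as in
# skimage.segmentation.
import numpy as np
from scipy import ndimage as ndi

def _morph_gac_demo():
    rng = np.random.default_rng(0)
    img = np.zeros((128, 128))
    yy, xx = np.mgrid[:128, :128]
    img[(yy - 64) ** 2 + (xx - 64) ** 2 < 30 ** 2] = 1.0   # bright disk
    img += 0.1 * rng.standard_normal(img.shape)            # additive noise

    smoothed = ndi.gaussian_filter(img, sigma=3)
    gy, gx = np.gradient(smoothed)
    gimage = 1.0 / np.sqrt(1.0 + 100.0 * (gx ** 2 + gy ** 2))  # ~0 on the border, ~1 elsewhere

    # Start from the default disk level set and let the negative balloon force
    # shrink the contour until it locks onto the low-gimage border.
    return morphological_geodesic_active_contour(
        gimage, num_iter=100, init_level_set='disk',
        smoothing=1, threshold='auto', balloon=-1)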
Code example #47
0
    im = np.transpose(im, [2, 0, 1])
    return im


for _i in range(len(seg_list)):

    img_name = image_list[_i]
    seg_name = seg_list[_i]

    seg = np.array(Image.open(seg_name), dtype=np.float32)
    seg = cv2.resize(seg, (256, 256))
    seg = (seg - 127.5) / 127.5
    seg = np.transpose(seg, [2, 0, 1])
    seg = 0.5 * seg + 0.5
    seg = (seg[0, :, :] > 0.999999).astype(int)
    seg = ndimage.binary_erosion(seg, structure=np.ones(
        (2, 2))).astype(dtype=np.float32)
    seg = seg[np.newaxis, :, :]

    img = np.array(Image.open(img_name), dtype=np.float32)[:, :, :3]
    img = cv2.resize(img, (256, 256))
    img = (img / 255.0)**2.2
    img = 2 * img - 1
    img = np.transpose(img, [2, 0, 1])

    seg = torch.from_numpy(seg).cuda().unsqueeze(0)

    image_s = torch.from_numpy(img).cuda().unsqueeze(0) * seg
    light_s = torch.zeros(seg.size(0), 3).float().cuda().clamp(0, 1)

    if angle == 0:
        light_t = gen_sliding_lights()
Code example #48
0
def get_edge_image(data):
    inside = np.empty_like(data)
    for z in range(data.shape[0]):
        inside[z] = ndimage.binary_erosion(data[z]).astype(data.dtype)
    return data - inside
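
# A small usage sketch (added for illustration, not in the original example), assuming
# numpy and scipy.ndimage are imported as in the source module: apply the slice-wise
# erosion above to a toy 3-D binary volume; only the one-pixel outline of the filled
# square survives in each z-slice.
import numpy as np
from scipy import ndimage

toy_volume = np.zeros((2, 7, 7), dtype=np.uint8)
toy_volume[:, 2:5, 2:5] = 1
toy_edges = get_edge_image(toy_volume)   # interior voxels are eroded away, leaving the outline
print(toy_edges[0])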
Code example #49
0
def run_geometrics(config_file, ref_path=None, test_path=None, output_path=None,
                   align=True, allow_test_ignore=False, save_aligned=False, save_plots=None):

    # check inputs
    if not os.path.isfile(config_file):
        raise IOError("Configuration file does not exist")

    if output_path is not None and not os.path.isdir(output_path):
        raise IOError('"output_path" not a valid folder <{}>'.format(output_path))

    # parse configuration
    config_path = os.path.dirname(config_file)

    config = geo.parse_config(config_file,
                              refpath=(ref_path or config_path),
                              testpath=(test_path or config_path))

    # Get test model information from configuration file.
    test_dsm_filename = config['INPUT.TEST']['DSMFilename']
    test_dtm_filename = config['INPUT.TEST'].get('DTMFilename', None)
    test_cls_filename = config['INPUT.TEST']['CLSFilename']
    test_conf_filename = config['INPUT.TEST'].get('CONFFilename', None)
    test_mtl_filename = config['INPUT.TEST'].get('MTLFilename', None)

    # Get reference model information from configuration file.
    ref_dsm_filename = config['INPUT.REF']['DSMFilename']
    ref_dtm_filename = config['INPUT.REF']['DTMFilename']
    ref_cls_filename = config['INPUT.REF']['CLSFilename']
    ref_mtl_filename = config['INPUT.REF'].get('MTLFilename', None)

    # Get material label names and list of material labels to ignore in evaluation.
    material_names = config['MATERIALS.REF']['MaterialNames']
    material_indices_to_ignore = config['MATERIALS.REF']['MaterialIndicesToIgnore']

    # Get image pair files
    performer_pair_file = config['INPUT.TEST'].get('ImagePairFilename', None)
    performer_pair_data_file = config['INPUT.TEST'].get('ImagePairDataFilename', None)
    performer_files_chosen_file = config['INPUT.TEST'].get('FilesChosenFilename', None)
    
    # Get plot settings from configuration file
    PLOTS_SHOW = config['PLOTS']['ShowPlots']
    PLOTS_SAVE = config['PLOTS']['SavePlots']
    if save_plots is not None:  # Command-line argument overrides config file setting
        PLOTS_SAVE = save_plots
    PLOTS_ENABLE = PLOTS_SHOW or PLOTS_SAVE

    # default output path
    if output_path is None:
        output_path = os.path.dirname(test_dsm_filename)

    if align:
        align = config['OPTIONS']['AlignModel']
    save_aligned = config['OPTIONS']['SaveAligned'] | save_aligned

    # Determine multiprocessing usage
    use_multiprocessing = config['OPTIONS']['UseMultiprocessing']

    # Configure plotting
    basename = os.path.basename(test_dsm_filename)
    if PLOTS_ENABLE:
        plot = geo.plot(saveDir=output_path, autoSave=PLOTS_SAVE, savePrefix=basename + '_', badColor='black', showPlots=PLOTS_SHOW, dpi=900)
    else:
        plot = None

    # copy testDSM to the output path
    # this is a workaround for the "align3d" function, which currently always
    # saves new files to the same path as the testDSM
    src = test_dsm_filename
    dst = os.path.join(output_path, os.path.basename(src))
    if not os.path.isfile(dst): shutil.copyfile(src, dst)
    test_dsm_filename_copy = dst

    # Register test model to ground truth reference model.
    if not align:
        print('\nSKIPPING REGISTRATION')
        xyz_offset = (0.0, 0.0, 0.0)
    else:
        print('\n=====REGISTRATION====='); sys.stdout.flush()
        try:
            align3d_path = config['REGEXEPATH']['Align3DPath']
        except Exception:
            align3d_path = None
        xyz_offset = geo.align3d(ref_dsm_filename, test_dsm_filename_copy, exec_path=align3d_path)
        print(xyz_offset)
        #xyz_offset = geo.align3d_python(ref_dsm_filename, test_dsm_filename_copy)

    # Explicitly assign a no data value to warped images to track filled pixels
    no_data_value = -9999

    # Read reference model files.
    print("\nReading reference model files...")
    ref_cls, tform = geo.imageLoad(ref_cls_filename)
    ref_dsm = geo.imageWarp(ref_dsm_filename, ref_cls_filename, noDataValue=no_data_value)
    ref_dtm = geo.imageWarp(ref_dtm_filename, ref_cls_filename, noDataValue=no_data_value)

    # Validate shape of reference files
    if ref_cls.shape != ref_dsm.shape or ref_cls.shape != ref_dtm.shape:
        print("Need to rescale")

    if ref_mtl_filename:
        ref_mtl = geo.imageWarp(ref_mtl_filename, ref_cls_filename, interp_method=gdalconst.GRA_NearestNeighbour).astype(np.uint8)
        if save_aligned:
            geo.arrayToGeotiff(ref_mtl, os.path.join(output_path, basename + '_ref_mtl_reg_out'), ref_cls_filename, no_data_value)
    else:
        ref_mtl = None
        print('NO REFERENCE MTL')

    # Read test model files and apply XYZ offsets.
    print("\nReading test model files...")
    test_cls = geo.imageWarp(test_cls_filename, ref_cls_filename, xyz_offset, gdalconst.GRA_NearestNeighbour)
    test_dsm = geo.imageWarp(test_dsm_filename, ref_cls_filename, xyz_offset, noDataValue=no_data_value)

    if test_dtm_filename:
        test_dtm = geo.imageWarp(test_dtm_filename, ref_cls_filename, xyz_offset, noDataValue=no_data_value)
        if save_aligned:
            geo.arrayToGeotiff(test_dtm, os.path.join(output_path, basename + '_test_dtm_reg_out'), ref_cls_filename,
                               no_data_value)
    else:
        print('NO TEST DTM: defaults to reference DTM')
        test_dtm = ref_dtm

    if test_conf_filename:
        test_conf = geo.imageWarp(test_conf_filename,  ref_cls_filename, xyz_offset, noDataValue=no_data_value)
        conf_viz_path = Path(str(Path(test_conf_filename).parent.absolute()),
                             Path(test_conf_filename).stem + '_VIZ.tif')
        test_conf_viz = geo.imageWarpRGB(str(conf_viz_path.absolute()), ref_cls_filename, xyz_offset)
        geo.arrayToGeotiffRGB(test_conf_viz, os.path.join(output_path, 'CONF_VIZ_aligned'), ref_cls_filename,
                              no_data_value)

        geo.arrayToGeotiff(test_conf, os.path.join(output_path, 'CONF_aligned'), ref_cls_filename,
                           no_data_value)
    else:
        test_conf = None
        print("NO TEST CONF")

    if save_aligned:
        geo.arrayToGeotiff(test_cls, os.path.join(output_path, basename + '_test_cls_reg_out'), ref_cls_filename,
                           no_data_value)
        geo.arrayToGeotiff(test_dsm, os.path.join(output_path, basename + '_test_dsm_reg_out'), ref_cls_filename,
                           no_data_value)
        geo.arrayToGeotiff(ref_cls, os.path.join(output_path, basename + '_ref_cls_reg_out'), ref_cls_filename,
                           no_data_value)
        geo.arrayToGeotiff(ref_dsm, os.path.join(output_path, basename + '_ref_dsm_reg_out'), ref_cls_filename,
                           no_data_value)
        geo.arrayToGeotiff(ref_dtm, os.path.join(output_path, basename + '_ref_dtm_reg_out'), ref_cls_filename,
                           no_data_value)

    if test_mtl_filename:
        test_mtl = geo.imageWarp(test_mtl_filename, ref_cls_filename, xyz_offset,
                                 gdalconst.GRA_NearestNeighbour).astype(np.uint8)
        if save_aligned:
            geo.arrayToGeotiff(test_mtl, os.path.join(output_path, basename + '_test_mtl_reg_out'), ref_cls_filename,
                               no_data_value)
    else:
        print('NO TEST MTL')

    print("\n\n")

    # Apply registration offset, only to valid data to allow better tracking of bad data
    print("Applying offset of Z:  %f" % (xyz_offset[2]))
    test_valid_data = (test_dsm != no_data_value)
    if test_dtm_filename:
        test_valid_data &= (test_dtm != no_data_value)

    test_dsm[test_valid_data] = test_dsm[test_valid_data] + xyz_offset[2]
    if test_dtm_filename:
        test_dtm[test_valid_data] = test_dtm[test_valid_data] + xyz_offset[2]

    # Create mask for ignoring points labeled NoData in reference files.
    ref_dsm_no_data_value = no_data_value
    ref_dtm_no_data_value = no_data_value
    ref_cls_no_data_value = geo.getNoDataValue(ref_cls_filename)
    if ref_cls_no_data_value != 65:
        print("WARNING! NODATA TAG IN CLS FILE IS LIKELY INCORRECT. IT SHOULD BE 65.")
        ref_cls_no_data_value = 65
    ignore_mask = np.zeros_like(ref_cls, bool)

    # Get reference and test classifications
    ref_cls_match_sets, test_cls_match_sets = geo.getMatchValueSets(config['INPUT.REF']['CLSMatchValue'],
                                                                    config['INPUT.TEST']['CLSMatchValue'],
                                                                    np.unique(ref_cls).tolist(),
                                                                    np.unique(test_cls).tolist())
    # Add ignore mask on boundaries of cls
    # Ignore edges
    ignore_edges = False
    if ignore_edges is True:
        print("Applying ignore mask to edges of buildings...")
        for index, (ref_match_value, test_match_value) in enumerate(zip(ref_cls_match_sets, test_cls_match_sets)):
            import scipy.ndimage as ndimage
            ref_mask = np.zeros_like(ref_cls, bool)
            for v in ref_match_value:
                ref_mask[ref_cls == v] = True
            strel = ndimage.generate_binary_structure(2, 2)
            dilated_cls = ndimage.binary_dilation(ref_mask, structure=strel, iterations=3)
            eroded_cls = ndimage.binary_erosion(ref_mask, structure=strel, iterations=3)
            dilation_mask = np.bitwise_xor(ref_mask, dilated_cls)
            erosion_mask = np.bitwise_xor(ref_mask, eroded_cls)
            ref_cls[dilation_mask == True] = ref_cls_no_data_value
            ref_cls[erosion_mask == True] = ref_cls_no_data_value
        print("Finished applying ignore mask to edges of buildings...")

    # Create ignore mask
    if ref_dsm_no_data_value is not None:
        ignore_mask[ref_dsm == ref_dsm_no_data_value] = True
    if ref_dtm_no_data_value is not None:
        ignore_mask[ref_dtm == ref_dtm_no_data_value] = True
    if ref_cls_no_data_value is not None:
        ignore_mask[ref_cls == ref_cls_no_data_value] = True

    # optionally ignore test NoDataValue(s)
    if allow_test_ignore:
        if allow_test_ignore == 1:
            test_cls_no_data_value = geo.getNoDataValue(test_cls_filename)
            if test_cls_no_data_value is not None:
                print('Ignoring test CLS NoDataValue')
                ignore_mask[test_cls == test_cls_no_data_value] = True

        elif allow_test_ignore == 2:
            test_dsm_no_data_value = no_data_value
            test_dtm_no_data_value = no_data_value
            if test_dsm_no_data_value is not None:
                print('Ignoring test DSM NoDataValue')
                ignore_mask[test_dsm == test_dsm_no_data_value] = True
            if test_dtm_filename and test_dtm_no_data_value is not None:
                print('Ignoring test DTM NoDataValue')
                ignore_mask[test_dtm == test_dtm_no_data_value] = True

        else:
            raise IOError('Unrecognized test ignore value={}'.format(allow_test_ignore))

        print("")

    # sanity check
    if np.all(ignore_mask):
        raise ValueError('All pixels are ignored')

    ##### COMMENT HERE FOR TESTING METRICS IMAGES #####
    # report "data voids"
    num_data_voids = np.sum(ignore_mask > 0)
    print('Number of data voids in ignore mask = ', num_data_voids)

    # If quantizing to voxels, then match vertical spacing to horizontal spacing.
    QUANTIZE = config['OPTIONS']['QuantizeHeight']
    if QUANTIZE:
        unit_hgt = geo.getUnitHeight(tform)
        ref_dsm = np.round(ref_dsm / unit_hgt) * unit_hgt
        ref_dtm = np.round(ref_dtm / unit_hgt) * unit_hgt
        test_dsm = np.round(test_dsm / unit_hgt) * unit_hgt
        test_dtm = np.round(test_dtm / unit_hgt) * unit_hgt
        no_data_value = np.round(no_data_value / unit_hgt) * unit_hgt

    if PLOTS_ENABLE:
        # Make image pair plots
        plot.make_image_pair_plots(performer_pair_data_file, performer_pair_file, performer_files_chosen_file, 201,
                                   saveName="image_pair_plot")
        # Reference models can include data voids, so ignore invalid data on display
        plot.make(ref_dsm, 'Reference DSM', 111, colorbar=True, saveName="input_refDSM", badValue=no_data_value)
        plot.make(ref_dtm, 'Reference DTM', 112, colorbar=True, saveName="input_refDTM", badValue=no_data_value)
        plot.make(ref_cls, 'Reference Classification', 113,  colorbar=True, saveName="input_refClass")

        # Test models shouldn't have any invalid data
        # so display the invalid values to highlight them,
        # unlike with the refDSM/refDTM
        plot.make(test_dsm, 'Test DSM', 151, colorbar=True, saveName="input_testDSM")
        plot.make(test_dtm, 'Test DTM', 152, colorbar=True, saveName="input_testDTM")
        plot.make(test_cls, 'Test Classification', 153, colorbar=True, saveName="input_testClass")

        plot.make(ignore_mask, 'Ignore Mask', 181, saveName="input_ignoreMask")

        # material maps
        if ref_mtl_filename and test_mtl_filename:
            plot.make(ref_mtl, 'Reference Materials', 191, colorbar=True, saveName="input_refMTL", vmin=0, vmax=13)
            plot.make(test_mtl, 'Test Materials', 192, colorbar=True, saveName="input_testMTL", vmin=0, vmax=13)

    # Run the threshold geometry metrics and report results.
    metrics = dict()

    # Run threshold geometry and relative accuracy
    threshold_geometry_results = []
    relative_accuracy_results = []
    objectwise_results = []

    if PLOTS_ENABLE:
        # Update plot prefix to include a counter that is unique for each set of CLS values evaluated
        original_save_prefix = plot.savePrefix

    # Loop through sets of CLS match values
    for index, (ref_match_value,test_match_value) in enumerate(zip(ref_cls_match_sets, test_cls_match_sets)):
        print("Evaluating CLS values")
        print("  Reference match values: " + str(ref_match_value))
        print("  Test match values: " + str(test_match_value))

        # object masks based on CLSMatchValue(s)
        ref_mask = np.zeros_like(ref_cls, bool)
        for v in ref_match_value:
            ref_mask[ref_cls == v] = True

        test_mask = np.zeros_like(test_cls, bool)
        if len(test_match_value):
            for v in test_match_value:
                test_mask[test_cls == v] = True

        if PLOTS_ENABLE:
            plot.savePrefix = original_save_prefix + "%03d" % index + "_"
            plot.make(test_mask.astype(int), 'Test Evaluation Mask', 154, saveName="input_testMask")
            plot.make(ref_mask.astype(int), 'Reference Evaluation Mask', 114, saveName="input_refMask")

        if config['OBJECTWISE']['Enable']:
            print("\nRunning objectwise metrics...")
            merge_radius = config['OBJECTWISE']['MergeRadius']
            [result, test_ndx, ref_ndx] = geo.run_objectwise_metrics(ref_dsm, ref_dtm, ref_mask, test_dsm, test_dtm,
                                                                     test_mask, tform, ignore_mask, merge_radius,
                                                                     plot=plot, geotiff_filename=ref_dsm_filename,
                                                                     use_multiprocessing=use_multiprocessing)

            # Get UTM coordinates from pixel coordinates in building centroids
            print("Creating KML and CSVs...")
            import gdal, osr, simplekml, csv
            kml = simplekml.Kml()
            ds = gdal.Open(ref_dsm_filename)
            # get CRS from dataset
            crs = osr.SpatialReference()
            crs.ImportFromWkt(ds.GetProjectionRef())
            # create lat/long crs with WGS84 datum
            crsGeo = osr.SpatialReference()
            crsGeo.ImportFromEPSG(4326)  # 4326 is the EPSG id of lat/long crs
            t = osr.CoordinateTransformation(crs, crsGeo)
            # Use CORE3D objectwise
            current_class = test_match_value[0]
            with open(Path(output_path, "objectwise_numbers_class_" + str(current_class) + ".csv"), mode='w') as \
                    objectwise_csv:
                objectwise_writer = csv.writer(objectwise_csv, delimiter=',', quotechar='"',
                                               quoting=csv.QUOTE_MINIMAL)
                objectwise_writer.writerow(
                    ['Index', 'iou_2d', 'iou_3d', 'hrmse', 'zrmse', 'x_coord', 'y_coord', 'geo_x_coord',
                     'geo_y_coord', 'long', 'lat'])
                for current_object in result['objects']:
                    test_index = current_object['test_objects'][0]
                    iou_2d = current_object['threshold_geometry']['2D']['jaccardIndex']
                    iou_3d = current_object['threshold_geometry']['3D']['jaccardIndex']
                    hrmse = current_object['relative_accuracy']['hrmse']
                    zrmse = current_object['relative_accuracy']['zrmse']
                    x_coords, y_coords = np.where(test_ndx == test_index)
                    x_coord = np.average(x_coords)
                    y_coord = np.average(y_coords)
                    geo_x_coord = tform[0] + y_coord * tform[1] + x_coord * tform[2]
                    geo_y_coord = tform[3] + y_coord * tform[4] + x_coord * tform[5]
                    (lat, long, z) = t.TransformPoint(geo_x_coord, geo_y_coord)
                    objectwise_writer.writerow([test_index, iou_2d, iou_3d, hrmse, zrmse, x_coord, y_coord,
                                                geo_x_coord, geo_y_coord, long, lat])
                    pnt = kml.newpoint(name="Building Index: " + str(test_index),
                                       description="2D IOU: " + str(iou_2d) + ' 3D IOU: ' + str(iou_3d) + ' HRMSE: '
                                                   + str(hrmse) + ' ZRMSE: ' + str(zrmse),
                                       coords=[(lat, long)])
                kml.save(Path(output_path, "objectwise_ious_class_" + str(current_class) + ".kml"))

            # Use FFDA objectwise
            with open(Path(output_path, "objectwise_numbers_no_morphology_class_" + str(current_class) + ".csv"),
                      mode='w') as objectwise_csv:
                objectwise_writer = csv.writer(objectwise_csv, delimiter=',', quotechar='"',
                                               quoting=csv.QUOTE_MINIMAL)
                objectwise_writer.writerow(['iou', 'x_coord', 'y_coord', 'geo_x_coord',
                                            'geo_y_coord', 'long', 'lat'])
                for i in result['metrics_container_no_merge'].iou_per_gt_building.keys():
                    iou = result['metrics_container_no_merge'].iou_per_gt_building[i][0]
                    x_coord = result['metrics_container_no_merge'].iou_per_gt_building[i][1][0]
                    y_coord = result['metrics_container_no_merge'].iou_per_gt_building[i][1][1]
                    geo_x_coord = tform[0] + y_coord * tform[1] + x_coord * tform[2]
                    geo_y_coord = tform[3] + y_coord * tform[4] + x_coord * tform[5]
                    (lat, long, z) = t.TransformPoint(geo_x_coord, geo_y_coord)
                    objectwise_writer.writerow([iou, x_coord, y_coord, geo_x_coord, geo_y_coord, long, lat])
                    pnt = kml.newpoint(name="Building Index: " + str(i), description=str(iou), coords=[(lat, long)])
            kml.save(Path(output_path, "objectwise_ious_no_morphology_class_" + str(current_class) + ".kml"))
            # Result
            if ref_match_value == test_match_value:
                result['CLSValue'] = ref_match_value
            else:
                result['CLSValue'] = {'Ref': ref_match_value, "Test": test_match_value}
            # Delete non-json dumpable metrics
            del result['metrics_container_no_merge'], result['metrics_container_merge_fp'], result[
                'metrics_container_merge_fn']
            objectwise_results.append(result)

            # Save index files to compute objectwise metrics
            obj_save_prefix = basename + "_%03d" % index + "_"
            geo.arrayToGeotiff(test_ndx, os.path.join(output_path, obj_save_prefix + '_test_ndx_objs'),
                               ref_cls_filename, no_data_value)
            geo.arrayToGeotiff(ref_ndx, os.path.join(output_path, obj_save_prefix + '_ref_ndx_objs'),
                               ref_cls_filename,
                               no_data_value)

        # Evaluate threshold geometry metrics using refDTM as the testDTM to mitigate effects of terrain modeling
        # uncertainty
        result, _, stoplight_fn, errhgt_fn = geo.run_threshold_geometry_metrics(ref_dsm, ref_dtm, ref_mask, test_dsm, test_dtm, test_mask, tform,
                                                    ignore_mask, testCONF=test_conf, plot=plot)
        if ref_match_value == test_match_value:
            result['CLSValue'] = ref_match_value
        else:
            result['CLSValue'] = {'Ref': ref_match_value, "Test": test_match_value}
        threshold_geometry_results.append(result)

        # Run the relative accuracy metrics and report results.
        # Skip relative accuracy if all of testMask or refMask is assigned as "object"
        if not ((ref_mask.size == np.count_nonzero(ref_mask)) or (test_mask.size == np.count_nonzero(test_mask))) and len(test_match_value) != 0:
            try:
                result = geo.run_relative_accuracy_metrics(ref_dsm, test_dsm, ref_mask, test_mask, ignore_mask,
                                                           geo.getUnitWidth(tform), plot=plot)
                if ref_match_value == test_match_value:
                    result['CLSValue'] = ref_match_value
                else:
                    result['CLSValue'] = {'Ref': ref_match_value, "Test": test_match_value}
                relative_accuracy_results.append(result)
            except Exception as e:
                print(str(e))

    if PLOTS_ENABLE:
        # Reset plot prefix
        plot.savePrefix = original_save_prefix

    metrics['threshold_geometry'] = threshold_geometry_results
    metrics['relative_accuracy'] = relative_accuracy_results
    metrics['objectwise'] = objectwise_results

    if align:
        metrics['registration_offset'] = xyz_offset
        metrics['geolocation_error'] = np.linalg.norm(xyz_offset)

    # Run the terrain model metrics and report results.
    if test_dtm_filename:
        dtm_z_threshold = config['OPTIONS'].get('TerrainZErrorThreshold', 1)

        # Make a reference mask for terrain evaluation that identifies elevated objects where the underlying
        # terrain estimate is expected to be inaccurate
        dtm_cls_ignore_values = config['INPUT.REF'].get('TerrainCLSIgnoreValues', [6, 17]) # Default to building and bridge deck
        dtm_cls_ignore_values = geo.validateMatchValues(dtm_cls_ignore_values,np.unique(ref_cls).tolist())
        ref_mask_terrain_acc = np.zeros_like(ref_cls, bool)
        for v in dtm_cls_ignore_values:
            ref_mask_terrain_acc[ref_cls == v] = True

        metrics['terrain_accuracy'] = geo.run_terrain_accuracy_metrics(ref_dtm, test_dtm, ref_mask_terrain_acc,
                                                                       dtm_z_threshold, plot=plot)
    else:
        print('WARNING: No test DTM file, skipping terrain accuracy metrics')

    # Run the threshold material metrics and report results.
    if test_mtl_filename and ref_mtl is not None:
        metrics['threshold_materials'] = geo.run_material_metrics(ref_ndx, ref_mtl, test_mtl, material_names,
                                                                  material_indices_to_ignore, plot=plot)
    else:
        print('WARNING: No test MTL file or no reference material, skipping material metrics')

    fileout = os.path.join(output_path, os.path.basename(config_file) + "_metrics.json")
    with open(fileout, 'w') as fid:
        json.dump(metrics, fid, indent=2)
    print(json.dumps(metrics, indent=2))
    print("Metrics report: " + fileout)

    #  If displaying figures, wait for user before exiting
    if PLOTS_SHOW:
            input("Press Enter to continue...")

    # Write final metrics out
    output_folder = os.path.join(output_path, "metrics_final")
    try:
        os.mkdir(output_folder)
    except OSError as e:
        if e.errno == errno.EEXIST:
            pass
        else:
            print("Can't create directory, please check permissions...")
            raise

    # Run roof slope metrics
    try:
        from core3dmetrics.geometrics.ang import calculate_metrics as calculate_roof_metrics
    except ImportError:
        from ang import calculate_metrics as calculate_roof_metrics

    IOUC, IOUZ, IOUAGL, IOUMZ, orderRMS = calculate_roof_metrics(ref_dsm, ref_dtm, ref_cls, test_dsm, test_dtm,
                           test_cls, tform, kernel_radius=3, output_path=output_path)
    files = [str(Path(output_path, filename).absolute()) for filename in os.listdir(output_path) if
             filename.startswith("Roof")]

    # Save all of Myron's outputs here
    metrics_formatted = {}
    metrics_formatted["2D"] = {}
    metrics_formatted["2D"]["Precision"] = metrics["threshold_geometry"][0]['2D']['precision']
    metrics_formatted["2D"]["Recall"] = metrics["threshold_geometry"][0]['2D']['recall']
    metrics_formatted["2D"]["IOU"] = metrics["threshold_geometry"][0]['2D']['jaccardIndex']
    metrics_formatted["3D"] = {}
    metrics_formatted["3D"]["Precision"] = metrics["threshold_geometry"][0]['3D']['precision']
    metrics_formatted["3D"]["Recall"] = metrics["threshold_geometry"][0]['3D']['recall']
    metrics_formatted["3D"]["IOU"] = metrics["threshold_geometry"][0]['3D']['jaccardIndex']
    metrics_formatted["ZRMS"] = metrics['relative_accuracy'][0]['zrmse']
    metrics_formatted["HRMS"] = metrics['relative_accuracy'][0]['hrmse']
    metrics_formatted["Slope RMS"] = orderRMS
    metrics_formatted["DTM RMS"] = metrics['terrain_accuracy']['zrmse']
    metrics_formatted["DTM Completeness"] = metrics['terrain_accuracy']['completeness']
    metrics_formatted["Z IOU"] = IOUZ
    metrics_formatted["AGL IOU"] = IOUAGL
    metrics_formatted["MODEL IOU"] = IOUMZ
    metrics_formatted["X Offset"] = xyz_offset[0]
    metrics_formatted["Y Offset"] = xyz_offset[1]
    metrics_formatted["Z Offset"] = xyz_offset[2]
    metrics_formatted["P Value"] = metrics['threshold_geometry'][0]['pearson']

    fileout = os.path.join(output_folder, "metrics.json")
    with open(fileout, 'w') as fid:
        json.dump(metrics_formatted, fid, indent=2)
    print(json.dumps(metrics_formatted, indent=2))

    # metrics.png
    if PLOTS_ENABLE:
        cls_iou_fn = [filename for filename in files if filename.endswith("CLS_IOU.tif")][0]
        cls_z_iou_fn = [filename for filename in files if filename.endswith("CLS_Z_IOU.tif")][0]
        cls_z_slope_fn = [filename for filename in files if filename.endswith("CLS_Z_SLOPE_IOU.tif")][0]
        if test_conf_filename:
            plot.make_final_metrics_images(stoplight_fn, errhgt_fn, Path(os.path.join(output_path, 'CONF_VIZ_aligned.tif')),
                                           cls_iou_fn, cls_z_iou_fn, cls_z_slope_fn, ref_cls, output_folder)

    # inputs.png
        plot.make_final_input_images_grayscale(ref_cls, ref_dsm, ref_dtm, test_cls,
                                                test_dsm, test_dtm, output_folder)
    # textured.png
    if config['BLENDER.TEST']['OBJDirectoryFilename']:
        try:
            from CORE3D_Perspective_Imagery import generate_blender_images
            objpath = config['BLENDER.TEST']['OBJDirectoryFilename']
            gsd = config['BLENDER.TEST']['GSD']
            Zup = config['BLENDER.TEST']['+Z']
            N = config['BLENDER.TEST']['OrbitalLocations']
            e = config['BLENDER.TEST']['ElevationAngle']
            f = config['BLENDER.TEST']['FocalLength']
            r = config['BLENDER.TEST']['RadialDistance']
            output_location = generate_blender_images(objpath, gsd, Zup, N, e, f, r, output_path)
            files = [str(Path(output_path, filename).absolute()) for filename in os.listdir(output_path) if
                     filename.startswith("persp_image")]
            files.append(files[0])
            # Make metrics image
            plot.make_final_input_images_rgb(files, output_folder)
            print("Done")
        except Exception:
            print("Could not render Blender images...")
    else:
        pass
Code example #50
0
def seg_erode(seg_d,
              iterations=1,
              background_idx=1,
              structure=None,
              min_vox_count=5,
              seg_null_value=0,
              VERBOSE=False):
    """
    Binary erosion (or dilation) of integer type segmentation data (np.array) with options
    If iterations < 0, performs binary dilation

    :param seg_d:           np.array of segmentation, integers
    :param iterations:      number of erosion iterations, if negative, provides the number of dilations (in this case, min_vox_count not used)
    :param background_idx:  value for background index, currently ignored (TODO: remove)
    :param structure:       binary structure for erosion from scipy.ndimage (ndimage.morphology.generate_binary_structure(3,1))
    :param min_vox_count:   minimum number of voxels that must remain in a segmentation; if fewer would remain, the label is not eroded
    :param seg_null_value:  value to set as null for binary erosion step (i.e., a value NOT in your segmentation index)
    :param VERBOSE:         spit out loads of text to stdout, because you can.
    :return: seg_shrunk_d   eroded (or dilated) version of segmentation
    """

    import scipy.ndimage as ndi
    import numpy as np

    if iterations >= 0:
        pos_iter = True
    else:
        iterations = iterations * -1
        pos_iter = False

    if structure is None:
        structure = ndi.morphology.generate_binary_structure(3, 1)
    if seg_null_value == 0:
        seg_shrunk_d = np.zeros_like(seg_d)
        temp_d = np.zeros_like(seg_d)
    else:
        seg_shrunk_d = np.ones_like(seg_d) * seg_null_value
        temp_d = np.ones_like(seg_d) * seg_null_value

    seg_idxs = np.unique(seg_d)

    if seg_null_value in seg_idxs:
        print("Shit, your null value is also an index. This will not work.")
        print(
            "Set it to a suitably strange value that is not already an index. {0,999}"
        )
        return None
    if VERBOSE:
        print("Indices:")
    for seg_idx in seg_idxs:
        if VERBOSE:
            print(seg_idx, end=" ")
        if (background_idx is not None) and (background_idx == seg_idx):
            # just set the value to the background value, and be done with it
            seg_shrunk_d[seg_d == seg_idx] = seg_idx
            if VERBOSE:
                print("[bckg]", end=" ")
        else:
            temp_d[seg_d == seg_idx] = 1
            # messy: does not exit the loop early when it has already gone too far, but it still works
            for idx in range(0, iterations):
                if pos_iter:
                    temp_temp_d = ndi.binary_erosion(temp_d,
                                                     iterations=1,
                                                     structure=structure)
                else:
                    temp_temp_d = ndi.binary_dilation(temp_d,
                                                      iterations=1,
                                                      structure=structure)
                if np.sum(temp_temp_d) >= min_vox_count:
                    temp_d = temp_temp_d
                    if VERBOSE:
                        print("[y]"),
                else:
                    if VERBOSE:
                        print("[no]"),
            seg_shrunk_d[temp_d == 1] = seg_idx
            temp_d[:, :, :] = seg_null_value
            if VERBOSE:
                print(seg_idx)
        if VERBOSE:
            print("")
    return seg_shrunk_d
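
# A minimal usage sketch (added for illustration, not part of the original function): erode
# each label of a toy integer segmentation by one voxel, leaving the background (index 1)
# untouched and refusing to erode any label that would drop below min_vox_count voxels.
# The segmentation values here are invented.
import numpy as np

def _seg_erode_demo():
    seg = np.ones((8, 8, 8), dtype=np.int32)   # background index = 1 everywhere
    seg[1:6, 1:6, 1:6] = 2                     # a 5x5x5 block: erodes to 3x3x3 (27 voxels, kept)
    seg[6:8, 6:8, 6:8] = 3                     # a 2x2x2 block: erosion would leave 0 voxels, so it is kept as-is
    return seg_erode(seg, iterations=1, background_idx=1,
                     min_vox_count=5, seg_null_value=0)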
Code example #51
0
    def vis_forward(self, x):
        # x.shape: [N, C, H, W] (?, 1, 32, 32)
        x = x * 0.5 + 0.5
        if x.shape[1] == 3:
            x = x[:,0:1,:,:] * 0.299 + x[:,1:2,:,:] * 0.587 + x[:,2:3,:,:] * 0.114

        # simulate https://github.com/scikit-image/scikit-image/blob/master/skimage/feature/_canny.py: canny()

        # L175-L180
        bleed_over = self.gaussian(self.mask)
        x = self.gaussian(x)
        x = x / (bleed_over + 1e-12)

        vis1 = x.clone().detach()

        # debug_gaussian = x.data.clone()

        jsobel = self.sobel(x, axis=1)
        isobel = self.sobel(x, axis=0)

        # print(1, torch.max(jsobel))
        # print(2, torch.min(jsobel))

        abs_isobel = torch.abs(isobel)
        abs_jsobel = torch.abs(jsobel)
        magnitude2 = isobel ** 2 + jsobel ** 2
        magnitude = torch.sqrt(magnitude2 + self.eps)
        # magnitude = selfTF(self.thres, magnitude)

        vis2 = magnitude.clone().detach()

        # L186-L188
        #
        # Make the eroded mask. Setting the border value to zero will wipe
        # out the image edges for us.
        #
        # assert x.shape[0] == 1
        s = generate_binary_structure(2, 2)
        mask = self.mask.detach().cpu().numpy()[0, 0]  # mask.shape: [32, 32]
        eroded_mask = binary_erosion(mask, s, border_value=0)
        eroded_mask = eroded_mask & (magnitude2.detach().cpu().numpy()[0, 0] > 0)  # replace magnitude by magnitude2
        eroded_mask = torch.ByteTensor(eroded_mask.astype(np.uint8)).to(x.device)

        # L195-L212
        #
        # --------- Find local maxima --------------
        #
        local_maxima = torch.zeros(x.shape).byte().to(x.device)
        # ----- 0 to 45 degrees ------
        pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
        pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
        pts = pts_plus | pts_minus
        pts = eroded_mask & pts
        c1 = magnitude[:, :, 1:, :][pts[:, :, :-1, :]]
        c2 = magnitude[:, :, 1:, 1:][pts[:, :, :-1, :-1]]
        m = magnitude[pts]
        w = abs_jsobel[pts] / (abs_isobel[pts] + self.eps)
        c_plus = c2 * w + c1 * (1 - w) <= m

        s_0_45_1 = F.relu(-m + self.gamma + (c2 * w + c1 * (1 - w)))

        c1 = magnitude[:, :, :-1, :][pts[:, :, 1:, :]]
        c2 = magnitude[:, :, :-1, :-1][pts[:, :, 1:, 1:]]
        c_minus = c2 * w + c1 * (1 - w) <= m

        s_0_45_2 = F.relu(-m + self.gamma + (c2 * w + c1 * (1 - w)))
        s_0_45 = torch.max(s_0_45_1, s_0_45_2)

        local_maxima[pts] = c_plus & c_minus

        # L216-L228
        # ----- 45 to 90 degrees ------
        # Mix diagonal and vertical
        #
        pts_plus = (isobel >= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
        pts_minus = (isobel <= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
        pts = pts_plus | pts_minus
        pts = eroded_mask & pts
        c1 = magnitude[:, :, :, 1:][pts[:, :, :, :-1]]
        c2 = magnitude[:, :, 1:, 1:][pts[:, :, :-1, :-1]]
        m = magnitude[pts]
        w = abs_isobel[pts] / abs_jsobel[pts]
        c_plus = c2 * w + c1 * (1 - w) <= m

        s_45_90_1 = F.relu(-m + self.gamma + (c2 * w + c1 * (1 - w)))

        c1 = magnitude[:, :, :, :-1][pts[:, :, :, 1:]]
        c2 = magnitude[:, :, :-1, :-1][pts[:, :, 1:, 1:]]
        c_minus = c2 * w + c1 * (1 - w) <= m

        s_45_90_2 = F.relu(-m + self.gamma + (c2 * w + c1 * (1 - w)))
        s_45_90 = torch.max(s_45_90_1, s_45_90_2)

        local_maxima[pts] = c_plus & c_minus

        # L232-L244
        # ----- 90 to 135 degrees ------
        # Mix anti-diagonal and vertical
        #
        pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel <= abs_jsobel)
        pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel <= abs_jsobel)
        pts = pts_plus | pts_minus
        pts = eroded_mask & pts
        c1a = magnitude[:, :, :, 1:][pts[:, :, :, :-1]]
        c2a = magnitude[:, :, :-1, 1:][pts[:, :, 1:, :-1]]
        m = magnitude[pts]
        w = abs_isobel[pts] / abs_jsobel[pts]
        c_plus = c2a * w + c1a * (1.0 - w) <= m

        s_90_135_1 = F.relu(-m + self.gamma + (c2a * w + c1a * (1.0 - w)))

        c1 = magnitude[:, :, :, :-1][pts[:, :, :, 1:]]
        c2 = magnitude[:, :, 1:, :-1][pts[:, :, :-1, 1:]]
        c_minus = c2 * w + c1 * (1.0 - w) <= m

        s_90_135_2 = F.relu(-m + self.gamma + (c2 * w + c1 * (1.0 - w)))
        s_90_135 = torch.max(s_90_135_1, s_90_135_2)

        local_maxima[pts] = c_plus & c_minus

        # L248-L260
        # ----- 135 to 180 degrees ------
        # Mix anti-diagonal and anti-horizontal
        #
        pts_plus = (isobel <= 0) & (jsobel >= 0) & (abs_isobel >= abs_jsobel)
        pts_minus = (isobel >= 0) & (jsobel <= 0) & (abs_isobel >= abs_jsobel)
        pts = pts_plus | pts_minus
        pts = eroded_mask & pts
        c1 = magnitude[:, :, :-1, :][pts[:, :, 1:, :]]
        c2 = magnitude[:, :, :-1, 1:][pts[:, :, 1:, :-1]]
        m = magnitude[pts]
        w = abs_jsobel[pts] / abs_isobel[pts]
        c_plus = c2 * w + c1 * (1 - w) <= m

        s_135_180_1 = F.relu(-m + self.gamma + (c2 * w + c1 * (1 - w)))

        c1 = magnitude[:, :, 1:, :][pts[:, :, :-1, :]]
        c2 = magnitude[:, :, 1:, :-1][pts[:, :, :-1, 1:]]
        c_minus = c2 * w + c1 * (1 - w) <= m

        s_135_180_2 = F.relu(-m + self.gamma + (c2 * w + c1 * (1 - w)))
        s_135_180 = torch.max(s_135_180_1, s_135_180_2)

        local_maxima[pts] = c_plus & c_minus

        # Final part
        # local_maxima_np = (local_maxima.data.clone().cpu().numpy() == 1)[0][0]
        # magnitude_np = magnitude.data.clone().cpu().numpy()[0][0]
        local_maxima_np = (local_maxima.data.clone().cpu().numpy() == 1)
        magnitude_np = magnitude.data.clone().cpu().numpy()
        high_mask = local_maxima_np & (magnitude_np >= self.high_threshold)
        low_mask = local_maxima_np & (magnitude_np >= self.low_threshold)

        vis3 = high_mask.copy()
        vis4 = low_mask.copy()

        strel = np.ones((3, 3), bool)
        mask_final_list = []
        for i in range(x.shape[0]):
            labels, count = label(low_mask[i][0], strel)
            if count == 0:
                mask_final = low_mask[i][0]
            else:
                sums = (np.array(ndi.sum(high_mask[i][0], labels,
                                         np.arange(count, dtype=np.int32) + 1),
                                 copy=False, ndmin=1))
                good_label = np.zeros((count + 1,), bool)
                good_label[1:] = sums > 0
                output_mask = good_label[labels]
                mask_final = output_mask
            mask_final_list.append([mask_final])

        mask_final = np.concatenate((mask_final_list), 0)
        mask_final = np.reshape(mask_final.astype(np.float32),(x.shape[0], 1, 32, 32))

        # magnitude = magnitude * torch.FloatTensor(mask_final).cuda()
        magnitude = torch.FloatTensor(mask_final).cuda() + magnitude - magnitude.detach()
        test = magnitude[magnitude != 0]
        # magnitude = magnitude / magnitude.max().item()
        # if magnitude.max().item() == 0:
        #     print('yes')
        magnitude = (magnitude - 0.5) / 0.5
        # return s_0_45, s_45_90, s_90_135, s_135_180, local_maxima, test, magnitude
        return magnitude, vis1, vis2, vis3, vis4
Code example #52
0
def binary_closing(bImg, iterations=1):
    bImg = ndimage.binary_dilation(bImg, iterations=iterations)
    bImg = ndimage.binary_erosion(bImg, iterations=iterations)
    return bImg
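
# A short usage sketch (added for illustration, not in the original snippet), assuming
# scipy.ndimage is imported as `ndimage` as the helper above implies. Closing bridges the
# one-pixel gap between two blobs; dilation followed by erosion is also what
# ndimage.binary_closing does, although the library handles the image border slightly
# differently.
import numpy as np
from scipy import ndimage

toy = np.zeros((7, 7), dtype=bool)
toy[2:5, 1:3] = True
toy[2:5, 4:6] = True                 # two blobs separated by a one-pixel gap at column 3
closed = binary_closing(toy, iterations=1)
print(closed.astype(int))            # the gap is bridged at row 3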
Code example #53
0
    def padAnalysis(self):
        #Define the Top Threshold and Average top surface
        #self.ModArr = self.interpNaN(self.ModArr*-1)
        ZV = np.array(self.ModArr).reshape(np.array(self.ModArr).size).tolist()
        mask = ~np.isnan(ZV)
        if len(np.array(self.XV)[mask]) < 307200:
            ZV = PRF_Algo().interpNaN(
                np.array(self.XV)[mask],
                np.array(self.YV)[mask],
                np.array(ZV)[mask], self.XVx, self.YVx)
            self.ModArr = np.array(ZV).reshape(np.array(ZV).size).tolist()
        self.ModArr = np.array(self.ModArr) * -1  #Used to invert the data
        self.ModArr.shape = (len(self.Y_Array), len(self.X_Array))

        TempTop = self.ModArr
        TopHist = np.histogram(self.ModArr, bins=800)
        yhat_x = np.array(TopHist[1]).reshape(np.array(
            TopHist[1]).size).tolist()
        yhat = PRF_Algo().savitzky_golay(
            np.array(
                np.array(TopHist[0]).reshape(np.array(
                    TopHist[0]).size).tolist()).astype('float'), 25, 6)
        MaximaListBins = np.array(yhat)[argrelextrema(np.array(yhat),
                                                      np.greater,
                                                      order=1)[0]]
        MaximaListHeight = np.array(yhat_x)[argrelextrema(np.array(yhat),
                                                          np.greater,
                                                          order=1)[0]]
        MaximaVal = float(
            np.array(MaximaListHeight).reshape(
                np.array(MaximaListHeight).size).tolist()[
                    np.array(MaximaListBins).reshape(
                        np.array(MaximaListBins).size).tolist().index(
                            np.max(MaximaListBins))])
        MinimaListBins = np.array(yhat)[argrelextrema(np.array(yhat),
                                                      np.less,
                                                      order=20)[0]]
        MinimaListHeight = np.array(yhat_x)[argrelextrema(np.array(yhat),
                                                          np.less,
                                                          order=20)[0]]
        MinimaListHeight1 = np.array(MinimaListHeight).reshape(
            np.array(MinimaListHeight).size).tolist()
        MinimaListHeight1.reverse()
        FoundMin = False
        NewThresh = 0.
        for CutoffMinima in MinimaListHeight1:
            if (float(CutoffMinima) <= float(MaximaVal) and not FoundMin
                    and float(CutoffMinima) < 100):
                FoundMin = True
                NewThresh = float(CutoffMinima)
                NewThreshHt = MinimaListHeight1[
                    MinimaListHeight1.index(CutoffMinima) + 1]
##                        print NewThresh

        TopHeight = self.SortAndFill(self.ModArr, NewThresh, np.nan, 'LE')
        Heightmask = ~np.isnan(TopHeight)
        AvgTopHt = np.average(TopHeight[Heightmask])

        XV1, YV1 = np.meshgrid(self.X_Array, self.Y_Array)
        XV3, YV3 = np.meshgrid(self.X_Array, self.Y_Array)

        TempTest = self.SortAndFill(self.ModArr, NewThresh, 0, 'GE')
        TempTest = self.SortAndFill(TempTest, 0, 255, 'L')

        #use blob detection to find the via locations
        Blob = blob_dog(TempTest,
                        min_sigma=10,
                        max_sigma=150.,
                        exclude_border=True)
        Blob[:, 2] = Blob[:, 2] * np.sqrt(2)
        #y, x, r = Blob

        #Find the via closest to the center of FOV.
        Img_X_Cent = 320
        Img_Y_Cent = 240
        Near_BlobX = 0
        Near_BlobY = 0
        OVal = 0
        Near_R = 0
        Offset_Cent = np.sqrt(np.square(Img_X_Cent) + np.square(Img_Y_Cent))  # distance from origin to image center
        for CentBlob in Blob:
            Offset = np.sqrt(
                np.square(Img_X_Cent - CentBlob[1]) +
                np.square(Img_Y_Cent - CentBlob[0]))
            Off_Dif = np.abs(Offset_Cent - Offset)
            if float(Off_Dif) >= float(OVal) and CentBlob[2] > (5 *
                                                                np.sqrt(2)):
                OVal = Off_Dif
                Near_BlobX = CentBlob[1]
                Near_BlobY = CentBlob[0]
                Near_R = CentBlob[2]

##                #Closest to the center of FOV.
##                centerblob = [(Near_BlobX*self.Meas_Pix) , (Near_BlobY*self.Meas_Pix) ]

        # Get indices for the bounding mask (L=leftEdge, R=rightEdge, T=topEdge, B=bottomEdge)
        R = int((Near_BlobX + 2) + (np.round(Near_R) + 10))
        L = int((Near_BlobX - 2) - (np.round(Near_R) + 10))
        T = int((Near_BlobY + 2) + (np.round(Near_R) + 10))
        B = int((Near_BlobY - 2) - (np.round(Near_R) + 10))

        #create a temp array filled with zero, add selected data and binarize it.
        Segmented = np.zeros(shape=(480, 640))
        Segmented[B:T, L:R] = self.ModArr[B:T, L:R]
        Segmented = self.SortAndFill(Segmented, NewThresh, 0, 'G')
        Segmented = self.SortAndFill(Segmented, 0, 255, 'L')

        #Use edge detection
        Edge = canny(Segmented, sigma=9, low_threshold=1)
        mask = ~np.isnan(Edge)
        XV1 = np.array(XV1[Edge])
        YV1 = np.array(YV1[Edge])

        #LMS fit of top via data
        self.topCircle = PRF_Algo().leastsq_circle(XV1, YV1)
        self.topDiam = self.topCircle[9]
        ##                print("Circle Fit Data: ", self.topCircle)
        print("Top Diameter: ", self.topCircle[9])

        #Prep via bottom for analysis
        TempBot = np.empty(shape=(480, 640))
        TempBot[:] = np.nan
        TempBot[B:T, L:R] = self.ModArr[B:T, L:R]

        TempBot = self.SortAndFill(TempBot, NewThreshHt, np.nan, 'G')
        BotMask = ~np.isnan(TempBot)
        BotHist = np.histogram(TempBot[BotMask], bins=800)

        #A histogram is used to find the frequency distribution. The signal is noisy so a smoothing algorithm is used to find transitions.
        yhat_x = np.array(BotHist[1]).reshape(np.array(
            BotHist[1]).size).tolist()
        yhat = PRF_Algo().savitzky_golay(
            np.array(
                np.array(BotHist[0]).reshape(np.array(
                    BotHist[0]).size).tolist()).astype('float'), 25,
            6)  # window size 25, polynomial order 6
        MaximaListBins = np.array(yhat)[argrelextrema(np.array(yhat),
                                                      np.greater,
                                                      order=6)[0]]
        MaximaListHeight = np.array(yhat_x)[argrelextrema(np.array(yhat),
                                                          np.greater,
                                                          order=6)[0]]
        MaximaVal = float(
            np.array(MaximaListHeight).reshape(
                np.array(MaximaListHeight).size).tolist()[
                    np.array(MaximaListBins).reshape(
                        np.array(MaximaListBins).size).tolist().index(
                            np.max(MaximaListBins))])
        MinimaListBins = np.array(yhat)[argrelextrema(
            np.array(yhat), np.less, order=8)[0]]  #Order was 12 and 8
        MinimaListHeight = np.array(yhat_x)[argrelextrema(np.array(yhat),
                                                          np.less,
                                                          order=8)[0]]

        maxMid = np.max(
            MaximaListBins
        ) * 0.5  #This uses 50% of the max bin height as a threshold
        FoundMin = False
        NewThreshHt = NewThreshHt
        CntLst = 0
        CntLstVal = 0
        for CutoffMinima in MinimaListHeight:
            CntLst = CntLst + 1
            MinimaBinVal = float(
                np.array(MinimaListBins).reshape(
                    np.array(MinimaListBins).size).tolist()[np.array(
                        MinimaListHeight).reshape(
                            np.array(MinimaListHeight).size).tolist().index(
                                CutoffMinima)])
            if float(CutoffMinima) >= float(
                    MaximaVal) and FoundMin == False and MinimaBinVal < maxMid:
                FoundMin = True
                NewThreshHt = float(CutoffMinima)
                MinimaBinVal = float(
                    np.array(MinimaListBins).reshape(
                        np.array(MinimaListBins).size).tolist()
                    [np.array(MinimaListHeight).reshape(
                        np.array(MinimaListHeight).size).tolist().index(
                            CutoffMinima)])
                CntLstVal = CntLst

        Segmented = np.zeros(shape=(480, 640))
        Segmented[B:T, L:R] = self.ModArr[B:T, L:R]

        BotHeight = self.SortAndFill(Segmented, NewThreshHt, np.nan, 'G')
        Heightmask = ~np.isnan(BotHeight)
        AvgBotHt = np.average(BotHeight[Heightmask])
        ##                print "Avg Bot Ht: " + str(AvgBotHt)
        ##                print "Via Depth: " + str(AvgTopHt - AvgBotHt)
        self.viaDepth = AvgTopHt - AvgBotHt

        Segmented = self.SortAndFill(Segmented, NewThreshHt, 0, 'G')
        Segmented = self.SortAndFill(Segmented, 0, 255, 'L')

        tmpSigma = .25
        fillComplete = False
        while fillComplete == False:
            test = canny(Segmented, sigma=tmpSigma)
            test = ndimage.binary_closing(test, iterations=5)
            test = ndimage.binary_fill_holes(test)
            test = ndimage.binary_erosion(test, iterations=5)

            #This logic is used to make sure that the via bottom is filled so false data isn't used.
            if test[int(Near_BlobY)][int(Near_BlobX)] != 0:
                Edge = canny(test)
                mask = ~np.isnan(Edge)
                XA1 = np.array(XV3[Edge])
                YA1 = np.array(YV3[Edge])
                try:
                    self.botCircle = PRF_Algo().leastsq_circle(XA1, YA1)
                    self.botDiam = self.botCircle[9]
                    print("Bottom Diameter: ", self.botCircle[9])
                    fillComplete = True
                except:
                    10 / 0  #Force fail for now
                if np.count_nonzero(Edge == True) < 150:
                    Edge = canny(Segmented, sigma=2, low_threshold=30)

            else:
                tmpSigma = tmpSigma + 0.25
                if tmpSigma >= 5:  #needed or will be in an infinite loop!!!
                    fillComplete = True

        self.Offset = np.sqrt(
            np.square(float(self.topCircle[0]) - float(self.botCircle[0])) +
            np.square(float(self.topCircle[1]) - float(self.botCircle[1])))
コード例 #54
0
PlaneDicom = numpy.zeros(ConstPixelDims, dtype=RefDs.pixel_array.dtype)

# loop through all the DICOM files
for filenameDCM in lstFilesDCM:
    # read the file
    ds = dicom.read_file(filenameDCM)
    # store the raw image data
    PlaneDicom[:, :, lstFilesDCM.index(filenameDCM)] = ds.pixel_array

print('Processing Slices')
fiducials = []

for i in range(ConstPixelDims[2]):
    thirdcoords = i*ConstPixelSpacing[2]*(ConstPixelDims[0]*ConstPixelSpacing[0])/(ConstPixelSpacing[2]*ConstPixelDims[2])
    img = PlaneDicom[:,:,i] > 10000
    img = ndimage.binary_erosion(img)
    coords = feature.corner_peaks(feature.corner_harris(img), min_distance=7)
    coords = numpy.array(coords)
    if coords.size == 0:
        continue
    elif len(coords) == 1:
        fiducials.append([coords[0][0]*ConstPixelSpacing[0], coords[0][1]*ConstPixelSpacing[0], thirdcoords])
        continue
    else:
        thresh = 40
        clusters = hcluster.fclusterdata(coords, thresh, criterion="distance")
    
    j = 1
    while j <= clusters.max():
        c1 = coords[clusters == j, 0].mean()
        c2 = coords[clusters == j, 1].mean()
        # The original snippet is truncated here; a plausible completion records
        # the cluster centroid (as above for single detections) and moves on.
        fiducials.append([c1*ConstPixelSpacing[0], c2*ConstPixelSpacing[0], thirdcoords])
        j += 1
コード例 #55
0
ファイル: analysis.py プロジェクト: migueldvb/mskpy
def find(im, sigma=None, thresh=2, centroid=None, fwhm=2, **kwargs):
    """Find sources in an image.

    Generally designed for point-ish sources.

    Parameters
    ----------
    im : array
      The image to search.
    sigma : float, optional
      The 1-sigma uncertainty in the background, or `None` to estimate
      the uncertainty with the sigma-clipped mean and standard
      deviation of `meanclip`.  If provided, then the image should be
      background subtracted.
    thresh : float, optional
      The detection threshold in sigma.  If a pixel is detected above
      `sigma * thresh`, it is an initial source candidate.
    centroid : function, optional
      The centroiding function, or `None` to use `gcentroid`.  The
      function is passed a subsection of the image.  All other
      parameters are provided via `kwargs`.
    fwhm : int, optional
      A rough estimate of the FWHM of a source, used for binary
      morphology operations.
    **kwargs
      Any keyword arguments for `centroid`.

    Returns
    -------
    cat : ndarray
      A catalog of `(y, x)` source positions.
    f : ndarray
      An array of approximate source fluxes (a background estimate is
      removed if `sigma` is `None`).

    """

    import scipy.ndimage as nd
    from ..util import meanclip

    assert isinstance(fwhm, int), 'FWHM must be integer'

    _im = im.copy()

    if sigma is None:
        stats = meanclip(_im, full_output=True)[:2]
        _im -= stats[0]
        sigma = stats[1]

    if centroid is None:
        centroid = gcentroid

    det = _im > thresh * sigma
    det = nd.binary_erosion(det, iterations=fwhm)  # remove small objects
    det = nd.binary_dilation(det,
                             iterations=fwhm * 2 + 1)  # grow aperture size
    label, n = nd.label(det)

    yx = []
    f = []
    bad = 0
    for i in nd.find_objects(label):
        star = _im[i]

        if not np.isfinite(star.sum()):
            bad += 1
            continue

        try:
            cen = centroid(star, **kwargs)
        except:
            bad += 1
            continue

        if any(np.array(cen) < -0.5):
            bad += 1
            continue

        if any((cen[0] >= star.shape[0] - 0.5, cen[1] >= star.shape[1] - 0.5)):
            bad += 1
            continue

        cen += np.array((i[0].start, i[1].start))

        if not any(np.isfinite(cen)):
            bad += 1
            continue

        yx.append(cen)
        f.append(star.sum())

    print('[find] {} good, {} bad sources'.format(len(yx), bad))
    return np.array(yx), np.array(f)
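
A hedged usage sketch for `find` (the synthetic image, the random seed, and the call parameters are illustrative assumptions; the function is assumed to be importable from this module of migueldvb/mskpy):

import numpy as np

# Synthetic frame: unit Gaussian noise plus two bright Gaussian sources.
rng = np.random.default_rng(0)
yy, xx = np.mgrid[:128, :128]
im = rng.normal(0.0, 1.0, (128, 128))
for y0, x0 in [(40.0, 50.0), (90.0, 100.0)]:
    im += 200.0 * np.exp(-((yy - y0) ** 2 + (xx - x0) ** 2) / (2 * 1.5 ** 2))

cat, flux = find(im, thresh=5, fwhm=2)   # sigma=None: background estimated internally
print(cat)    # approximate (y, x) centroids of the two sources
print(flux)   # their approximate summed fluxes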
コード例 #56
0
                       skiprows=0,
                       names=[
                           name,
                       ],
                       parse_dates=True,
                       infer_datetime_format=True).squeeze()


# ----------------------------------------------------------------------------
## Improve ICE MARGINS mask prior to generating any statistics

# Erode the mask in order to remove dark pixels along the ice sheet margins
# binary_erosion does not work with nans, so convert them to zeros
mtmp = np.where(np.isnan(onset.mask.values), 0, 1)
# Do the erosion
mask_erode = ndimage.binary_erosion(mtmp, iterations=10)
# Convert zeros back to nans
mask_erode = np.where(mask_erode == 0, np.nan, 1)
# Flag where the two masks differ: summing the two indicator masks gives 2
# inside the eroded mask, 1 on the margin removed by the erosion, and 0 outside
mask_erode_diff = np.where(np.isnan(onset.mask), 0, 1) + np.where(
    np.isnan(mask_erode), 0, 1)

## ---------------------------------------------------------------------------
## Dark ice masks

# First create the 600 m masks, 1 per year
masks_annual_dark = onset.dark_dur \
   .where((onset.dark_dur > min_dark_days) & (mask_erode == 1)) \
   .notnull()

# Need to load in a MAR XY layer to get coordinates, the MAR ice mask will do
コード例 #57
0
    def fit_island(self, isl, opts, img, ngmax=None, ffimg=None, ini_gausfit=None):
        """Fit island with a set of 2D gaussians.

        Parameters:
        isl: island
        opts: Opts structure of the image
        beam: beam parameters which are used as an initial guess for
              gaussian shape

        Returns:
        Function returns 2 lists with parameters of good and flagged
        gaussians. Gaussian parameters are updated to be image-relative.

        Note: "fitok" indicates whether fit converged
               and one or more flagged Gaussians indicate
               that significant residuals remain (peak > thr).
        """
        from _cbdsm import MGFunction
        import functions as func
        from const import fwsig

        if ffimg == None:
            fit_image = isl.image-isl.islmean
        else:
            fit_image = isl.image-isl.islmean-ffimg
        fcn = MGFunction(fit_image, isl.mask_active, 1)
        # For fitting, use img.beam instead of img.pixel_beam, as we want
        # to pick up the wavelet beam (img.pixel_beam is not changed for
        # wavelet images, but img.beam is)
        beam = N.array(img.beam2pix(img.beam))
        beam = (beam[0]/fwsig, beam[1]/fwsig, beam[2]+90.0) # change angle from +y-axis to +x-axis and FWHM to sigma

        if abs(beam[0]/beam[1]) < 1.1:
            beam = (1.1*beam[0], beam[1], beam[2])

        thr1 = isl.mean + opts.thresh_isl*isl.rms
        thr2 = isl.mean + img.thresh_pix*isl.rms
        thr0 = thr1
        verbose = opts.verbose_fitting
        g3_only = opts.fix_to_beam
        peak = fcn.find_peak()[0]
        dof = isl.size_active
        shape = isl.shape
        isl_image = isl.image - isl.islmean
        size = isl.size_active/img.pixel_beamarea()*2.0
        gaul = []
        iter = 0
        ng1 = 0
        if ini_gausfit == None:
            ini_gausfit = opts.ini_gausfit

        if ini_gausfit not in ['default', 'simple', 'nobeam']:
            ini_gausfit = 'default'
        if ini_gausfit == 'simple' and ngmax == None:
          ngmax = 25
        if ini_gausfit == 'default' or opts.fix_to_beam:
          gaul, ng1, ngmax = self.inigaus_fbdsm(isl, thr0, beam, img)
        if ini_gausfit == 'nobeam' and not opts.fix_to_beam:
          gaul = self.inigaus_nobeam(isl, thr0, beam, img)
          ng1 = len(gaul); ngmax = ng1+2
        while iter < 5:
            iter += 1
            fitok = self.fit_iter(gaul, ng1, fcn, dof, beam, thr0, iter, ini_gausfit, ngmax, verbose, g3_only)
            gaul, fgaul = self.flag_gaussians(fcn.parameters, opts,
                                              beam, thr0, peak, shape, isl.mask_active,
                                              isl.image, size)
            ng1 = len(gaul)
            if fitok and len(fgaul) == 0:
                break
        if (not fitok or len(gaul) == 0) and ini_gausfit != 'simple':
            # If fits using default or nobeam methods did not work,
            # try using simple instead
            gaul = []
            iter = 0
            ng1 = 0
            ngmax = 25
            while iter < 5:
               iter += 1
               fitok = self.fit_iter(gaul, ng1, fcn, dof, beam, thr0, iter, 'simple', ngmax, verbose, g3_only)
               gaul, fgaul = self.flag_gaussians(fcn.parameters, opts,
                                                 beam, thr0, peak, shape, isl.mask_active,
                                                 isl.image, size)
               ng1 = len(gaul)
               if fitok and len(fgaul) == 0:
                   break
        sm_isl = nd.binary_dilation(isl.mask_active)
        if (not fitok or len(gaul) == 0) and N.sum(~sm_isl) >= img.minpix_isl:
            # If fitting still fails, shrink the island a little and try again
            fcn = MGFunction(fit_image, nd.binary_dilation(isl.mask_active), 1)
            gaul = []
            iter = 0
            ng1 = 0
            ngmax = 25
            while iter < 5:
               iter += 1
               fitok = self.fit_iter(gaul, ng1, fcn, dof, beam, thr0, iter, 'simple', ngmax, verbose, g3_only)
               gaul, fgaul = self.flag_gaussians(fcn.parameters, opts,
                                                 beam, thr0, peak, shape, isl.mask_active,
                                                 isl.image, size)
               ng1 = len(gaul)
               if fitok and len(fgaul) == 0:
                   break
        lg_isl = nd.binary_erosion(isl.mask_active)
        if (not fitok or len(gaul) == 0) and N.sum(~lg_isl) >= img.minpix_isl:
            # If fitting still fails, expand the island a little and try again
            fcn = MGFunction(fit_image, nd.binary_erosion(isl.mask_active), 1)
            gaul = []
            iter = 0
            ng1 = 0
            ngmax = 25
            while iter < 5:
               iter += 1
               fitok = self.fit_iter(gaul, ng1, fcn, dof, beam, thr0, iter, 'simple', ngmax, verbose, g3_only)
               gaul, fgaul = self.flag_gaussians(fcn.parameters, opts,
                                                 beam, thr0, peak, shape, isl.mask_active,
                                                 isl.image, size)
               ng1 = len(gaul)
               if fitok and len(fgaul) == 0:
                   break

        if not fitok or len(gaul) == 0:
            # If all else fails, try to use moment analysis
            inisl = N.where(~isl.mask_active)
            mask_id = N.zeros(isl.image.shape, dtype=N.int32) - 1
            mask_id[inisl] = isl.island_id
            try:
                pixel_beamarea = img.pixel_beamarea()
                mompara = func.momanalmask_gaus(fit_image, mask_id, isl.island_id, pixel_beamarea, True)
                mompara[5] += 90.0
                if not N.isnan(mompara[1]) and not N.isnan(mompara[2]):
                    x1 = N.int(N.floor(mompara[1]))
                    y1 = N.int(N.floor(mompara[2]))
                    xind = slice(x1, x1+2, 1); yind = slice(y1, y1+2, 1)
                    t=(mompara[1]-x1)/(x1+1-x1)
                    u=(mompara[2]-y1)/(y1+1-y1)
                    s_peak=(1.0-t)*(1.0-u)*fit_image[x1,y1]+t*(1.0-u)*fit_image[x1+1,y1]+ \
                         t*u*fit_image[x1+1,y1+1]+(1.0-t)*u*fit_image[x1,y1+1]
                    mompara[0] = s_peak
                    par = [mompara.tolist()]
                    par[3] /= fwsig
                    par[4] /= fwsig
                    gaul, fgaul = self.flag_gaussians(par, opts,
                                                      beam, thr0, peak, shape, isl.mask_active,
                                                      isl.image, size)
            except:
                pass

        ### return whatever we got
        isl.mg_fcn = fcn
        gaul  = [self.fixup_gaussian(isl, g) for g in gaul]
        fgaul = [(flag, self.fixup_gaussian(isl, g))
                                       for flag, g in fgaul]

        if verbose:
            print 'Number of good Gaussians: %i' % (len(gaul),)
            print 'Number of flagged Gaussians: %i' % (len(fgaul),)
        return gaul, fgaul
コード例 #58
0
ファイル: utils_backup.py プロジェクト: yglkings/PlaneNet
for index_1, value_1 in enumerate(values_1):
    if counts_1[index_1] < planeAreaThreshold or value_1 == 360:
        continue
    mask_1 = valueMaps[0] == value_1

    values_2, counts_2 = np.unique(valueMaps[1][mask_1], return_counts=True)
    for index_2, value_2 in enumerate(values_2):
        if counts_2[index_2] < planeAreaThreshold or value_2 == 360:
            continue
        mask_2 = mask_1 * (valueMaps[1] == value_2)
        values_3, counts_3 = np.unique(valueMaps[2][mask_2], return_counts=True)
        for index_3, value_3 in enumerate(values_3):
            if counts_3[index_3] < planeAreaThreshold or value_3 == 360:
                continue
            mask_3 = mask_2 * (valueMaps[2] == value_3)
            mask_3 = ndimage.binary_erosion(mask_3).astype(mask_3.dtype)
            if mask_3.sum() < planeAreaThreshold:
                continue

            # regionX = X[mask_3]
            # regionY = Y[mask_3]
            # regionZ = Y[mask_3]

            normal = np.array([normals[:, :, 0][mask_3].mean(), normals[:, :, 1][mask_3].mean(), normals[:, :, 2][mask_3].mean()])
            normal /= np.linalg.norm(normal, 2)
            dPlane = (-(normal[0] * X + normal[1] * Y + normal[2] * Z))[mask_3].mean()

            globalMask += mask_3
            segmentations.append(mask_3)
            azimuth = np.arctan2(-normal[1], normal[0])
            altitude = np.arctan2(np.sign(-normal[1]) * np.linalg.norm(normal[:2]), normal[2])
コード例 #59
0
def shapeawewm(mask, sigma):
    # Shape-aware weight map: class-balance weights plus contour terms built from
    # distances to the object skeleton and to the convex-hull skeleton.
    mask = mask.astype('float')
    wc = balancewm(mask)
    binimage = (mask == 1).astype('float')
    diststeps = 10000
    
    cells,cellscount = ndimage.measurements.label(binimage)
    chull = np.zeros_like(mask)
    # convex hull of each object
    for ci in range(1,cellscount+1):
        I = (cells==ci).astype('float')
        R = convex_hull_image(I) - I
        R = ndimage.binary_opening(R,structure=np.ones((3,3))).astype('float')
        R = ndimage.binary_dilation(R,structure=np.ones((3,3))).astype('float')
        chull += R

    # distance transform to object skeleton
    skcells=thin(binimage)
    dtcells=ndimage.distance_transform_edt(skcells!=1)
    border=binimage-ndimage.binary_erosion(input=(binimage),structure=np.ones((3,3)),iterations=1).astype('float')
    tau=np.max(dtcells[border==1])+0.1
    dtcells=np.abs(1-dtcells*border/tau)*border

    # distance transform to convex hull skeleton
    skchull=thin(chull)
    dtchull=ndimage.distance_transform_edt(skchull!=1)
    border=chull-ndimage.binary_erosion(input=(chull),structure=np.ones((3,3)),iterations=1).astype('float')
    dtchull=np.abs(1-dtchull*border/tau)*border

    # maximum border
    saw=np.concatenate((dtcells[:,:,np.newaxis],dtchull[:,:,np.newaxis]),2)
    saw = np.max(saw,2)
    saw /= np.max(saw)

    # propagate contour values inside the objects
    prop=binimage+chull
    prop[prop>1]=1
    prop=ndimage.binary_erosion(input=(prop),structure=np.ones((3,3)),iterations=1).astype('float')
    current_saw=saw

    for i in range(20):
        tprop=ndimage.binary_erosion(input=(prop),structure=np.ones((3,3)),iterations=1).astype('float')
        border=prop-tprop
        prop=tprop

        x1,y1 = np.where(border!=0)
        x2,y2 = np.where(current_saw!=0)

        if x1.size==0 or x2.size==0: break

        tsaw=np.zeros_like(saw)
        for a in range(0,x1.size,diststeps):
            minl=np.min(np.array([diststeps+a-1,x1.size-1])) +1
            dis=cdist(np.vstack((x2,y2)).transpose(), np.vstack((x1[a:minl],y1[a:minl])).transpose())
            ind=np.argmin(dis,axis=0)
            tsaw[x1[a:minl],y1[a:minl]]=current_saw[x2[ind],y2[ind]]

        saw=np.concatenate((saw[:,:,np.newaxis],tsaw[:,:,np.newaxis]),2)
        saw = np.max(saw,2)
        saw=ndimage.filters.gaussian_filter(saw,sigma)
        saw/=np.max(saw)
        current_saw=saw*(border!=0).astype('float')

    saw = saw + wc +1
    return saw
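
A hedged usage sketch (the toy mask and sigma are illustrative assumptions; `balancewm` and the scipy/skimage helpers used above are assumed to be importable in the same module):

import numpy as np

# Toy binary mask with a single square object.
mask = np.zeros((64, 64), dtype=np.uint8)
mask[20:40, 20:40] = 1

# Per-pixel loss weights: class-balance term plus the shape-aware contour term.
weights = shapeawewm(mask, sigma=2)
print(weights.shape, float(weights.min()), float(weights.max()))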
コード例 #60
0
def binary_erosion(bImg, iterations=1):
    """Thin wrapper around ndimage.binary_erosion with a configurable iteration count."""
    bImg = ndimage.binary_erosion(bImg, iterations=iterations)
    return bImg
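
A minimal usage sketch for this wrapper (the toy array and the imports are assumptions added for illustration):

import numpy as np
from scipy import ndimage

# A 5x5 solid block keeps only its 3x3 interior after one erosion pass.
bImg = np.zeros((9, 9), dtype=bool)
bImg[2:7, 2:7] = True

eroded = binary_erosion(bImg, iterations=1)
print(int(eroded.sum()))   # 9: only interior pixels survive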