def planning(self):
     n = self.n
     word_size = self.word_size
     radius = self.radius
     center_x = self.centroid_x / word_size * n
     center_y = self.centroid_y / word_size * n
     grid_world = np.ones((n + 1, n + 1))
     mask = np.full(np.shape(grid_world), False, dtype=bool)
     for i in range(n + 1):
         for j in range(n + 1):
             if np.sqrt((i - center_x)**2 +
                        (j - center_y)**2) <= radius / word_size * n:
                 mask[i, j] = True
     grid_world_A = np.ma.MaskedArray(np.ones((n + 1, n + 1)), mask)
     grid_world_A[0, 0] = 0
     grid_world_B = np.ma.MaskedArray(np.ones((n + 1, n + 1)), mask)
     grid_world_B[n, n] = 0
     self.dist_map_A = skfmm.travel_time(grid_world_A,
                                         np.ones_like(grid_world),
                                         dx=word_size / n)
     self.shortest_distance = self.dist_map_A[n, n]
     self.dist_map_B = skfmm.travel_time(grid_world_B,
                                         np.ones_like(grid_world),
                                         dx=word_size / n)
     self.shortest_path()
def drone_saves_life(centroid_x, centroid_y, radius, num_grid, image_path):
    # start_time = time.time()
    # Create a mask marking the storm area to be excluded from the shortest path
    grid_map = np.ones((num_grid + 1, num_grid + 1))
    mask = np.full(np.shape(grid_map), False, dtype=bool)
    coordinate_x = centroid_x / map_length * num_grid
    coordinate_y = centroid_y / map_length * num_grid
    for i in range(num_grid + 1):
        for j in range(num_grid + 1):
            if radius / map_length * num_grid >= np.sqrt(
                (i - coordinate_x)**2 + (j - coordinate_y)**2):
                mask[i, j] = True
    # Create two grid maps based on the positions of point A and point B,
    # and nullify the grid points that have been marked as the storm area
    grid_map_A = np.ma.MaskedArray(np.ones((num_grid + 1, num_grid + 1)), mask)
    grid_map_B = np.ma.MaskedArray(np.ones((num_grid + 1, num_grid + 1)), mask)
    # Mark the start and end points:
    grid_map_A[0, 0] = 0
    grid_map_B[num_grid, num_grid] = 0
    # Then we can use skfmm to solve for the travel time
    dist_map_A = skfmm.travel_time(grid_map_A,
                                   np.ones_like(grid_map),
                                   dx=map_length / num_grid)
    dist_map_B = skfmm.travel_time(grid_map_B,
                                   np.ones_like(grid_map),
                                   dx=map_length / num_grid)
    shortest_distance = dist_map_A[num_grid, num_grid]
    # print('The shortest distance is :' + str(shortest_distance))
    # We could obtain the shortest path
    shortest_path = finding_shortest_path(dist_map_A, dist_map_B, num_grid)
    path_visualization_and_save(centroid_x, centroid_y, radius, shortest_path,
                                image_path)
    # computation_time = time.time() - start_time
    # print("The compution time is (unit:sec) :" + str(computation_time))
    return shortest_distance
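
Note that map_length, finding_shortest_path, and path_visualization_and_save are module-level names not shown in this snippet. Below is a minimal sketch of how the path could be recovered from the travel-time field by greedy steepest descent; recover_path is a hypothetical helper, not the project's own routine.

import numpy as np

def recover_path(dist_map, start, goal):
    """Greedy steepest-descent backtracking on a fast-marching travel-time grid."""
    filled = np.ma.filled(dist_map, np.inf)  # treat masked (storm) cells as unreachable
    path = [goal]
    current = goal
    while current != start:
        i, j = current
        neighbors = [(i + di, j + dj)
                     for di in (-1, 0, 1) for dj in (-1, 0, 1)
                     if (di, dj) != (0, 0)
                     and 0 <= i + di < filled.shape[0]
                     and 0 <= j + dj < filled.shape[1]]
        # step to the neighbor with the smallest travel time from the start point
        current = min(neighbors, key=lambda c: filled[c])
        path.append(current)
    return path[::-1]

# e.g. recover_path(dist_map_A, (0, 0), (num_grid, num_grid))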
Example No. 3
def test8():
    """ shape mismatch test """
    try:
        travel_time([-1, 1], [2])
    except Exception as ex:
        assert ValueError.__name__ == ex.__class__.__name__
        assert ex.args[0] == "phi and speed must have the same shape"
    else:
        raise Exception("no exception raised")
Example No. 4
def test8_2():
    """ speed wrong type test """
    try:
        travel_time([0, 0, 1, 1], 2)
    except Exception as ex:
        assert ValueError.__name__ == ex.__class__.__name__
        assert ex.args[0] == "speed must be a 1D to 12-D array of doubles"
    else:
        raise Exception("no exception raised")
Example No. 5
def test9():
    """ dx mismatch test """
    try:
        travel_time([-1, 1], [2, 2], [2, 2, 2, 2])
    except Exception as ex:
        assert ValueError.__name__ == ex.__class__.__name__
        assert ex.args[0] == "dx must be of length len(phi.shape)"
    else:
        raise Exception("no exception raised")
Example No. 6
def test_corner_case1():
    """
    Test catching speeds which are too small. Speeds less than the
    machine epsilon are masked off to avoid an overflow
    """
    t = travel_time([-1, -1, 0, 1, 1], [1, 1, 1, 1, 0])
    assert isinstance(t, np.ma.MaskedArray)
    np.testing.assert_array_equal(t.data[:-1], [2, 1, 0, 1])
    np.testing.assert_array_equal(t.mask, [False, False, False, False, True])

    t2 = travel_time([-1, -1, 0, 1, 1], [1, 1, 1, 1, 1e-300])
    np.testing.assert_array_equal(t, t2)
Example No. 7
def find_cells(nuclei, mask, small_holes=100, remove_boundary_cells=True):
    """Expand labeled nuclei to cells, constrained to where mask is >0. 
    Mask is divvied up by  
    """
    import skfmm

    # voronoi
    phi = (nuclei > 0) - 0.5
    speed = mask + 0.1
    time = skfmm.travel_time(phi, speed)
    time[nuclei > 0] = 0

    cells = skimage.morphology.watershed(time, nuclei, mask=mask)

    # remove cells touching the boundary
    if remove_boundary_cells:
        cut = np.concatenate(
            [cells[0, :], cells[-1, :], cells[:, 0], cells[:, -1]])
        cells.flat[np.in1d(cells, np.unique(cut))] = 0

    # assign small holes to neighboring cell with most contact
    holes = skimage.measure.label(cells == 0)
    regions = skimage.measure.regionprops(
        holes, intensity_image=skimage.morphology.dilation(cells))

    # for reg in regions:
    #     if reg.area < small_holes:
    #         vals = reg.intensity_image[reg.intensity_image>0]
    #         cells[holes == reg.label] = scipy.stats.mode(vals)[0][0]

    return cells.astype(np.uint16)
Example No. 8
    def calc_fmm(self, ped, wait=1):
        """

        :param ped:
        :param wait:
        :return:
        """
        p, speed = ped
        if self.fmm_distance.size == 0:
            t_grid = np.array(np.ones_like(self.grid), dtype=np.double)
            mask = np.array(0 * np.ones_like(self.grid), dtype=bool)
            t_grid[self.target.row, self.target.col] = -1
            for i in self.obstacles:
                mask[i.row][i.col] = True
            phi = np.ma.MaskedArray(t_grid, mask)
            self.fmm_distance = skfmm.distance(phi)
            self.grid[p[0]][p[1]].initial_predicted_time = self.fmm_distance[p[0]][p[1]] / self.speed[p[0]][p[1]]
            self.tt = skfmm.travel_time(phi, self.speed, self.dx)
            for z in self.pedestrian_fmm:
                self.grid[z[0][0]][z[0][1]].initial_predicted_time = self.fmm_distance[z[0][0]][z[0][1]] / z[1]
        for i in self.obstacles:
            self.fmm_distance[i.row][i.col] = sys.maxsize
        d = np.copy(self.fmm_distance)
        t = np.copy(self.tt)
        for j in self.pedestrian:
            d[j.row, j.col] *= ((wait * (1 + (1 / (d[j.row, j.col]) * 10))) + 1 / d[j.row, j.col])
        return self.calc_fmm_path(d, t, p, speed)
def compute_distance_fmm(geometry: Geometry,
                         grid: Grid,
                         start,
                         mask,
                         speed=None):
    """
    Compute the distance to start in geometry.
    :param geometry: Geometry to use
    :param grid: Grid to use
    :param start: start cells
    :param mask: masked cells
    :param speed: speed field
    :return: distance to start by ffm
    """
    inside = grid.inside_cells

    phi = inside - 5 * start
    phi = np.ma.MaskedArray(phi, mask)

    if speed is None:
        distance = skfmm.distance(phi, dx=grid.cellsize)
    else:
        distance = skfmm.travel_time(phi, speed, dx=grid.cellsize)

    return distance
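
For reference, here is the same masked-phi pattern with plain numpy arrays instead of the project's Geometry/Grid types; this is a minimal sketch, and the array sizes and obstacle are illustrative.

import numpy as np
import skfmm

inside = np.ones((50, 50))                 # stand-in for grid.inside_cells
start = np.zeros((50, 50))
start[25, 25] = 1                          # a single start cell
mask = np.zeros((50, 50), dtype=bool)
mask[10:40, 30] = True                     # a wall of masked cells

phi = np.ma.MaskedArray(inside - 5 * start, mask)  # negative only at the start cell
distance = skfmm.distance(phi, dx=1.0)             # geometric distance to the start

speed = np.full((50, 50), 1.5)
time_to_start = skfmm.travel_time(phi, speed, dx=1.0)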
Example No. 10
def erode_bacteria(space, dt, log, biomass_containers, rate, topgrid=0.10):
    top = int(space.shape[0] * (1 - topgrid))
    biomass_containers = to_list(biomass_containers)
    biomass = to_grid(space, biomass_containers, "mass")

    zeros = biofilm_edge(space, biomass)

    erosion_force = np.square(space.meshgrid[0] + space.dl) * rate / space.dl
    time_to_erode = np.array(
        skfmm.travel_time(zeros,
                          erosion_force,
                          dx=1,
                          periodic=space.periodic,
                          order=1))
    # TODO: use higher order. skfmm has a bug, #18

    time_to_erode[top:] = 0  # instantly erode at top
    zeros = time_to_erode == 0
    if np.any(zeros):
        time_to_erode[zeros] = dt / 1e10
    shrunken_volume = np.exp(-dt / time_to_erode).reshape(-1)
    if log is not None:
        original_mass = np.zeros(space.shape)
    for container in biomass_containers:
        if log is not None:
            original_mass[:] = to_grid(space, container, "mass")
        container.mass *= shrunken_volume[container.location]
        container.multi_remove(container.mass < (container.division_mass / 3))

        if log is not None:
            mass = original_mass - to_grid(space, container, "mass")
            log["eroded_{}".format(container.name)] = mass.sum()
Example No. 11
    def __call__(self, x, y, ds=None, validate=True):
        """
        The origin is in the upper left corner.
        """
        if validate is True:
            assert (is_non_neg_number(x) and x <= self._vd.w)
            assert (is_non_neg_number(y) and y <= self._vd.l)

        distances = space2.distance(x, y, self._wgrid, self._lgrid)

        phi = np.ones(self._vs.shape)
        phi[np.unravel_index(distances.argmin(), distances.shape)] = 0

        tts = skfmm.travel_time(phi, self._vs, dx=self._d)
        tts = TravelTimes(self._vd.w, self._vd.l, tts)

        if ds is None:
            xs, ys = self._vd.xs, self._vd.ys
        else:
            xs, ys = ds.xs, ds.ys
        tts = tts.interpolate(xs, ys)

        tts[tts < 0] = 0

        return TravelTimes(self._vd.w, self._vd.l, tts)
Example No. 12
    def skeleton(self, obj):
        obj = np.array(obj, dtype=bool)
        im_2d = True if obj.ndim == 2 else False

        boundary_dist = skfmm.distance(obj)
        source_point = np.unravel_index(np.argmax(boundary_dist),
                                        boundary_dist.shape)
        max_dist_ = boundary_dist[source_point]
        speed_im = (boundary_dist / max_dist_)**self.speed_power
        del boundary_dist

        flag = True
        length_threshold = 0.0
        obj = np.ones(obj.shape, dtype=np.float64)
        obj[source_point] = 0.0
        skeleton_segments = []
        source_point = np.expand_dims(source_point, axis=0)
        while True:
            dist = skfmm.travel_time(obj, speed_im)
            end_point = np.unravel_index(np.ma.argmax(dist), dist.shape)
            max_dist = dist[end_point]
            dist = np.ma.filled(dist, max_dist)
            end_point = np.expand_dims(end_point, axis=0)

            if self.simple_path:
                shortest_path = self._discrete_shortest_path(
                    dist, end_point, im_2d)
            else:
                shortest_path = self._Euler_shortest_path(
                    dist, end_point, source_point, self.Euler_step_size, im_2d)

            path_length = self._get_line_length(shortest_path)
            if self.verbose:
                print(path_length)

            if flag:
                depth_threshold = self.depth_th * max_dist_

                longest_line_threshold = np.inf
                if self.length_th:
                    longest_line_threshold = self.length_th * path_length

                length_threshold = min(depth_threshold, longest_line_threshold)
                flag = False

            if path_length <= length_threshold: break

            source_point = np.append(source_point, shortest_path, axis=0)
            skeleton_segments.append(shortest_path)

            shortest_path = np.floor(shortest_path).astype(int)
            obj[tuple(shortest_path.T)] = 0

        final_skeleton = None
        if len(skeleton_segments) != 0:
            final_skeleton = self._organize_skeleton(skeleton_segments,
                                                     length_threshold, im_2d)

        return final_skeleton
Example No. 13
def test6():
    """ test default dx """
    N = 50
    X, Y = np.meshgrid(np.linspace(-1, 1, N), np.linspace(-1, 1, N))
    r = 0.5
    phi = (X)**2 + (Y)**2 - r**2
    speed = np.ones_like(phi) * 2
    t = travel_time(phi, speed, self_test=True)
Example No. 14
def test_simple_case():
    """ more simple testing -- three point test """
    np.testing.assert_array_equal(distance([-1, 0, 1]), [-1, 0, 1])
    np.testing.assert_array_equal(distance([-1, 0, 1], dx=[2]), [-2, 0, 2])
    np.testing.assert_array_equal(distance([-1, 0, 1], dx=2), [-2, 0, 2])
    np.testing.assert_array_equal(distance([-1, 0, 1], dx=2.0), [-2, 0, 2])

    np.testing.assert_array_equal(travel_time([1, 0, -1], [1, 1, 1]),
                                  [1, 0, 1])
    np.testing.assert_array_equal(travel_time([-1, 0, 1], [1, 1, 1]),
                                  [1, 0, 1])
    np.testing.assert_array_equal(travel_time([1, 0, -1], [1, 1, 1], dx=2),
                                  [2, 0, 2])
    np.testing.assert_array_equal(travel_time([1, 0, -1], [1, 1, 1], dx=[2]),
                                  [2, 0, 2])
    np.testing.assert_array_equal(travel_time([1, 0, -1], [1, 1, 1], dx=2.0),
                                  [2, 0, 2])
Example No. 15
    def run_travel_times(self, max_time, num_intervals):
        # create time intervals to recalculate obstacles
        times_to_run = np.linspace(0, max_time, num_intervals + 1)
        # Get rid of zero
        times_to_run = times_to_run[1:]

        # Include a dummy travel time background
        cur_travel_times = np.zeros(
            (self.num_pops + 1, self.lattice_mesh.shape[1],
             self.lattice_mesh.shape[2]),
            dtype=np.double)

        cur_travel_times[-1, :, :] = 10 * max_time
        self.all_obstacles = np.ones_like(self.lattice_mesh,
                                          dtype=bool) * False

        expansion_history = None
        for cur_time in times_to_run:
            print(cur_time)

            self.before_travel_iteration(
                cur_time, expansion_history)  # Required for subclassing

            for i in range(self.num_pops):
                cur_lattice = self.lattice_mesh[i]
                cur_obstacle = self.all_obstacles[i]
                # Mask the lattice by the obstacles...other strains
                cur_lattice = np.ma.MaskedArray(cur_lattice, cur_obstacle)

                cur_speed = self.speed_mesh[i]

                t = fmm.travel_time(cur_lattice,
                                    cur_speed,
                                    float(self.dx),
                                    narrow=cur_time)

                cur_travel_times[i, :, :] = t

            # Based on the travel times, create obstacle masks for each strain
            non_background = cur_travel_times[0:self.num_pops, :, :]
            non_background[non_background > cur_time] = np.inf
            # If cur_travel_times = 0, you are in an obstacle
            non_background[non_background == 0] = np.inf

            expansion_history = np.nanargmin(cur_travel_times, axis=0)

            for i in range(
                    self.num_pops):  # Loop over strains, locate obstacles
                # Make sure nan's do not interfere with future
                not_current_strain = (expansion_history != i)
                not_background = (expansion_history != self.num_pops
                                  )  # Dummy background strain

                self.all_obstacles[
                    i, :, :] = not_current_strain & not_background

        self.travel_times = cur_travel_times[0:self.num_pops, :, :]
Example No. 16
def _eikonal_fmm(grid_xyz, node_spacing, velocity_grid, station_xyz):
    """
    Calculates the traveltime lookup tables by solving the eikonal equation
    using an implementation of the fast-marching algorithm.

    Traveltime calculation can only be performed between grid nodes: the
    station location is therefore taken as the closest grid node. Note that
    for large node spacings this may cause a modest error in the calculated
    traveltimes.

    .. warning:: Requires the scikit-fmm python package.

    Parameters
    ----------
    grid_xyz : array-like
        [X, Y, Z] coordinates of each node.
    node_spacing : array-like
        [X, Y, Z] distances between each node.
    velocity_grid : array-like
        Contains the speed of interface propagation at each point in the
        domain.
    station_xyz : array-like
        Station location (in grid xyz).

    Returns
    -------
    traveltimes : array-like, same shape as grid_xyz
        Contains the traveltime from the zero contour (zero level set) of phi
        to each point in the array given the scalar velocity field speed. If
        the input array speed has values less than or equal to zero the return
        value will be a masked array.

    Raises
    ------
    ImportError
        If scikit-fmm is not installed.

    """

    try:
        import skfmm
    except ImportError:
        raise ImportError("Unable to import skfmm - you need to install "
                          "scikit-fmm to use this method.\nSee the "
                          "installation instructions in the documentation"
                          "for more details.")

    phi = -np.ones(grid_xyz[0].shape)
    # Find closest grid node to true station location
    indx = np.argmin(
        abs(grid_xyz[0] - station_xyz[0]) + abs(grid_xyz[1] - station_xyz[1]) +
        abs(grid_xyz[2] - station_xyz[2]))
    phi[np.unravel_index(indx, grid_xyz[0].shape)] = 1.0

    return skfmm.travel_time(phi, velocity_grid, dx=node_spacing)
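
A minimal usage sketch with a uniform velocity model; the grid, spacing, and station location below are illustrative, and _eikonal_fmm is assumed to be importable from the module above.

import numpy as np

# 3-D node coordinates with 1 km spacing in each direction
x, y, z = np.meshgrid(np.arange(0.0, 50.0), np.arange(0.0, 50.0),
                      np.arange(0.0, 30.0), indexing="ij")
grid_xyz = [x, y, z]
node_spacing = [1.0, 1.0, 1.0]

velocity_grid = np.full(x.shape, 5.0)   # constant 5 km/s velocity model
station_xyz = [12.3, 40.7, 0.0]         # snapped to the nearest grid node internally

traveltimes = _eikonal_fmm(grid_xyz, node_spacing, velocity_grid, station_xyz)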
Example No. 17
def Fast_Marching(fastMarchingStartPointListFMM, basinIndexArray, flowArray,
                  reciprocalLocalCostArray):
    # Fast marching
    print('Performing fast marching')
    # Do fast marching for each sub basin
    geodesicDistanceArray = np.zeros((basinIndexArray.shape))
    geodesicDistanceArray[geodesicDistanceArray == 0] = np.Inf
    for i in range(0, len(fastMarchingStartPointListFMM[0])):
        basinIndexList = basinIndexArray[fastMarchingStartPointListFMM[0, i],
                                         fastMarchingStartPointListFMM[1, i]]
        print('basin Index: {}'.format(basinIndexList))
        print('start point : {}'.format(fastMarchingStartPointListFMM[:, i]))
        maskedBasin = np.zeros((basinIndexArray.shape))
        maskedBasin[basinIndexArray == basinIndexList] = 1
        maskedBasinFAC = np.zeros((basinIndexArray.shape))
        maskedBasinFAC[basinIndexArray == basinIndexList] = \
            flowArray[basinIndexArray == basinIndexList]
        # call the fast marching here
        phi = np.nan * np.ones((reciprocalLocalCostArray.shape))
        speed = np.ones((reciprocalLocalCostArray.shape)) * np.nan
        phi[maskedBasinFAC != 0] = 1
        speed[maskedBasinFAC != 0] = reciprocalLocalCostArray[maskedBasinFAC != 0]
        phi[fastMarchingStartPointListFMM[0, i],
            fastMarchingStartPointListFMM[1, i]] = -1
        try:
            travelTimearray = skfmm.travel_time(phi, speed, dx=1)
        except IOError as e:
            print('Error in calculating skfmm travel time')
            print('Error in catchment: {}'.format(basinIndexList))
            print("I/O error({0}): {1}".format(e.errno, e.strerror))
            # setting travel time to empty array
            travelTimearray = np.nan * np.zeros((reciprocalLocalCostArray.shape))
            if defaults.doPlot == 1:
                pyg_plt.raster_point_plot(speed, fastMarchingStartPointListFMM[:, i],
                                          'speed basin Index' + str(basinIndexList))
                # plt.contour(speed,cmap=cm.coolwarm)
                pyg_plt.raster_point_plot(phi, fastMarchingStartPointListFMM[:, i],
                                          'phi basin Index' + str(basinIndexList))
        except ValueError:
            print('Error in calculating skfmm travel time')
            print('Error in catchment: {}'.format(basinIndexList))
            print("Oops!  That was no valid number.  Try again...")
        geodesicDistanceArray[maskedBasin == 1] = travelTimearray[maskedBasin == 1]
    geodesicDistanceArray[geodesicDistanceArray == np.Inf] = np.nan
    # Plot the geodesic array
    if defaults.doPlot == 1:
        pyg_plt.geodesic_contour_plot(geodesicDistanceArray,
                                      'Geodesic distance array (travel time)')
    # Writing the geodesic distance array
    outfilepath = parameters.geonetResultsDir
    demName = parameters.demFileName.split('.')[0]
    outfilename = demName + '_geodesicDistance.tif'
    pyg_rio.write_geotif_generic(geodesicDistanceArray, outfilepath, outfilename)
    return geodesicDistanceArray
Example No. 18
def test3():
    """test eikonal solution"""
    N = 50
    X, Y = np.meshgrid(np.linspace(-1, 1, N), np.linspace(-1, 1, N))
    r = 0.5
    dx = 2. / (N - 1)
    phi = (X)**2 + (Y)**2 - r**2
    speed = np.ones_like(phi) * 2
    t = travel_time(phi, speed, dx)
    exact = 0.5 * np.abs(np.sqrt(X**2 + Y**2) - 0.5)

    np.testing.assert_allclose(t, exact, atol=dx)
Example No. 19
    def _fast_marching(self):
        speed = self._make_speed()

        # # Fast Marching
        if self._quality:
            # if not self._silent: print('--MSFM...')
            self._t = msfm.run(speed, self._bimg.copy().astype('int64'), self._soma.centroid, True, True)
        else:
            # if not self._silent: print('--FM...')
            marchmap = np.ones(self._bimg.shape)
            marchmap[self._soma.centroid[0], self._soma.centroid[1], self._soma.centroid[2]] = -1
            self._t = skfmm.travel_time(marchmap, speed, dx=5e-3)
def skeleton(Ax):
    
    boundary_dist=skfmm.distance(Ax)
    
    source_point=np.unravel_index(np.argmax(boundary_dist), boundary_dist.shape)
    maxD=boundary_dist[source_point]
    
    speed_im=(boundary_dist/maxD)**1.5
    
    Ax=np.ones(Ax.shape)
    Ax[source_point]=0
    
    flag=True
    skeleton_segments=[]
    source_point = np.array(source_point,ndmin=2)
    while True:
        
        D=skfmm.travel_time(Ax,speed_im)
        end_point=np.unravel_index(np.ma.argmax(D), D.shape)
        max_dist=D[end_point]
        D=np.ma.MaskedArray.filled(D,max_dist)
        
        end_point = np.array(end_point,ndmin=2)
        shortest_line=euler_shortest_path(D,source_point,end_point,step_size=0.1)
        #shortest_line = discrete_shortest_path(D,end_point)
            
        line_length=get_line_length(shortest_line)
        print(line_length)

        if flag:
            length_threshold=min(40*maxD, 0.18*line_length)
            flag=False
        
        if(line_length<=length_threshold):
            break
        
        
        source_point=np.append(source_point,shortest_line,axis=0)
        
        skeleton_segments.append(shortest_line)
        
        shortest_line=np.floor(shortest_line).astype(int)
        
        for i in shortest_line:
            Ax[tuple(i)]=0
    
    if len(skeleton_segments)!=0:
        final_skeleton=organize_skeleton(skeleton_segments,length_threshold)
    else:
        final_skeleton=[]
    
    return final_skeleton
Example No. 21
def eikonal(x, y, z, V, S):
    ''' 
        Travel-Time formulation using a simple eikonal method.
        Requires the skfmm python package.

        Inputs:
            x - np.array of points in X-direction
            y - np.array of points in Y-direction
            z - np.array of points in Z-direction
            V - np.array of velocity in Z,Y,X for P- and S-wave
            S - Definition of the station location in grid

        Outputs:
            t - Travel-time numpy array 

    '''
    t = []

    dx = float(x[1] - x[0])
    phi = -1 * np.ones_like(V)

    if S.ndim == 1:
        S = np.array([S])
        ns = 1
        ns, ndim = S.shape
    else:
        ns, ndim = S.shape

    for i in range(ns):
        # get location of source
        #print(i)
        ix = np.abs(x - S[i, 0]).argmin()
        if ndim > 1:
            iy = np.abs(y - S[i, 1]).argmin()
        if ndim > 2:
            iz = np.abs(z - S[i, 2]).argmin()

        if ndim > 2:
            phi[iy, ix, iz] = 1
        elif ndim > 1:
            phi[iy, ix] = 1
        else:
            phi[ix] = 1

        t_comp = skfmm.travel_time(phi, V, dx)

        t.append(t_comp)

    return t
Example No. 22
    def run_travel_times(self, max_time, num_intervals):
        # create time intervals to recalculate obstacles
        times_to_run = np.linspace(0, max_time, num_intervals + 1)
        # Get rid of zero
        times_to_run = times_to_run[1:]

        # Include a dummy travel time background
        cur_travel_times = np.zeros((self.num_pops + 1,
                                     self.lattice_mesh.shape[1],
                                     self.lattice_mesh.shape[2]),
                                    dtype=np.double)

        cur_travel_times[-1, :, :] = 10*max_time
        self.all_obstacles = np.ones_like(self.lattice_mesh, dtype=bool) * False

        expansion_history = None
        for cur_time in times_to_run:
            print(cur_time)

            self.before_travel_iteration(cur_time, expansion_history) # Required for subclassing

            for i in range(self.num_pops):
                cur_lattice = self.lattice_mesh[i]
                cur_obstacle = self.all_obstacles[i]
                # Mask the lattice by the obstacles...other strains
                cur_lattice = np.ma.MaskedArray(cur_lattice, cur_obstacle)

                cur_speed = self.speed_mesh[i]

                t = fmm.travel_time(cur_lattice, cur_speed, float(self.dx), narrow=cur_time)

                cur_travel_times[i, :, :] = t

            # Based on the travel times, create obstacle masks for each strain
            non_background = cur_travel_times[0:self.num_pops, : , :]
            non_background[non_background > cur_time] = np.inf
            # If cur_travel_times = 0, you are in an obstacle
            non_background[non_background == 0] = np.inf

            expansion_history = np.nanargmin(cur_travel_times, axis=0)

            for i in range(self.num_pops): # Loop over strains, locate obstacles
                # Make sure nan's do not interfere with future
                not_current_strain = (expansion_history != i)
                not_background = (expansion_history != self.num_pops) # Dummy background strain

                self.all_obstacles[i, :, :] = not_current_strain & not_background

        self.travel_times = cur_travel_times[0:self.num_pops, :, :]
Example No. 23
    def _compute_travel_time(speed_data: np.ndarray, source_point: PointType,
                             parameters: Parameters):
        # define the zero contour and set the wave source
        phi = np.ones_like(speed_data)
        phi[source_point] = -1

        try:
            travel_time = fmm.travel_time(phi,
                                          speed_data,
                                          dx=parameters.travel_time_spacing,
                                          order=parameters.travel_time_order)
        except Exception as err:  # pragma: no cover
            raise ComputeTravelTimeError from err

        return travel_time, phi
Example No. 24
    def _fast_marching(self):
        speed = self._make_speed()

        # # Fast Marching
        if self._quality:
            # if not self._silent: print('--MSFM...')
            self._t = msfm.run(speed,
                               self._bimg.copy().astype('int64'),
                               self._soma.centroid, True, True)
        else:
            # if not self._silent: print('--FM...')
            marchmap = np.ones(self._bimg.shape)
            marchmap[self._soma.centroid[0], self._soma.centroid[1],
                     self._soma.centroid[2]] = -1
            self._t = skfmm.travel_time(marchmap, speed, dx=5e-3)
Example No. 25
    def test_fmm(self):
        q = self.qs
        Zstar = 5
        Zi = cc.alpha_i([1, 0], q) + cc.alpha_i([0.8, 0.5], q)
        Znorm = -Zi / Zstar + 1

        phi = np.zeros(self.dim) - 1
        phi[5, 5] = 1
        # speed = Znorm.reshape(self.dim)
        speed = np.ones_like(phi)
        ans = skfmm.travel_time(phi, speed, dx=.1)
        ans = skfmm.distance(phi, dx=.1)

        logger.debug("lalala")
        fig, ax = plt.subplots(figsize=(10, 10))

        plt.contour(ans)
        plt.show()
Example No. 26
def segment_cells(nuclei, mask, small_holes=100, remove_boundary_cells=True):
    
    selem_3 = skimage.morphology.square(3)

    # voronoi
    phi = (nuclei>0) - 0.5
    speed = np.ones(phi.shape)
    time = skfmm.travel_time(phi, speed)
    time[nuclei>0] = 0

    w = skimage.morphology.watershed(time, nuclei)

    if remove_boundary_cells:
        cut = np.r_[w[0,:], w[-1,:], w[:,0], w[:,-1]]
        w.flat[np.in1d(w, np.unique(cut))] = 0
        w = skimage.measure.label(w)

    # apply mask
    w[mask==0] = 0
    w = skimage.morphology.closing(w)

    # only take biggest component for each cell
    relabeled = skimage.measure.label(w)
    relabeled[w==0] = 0
    regions = skimage.measure.regionprops(relabeled, 
                                          intensity_image=nuclei)
    cut = [reg.label for reg in regions if reg.intensity_image.max() == 0]
    relabeled.flat[np.in1d(relabeled, np.unique(cut))] = 0

    # fill small holes
    holes = skimage.measure.label(relabeled==0)
    regions = skimage.measure.regionprops(holes,
                intensity_image=skimage.morphology.dilation(relabeled))

    for reg in regions:
        if reg.area < small_holes:
            vals = reg.intensity_image[reg.intensity_image>0]
            relabeled[holes == reg.label] = scipy.stats.mode(vals)[0][0]

    select = 2. * (relabeled != skimage.morphology.erosion(relabeled,
                                                      selem=selem_3))

    return relabeled, select
Example No. 28
def segment_cells(nuclei, mask, small_holes=100, remove_boundary_cells=True):
    import skfmm
    selem_3 = skimage.morphology.square(3)

    # voronoi
    phi = (nuclei > 0) - 0.5
    speed = np.ones(phi.shape)
    time = skfmm.travel_time(phi, speed).astype(np.uint16)
    time[nuclei > 0] = 0

    cells = skimage.morphology.watershed(time, nuclei)

    # apply mask
    cells[mask == 0] = 0

    # only take biggest component for each cell
    bkgd = cells == 0
    cells = skimage.measure.label(cells, background=0) + 1
    cells[bkgd] = 0

    # remove cells that don't overlap nuclei
    regions = skimage.measure.regionprops(cells, intensity_image=nuclei)
    cut = [reg.label for reg in regions if reg.intensity_image.max() == 0]
    cells.flat[np.in1d(cells, np.unique(cut))] = 0

    # remove cells touching the boundary
    if remove_boundary_cells:
        cut = np.concatenate(
            [cells[0, :], cells[-1, :], cells[:, 0], cells[:, -1]])
        cells.flat[np.in1d(cells, np.unique(cut))] = 0
        cells = skimage.measure.label(cells)

    # assign small holes to neighboring cell with most contact
    holes = skimage.measure.label(cells == 0, background=0) + 1
    regions = skimage.measure.regionprops(
        holes, intensity_image=skimage.morphology.dilation(cells))

    for reg in regions:
        if reg.area < small_holes:
            vals = reg.intensity_image[reg.intensity_image > 0]
            cells[holes == reg.label] = scipy.stats.mode(vals)[0][0]

    return cells.astype(np.uint16)
Example No. 29
def _traveltime_oneway(trav, sources, vel, dsamp):
    """Auxiliary routine to compute traveltime for a subset of sources
    """
    trav = np.zeros_like(trav)
    for isrc, src in enumerate(sources.T):
        phi = np.ones_like(vel)
        if len(dsamp) == 2:
            src = np.round([src[0] / dsamp[0],
                            src[1] / dsamp[1]]).astype(np.int32)
            phi[src[0], src[1]] = -1

        else:
            src = np.round(
                [src[0] / dsamp[0], src[1] / dsamp[1],
                 src[2] / dsamp[2]]).astype(np.int32)
            phi[src[0], src[1], src[2]] = -1
        trav[:, isrc] = (skfmm.travel_time(phi=phi, speed=vel,
                                           dx=dsamp)).ravel()
    return trav
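
A minimal usage sketch for the 2-D case; the model size, sampling, and source positions are illustrative, and trav is pre-allocated with one column per source, as the routine expects.

import numpy as np

nx, nz = 101, 81
dsamp = (10.0, 10.0)                        # grid sampling in x and z
vel = np.full((nx, nz), 1500.0)             # constant-velocity model

# sources given as (ndim, nsrc) physical coordinates; converted to indices internally
sources = np.array([[100.0, 700.0],         # x positions
                    [0.0, 0.0]])            # z positions

trav = np.zeros((nx * nz, sources.shape[1]))
trav = _traveltime_oneway(trav, sources, vel, dsamp)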
Example No. 30
    def compute_action_set(self, s, v):
        # gets list of actions from a state s given velocity v

        m = self.m
        x = s2x([s], m, m, 1)
        i = int(x[-1, 0])
        j = int(x[-1, 1])
        print('s = {}\n m = {}\nv = {}\nx = {}\ni = {}\nj = {}'.format(
            s, self.m, v, x, i, j))
        wP = np.ones((m, m))
        wP[i, j] = 0
        wP = travel_time(wP, v * self.speed, dx=1)

        mask = (wP.data <= 1) * 1 * (self.phi > 0)
        # this is if we want to convert to states
        #X,Y = np.where(mask)
        #actions = Y * m + X

        return mask
Example No. 31
 def calc_fmm(self, p, wait=1):
     if self.fmm_distance.size == 0:
         t_grid = np.array(np.ones_like(self.grid), dtype=np.double)
         mask = np.array(0 * np.ones_like(self.grid), dtype=bool)
         t_grid[self.target.row, self.target.col] = -1
         for i in self.obstacles:
             mask[i.row][i.col] = True
         phi = np.ma.MaskedArray(t_grid, mask)
         self.fmm_distance = skfmm.distance(phi)
         speed = 1.3 * np.ones_like(t_grid)
         self.tt = skfmm.travel_time(phi, speed, dx=0.4)
     #print(t)
     for i in self.obstacles:
         self.fmm_distance[i.row][i.col] = sys.maxsize
     d = np.copy(self.fmm_distance)
     t = np.copy(self.tt)
     for j in self.pedestrian:
         d[j.row, j.col] *= ((wait * (1 + (1 / (d[j.row, j.col]) * 10))) +
                             1 / d[j.row, j.col])
     return self.calc_fmm_path(d, t, p)
Example No. 32
def main():
    fname = "../data/cocco8cv4Rotated_216_182_249_253.raw"
    data = np.fromfile( fname, dtype=np.uint8 )
    data = data.reshape((182,249,253), order="F")

    subtractPlaneAverage = True
    phi = np.ones(data.shape)
    #phi[1:phi.shape[0]-1, 1:phi.shape[1]-1, 1:phi.shape[2]-1] = -1
    phi[-1, :,:] = -1

    threshold = 80
    refractiveIndex = 10.0
    speed = np.ones(data.shape)
    speed[data>threshold] = 1.0/refractiveIndex


    travelTime = skfmm.travel_time(phi, speed, dx=21.6)

    fig = plt.figure()
    ax1 = fig.add_subplot(2,2,1)
    ax2 = fig.add_subplot(2,2,2)
    ax3 = fig.add_subplot(2,2,3)
    centerX = int(travelTime.shape[0]/2)
    centerY = int(travelTime.shape[1]/2)
    centerZ = int(travelTime.shape[2]/2)

    travelTime -= travelTime.min()
    # Scale data to uint8 and save as raw
    travelTime *= 255/travelTime.max()
    travelTime = travelTime.astype(np.uint8)

    ax1.contour( travelTime[centerX,:,:] )
    ax2.contour( travelTime[:,centerY,:] )
    ax3.contour( travelTime[:,:,centerZ] )
    plt.show()

    travelTime = travelTime.ravel(order="F")

    ttimefname = fname.split(".raw")[0]+"TravelTime.raw"
    travelTime.tofile(ttimefname)
    print ("Travel time written to %s"%(ttimefname))
Example No. 33
def geodesic_distance(z, marker_points, xi=0.1):
    """Return the geodesic distance from the marker points, with coordinates given
    by 'marker_points[i,0]' and 'marker_points[i,1]', in a mollified version of 'z'
    """

    beta = 1000  # weight for gradient in geodesic distance
    z_sm = gaussian_filter(z, sigma=1)  # smoothen the image
    gx, gy = np.gradient(z_sm)
    # calculate the gradients
    nab_z = np.sqrt(gx ** 2 + gy ** 2)
    # get the gradients norm

    # putting 1's into an array at the marker points' positions
    R = np.zeros(z.shape)
    for m in marker_points:
        R[m[0], m[1]] = 1

    # Euclidean distance transform of 1-R
    # --> Fill array with distance to marker points
    BW = morphology.distance_transform_edt(1 - R)

    # Normalise this to [0,1]
    D_E = BW / np.max(BW.flatten())

    # define the "barrier function"
    f = (1.0e-3) * np.ones(np.shape(D_E)) + beta * nab_z ** 2 + xi * D_E
    # normalize to [0,1]
    f = (f - np.min(f.flatten())) / (np.max(f.flatten()) - np.min(f.flatten()))
    f = f + 0.01
    f_inverse = (1.0 / f) + 0.01  # invert f to bring everything into eikonal form

    # use the 'fast marching' method (FMM) to solve the eikonal equation
    T = skfmm.travel_time(
        R - 0.5 * np.ones(np.shape(R)),
        speed=f_inverse,
        dx=1.0 / np.shape(R)[0],
        order=1,
    )

    return T
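
A minimal usage sketch; the synthetic image and marker point are illustrative.

import numpy as np

z = np.zeros((64, 64))
z[20:40, 20:40] = 1.0                     # a bright square as the "image"
marker_points = np.array([[32, 32]])      # one marker in the centre of the square

T = geodesic_distance(z, marker_points, xi=0.1)   # travel time from the marker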
Example No. 34
def main():
    '20130824164324-cam12010988-1-75900-35423847.jpg'
    im = spim.imread('20130824164324-cam12010988-1-75900-35423847.jpg', True)
    #     im = im[im.shape[0] / 2:, im.shape[1] / 2:]

    N = np.gradient(im)
    N = np.concatenate(
        (N[1][..., None], N[0][..., None], np.ones_like(N[0][..., None])),
        axis=2)
    N = N / np.sqrt((N**2).sum(2))[..., None]

    L = N.dot([0, 0, 1])
    epsilon = 1e-9

    W = np.sqrt(1 / L**2 - 1)
    W[W < epsilon] = epsilon
    W[~np.isfinite(W)] = epsilon

    p = np.empty_like(W)
    p[...] = -1
    #     p[704, 1485] = 1
    #     p[193, 159] = 1
    p[L > (1 - epsilon)] = 1

    t = skfmm.travel_time(p, 1 / W)

    plt.figure()
    plt.imshow(im, cmap='gray')
    plt.figure()
    plt.imshow(N, cmap='gray')
    plt.figure()
    plt.imshow(L, cmap='gray')
    plt.figure()
    plt.imshow(W, cmap='gray')
    plt.figure()
    plt.imshow(p, cmap='gray')
    plt.figure()
    plt.imshow(-t, cmap='gray')

    plt.show()
Example No. 35
def computeEikonal(xi, ti, grid, vel_field):
    """Computes the eikonal solution given the EASs xi and their timings ti.

    Parameters
    ----------
    xi : np.ndarray (int or float)
        [N, 2] array of float positions, or [N] array of DOF index, marking the EAS location
    ti : np.ndarray (float)
        [N] array, prescribing the initiation time of each EAS
    grid : np.ndarray (float)
        [K, L, 2] array describing the grid coordinates
    vel_field : np.ndarray (float)
        [K, L] array of velocities for each grid point

    Returns
    -------
    np.ndarray (float)
        [K, L] array of the computed activation times for each grid point
    """

    dx = grid[1, 0, 0] - grid[0, 0, 0]

    if np.issubdtype(xi.dtype, float):
        xi, ti = upsampleEAS(xi, ti, grid, vel_field)

    #The chosen eikonal solver can only handle one EAS at a time, so we have to compute the eikonal solution for each EAS separately
    #and then use the geodesic property to combine all solutions.
    #Note that the custom eikonal solver from the paper only needs to solve eikonal here once
    phi_total = np.ones(grid.shape[:-1]) * 1e5
    for i in range(ti.size):
        phi_current = np.ones_like(phi_total)
        phi_current[xi[i, 0], xi[i, 1]] = 0.
        phi_current = skfmm.travel_time(phi_current,
                                        speed=vel_field,
                                        order=1,
                                        dx=float(dx))
        phi_total = np.minimum(phi_total, phi_current + ti[i])

    return phi_total
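
A minimal usage sketch with two integer-indexed EASs on a uniform velocity field; the grid layout follows the docstring, and all values below are illustrative.

import numpy as np

K = L = 100
xs = np.linspace(0.0, 1.0, K)
grid = np.stack(np.meshgrid(xs, xs, indexing="ij"), axis=-1)   # [K, L, 2] coordinates

vel_field = np.full((K, L), 0.6)          # uniform conduction velocity
xi = np.array([[10, 10], [80, 60]])       # integer node indices, so no upsampling is needed
ti = np.array([0.0, 15.0])                # the second EAS fires 15 time units later

activation_times = computeEikonal(xi, ti, grid, vel_field)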
Example No. 36
def main():
    n = 1.333
    N = 1000
    R = 1000
    xmin = -100.0
    xmax = 100.0
    x = 0.7*np.linspace(xmin,xmax,N)
    y = np.linspace(xmin,xmax,N)
    lens = Lens()
    lens.plotRefractiveIndex(x,y)
    speed = np.ones((N,N))
    for i in range(0,N):
        for j in range(0,N):
            if ( lens.isInside(x[i],y[j]) ):
                speed[i,j] = 1.0/lens.refractiveIndex

    # Define zero contour
    phi = np.ones(speed.shape)
    phi[1,1:phi.shape[1]-1] = -1
    travelTime = skfmm.travel_time(phi, speed)

    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    X, Y = np.meshgrid(x,y)
    im = ax.contour(X,Y,travelTime, N=100)
    im = ax.imshow(travelTime)
    grad = np.gradient(travelTime)
    maxQuivers = 20
    X = X[::int(X.shape[0]/maxQuivers),::int(X.shape[1]/maxQuivers)]
    Y = Y[::int(Y.shape[0]/maxQuivers),::int(Y.shape[1]/maxQuivers)]
    #g1 = grad[1]
    #g1 = g1[::int(g1.shape[0]/maxQuivers),::int(g1.shape[1]/maxQuivers)]
    #g0 = grad[0]
    #g0 = g0[::int(g0.shape[0]/maxQuivers),::int(g0.shape[1]/maxQuivers)]

    #ax.quiver(X,Y,g1,g0)

    fig.colorbar(im)
    plt.show()

	fmm = np.ones((1024,1024))
	for i in xrange(len(centroids)):
		x1 = int(centroids[i][0])
		y1 = int(centroids[i][1])
		fmm[y1][x1] = -1
	dist_mat = skfmm.distance(fmm)
	plt.imshow(dist_mat)
	plt.show()
	speed = 1 - np.copy(slice2) + 0.1
	print np.amax(slice2)
	print np.amin(slice2)
	print np.amax(speed)
	print np.amin(speed)
	t = skfmm.travel_time(fmm, speed)
	print np.shape(t)
	plt.imshow(t)
	plt.show()
	plt.plot(t[100])
	plt.show()
	plt.contour(np.flipud(t), 55)
	plt.colorbar()
	plt.show()
	extract_ridges(t)
	#centroids = remove_some(centroids)
	#centroids = np.array(centroids).flatten()
	#centroids = centroids.reshape(len(centroids)/2,2)
	#cx,cy = np.transpose(centroids)
	#plt.imshow(slice2)
	#plt.scatter(x=cx, y=cy, c='g', s=14)
Example No. 38
import numpy as np
import skfmm
import OptPath as op
import matplotlib.pyplot as plt

# set the problem
X, Y = np.meshgrid(np.linspace(0,100,1001), np.linspace(0,100,1001))

phi=np.ones_like(X)
phi[:,3:10]=-1 # this needs to be robust in order to stop path finding alg. at the moment

speed=np.ones_like(X)
speed[(X-50)**2+(Y-50)**2<36] = 2. 

# solve the FMM part of the problem
tmatrix=skfmm.travel_time(phi,speed,dx=np.asscalar(X[0,1]-X[0,0]))

# now find the optimal path with two methods
nptraj1=op.optpath_eulerforward(np.squeeze(X[0,:]),np.squeeze(Y[:,0]),tmatrix,phi,(60,60))
nptraj2=op.optpath_scipyode(np.squeeze(X[0,:]),np.squeeze(Y[:,0]),tmatrix,phi,(60,60))

# output and compare
plt.close("all")
plt.imshow(speed,extent=[X[0,0],X[0,-1],Y[0,0],Y[-1,0]],origin='lower')
plt.plot(nptraj1[:,1],nptraj1[:,2],'g.')
plt.plot(nptraj2[:,1],nptraj2[:,2],'b-')
plt.show()
speed = 1+X**2+Y**2

plt.subplot(221)
plt.title("Zero-contour of phi")
plt.contour(X, Y, phi, [0], colors='black', linewidths=(3))
plt.gca().set_aspect(1)
plt.xticks([]); plt.yticks([])

plt.subplot(222)
plt.title("Distance")
plt.contour(X, Y, phi, [0], colors='black', linewidths=(3))
plt.contour(X, Y, skfmm.distance(phi, dx=2.0/500), 15)
plt.gca().set_aspect(1)
plt.xticks([]); plt.yticks([])

plt.subplot(223)
plt.title("Distance with x- \nand y- directions periodic")
plt.contour(X, Y, phi, [0], colors='black', linewidths=(3))
plt.contour(X, Y, skfmm.distance(phi, dx=2.0/500, periodic=True), 15)
plt.gca().set_aspect(1)
plt.xticks([]); plt.yticks([])

plt.subplot(224)
plt.title("Travel time with y- \ndirection periodic ")
plt.contour(X, Y, phi, [0], colors='black', linewidths=(3))
plt.contour(X, Y, skfmm.travel_time(phi, speed, dx=2.0/500, periodic=(1,0)), 15)
plt.gca().set_aspect(1)
plt.xticks([]); plt.yticks([])

plt.show()
Example No. 40
b=1/np.pi
theta = np.linspace(0,np.pi,300)
x = b*(theta-np.sin(theta))
y = b*(1-np.cos(theta))


N     = 111 # value can be 111 or 1112
assert (np.linspace(0,1.1,N)==1.0).any()
coords, grid_spacing = np.linspace(0, 1.1, N, retstep=True)
X, Y  = np.meshgrid(coords, coords)
phi = np.ones_like(X)
phi[X==1] = 0
phi[X>1] = -1

vel = np.sqrt(Y)
time = travel_time(phi,vel,dx=grid_spacing)
plt.contourf(X,Y,time)
plt.plot(x,y) # plot exact solution as solid line

# starting point for numerical solution
# we cannot start at the origin because the velocity is zero.
y0 = 0.05
theta0 = np.arccos(1-y0/b)
x0 = b*(theta0-np.sin(theta0))
print(y0, x0)
plt.plot([x0], [y0], "*") # plot starting point

coords = np.linspace(0, 1.1, N)
dx = coords[1]-coords[0]
xp, yp = optimal_path_2d(time, ((x0,y0),), dx, coords)[0]
Example No. 41
pl.contour(X, Y, phi,[0], linewidths=(3), colors='black')
pl.title('Boundary location: the zero contour of phi')
pl.savefig('2d_phi.png')
pl.show()

d = skfmm.distance(phi, dx=1e-2)
pl.title('Distance from the boundary')
pl.contour(X, Y, phi,[0], linewidths=(3), colors='black')
pl.contour(X, Y, d, 15)
pl.colorbar()
pl.savefig('2d_phi_distance.png')
pl.show()

speed = np.ones_like(X)
speed[Y>0] = 1.5
t = skfmm.travel_time(phi, speed, dx=1e-2)

pl.title('Travel time from the boundary')
pl.contour(X, Y, phi,[0], linewidths=(3), colors='black')
pl.contour(X, Y, t, 15)
pl.colorbar()
pl.savefig('2d_phi_travel_time.png')
pl.show()

mask = np.logical_and(abs(X)<0.1, abs(Y)<0.5)
phi  = np.ma.MaskedArray(phi, mask)
t    = skfmm.travel_time(phi, speed, dx=1e-2)
pl.title('Travel time from the boundary with an obstacle')
pl.contour(X, Y, phi, [0], linewidths=(3), colors='black')
pl.contour(X, Y, phi.mask, [0], linewidths=(3), colors='red')
pl.contour(X, Y, t, 15)
Example No. 42
def main():
    print "current working directory", os.getcwd()
    print "Reading input file path :",Parameters.demDataFilePath
    print "Reading input file :",Parameters.demFileName
    defaults.figureNumber = 0

    rawDemArray = read_dem_from_geotiff(Parameters.demFileName,\
                                        Parameters.demDataFilePath)

    nanDemArraylr=np.array(rawDemArray)
    nanDemArray = nanDemArraylr
    nanDemArray[nanDemArray < defaults.demNanFlag]= np.nan
    Parameters.minDemValue= np.min(nanDemArray[:])
    Parameters.maxDemValue= np.max(nanDemArray[:])

    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(nanDemArray,cmap=cm.coolwarm)
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Input DEM')
    if defaults.doPlot==1:
        plt.show()

    # Area of analysis
    Parameters.xDemSize=np.size(nanDemArray,0)
    Parameters.yDemSize=np.size(nanDemArray,1)

    # Calculate pixel length scale and assume square
    Parameters.maxLowerLeftCoord = np.max([Parameters.xDemSize, \
                                           Parameters.yDemSize])
    print 'DTM size: ',Parameters.xDemSize, 'x' ,Parameters.yDemSize
    #-----------------------------------------------------------------------------

    # Compute slope magnitude for raw and filtered DEMs
    print 'Computing slope of raw DTM'
    print 'DEM pixel scale:',Parameters.demPixelScale
    print np.array(nanDemArray).shape
    slopeXArray,slopeYArray = np.gradient(np.array(nanDemArray),\
                                          Parameters.demPixelScale)
    slopeMagnitudeDemArray = np.sqrt(slopeXArray**2 + slopeYArray**2)

    # plot the slope DEM array
    slopeMagnitudeDemArrayNp = np.array(slopeMagnitudeDemArray)
    print slopeMagnitudeDemArrayNp.shape

    # plotting the slope DEM of non filtered DEM
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(slopeMagnitudeDemArrayNp,cmap=cm.coolwarm)
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Slope of unfiltered DEM')
    if defaults.doPlot==1:
        plt.show()

    # Computation of the threshold lambda used in Perona-Malik nonlinear
    # filtering. The value of lambda (=edgeThresholdValue) is given by the 90th
    # quantile of the absolute value of the gradient.
    print'Computing lambda = q-q-based nonlinear filtering threshold'
    slopeMagnitudeDemArrayQ = slopeMagnitudeDemArrayNp
    slopeMagnitudeDemArrayQ = np.reshape(slopeMagnitudeDemArrayQ,\
                                         np.size(slopeMagnitudeDemArrayQ))
    slopeMagnitudeDemArrayQ = slopeMagnitudeDemArrayQ[~np.isnan(slopeMagnitudeDemArrayQ)]
    print 'dem smoothing Quantile',defaults.demSmoothingQuantile

    edgeThresholdValuescipy = mquantiles(np.absolute(slopeMagnitudeDemArrayQ),\
                                         defaults.demSmoothingQuantile)
    print 'edgeThresholdValuescipy :', edgeThresholdValuescipy
    
    # performing PM filtering using the anisodiff
    print 'Performing Perona-Malik nonlinear filtering'
    filteredDemArray = anisodiff(nanDemArray, defaults.nFilterIterations, \
                                     edgeThresholdValuescipy,\
                                     defaults.diffusionTimeIncrement, \
                                     (Parameters.demPixelScale,\
                                      Parameters.demPixelScale),2)
    
    # plotting the filtered DEM
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(filteredDemArray,cmap=cm.coolwarm)
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Filtered DEM')
    if defaults.doPlot==1:
        plt.show()
    
    # Writing the filtered DEM as a tif
    write_geotif_filteredDEM(filteredDemArray,Parameters.demDataFilePath,\
                             Parameters.demFileName)

    # Computing slope of filtered DEM
    print 'Computing slope of filtered DTM'
    filteredDemArraynp = filteredDemArray#np.gradient only takes an array as input
    slopeXArray,slopeYArray = np.gradient(filteredDemArraynp,Parameters.demPixelScale)
    slopeDemArray = np.sqrt(slopeXArray**2 + slopeYArray**2)
    slopeMagnitudeDemArrayQ = slopeDemArray
    slopeMagnitudeDemArrayQ = np.reshape(slopeMagnitudeDemArrayQ,\
                                         np.size(slopeMagnitudeDemArrayQ))
    slopeMagnitudeDemArrayQ = slopeMagnitudeDemArrayQ[~np.isnan(slopeMagnitudeDemArrayQ)]
    print ' angle min:', np.arctan(np.percentile(slopeMagnitudeDemArrayQ,0.1))*180/np.pi
    print ' angle max:', np.arctan(np.percentile(slopeMagnitudeDemArrayQ,99.9))*180/np.pi
    print 'mean slope:',np.nanmean(slopeDemArray[:])
    print 'stdev slope:',np.nanstd(slopeDemArray[:])
    
    #Computing curvature
    print 'computing curvature'
    curvatureDemArrayIn= filteredDemArraynp
    #curvatureDemArrayIn[curvatureDemArrayIn== defaults.demErrorFlag]=np.nan
    curvatureDemArray = compute_dem_curvature(curvatureDemArrayIn,\
                                              Parameters.demPixelScale,\
                                              defaults.curvatureCalcMethod)
    #Writing the curvature array
    outfilepath = Parameters.geonetResultsDir
    outfilename = Parameters.demFileName
    outfilename = outfilename.split('.')[0]+'_curvature.tif'
    write_geotif_generic(curvatureDemArray,outfilepath,outfilename)
    
    #Computation of statistics of curvature
    print 'Computing curvature statistics'
    print curvatureDemArray.shape
    tt = curvatureDemArray[~np.isnan(curvatureDemArray[:])]
    print tt.shape
    finiteCurvatureDemList = curvatureDemArray[np.isfinite(curvatureDemArray[:])]
    print finiteCurvatureDemList.shape
    curvatureDemMean = np.nanmean(finiteCurvatureDemList)
    curvatureDemStdDevn = np.nanstd(finiteCurvatureDemList)
    print ' mean: ', curvatureDemMean
    print ' standard deviation: ', curvatureDemStdDevn


    # plotting only for testing purposes
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(curvatureDemArray,cmap=cm.coolwarm)
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Curvature DEM')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    
    #*************************************************
    #Compute curvature quantile-quantile curve
    # This seems to take a long time ... is commented for now
    print 'Computing curvature quantile-quantile curve'
    #osm,osr = compute_quantile_quantile_curve(finiteCurvatureDemList)
    #print osm[0]
    #print osr[0]
    thresholdCurvatureQQxx = 1
    # have to add method to automatically compute the thresold
    # .....
    # .....
    #*************************************************
   

    # Computing contributing areas
    print 'Computing upstream accumulation areas using MFD from GRASS GIS'
    """
    return {'outlets':outlets, 'fac':nanDemArrayfac ,\
            'fdr':nanDemArrayfdr ,'basins':nanDemArraybasins,\
            'outletsxxProj':outletsxxProj, 'outletsyyProj':outletsyyProj,\
            'bigbasins':allbasins}
    """
    # Call the flow accumulation function
    flowroutingresults = flowaccumulation(filteredDemArray)

    # Read out the flowroutingresults into appropriate variables
    outletPointsList = flowroutingresults['outlets']
    flowArray = flowroutingresults['fac']
    flowDirectionsArray = flowroutingresults['fdr']
    # These are actually not sub basins, if the basin threshold
    # is large, then you might have as nulls, so best
    # practice is to keep the basin threshold close to 1000
    # default value is 10,000
    #subBasinIndexArray = flowroutingresults['basins']

    
    #subBasinIndexArray[subBasinIndexArray==-9999]=np.nan
    basinIndexArray = flowroutingresults['bigbasins']

    flowArray[np.isnan(filteredDemArray)]=np.nan
    flowMean = np.mean(flowArray[~np.isnan(flowArray[:])])
    print 'Mean upstream flow: ', flowMean

    # plotting only for testing purposes
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    drainageMeasure = -np.sqrt(np.log10(flowArray))
    plt.imshow(drainageMeasure,cmap=cm.coolwarm)
    plt.plot(outletPointsList[1],outletPointsList[0],'go')
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('flowArray with outlets')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    
    # plotting only for testing purposes
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(basinIndexArray.T,cmap=cm.Dark2)
    plt.plot(outletPointsList[1],outletPointsList[0],'go')
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('basinIndexArray with outlets')
    if defaults.doPlot==1:
        plt.show()


    # Define a skeleton based on flow alone
    skeletonFromFlowArray = \
    compute_skeleton_by_single_threshold(flowArray.T,\
        defaults.flowThresholdForSkeleton)
    
    # Define a skeleton based on curvature alone
    skeletonFromCurvatureArray =\
    compute_skeleton_by_single_threshold(curvatureDemArray.T,\
        curvatureDemMean+thresholdCurvatureQQxx*curvatureDemStdDevn)
    
    
    # Define a skeleton based on curvature and flow
    skeletonFromFlowAndCurvatureArray =\
    compute_skeleton_by_dual_threshold(curvatureDemArray.T, flowArray.T, \
        curvatureDemMean+thresholdCurvatureQQxx*curvatureDemStdDevn, \
        defaults.flowThresholdForSkeleton)

    # plotting only for testing purposes
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(skeletonFromFlowAndCurvatureArray.T,cmap=cm.binary)
    plt.plot(outletPointsList[1],outletPointsList[0],'go')
    plt.xlabel('X[m]')
    plt.ylabel('Y[m]')
    plt.title('Curvature with outlets')
    if defaults.doPlot==1:
        plt.show()
    
    # Writing the skeletonFromFlowAndCurvatureArray array
    outfilepath = Parameters.geonetResultsDir
    outfilename = Parameters.demFileName
    outfilename = outfilename.split('.')[0]+'_skeleton.tif'
    write_geotif_generic(skeletonFromFlowAndCurvatureArray.T,\
                         outfilepath,outfilename)

    
    # Computing the percentage drainage areas
    print 'Computing percentage drainage area of each indexed basin'
    fastMarchingStartPointList = np.array(outletPointsList)
    print fastMarchingStartPointList
    #fastMarchingStartPointListFMM = np.zeros((fastMarchingStartPointList.shape))
    fastMarchingStartPointListFMMx = []
    fastMarchingStartPointListFMMy = []
    basinsUsedIndexList = np.zeros((len(fastMarchingStartPointList[0]),1))
    nx = Parameters.xDemSize
    ny = Parameters.yDemSize
    nDempixels = float(nx*ny)
    basinIndexArray = basinIndexArray.T
    for label in range(0,len(fastMarchingStartPointList[0])):        
        outletbasinIndex = basinIndexArray[fastMarchingStartPointList[0,label],\
                                         fastMarchingStartPointList[1,label]]
        print outletbasinIndex
        numelments = basinIndexArray[basinIndexArray==outletbasinIndex]
        #print type(numelments), len(numelments)
        percentBasinArea = float(len(numelments)) * 100/nDempixels
        print 'Basin: ',outletbasinIndex,\
              '@ : ',fastMarchingStartPointList[:,label],' #Elements ',len(numelments),\
              ' area ',percentBasinArea,' %'
        if percentBasinArea > defaults.thresholdPercentAreaForDelineation and\
           len(numelments) > Parameters.numBasinsElements:
            # Get the watersheds used
            basinsUsedIndexList[label]= label
            # Preparing the outlets used for fast marching in ROI
            #fastMarchingStartPointListFMM[:,label] = fastMarchingStartPointList[:,label]
            fastMarchingStartPointListFMMx.append(fastMarchingStartPointList[0,label])
            fastMarchingStartPointListFMMy.append(fastMarchingStartPointList[1,label])
        # Finished assembling the outlets used for FMM
    # End of basin-area computation

    fastMarchingStartPointListFMM = np.array([fastMarchingStartPointListFMMx,\
                                                  fastMarchingStartPointListFMMy])
    # Computing the local cost function
    print 'Preparing to calculate cost function'
    # Normalize the curvature first, if requested; otherwise leave it unchanged
    if defaults.doNormalizeCurvature == 1:
        curvatureDemArrayNor = normalize(curvatureDemArray)
        del curvatureDemArray
        curvatureDemArray = curvatureDemArrayNor
        del curvatureDemArrayNor
    
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(curvatureDemArray,cmap=cm.coolwarm)
    plt.title('Curvature after normalization')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    
    
    print 'Curvature min: ' ,str(np.min(curvatureDemArray[~np.isnan(curvatureDemArray)])), \
          ' exp(min): ',str(np.exp(3*np.min(curvatureDemArray[~np.isnan(curvatureDemArray)])))
    print 'Curvature max: ' ,str(np.max(curvatureDemArray[~np.isnan(curvatureDemArray)])),\
          ' exp(max): ',str(np.exp(3*np.max(curvatureDemArray[~np.isnan(curvatureDemArray)])))
    
    # set all the nan's to zeros before cost function is computed
    curvatureDemArray[np.isnan(curvatureDemArray)] = 0
    
    print 'Computing cost function & geodesic distance'
    # Calculate the local reciprocal cost (weight, or propagation speed in the
    # eikonal equation sense).  If the cost function isn't defined, default to
    # old cost function.
    flowArray = flowArray.T
    curvatureDemArray = curvatureDemArray.T
    
    if hasattr(defaults, 'reciprocalLocalCostFn'):
        print 'Evaluating local cost func.'
        reciprocalLocalCostArray = eval(defaults.reciprocalLocalCostFn)
    else:
        print 'Evaluating local cost func. (default)'
        reciprocalLocalCostArray = flowArray + \
                                   (flowMean*skeletonFromFlowAndCurvatureArray)\
                                   + (flowMean*curvatureDemArray)
    if hasattr(defaults,'reciprocalLocalCostMinimum'):
        if defaults.reciprocalLocalCostMinimum != 'nan':
            reciprocalLocalCostArray[reciprocalLocalCostArray[:]\
                                 < defaults.reciprocalLocalCostMinimum]=1.0
    
    print '1/cost min: ', np.nanmin(reciprocalLocalCostArray[:]) 
    print '1/cost max: ', np.nanmax(reciprocalLocalCostArray[:])

    # Writing the reciprocal array
    outfilepath = Parameters.geonetResultsDir
    outfilename = Parameters.demFileName
    outfilename = outfilename.split('.')[0]+'_costfunction.tif'
    write_geotif_generic(reciprocalLocalCostArray,outfilepath,outfilename)
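
    # The reciprocal cost acts as the front speed F in the eikonal equation
    # solved by skfmm.travel_time, |grad T| * F = 1: the geodesic distance T
    # grows slowly across high-flow / high-curvature cells and quickly across
    # hillslopes, so channels show up as valleys of T.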

    # Fast marching
    print 'Performing fast marching'
    print '# of unique basins:',np.size(np.unique(basinIndexArray))
    # Now access each unique basin and get the
    # outlets for it
    basinIndexList = np.unique(basinIndexArray)
    print 'basinIndexList:', str(basinIndexList)
    print reciprocalLocalCostArray.shape
    #stop

    
    # Do fast marching for each sub basin
    geodesicDistanceArray = np.full(basinIndexArray.shape, np.inf)
    geodesicDistanceArray = geodesicDistanceArray.T
    filteredDemArrayTr = filteredDemArray.T
    basinIndexArray = basinIndexArray.T
    # create a watershed outlet dictionary
    outletwatersheddict = {}
    defaults.figureNumber = defaults.figureNumber + 1
    for i in range(0,len(fastMarchingStartPointListFMM[0])):
        basinIndexList = basinIndexArray[fastMarchingStartPointListFMM[1,i],\
                                    fastMarchingStartPointListFMM[0,i]]
        print 'basin Index:',basinIndexList
        print 'start point :', fastMarchingStartPointListFMM[:,i]
        outletwatersheddict[basinIndexList]=fastMarchingStartPointListFMM[:,i]
        maskedBasin = np.zeros((basinIndexArray.shape))
        maskedBasin[basinIndexArray==basinIndexList]=1
        # For the masked basin, get the maximum flow-accumulation location
        # and use that as an outlet for the basin.
        maskedBasinFAC = np.zeros((basinIndexArray.shape))
        maskedBasinFAC[basinIndexArray==basinIndexList]=\
        flowArray[basinIndexArray==basinIndexList]
        maskedBasinFAC[maskedBasinFAC==0]=np.nan
        # Get the outlet of subbasin
        maskedBasinFAC[np.isnan(maskedBasinFAC)]=0
        # print subBasinoutletindices
        # outlets locations in projection of the input dataset
        outletsxx = fastMarchingStartPointList[0,i]
        outletsyy = fastMarchingStartPointList[1,i]
        # call the fast marching here
        phi = np.nan * np.ones((reciprocalLocalCostArray.shape))
        speed = np.ones((reciprocalLocalCostArray.shape))* np.nan
        phi[maskedBasinFAC!=0] = 1
        speed[maskedBasinFAC!=0] = reciprocalLocalCostArray[maskedBasinFAC!=0]
        phi[fastMarchingStartPointListFMM[1,i],\
            fastMarchingStartPointListFMM[0,i]] =-1
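        # skfmm.travel_time propagates a front outward from the zero level
        # set of phi (the sign change around the outlet, where phi = -1) at
        # the local speed given by the reciprocal cost; cells left as NaN lie
        # outside the current basin and are not meant to be reached.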
        try:
            travelTimearray = skfmm.travel_time(phi,speed, dx=1)
        except Exception as e:
            print 'Error in calculating skfmm travel time'
            print 'Error in catchment: ',basinIndexList
            # setting travel time to empty array
            travelTimearray = np.nan * np.zeros((reciprocalLocalCostArray.shape))
            plt.figure(defaults.figureNumber+1)
            plt.imshow(speed.T,cmap=cm.coolwarm)
            plt.plot(fastMarchingStartPointListFMM[1,i],\
                    fastMarchingStartPointListFMM[0,i],'ok')
            #plt.contour(speed.T,cmap=cm.coolwarm)
            plt.title('speed basin Index'+str(basinIndexList))
            plt.colorbar()
            plt.show()
            
            plt.figure(defaults.figureNumber+1)
            plt.imshow(phi.T,cmap=cm.coolwarm)
            plt.plot(fastMarchingStartPointListFMM[1,i],\
                    fastMarchingStartPointListFMM[0,i],'ok')
            #plt.contour(speed.T,cmap=cm.coolwarm)
            plt.title('phi basin Index'+str(basinIndexList))
            plt.colorbar()
            plt.show()
            
            print "I/O error({0}): {1}".format(e.errno, e.strerror)
            #stop
        
        #print travelTimearray.shape
        geodesicDistanceArray[maskedBasin ==1]= travelTimearray[maskedBasin ==1]

    #-----------------------------------
    #-----------------------------------
    # Plot the geodesic array
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(np.log10(geodesicDistanceArray.T),cmap=cm.coolwarm)
    plt.contour(geodesicDistanceArray.T,140,cmap=cm.coolwarm)
    plt.title('Geodesic distance array (travel time)')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    
    print geodesicDistanceArray.shape
    # Writing the geodesic distance array
    outfilepath = Parameters.geonetResultsDir
    outfilename = Parameters.demFileName
    outfilename = outfilename.split('.')[0]+'_geodesicDistance.tif'
    write_geotif_generic(geodesicDistanceArray.T,outfilepath,outfilename)
    
    # Locating end points
    print 'Locating skeleton end points'
    xySkeletonSize = skeletonFromFlowAndCurvatureArray.shape
    skeletonLabeledArray, skeletonNumConnectedComponentsList =\
                          ndimage.label(skeletonFromFlowAndCurvatureArray)
    #print skeletonNumConnectedComponentsList
    """
     Through the histogram of skeletonNumElementsSortedList
     (skeletonNumElementsList minus the maximum value which
      corresponds to the largest connected element of the skeleton) we get the
      size of the smallest elements of the skeleton, which will likely
      correspond to small isolated convergent areas. These elements will be
      excluded from the search of end points.
    """
    print 'Counting the number of elements of each connected component'
    #print "ndimage.labeled_comprehension"
    lbls = np.arange(1, skeletonNumConnectedComponentsList+1)
    skeletonLabeledArrayNumtuple = ndimage.labeled_comprehension(skeletonFromFlowAndCurvatureArray,\
                                                                 skeletonLabeledArray,\
                                                                 lbls,np.count_nonzero,\
                                                                 int,0)
    skeletonNumElementsSortedList = np.sort(skeletonLabeledArrayNumtuple)
    print np.sqrt(len(skeletonNumElementsSortedList))
    histarray,skeletonNumElementsHistogramX=np.histogram(\
        skeletonNumElementsSortedList[0:len(skeletonNumElementsSortedList)-1],
        int(np.sqrt(len(skeletonNumElementsSortedList))))
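    # The component sizes (with the largest component excluded) are binned
    # into roughly sqrt(N) bins; one of the low bin edges is used further
    # down as skeletonNumElementsThreshold, the minimum component size for
    # the end-point search.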

    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(skeletonLabeledArray.T,cmap=cm.coolwarm)
    plt.title('Skeleton Labeled Array elements Array')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()

    # Create skeleton gridded array
    skeletonNumElementsGriddedArray = np.zeros(xySkeletonSize)
    #"""
    for i in range(0,xySkeletonSize[0]):
        for j in range(0,xySkeletonSize[1]):
            #Gets the watershed label for this specified cell and checked in
            #subsequent if statement
            basinIndex = basinIndexArray[i,j]
            if skeletonLabeledArray[i, j] > 0:
                skeletonNumElementsGriddedArray[i,j] = \
                    skeletonLabeledArrayNumtuple[skeletonLabeledArray[i,j]-1]
    
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(skeletonNumElementsGriddedArray.T,cmap=cm.coolwarm)
    plt.title('Skeleton Num elements Array')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()
    
    #"""
    # Elements smaller than skeletonNumElementsThreshold are not considered in the
    # skeletonEndPointsList detection
    print skeletonNumElementsHistogramX
    skeletonNumElementsThreshold = skeletonNumElementsHistogramX[2]
    
    print 'skeletonNumElementsThreshold',str(skeletonNumElementsThreshold)
    
    # Scan the array for finding the channel heads
    print 'Continuing to locate skeleton endpoints'
    #"""
    skeletonEndPointsList = []
    for i in range(0,xySkeletonSize[0]):
        for j in range(0,xySkeletonSize[1]):
            #print i,j
            # Skip this pixel if it is not part of the skeleton (label 0) or
            # if its connected component has too few elements
            if skeletonLabeledArray[i,j]!=0 \
               and skeletonNumElementsGriddedArray[i,j]>=skeletonNumElementsThreshold:
                # Define search box and ensure it fits within the DTM bounds
                mx = i-1
                px = xySkeletonSize[0]-i
                my = j-1
                py = xySkeletonSize[1]-j
                xMinus = np.min([defaults.endPointSearchBoxSize, mx])
                xPlus  = np.min([defaults.endPointSearchBoxSize, px])
                yMinus = np.min([defaults.endPointSearchBoxSize, my])
                yPlus  = np.min([defaults.endPointSearchBoxSize, py])
                # Extract the geodesic distances geodesicDistanceArray for pixels within the search box
                searchGeodesicDistanceBox = geodesicDistanceArray[i-xMinus:i+xPlus, j-yMinus:j+yPlus]
                # Extract the skeleton labels for pixels within the search box
                searchLabeledSkeletonBox = skeletonLabeledArray[i-xMinus:i+xPlus, j-yMinus:j+yPlus]
                # Look in the search box for skeleton points with the same label
                # and greater geodesic distance than the current pixel at (i,j)
                # - if there are none, then add the current point as a channel head
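                # In other words, (i, j) is kept as a channel head only if no
                # pixel of the same skeleton component inside the box has a
                # larger geodesic distance, i.e. (i, j) is a local maximum of
                # geodesic distance along its own skeleton branch.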
                v = searchLabeledSkeletonBox==skeletonLabeledArray[i,j]
                v1 = (v * searchGeodesicDistanceBox) > geodesicDistanceArray[i,j]
                if not np.any(v1):
                    skeletonEndPointsList.append([i,j])
    
    # For loop ends here
    skeletonEndPointsListArray = np.array(skeletonEndPointsList)
    xx = skeletonEndPointsListArray[:, 0]
    yy = skeletonEndPointsListArray[:, 1]
    
    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(skeletonFromFlowAndCurvatureArray.T,cmap=cm.binary)
    plt.plot(xx,yy,'or')
    plt.title('Skeleton with channel heads')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()             

    defaults.figureNumber = defaults.figureNumber + 1
    plt.figure(defaults.figureNumber)
    plt.imshow(np.log(geodesicDistanceArray.T),cmap=cm.coolwarm)
    plt.plot(xx,yy,'or')
    plt.title('Geodesic distance Array with channel heads')
    plt.colorbar()
    if defaults.doPlot==1:
        plt.show()

    # Write shapefiles of channel heads
    write_channel_heads(xx,yy)
    
    # Do compute discrete geodesics
    print 'Computing discrete geodesics'
    compute_discrete_geodesic_v1()
    print 'Finished pyGeoNet'
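
# Minimal sketch (not part of pyGeoNet) illustrating the per-basin fast
# marching step above: skfmm.travel_time driven by a synthetic reciprocal-cost
# speed on a small grid. The grid size and cost values are made up purely for
# illustration.
def _geodesic_distance_sketch():
    import numpy as np
    import skfmm
    n = 50
    # Synthetic reciprocal cost: propagation is ten times faster along a
    # central "channel" row than elsewhere
    speed = np.ones((n, n))
    speed[n // 2, :] = 10.0
    # phi is +1 everywhere except the outlet cell, which is set to -1; the
    # zero level set between them is the source of the front
    phi = np.ones((n, n))
    phi[n // 2, 0] = -1
    # Travel time (geodesic distance) is smallest along the fast channel row
    return skfmm.travel_time(phi, speed, dx=1)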
Exemplo n.º 43
def kline_vessel(vol, startID, **kwargs):
    """This function creates a centerline from the segmented volume (vol)

    Inputs:
        Required:
            vol: 3D binary volume where -
                0: background
                1: object to be skeletonized/centerline extracted
            startID: index of root, as in [x,y,z] location
    
        Optional (kwargs):
            dist_map_weight
            cluster_graph_weight
            min_branch_length
            min_branch_to_root

    Returns:
        final_centerline, final_march (the weighted fast-marching travel-time map)

    dependencies:
        numpy
        scikit-fmm (import name: skfmm)
        scipy

    """
    
    # Make sure the object voxels equal 1; without an explicit dtype the
    # comparison result would be boolean
    B2 = np.array(vol.copy() > 0, dtype = 'int8')

    # Parse optional keyword arguments, falling back to defaults
    for key, value in kwargs.iteritems():
        print "%s == %s" %(key,value)
    dmw = kwargs.get('dist_map_weight', 6)
    cgw = kwargs.get('cluster_graph_weight', np.sum(vol)/20)
    mbl = kwargs.get('min_branch_length', 5)
    mbtr = kwargs.get('min_branch_to_root', 10)
    print "dmw = %s" %(dmw)
    print "cgw = %s" %(cgw)

    # Remember original volume size
    [xOrig,yOrig,zOrig] = np.shape(B2)

    # Find 3D coordinates of volume

    x3, y3, z3 = find_3D_object_voxel_list(B2)

    # Limit volume size
    B2 = B2[np.min(x3):np.max(x3)+1,np.min(y3):np.max(y3)+1,np.min(z3):np.max(z3)+1]

    # Setup starting index list and correct for change in volume size
    sx = startID[0] - np.min(x3) 
    sy = startID[1] - np.min(y3) 
    sz = startID[2] - np.min(z3) 

    # New volume size (bounding box)
    [x_si,y_si,z_si] = np.shape(B2)

    sys.stdout.flush() 
    time.sleep(1)       
    # Perform first fast march to determine endpoints
    # works on binary speed function
    phi = B2.copy()
    constant_speed = B2.copy()
    phi[sx,sy,sz] = -1
    #constant_speed = np.ones(shape = (np.shape(phi)))
    mask = B2<1
    phi = np.ma.MaskedArray(phi, mask)
    binary_travel_time = skfmm.travel_time(phi, constant_speed)
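    # With a masked phi (object voxels = 1, seed voxel = -1, background
    # masked out) and unit speed inside the object, the travel time returned
    # by skfmm is effectively the geodesic distance from the seed measured
    # within the segmented volume.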

    # Fill in masked values and set to zero
    binary_travel_time = binary_travel_time.filled()
    binary_travel_time[binary_travel_time==1.e20] = 0
    print "minimum of binary travel time is %s" % np.min(binary_travel_time)

    sys.stdout.flush() 
    time.sleep(1)       
    # Find endpoints as local maxima of the binary travel time
    # (note: cluster graph weighting is not applied here; it did not seem to
    # help much, perhaps a better FMM implementation would change that)
    hold_binary_travel_time = binary_travel_time.copy()
    print "number of non-zero elements is %s" % (np.sum(B2))
    [endx, endy, endz] = detect_local_maxima(hold_binary_travel_time)
    print "number of local maxima was %s" % (len(endx))

    sys.stdout.flush() 
    time.sleep(1)       
    # Now perform second FMM, to create field for gradient descent
    dMap = morphology.distance_transform_edt(constant_speed) # Euclidean distance from each object voxel to the nearest background (0) voxel
    weighted_speed = dMap ** dmw
    weighted_travel_time = skfmm.travel_time(phi, weighted_speed)
    weighted_travel_time = weighted_travel_time.filled()
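    # Raising the Euclidean distance map to the power dmw makes the front
    # travel much faster near the middle of the object than near its walls,
    # so the minimum-cost paths traced back from the endpoints tend to hug
    # the medial axis; this is what turns the march-back into a centerline.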

    # Order endpoints by distance from start
    print "Min of weighted travel time: %s, max: %s" %(np.min(weighted_travel_time),np.max(weighted_travel_time))
    print "Number of initialized endpoints is %s" % len(endx)
    Euc = []
    for i in range (0,len(endx)):
        Euc.append(np.sqrt((endx[i]-sx)**2 + (endy[i] - sy)**2 + (endz[i] - sz)**2))

    order_indici = np.argsort(Euc) # returns indices to sort elements
    Euc = np.sort(Euc)

    X = []
    Y = []
    Z = []

    for i in range(0,len(order_indici)):
        if Euc[i] > mbtr: # Check whether endpoint is sufficiently far from root voxel (min_branch_to_root)
            X.append(endx[order_indici[i]])
            Y.append(endy[order_indici[i]])
            Z.append(endz[order_indici[i]])

    print "New root is at x: %s, y: %s, z: %s" %(sx+1,sy+1,sz+1)
    print "Number of endpoints after pruning is %s" % len(X)

    sys.stdout.flush() 
    time.sleep(1)       
    # Now implement the march-back method to build the centerline (on a
    # volume enlarged by one voxel of padding on every side).
    # Voxels of the branch currently being traced are marked with 2; once a
    # branch connects to the existing tree, its voxels are solidified to 1.
    skel = np.zeros(shape=(x_si+2,y_si+2,z_si+2), dtype = 'uint8')
    D = skel.copy() + 1.e20
    D[1:x_si+1,1:y_si+1,1:z_si+1] = weighted_travel_time
    counting = 1
    take_out = []
    number_loops = len(X)

    # Correct points for new size of volume
    start_x = sx + 1
    start_y = sy + 1
    start_z = sz + 1

    D[start_x,start_y,start_z] = 0
    skel[start_x,start_y,start_z] = 1 # initialize root

    # Begin extracting skeleton
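    # Each endpoint is traced back towards the root by repeatedly stepping to
    # the neighbouring voxel with the smallest weighted travel time D (a
    # discrete steepest descent on D); the 1-voxel padding added above keeps
    # the 3x3x3 neighbourhood lookups inside the array bounds.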
    for ijk in range(0,number_loops):

        # Initialize endpoints and correct for larger volume   
        i = X[ijk] + 1
        j = Y[ijk] + 1
        k = Z[ijk] + 1
        
        # Check whether endpoint in neighborhood of skeleton (whisker)
        if np.all(skel[i-1:i+2,j-1:j+2,k-1:k+2])!=1: 
       
            if D[i,j,k]!=1.e20:

                done_loop = 0               
                skel[skel>0] = 1                
                
                # Check whether branch is now connected to rest of tree (stopping criteria)
                while ((i!=start_x) or (j!=start_y) or (k!=start_z)) and done_loop!=1: # can probably just do done_loop part (or) doesn't make sense (tried just done loop, always went to 1.e20 for each branch)
                #while ((i!=start_x) and (j!=start_y) and (k!=start_z)) and done_loop!=1:
                    skel[i,j,k]=2               
                    d_neighborhood = D[i-1:i+2,j-1:j+2,k-1:k+2]                    
                    
                    if np.all(skel[i-1:i+2,j-1:j+2,k-1:k+2])!=1:        
                        
                        currentMin = 1.e21 # was 1.e20
                        # Find min in neighborhood
                        for ni in range(0,3):
                            for nj in range(0,3):
                                for nk in range(0,3):
                                    if (d_neighborhood[ni,nj,nk] < currentMin) and (np.all([ni,nj,nk])!=1) and (skel[i+ni-1,j+nj-1,k+nk-1]!=2):
                                        ii = ni
                                        jj = nj
                                        kk = nk
                                        currentMin = d_neighborhood[ni,nj,nk]

                        # Update                         
                        i = i + ii - 1
                        j = j + jj - 1
                        k = k + kk - 1
                        
                        #print ijk, i,j,k, D[i,j,k]
                        #sys.stdout.flush()
                        #time.sleep(0.05)

                        if D[i,j,k] == 1.e20:
                            done_loop = 1
                            skel[skel==2] = 0 #remove branch, not marching back to root (local min in weighted_travel_time)
                    else:
                        done_loop = 1
            
            print ijk
            sys.stdout.flush() 
            time.sleep(1)                 
        else:
            take_out.append(ijk)
            

    # Shift skel back to correspond with the cropped (un-padded) volume
    skel[skel==2] = 1
    centerline_extracted = skel[1:x_si+1,1:y_si+1,1:z_si+1]
    print "Number of centerline voxels is %s" %(np.sum(centerline_extracted))

    final_centerline = np.zeros(shape=(xOrig,yOrig,zOrig), dtype = 'uint8')
    final_centerline[np.min(x3):np.max(x3)+1,np.min(y3):np.max(y3)+1,np.min(z3):np.max(z3)+1] = centerline_extracted

    final_march = np.zeros(shape=(xOrig,yOrig,zOrig))
    final_march[np.min(x3):np.max(x3)+1,np.min(y3):np.max(y3)+1,np.min(z3):np.max(z3)+1] = weighted_travel_time

    return final_centerline, final_march
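
# Hypothetical usage sketch for kline_vessel (not from the original module):
# it builds a small synthetic tube and extracts its centerline. It assumes the
# helper functions used above (find_3D_object_voxel_list, detect_local_maxima)
# are available alongside kline_vessel.
def _kline_vessel_demo():
    import numpy as np
    vol = np.zeros((20, 20, 60), dtype='int8')
    vol[8:12, 8:12, 5:55] = 1          # a straight 4x4x50 voxel "vessel"
    start = [10, 10, 5]                # root voxel inside the vessel
    centerline, march_map = kline_vessel(vol, start,
                                         dist_map_weight=6,
                                         min_branch_to_root=10)
    print "Centerline voxels: %s" % np.sum(centerline)
    return centerline, march_map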