Example 1
def test127d_ok():
    pts = 2
    dims = 127
    data_pts = np.arange(pts * dims).reshape(pts, dims)
    kdtree = KDTree(data_pts)
    dist, idx = kdtree.query(data_pts)
    assert np.all(dist == 0)
Example 2
def match_cris_viirs(crisLos, crisPos, viirsPos, viirsMask):
    """
    Match crisLos with viirsPos using the method by Wang et al. (2016)
    Wang, L., D. A. Tremblay, B. Zhang, and Y. Han, 2016: Fast and Accurate 
      Collocation of the Visible Infrared Imaging Radiometer Suite 
      Measurements and Cross-track Infrared Sounder Measurements. 
      Remote Sensing, 8, 76; doi:10.3390/rs8010076.     
    """

    # Derive satellite position
    crisSat = crisPos - crisLos

    # using KD-tree to find best matched points

    # build kdtree to find match index
    pytree_los = KDTree(viirsPos.reshape(viirsPos.size // 3, 3))
    dist_los, idx_los = pytree_los.query(crisPos.reshape(crisPos.size // 3, 3),
                                         sqr_dists=False)

    my, mx = np.unravel_index(idx_los, viirsPos.shape[0:2])


    idy, idx = find_match_index(crisLos.reshape(crisLos.size // 3, 3),
                                crisSat.reshape(crisSat.size // 3, 3),
                                viirsPos, viirsMask, mx, my)

    idy = np.array(idy).reshape(crisLos.shape[:-1])
    idx = np.array(idx).reshape(crisLos.shape[:-1])

    return idy, idx
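
A minimal usage sketch (not from the source; the array shapes below are assumptions, and find_match_index must be available in the calling scope):

import numpy as np
from pykdtree.kdtree import KDTree

# Hypothetical shapes: CrIS arrays (nScan, nFov, 3), VIIRS arrays (nLine, nPixel, 3),
# all vectors expressed in a common ECEF frame.
crisLos = np.random.rand(4, 30, 3)
crisPos = np.random.rand(4, 30, 3) * 7e6
viirsPos = np.random.rand(768, 3200, 3) * 7e6
viirsMask = np.zeros(viirsPos.shape[:2], dtype=bool)
idy, idx = match_cris_viirs(crisLos, crisPos, viirsPos, viirsMask)
print(idy.shape, idx.shape)  # both (4, 30): one VIIRS pixel index pair per CrIS FOV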
Example 3
 def __init__(self, fields: Volume_T, max_dist: Number_T = 0.1):
     # Process data type
     if len({i.dtype for i in fields}) > 1:
         raise RadarCalculationError("All input data should have the same data type")
     self.dtype = fields[0].dtype
     # Process time
     t_arr = np.array([time.mktime(i.scantime.timetuple()) for i in fields])
     if (t_arr.max() - t_arr.min()) / 60 > 10:
         raise RadarCalculationError(
             "Time difference of input data should not exceed 10 minutes"
         )
     mean_time = t_arr.mean()
     mean_dtime = datetime.datetime(*time.localtime(int(mean_time))[:6])
     time_increment = 10
     time_rest = mean_dtime.minute % time_increment
     if time_rest > time_increment / 2:
         mean_dtime += datetime.timedelta(minutes=(time_increment - time_rest))
     else:
         mean_dtime -= datetime.timedelta(minutes=time_rest)
     self.scan_time = mean_dtime
     self.lon_ravel = np.hstack([i.lon.ravel() for i in fields])
     self.lat_ravel = np.hstack([i.lat.ravel() for i in fields])
     self.data_ravel = np.ma.hstack([i.data.ravel() for i in fields])
     self.dist_ravel = np.hstack(
         [np.broadcast_to(i.dist, i.lon.shape).ravel() for i in fields]
     )
     self.tree = KDTree(np.dstack((self.lon_ravel, self.lat_ravel))[0])
     self.md = max_dist
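
The rounding block above snaps the mean scan time to the nearest 10-minute mark; a standalone sketch of just that arithmetic (for illustration):

import datetime

def round_to_increment(dt, increment=10):
    """Snap dt to the nearest increment-minute mark."""
    rest = dt.minute % increment
    if rest > increment / 2:
        return dt + datetime.timedelta(minutes=increment - rest)
    return dt - datetime.timedelta(minutes=rest)

print(round_to_increment(datetime.datetime(2021, 6, 1, 8, 7)))  # 2021-06-01 08:10:00
print(round_to_increment(datetime.datetime(2021, 6, 1, 8, 3)))  # 2021-06-01 08:00:00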
Example 4
def profile(points, radius=4, leafsize=4):
    points = points[:1024]
    closest = closest_neighbors(KDTree(points))
    points *= 2 / closest
    # points += np.random.normal(scale=1e-2 * (2 / closest))
    tree = KDTree(points, leafsize=leafsize)
    k = np.arange(2, 40)
    times = []
    for k0 in tqdm(k):
        times.append(
            timeit(lambda: recursive_query_ball_point(tree, points, radius, k0),
                   number=20))
    _, mask = recursive_query_ball_point(tree, points, radius, 10)
    spt = spatial.cKDTree(points, leafsize=leafsize)  # pylint: disable=no-member
    t = timeit(lambda: spt.query_ball_tree(spt, radius), number=20)
    times = np.array(times) / t

    neighbors = np.count_nonzero(mask, axis=1)
    ax = plt.gca()
    ax.plot(k, times)
    # ax.plot([k[0], k[-1]], [t, t], linestyle='dashed')
    ax.plot([k[0], k[-1]], [0, 0], linestyle='dashed', color='k')
    ax.plot([k[0], k[-1]], [1, 1], linestyle='dashed', color='k')
    ax.set_xlabel('k')
    ax.set_ylabel('t')
    ax.set_title('mean = {}, max = {}'.format(np.mean(neighbors),
                                              mask.shape[1]))
    print(times[14] / np.min(times))
    # ax.set_yscale('log')
    plt.show()
Example 5
def test3d_8n():
    query_pts = np.array([[787014.438, -340616.906, 6313018.],
                          [751763.125, -59925.969, 6326205.5],
                          [769957.188, -202418.125, 6321069.5]])

    kdtree = KDTree(data_pts_real)
    dist, idx = kdtree.query(query_pts, k=8)

    exp_dist = np.array([
        [0.00000000e+00, 4.05250235e+03, 4.07389794e+03, 8.08201128e+03,
         8.17063009e+03, 1.20904577e+04, 1.22902057e+04, 1.60775136e+04],
        [1.73205081e+00, 2.70216896e+03, 2.71431274e+03, 5.39537066e+03,
         5.43793210e+03, 8.07855631e+03, 8.17119970e+03, 1.07513693e+04],
        [1.41424892e+02, 3.25500021e+03, 3.44284958e+03, 6.58019346e+03,
         6.81038455e+03, 9.89140135e+03, 1.01918659e+04, 1.31892516e+04]])

    exp_idx = np.array([[7, 8, 6, 9, 5, 10, 4, 11],
                        [93, 94, 92, 95, 91, 96, 90, 97],
                        [45, 46, 44, 47, 43, 48, 42, 49]])

    assert np.array_equal(idx, exp_idx)
    assert np.allclose(dist, exp_dist)
Example 7
 def _makeSparseness(self, features):
     """ Sparseness is the mean dist to K nearest neighbors. """
     feature_arr = np.array(features)
     tree = KDTree(np.vstack((np.array(self.archive), feature_arr)))
     dists, _ = tree.query(feature_arr, k=self.ns_K + 1)
     sparseness = np.mean(dists[:, 1:], axis=1)
     return sparseness
Example 8
 def __init__(self, fields: Volume_T, max_dist: Number_T = 0.1):
     # Process data type
     self.dtype = get_dtype(fields[0])
     # Process time
     t_arr = np.array(
         [time.mktime(i.scan_time.timetuple()) for i in fields])
     if (t_arr.max() - t_arr.min()) / 60 > 10:
         raise RadarCalculationError(
             "Time difference of input data should not exceed 10 minutes")
     mean_time = t_arr.mean()
     mean_dtime = datetime.datetime(*time.localtime(int(mean_time))[:6])
     time_increment = 10
     time_rest = mean_dtime.minute % time_increment
     if time_rest > time_increment / 2:
         mean_dtime += datetime.timedelta(minutes=(time_increment -
                                                   time_rest))
     else:
         mean_dtime -= datetime.timedelta(minutes=time_rest)
     self.scan_time = mean_dtime
     self.lon_ravel = np.hstack(
         [i["longitude"].values.ravel() for i in fields])
     self.lat_ravel = np.hstack(
         [i["latitude"].values.ravel() for i in fields])
     self.data_ravel = np.ma.hstack(
         [i[self.dtype].values.ravel() for i in fields])
     self.dist_ravel = np.hstack([
         np.broadcast_to(i["distance"], i["longitude"].shape).ravel()
         for i in fields
     ])
     self.tree = KDTree(np.dstack((self.lon_ravel, self.lat_ravel))[0])
     self.md = max_dist
     self.attr = fields[0].attrs.copy()
Example 9
def distance_p2p(pointcloud_pred, pointcloud_gt, normals_pred, normals_gt):
    ''' Computes minimal distances of each point in pointcloud_pred
    to pointcloud_gt.
    Args:
        pointcloud_pred (numpy array): predicted (source) points
        pointcloud_gt (numpy array): ground-truth (target) points
        normals_pred (numpy array): predicted (source) normals
        normals_gt (numpy array): ground-truth (target) normals
    '''
    kdtree = KDTree(pointcloud_gt)
    dist, idx = kdtree.query(pointcloud_pred)

    if normals_pred is None:
        return dist, None

    normals_pred = normals_pred / np.linalg.norm(
        normals_pred, axis=-1, keepdims=True)
    normals_gt = normals_gt / np.linalg.norm(
        normals_gt, axis=-1, keepdims=True)

    normals_dot_product = (normals_gt[idx] * normals_pred).sum(axis=-1)
    # Handle normals that point in the wrong direction gracefully
    # (mostly due to the method not caring about this during generation)
    normals_dot_product = np.abs(normals_dot_product)

    return dist, normals_dot_product
Example 10
def distance_p2p(points_src, normals_src, points_tgt, normals_tgt):
    """Computes minimal distances of each point in points_src to points_tgt.

    Arguments:
    ----------
        points_src (numpy array): source points
        normals_src (numpy array): source normals
        points_tgt (numpy array): target points
        normals_tgt (numpy array): target normals
    """
    kdtree = KDTree(points_tgt)
    dist, idx = kdtree.query(points_src)

    if normals_src is not None and normals_tgt is not None:
        normals_src = \
            normals_src / np.linalg.norm(normals_src, axis=-1, keepdims=True)
        normals_tgt = \
            normals_tgt / np.linalg.norm(normals_tgt, axis=-1, keepdims=True)

        normals_dot_product = (normals_tgt[idx] * normals_src).sum(axis=-1)
        # Handle normals that point in the wrong direction gracefully
        # (mostly due to the method not caring about this during generation)
        normals_dot_product = np.abs(normals_dot_product)
    else:
        normals_dot_product = np.array([np.nan] * points_src.shape[0],
                                       dtype=np.float32)
    return dist, normals_dot_product
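
A hedged usage sketch: a symmetric Chamfer-style distance built from two one-sided calls (synthetic clouds, names illustrative only):

import numpy as np

pred = np.random.rand(1000, 3)
gt = np.random.rand(1200, 3)
d_fwd, _ = distance_p2p(pred, None, gt, None)   # pred -> gt
d_bwd, _ = distance_p2p(gt, None, pred, None)   # gt -> pred
chamfer = d_fwd.mean() + d_bwd.mean()
print(chamfer)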
Example 11
def test3d_8n_ub_eps():
    query_pts = np.array([[787014.438, -340616.906, 6313018.],
                          [751763.125, -59925.969, 6326205.5],
                          [769957.188, -202418.125, 6321069.5]])

    kdtree = KDTree(data_pts_real)
    dist, idx = kdtree.query(query_pts,
                             k=8,
                             eps=0.1,
                             distance_upper_bound=10e3,
                             sqr_dists=False)

    exp_dist = np.array([
        [0.00000000e+00, 4.05250235e+03, 4.07389794e+03, 8.08201128e+03,
         8.17063009e+03, np.inf, np.inf, np.inf],
        [1.73205081e+00, 2.70216896e+03, 2.71431274e+03, 5.39537066e+03,
         5.43793210e+03, 8.07855631e+03, 8.17119970e+03, np.inf],
        [1.41424892e+02, 3.25500021e+03, 3.44284958e+03, 6.58019346e+03,
         6.81038455e+03, 9.89140135e+03, np.inf, np.inf]])
    n = 100
    exp_idx = np.array([[7, 8, 6, 9, 5, n, n, n],
                        [93, 94, 92, 95, 91, 96, 90, n],
                        [45, 46, 44, 47, 43, 48, n, n]])

    assert np.array_equal(idx, exp_idx)
    assert np.allclose(dist, exp_dist)
Example 12
    def populate_indices(self):
        """Pre-populate guesses of particle xi/yi indices using a kdtree.

        This is only intended for curvilinear grids, where the initial index search
        may be quite expensive.
        """

        if self.fieldset is None:
            # we need to be attached to a fieldset to have a valid
            # gridset to search for indices
            return

        if KDTree is None:
            return
        else:
            for i, grid in enumerate(self.fieldset.gridset.grids):
                if not isinstance(grid, CurvilinearGrid):
                    continue

                tree_data = np.stack((grid.lon.flat, grid.lat.flat), axis=-1)
                tree = KDTree(tree_data)
                # stack all the particle positions for a single query
                pts = np.stack((self._collection.data['lon'],
                                self._collection.data['lat']),
                               axis=-1)
                # query datatype needs to match tree datatype
                _, idx = tree.query(pts.astype(tree_data.dtype))
                yi, xi = np.unravel_index(idx, grid.lon.shape)

                self._collection.data['xi'][:, i] = xi
                self._collection.data['yi'][:, i] = yi
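
A minimal, self-contained demo (not parcels code) of the flat-index to (yi, xi) mapping used above:

import numpy as np
from pykdtree.kdtree import KDTree

lon, lat = np.meshgrid(np.linspace(0, 10, 5), np.linspace(40, 50, 4))
tree = KDTree(np.stack((lon.flat, lat.flat), axis=-1))
_, idx = tree.query(np.array([[7.4, 43.1]]))
yi, xi = np.unravel_index(idx, lon.shape)
print(yi[0], xi[0])  # row/column of the nearest grid node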
Example 13
def closest_node_idx_to_sample_idx(
    df,
    axes_limits,
    array_shape,
    verbose = False):
    """
    Purpose: To get the index of the closest node
    point to a coordinate in the sampling

    """
    limits_coords_by_axis = ctcu.axes_limits_coordinates(axes_limits,array_shape = array_shape)

    from pykdtree.kdtree import KDTree

    xi,yi,zi = np.meshgrid(*limits_coords_by_axis,indexing="ij")
    limits_coords = np.vstack([k.ravel() for k in [xi,yi,zi]]).T
    
#     if verbose:
#         print(f"limits_coords=\n{limits_coords}")

    limits_coords_kd = KDTree(df[["x","y","z"]].to_numpy())
    dist,closest_nodes = limits_coords_kd.query(limits_coords)
    if verbose:
        print(f"closest_nodes = {closest_nodes}")
        print(f"dist = {dist}")
    return closest_nodes
Example 14
    def fitness(self):
        pos = np.asarray(self.node_pos[self.n_nodes])
        kd_tree = KDTree(pos)
        dist, _ = kd_tree.query(pos, k=30)
        fitness = np.mean(dist)

        return fitness
Example 15
    def findInCloud(self, cloud, cloudNormals):
        sceneTree = KDTree(cloud)
        indexes = list(range(len(cloud)))

        r = self.Model.Radius
        iterations = 0
        while True:
            iterations += 1
            if iterations % 100 == 0:
                print(iterations)
            i = np.random.choice(indexes, 1)[0]
            p1 = cloud[i]
            n1 = cloudNormals[i]
            neighborIdx = [
                ii for ii in sceneTree.query(
                    p1.reshape((1, 3)), 50, distance_upper_bound=2 * r)[1][0]
                if ii < len(cloud) and ii != i
            ]
            j, k = np.random.choice(neighborIdx, 2, replace=False)

            p2, p3 = cloud[j], cloud[k]
            n2, n3 = cloudNormals[j], cloudNormals[k]

            if np.linalg.cond(np.column_stack((n1, n2, n3))) > 1e5:
                continue

            for pose in self.Model.getPose(np.column_stack((p1, p2, p3)),
                                           np.column_stack((n1, n2, n3))):
                yield pose

        return None
Example 16
def filt_stdev(coords, k=3, std_dev=2):

   kDTree = KDTree(coords, leafsize = 5)

   if pykdtree==1:
      dx, idx_knn = kDTree.query(coords[:, :], k = k)
   else:
      dx, idx_knn = kDTree.query(coords[:, :], k = k, n_jobs=-1)

   dx, idx_knn = dx[:,1:], idx_knn[:,1:]

   distances = np.sum(dx, axis=1)/(k - 1.0)
   valid_distances = np.shape(distances)[0]

   #Estimate the mean and the standard deviation of the distance vector
   dist_sum = np.sum(distances)
   sq_sum = np.sum(distances**2)

   mean = dist_sum / float(valid_distances)
   variance = (sq_sum - dist_sum * dist_sum / float(valid_distances)) / (float(valid_distances) - 1)
   stddev = np.sqrt(variance)

   # a distance that is bigger than this signals an outlier
   distance_threshold = mean + std_dev * stddev
   idx = np.nonzero(distances < distance_threshold)

   return idx, np.copy(coords[idx])
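
A usage sketch (assumes the module-level pykdtree flag used above is set to 1):

import numpy as np

pts = np.random.rand(5000, 3)
pts[::100] += 5.0  # inject some gross outliers
keep_idx, filtered = filt_stdev(pts, k=8, std_dev=2)
print(len(filtered), "of", len(pts), "points kept")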
Example 17
    def __init__(self, ncfile, latvarname, lonvarname):
        """Initialization function

        Arguments:
            latvar -- the netCDF latitude variable
            lonvar -- the netCDF longitude variable
        """
        self.latvar = ncfile.variables[latvarname]
        self.lonvar = ncfile.variables[lonvarname]

        self.time_var = utils.get_time_var(ncfile)

        self.kdt = _data_cache.get(ncfile.filepath())
        if self.kdt is None:
            rad_factor = pi / 180.0
            latvals = self.latvar[:] * rad_factor
            lonvals = self.lonvar[:] * rad_factor
            clat, clon = np.cos(latvals), np.cos(lonvals)
            slat, slon = np.sin(latvals), np.sin(lonvals)
            triples = np.array(
                [np.ravel(clat * clon),
                 np.ravel(clat * slon),
                 np.ravel(slat)]).transpose()

            self.kdt = KDTree(triples)
            _data_cache[ncfile.filepath()] = self.kdt

        self._shape = ncfile.variables[latvarname].shape
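
A hedged sketch of how one might query this cached tree for the grid index nearest a lat/lon pair, mirroring the unit-sphere conversion above (method name and placement are illustrative):

    def nearest_index(self, lat0, lon0):
        """Return the (unraveled) grid index closest to (lat0, lon0) in degrees."""
        rad_factor = pi / 180.0
        latr, lonr = lat0 * rad_factor, lon0 * rad_factor
        query = np.array([[np.cos(latr) * np.cos(lonr),
                           np.cos(latr) * np.sin(lonr),
                           np.sin(latr)]])
        _, flat_idx = self.kdt.query(query)
        return np.unravel_index(flat_idx[0], self._shape)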
Example 18
    def match_surface_precip(self, input_data):
        """
        Match surface precipitation from .sim file to points in xarray
        dataset.

        Args:
            input_data: xarray dataset containing the input data from
            the preprocessor.

        Return:
            The input dataset but with the surface_precip field added.
        """
        n_scans = input_data.scans.size

        dx = 40
        i_c = 110
        ix_start = i_c - dx // 2
        ix_end = i_c + 1 + dx // 2

        lats_1c = input_data["latitude"][:,
                                         ix_start:ix_end].data.reshape(-1, 1)
        lons_1c = input_data["longitude"][:,
                                          ix_start:ix_end].data.reshape(-1, 1)
        z = np.zeros_like(lats_1c)
        coords_1c = pyproj.transform(_LLA,
                                     _ECEF,
                                     lons_1c,
                                     lats_1c,
                                     z,
                                     radians=False)
        coords_1c = np.concatenate(coords_1c, axis=1)

        lats = self.data["latitude"].reshape(-1, 1)
        lons = self.data["longitude"].reshape(-1, 1)
        z = np.zeros_like(lats)
        coords_sim = pyproj.transform(_LLA,
                                      _ECEF,
                                      lons,
                                      lats,
                                      z,
                                      radians=False)
        coords_sim = np.concatenate(coords_sim, 1)

        kdtree = KDTree(coords_1c)
        dists, indices = kdtree.query(coords_sim)
        print(dists, indices)
        surface_precip = np.zeros(n_scans * (dx + 1))
        surface_precip[:] = np.nan
        surface_precip[indices] = self.data["surface_precip"]
        surface_precip = surface_precip.reshape(n_scans, dx + 1)

        surface_precip_full = np.zeros(input_data["latitude"].shape,
                                       dtype=np.float32)
        surface_precip_full[:] = np.nan
        surface_precip_full[:, ix_start:ix_end] = surface_precip

        input_data["surface_precip"] = (("scans", "pixels"),
                                        surface_precip_full)
        return input_data
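
Note that pyproj.transform() is deprecated in pyproj >= 2. Assuming _LLA/_ECEF stand for WGS84 geodetic and geocentric CRS definitions, a Transformer-based sketch of the same conversion would be:

from pyproj import Transformer

# EPSG:4979 = WGS84 3D geodetic, EPSG:4978 = WGS84 geocentric (ECEF)
lla_to_ecef = Transformer.from_crs("EPSG:4979", "EPSG:4978", always_xy=True)
x, y, z_out = lla_to_ecef.transform(lons_1c, lats_1c, z)
coords_1c = np.concatenate((x, y, z_out), axis=1)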
Example 19
def gll_2_exodus(gll_model,
                 exodus_model,
                 gll_order=4,
                 dimensions=3,
                 nelem_to_search=20,
                 parameters="TTI",
                 model_path="MODEL/data",
                 coordinates_path="MODEL/coordinates",
                 gradient=False):
    """
    Interpolate parameters from gll file to exodus model. This will mostly be
    used to interpolate gradients to begin with.
    :param gll_model: path to gll_model
    :param exodus_model: path_to_exodus_model
    :param parameters: Currently not used but will be fixed later
    """
    with h5py.File(gll_model, 'r') as gll_model:
        gll_points = np.array(gll_model[coordinates_path][:], dtype=np.float64)
        gll_data = gll_model[model_path][:]
        params = gll_model[model_path].attrs.get(
            "DIMENSION_LABELS")[1].decode()
        parameters = params[2:-2].replace(" ", "").split("|")

    centroids = _find_gll_centroids(gll_points, dimensions)
    print("centroids", np.shape(centroids))
    # Build a KDTree of the centroids to look for nearest elements
    print("Building KDTree")
    centroid_tree = KDTree(centroids)

    print("Read in mesh")
    exodus = Exodus(exodus_model, mode="a")
    # Find nearest elements
    print("Querying the KDTree")
    print(exodus.points.shape)
    # if exodus.points.shape[1] == 3:
    #     exodus.points = exodus.points[:, :-1]
    _, nearest_element_indices = centroid_tree.query(exodus.points,
                                                     k=nelem_to_search)
    npoints = exodus.npoint
    # parameters = utils.pick_parameters(parameters)
    values = np.zeros(shape=[npoints, len(parameters)])
    print(parameters)

    for s, point in enumerate(exodus.points):
        if s == 0 or (s + 1) % 1000 == 0:
            print(f"Now I'm looking at point number:"
                  f"{s+1}{len(exodus.points)}")
        element, ref_coord = _check_if_inside_element(
            gll_points, nearest_element_indices[s, :], point, dimensions)

        coeffs = get_coefficients(4, 4, 0, ref_coord, dimensions)
        values[s, :] = np.sum(gll_data[element, :, :] * coeffs, axis=1)
    for i, param in enumerate(parameters):
        exodus.attach_field(param, np.zeros_like(values[:, i]))
        exodus.attach_field(param, values[:, i])
Example 20
 def __init__(
     self, data: np.ndarray, x: np.ndarray, y: np.ndarray, roi: Number_T = 0.02
 ):
     x_ravel = x.ravel()
     y_ravel = y.ravel()
     self.tree = KDTree(np.dstack((x_ravel, y_ravel))[0])
     self.data = data
     self.roi = roi
Example 21
def gradient_2_cartesian_exodus(gradient, cartesian, params, first=False):
    """
    Interpolate gradient from 2D smoothiesem and sum on top of
    2D cartesian mesh. Using gll would be ideal but this function
    is now only with exodus.
    :param gradient: path to cartesian mesh to interpolate from
    :param cartesian: path to smoothiesem mesh to interpolate to
    :param params: list of parameters to interpolate
    :param first: If this is the first gradient, it will overwrite fields
    :return: Gradient interpolated to a cartesian mesh.
    """
    lib = load_lib()

    exodus_a = Exodus(gradient, mode="a")
    print(exodus_a.points)
    print(f"Exodus shape: {exodus_a.points.shape}")
    points = np.array(exodus_a.points, dtype=np.float64)
    exodus_a.points = points

    a_centroids = exodus_a.get_element_centroid()

    # The trilinear interpolator only works in 3D so we fool it to think
    # we are working in 3D
    a_centroids = np.concatenate(
        (a_centroids, np.zeros(shape=(a_centroids.shape[0], 1))), axis=1)

    centroid_tree = KDTree(a_centroids)

    nelem_to_search = 20
    exodus_b = Exodus(cartesian, mode="a")

    _, nearest_element_indices = centroid_tree.query(exodus_b.points,
                                                     k=nelem_to_search)
    nearest_element_indices = np.array(nearest_element_indices, dtype=np.int64)

    npoints = exodus_b.npoint
    enclosing_element_node_indices = np.zeros((npoints, 4), dtype=np.int64)
    weights = np.zeros((npoints, 4))
    connectivity = exodus_a.connectivity[:, :]
    nfailed = lib.triLinearInterpolator(
        nelem_to_search, npoints, nearest_element_indices,
        np.ascontiguousarray(connectivity,
                             dtype=np.int64), enclosing_element_node_indices,
        np.ascontiguousarray(exodus_a.points), weights,
        np.ascontiguousarray(exodus_b.points))

    assert nfailed == 0, f"{nfailed} points could not be interpolated"

    for param in params:
        param_a = exodus_a.get_nodal_field(param)
        values = np.sum(param_a[enclosing_element_node_indices] * weights,
                        axis=1)
        if not first:
            param_b = exodus_b.get_nodal_field(
                param)  # Get pre-existing gradient
            values += param_b  # Add new gradient on top of old one
        exodus_b.attach_field(param, np.zeros_like(values))
        exodus_b.attach_field(param, values)
Example 22
def test1d():

    data_pts = np.arange(1000)
    kdtree = KDTree(data_pts, leafsize=15)
    query_pts = np.arange(400, 300, -10)
    dist, idx = kdtree.query(query_pts)
    assert idx[0] == 400
    assert dist[0] == 0
    assert idx[1] == 390
Example 24
def test1d_all_masked():
    data_pts = np.arange(1000)
    np.random.shuffle(data_pts)
    kdtree = KDTree(data_pts, leafsize=15)
    query_pts = np.arange(400, 300, -10)
    query_mask = np.ones(data_pts.shape[0]).astype(bool)
    dist, idx = kdtree.query(query_pts, mask=query_mask)
    # all invalid (np.all over a generator is always truthy; test the arrays)
    assert np.all(idx >= 1000)
    assert np.all(dist >= 1001)
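
For reference, the mask flags data points to exclude from the search; a minimal sketch of the semantics this test relies on:

import numpy as np
from pykdtree.kdtree import KDTree

data = np.arange(10.0)
tree = KDTree(data)
mask = np.ones(10, dtype=bool)  # flag every data point as invalid
dist, idx = tree.query(np.array([4.5]), mask=mask)
assert idx[0] >= 10  # out-of-range index marks "no neighbour found"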
Example 25
def test3d_float32_mismatch():

    #7, 93, 45
    query_pts = np.array([[787014.438, -340616.906, 6313018.],
                          [751763.125, -59925.969, 6326205.5],
                          [769957.188, -202418.125, 6321069.5]],
                         dtype=np.float32)

    kdtree = KDTree(data_pts_real)
    dist, idx = kdtree.query(query_pts, sqr_dists=True)
Example 28
def combine_meshes(meshes, max_dist='auto'):
    """Try combining (partially overlapping) meshes.

    This function effectively works on the vertex graph and will not produce
    meaningful faces.
    """
    # Sort meshes by size
    meshes = sorted(meshes, key=lambda x: len(x.vertices), reverse=True)

    comb = tm.Trimesh(meshes[0].vertices.copy(), meshes[0].faces.copy())
    comb.remove_unreferenced_vertices()

    if max_dist == 'auto':
        max_dist = comb.edges_unique_length.mean()

    for m in config.tqdm(meshes[1:], desc='Combining',
                         disable=config.pbar_hide,
                         leave=config.pbar_leave):
        # Generate a new up-to-date tree
        tree = KDTree(comb.vertices)

        # Offset faces
        vertex_offset = comb.vertices.shape[0]
        new_faces = m.faces + vertex_offset

        # Find vertices that can be merged - note that we are effectively
        # zipping the two meshes by making sure that each vertex can only be
        # merged once
        dist, ix = tree.query(m.vertices, distance_upper_bound=max_dist)

        merged = set()
        # Merge closest vertices first
        for i in np.argsort(dist):
            # Skip if no more within-distance
            if dist[i] >= np.inf:
                break
            # Skip if target vertex has already been merged
            if ix[i] in merged:
                continue

            # Remap this vertex
            new_faces[new_faces == (i + vertex_offset)] = ix[i]

            # Track that target vertex has already been seen
            merged.add(ix[i])

        # Merge vertices and faces
        comb.vertices = np.append(comb.vertices, m.vertices, axis=0)
        comb.faces = np.append(comb.faces, new_faces, axis=0)

        # Drop unreferenced vertices (i.e. those that were remapped)
        comb.remove_unreferenced_vertices()

    return comb
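
A hedged usage sketch with two overlapping boxes (tm is assumed to be trimesh, as in the snippet):

import trimesh as tm

a = tm.creation.box(extents=(2, 1, 1))
b = tm.creation.box(extents=(2, 1, 1))
b.apply_translation((1.0, 0, 0))  # overlaps half of `a`
merged = combine_meshes([a, b], max_dist='auto')
print(len(a.vertices) + len(b.vertices), '->', len(merged.vertices))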
Example 29
def print_stationtable(sd, mode):
    """Print a table of 'mode' stations."""
    import numpy as np
    from pykdtree.kdtree import KDTree
    cols = 5
    header = '{| class="wikitable"'
    subheader = """|-
! HSL name
! OSM name
! mode
! type
! delta"""
    footer = "|}"

    ost = sd["ostat"]
    pst = sd["pstat"]  # data from provider

    stations = pst[mode]
    stations.sort(key=keykey('name'))
    ostats = ost[mode]
    kd_tree = KDTree(np.array([e["x:latlon"] for e in ostats]))

    linecounter = 0
    statcounter = 0
    probcounter = 0
    wr(header)
    wr(subheader)
    for ps in stations:
        if linecounter > 19:
            wr(subheader)
            linecounter = 0
        darr, iarr = kd_tree.query(np.array(ps["latlon"], ndmin=2))
        oind = iarr[0]
        os = ostats[oind]
        nlines, isok = print_stationline(os, ps, cols)
        #linecounter += nlines
        linecounter += 1  # Makes diffs more stable
        statcounter += 1
        probcounter += 0 if isok else 1
    wr(footer)
    wr("")
    wr("{} stations.\n".format(statcounter))
    wr("{} stations with differences.\n".format(probcounter))
    not_in = [s for s in ostats if "x:matched" not in s]
    if not_in:
        not_in.sort(key=keykey("name"))
        wr("'''{} stations not in HSL'''\n".format(mode.capitalize()))
        sgen = ("[{} {}]".format(osm.obj2url(s),
                                 s.get("name", "<no name in OSM>"))
                for s in not_in)
        wr(" {}\n".format(", ".join(sgen)))
Example 30
    def _xyz2lonlat(self):

        lons = np.arctan2(self.y / self.radius, self.x / self.radius)
        lats = np.arcsin(self.z / self.radius)

        # Convert spherical mesh longitudes and latitudes to degrees
        self.lonlat = np.empty((self.nbPts, 2))
        self.lonlat[:, 0] = np.mod(np.degrees(lons) + 180.0, 360.0)[:, 0]
        self.lonlat[:, 1] = np.mod(np.degrees(lats) + 90, 180.0)[:, 0]

        self.tree = KDTree(self.lonlat, leafsize=10)

        return
Example 31
def test3d_float32_mismatch2():

    #7, 93, 45
    query_pts = np.array([[787014.438, -340616.906, 6313018.],
                          [751763.125, -59925.969, 6326205.5],
                          [769957.188, -202418.125, 6321069.5]])

    kdtree = KDTree(data_pts_real.astype(np.float32))
    try:
        dist, idx = kdtree.query(query_pts, sqr_dists=True)
        assert False, "expected TypeError for query dtype mismatch"
    except TypeError:
        pass
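
The mismatch above is resolved by casting the query points to the tree's dtype, e.g.:

dist, idx = kdtree.query(query_pts.astype(np.float32), sqr_dists=True)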
Example 32
def PCA_normal(input_array):
    kd_tree = KDTree(input_array)
    neighbours = kd_tree.query(input_array, 10 + 1)[1]
    neighbours = input_array[neighbours]

    with Pool() as p:
        normals = p.map(compute_normal, neighbours)

    datadict = {}
    datadict['coords'] = input_array
    datadict['normals'] = np.array(normals, dtype=np.float32)

    return datadict
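
compute_normal itself is not shown in this snippet; a common PCA-based sketch (an assumption, not necessarily the original implementation):

import numpy as np

def compute_normal(neighborhood):
    """Normal = eigenvector for the smallest eigenvalue of the local covariance."""
    centered = neighborhood - neighborhood.mean(axis=0)
    eigvals, eigvecs = np.linalg.eigh(centered.T @ centered)  # ascending order
    return eigvecs[:, 0]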
Example 34
    def __init__(self, filename):
        """
        Open CSU binary file containing GPROF retrieval data.

        Args:
            filename(``pathlib.Path``): The file to open.
        """
        self.filename = filename
        self.header = np.memmap(filename,
                                dtype=HEADER_TYPES_GPROF,
                                mode="r",
                                shape=(1,))
        self.n_scans = self.header["num_scans"][0]
        self.n_pixels = self.header["num_pixels"][0]
        self.n_species = self.header["num_species"][0]
        self.n_layers = self.header["num_layers"][0]
        self.n_temps = self.header["num_temps"][0]
        self.n_profiles = self.header["num_profiles"][0]

        self.pixel_types = get_pixel_types_gprof(self.n_species)
        self.header_size = sum(np.dtype(t[1]).itemsize
                               for t in HEADER_TYPES_GPROF)

        self.profile_info_types = get_profile_types_gprof(self.n_species,
                                                          self.n_temps,
                                                          self.n_layers,
                                                          self.n_profiles)
        with open(filename, "rb") as file:
            self.data = file.read()
        self.profile_info = np.frombuffer(self.data,
                                          self.profile_info_types,
                                          offset=self.header_size,
                                          count=1)

        self.scan_header_size = np.dtype(SCAN_HEADER_TYPES_GPROF).itemsize
        self.pixel_size = np.dtype(self.pixel_types).itemsize
        self.scan_size = (int(self.scan_header_size)
                          + self.n_pixels * int(self.pixel_size))
        self.profile_info_size = np.dtype(self.profile_info_types).itemsize
        self.total_size = int(self.header_size) + self.n_scans * self.scan_size

        lats = np.zeros((self.n_scans, self.n_pixels))
        lons = np.zeros((self.n_scans, self.n_pixels))

        for i in range(self.n_scans):
            for j in range(self.n_pixels):
                lats[i, j] = self[i, j]["latitude"]
                lons[i, j] = self[i, j]["longitude"]
        coords = to_euclidean(lats.ravel(), lons.ravel())
        self.tree = KDTree(coords)
Example 36
def compute_lfs(datadict, k=10):
    from pykdtree.kdtree import KDTree

    # collect all ma_coords that are not NaN
    ma_coords = np.concatenate(
        [datadict['ma_coords_in'], datadict['ma_coords_out']])
    ma_coords = ma_coords[~np.isnan(ma_coords).any(axis=1)]

    # build kd-tree of ma_coords to compute lfs
    pykdtree = KDTree(ma_coords)
    if k > 1:
        datadict['lfs'] = np.sqrt(
            np.median(pykdtree.query(datadict['coords'], k)[0], axis=1))
    else:
        datadict['lfs'] = np.sqrt(pykdtree.query(datadict['coords'], k)[0])
Example 37
    def __init__(self, ncfile, latvarname, lonvarname):
        """Initialization function

        Arguments:
            latvar -- the netCDF latitude variable
            lonvar -- the netCDF longitude variable
        """
        self.latvar = ncfile.variables[latvarname]
        self.lonvar = ncfile.variables[lonvarname]

        self.time_var = utils.get_time_var(ncfile)

        self.kdt = _data_cache.get(ncfile.filepath())
        if self.kdt is None:
            rad_factor = pi / 180.0
            latvals = self.latvar[:] * rad_factor
            lonvals = self.lonvar[:] * rad_factor
            clat, clon = np.cos(latvals), np.cos(lonvals)
            slat, slon = np.sin(latvals), np.sin(lonvals)
            triples = np.array(list(zip(np.ravel(clat * clon),
                                        np.ravel(clat * slon),
                                        np.ravel(slat))))

            self.kdt = KDTree(triples)
            _data_cache[ncfile.filepath()] = self.kdt

        self._shape = ncfile.variables[latvarname].shape
Example 38
def test1d_mask():
    data_pts = np.arange(1000)
    # put the input locations in random order
    np.random.shuffle(data_pts)
    bad_idx = np.nonzero(data_pts == 400)
    nearest_idx_1 = np.nonzero(data_pts == 399)
    nearest_idx_2 = np.nonzero(data_pts == 390)
    kdtree = KDTree(data_pts, leafsize=15)
    # shift the query points just a little bit for known neighbors
    # we want 399 as a result, not 401, when we query for ~400
    query_pts = np.arange(399.9, 299.9, -10)
    query_mask = np.zeros(data_pts.shape[0]).astype(bool)
    query_mask[bad_idx] = True
    dist, idx = kdtree.query(query_pts, mask=query_mask)
    assert idx[0] == nearest_idx_1  # 399, would be 400 if no mask
    assert np.isclose(dist[0], 0.9)
    assert idx[1] == nearest_idx_2  # 390
    assert np.isclose(dist[1], 0.1)
Example 39
def test3d_float32():

    #7, 93, 45
    query_pts = np.array([[787014.438, -340616.906, 6313018.],
                          [751763.125, -59925.969, 6326205.5],
                          [769957.188, -202418.125, 6321069.5]],
                         dtype=np.float32)

    kdtree = KDTree(data_pts_real.astype(np.float32))
    dist, idx = kdtree.query(query_pts, sqr_dists=True)
    epsilon = 1e-5
    assert idx[0] == 7
    assert idx[1] == 93
    assert idx[2] == 45
    assert dist[0] == 0
    assert abs(dist[1] - 3.) < epsilon * dist[1]
    assert abs(dist[2] - 20001.) < epsilon * dist[2]
    assert kdtree.data_pts.dtype == np.float32
Example 40
def test3d_mask():
    #7, 93, 45
    query_pts = np.array([[787014.438, -340616.906, 6313018.],
                          [751763.125, -59925.969, 6326205.5],
                          [769957.188, -202418.125, 6321069.5]])

    kdtree = KDTree(data_pts_real)
    query_mask = np.zeros(data_pts_real.shape[0], dtype=bool)
    query_mask[6:10] = True
    dist, idx = kdtree.query(query_pts, sqr_dists=True, mask=query_mask)

    epsilon = 1e-5
    assert idx[0] == 5  # would be 7 if no mask
    assert idx[1] == 93
    assert idx[2] == 45
    # would be 0 if no mask
    assert abs(dist[0] - 66759196.1053) < epsilon * dist[0]
    assert abs(dist[1] - 3.) < epsilon * dist[1]
    assert abs(dist[2] - 20001.) < epsilon * dist[2]
Example 41
def test3d_large_query():
    # Target idxs: 7, 93, 45
    query_pts = np.array([[787014.438, -340616.906, 6313018.],
                          [751763.125, -59925.969, 6326205.5],
                          [769957.188, -202418.125, 6321069.5]])

    # Repeat the same points multiple times to get 60000 query points
    n = 20000
    query_pts = np.repeat(query_pts, n, axis=0)

    kdtree = KDTree(data_pts_real)
    dist, idx = kdtree.query(query_pts, sqr_dists=True)

    epsilon = 1e-5
    assert np.all(idx[:n] == 7)
    assert np.all(idx[n:2*n] == 93)
    assert np.all(idx[2*n:] == 45)
    assert np.all(dist[:n] == 0)
    assert np.all(abs(dist[n:2*n] - 3.) < epsilon * dist[n:2*n])
    assert np.all(abs(dist[2*n:] - 20001.) < epsilon * dist[2*n:])
Example 44
def main(args):
    if args.infile.endswith('ply'):
        from masbpy import io_ply
        datadict = io_ply.read_ply(args.infile)
    elif args.infile.endswith('las'):
        from masbpy import io_las
        datadict = io_las.read_las(args.infile)
    elif args.infile.endswith('npy'):
        datadict = io_npy.read_npy(args.infile, ['coords'])

    kd_tree = KDTree(datadict['coords'])
    neighbours = kd_tree.query(datadict['coords'], args.k + 1)[1]
    neighbours = datadict['coords'][neighbours]

    p = Pool()
    t1 = time()
    normals = p.map(compute_normal, neighbours)
    t2 = time()
    print("finished normal computation in {} s".format(t2 - t1))

    datadict['normals'] = np.array(normals, dtype=np.float32)

    io_npy.write_npy(args.outfile, datadict)
Example 46
class MASB(object):

    def __init__(self, datadict, max_r, denoise=None, denoise_delta=None, detect_planar=None):

        self.manager = Manager()
        self.D = datadict # dict of numpy arrays
        self.m, self.n = datadict['coords'].shape

        if datadict.has_key('coords_in_buffer'):
            self.kd_tree = KDTree(concatenate([self.D['coords'], self.D['coords_in_buffer']]))
        else:
            self.kd_tree = KDTree(self.D['coords'])

        self.SuperR = max_r
        self.denoise = denoise
        self.denoise_delta = denoise_delta
        self.detect_planar = detect_planar
    
    def compute_sp(self):
        from queue import Queue
        queue = Queue()
        datalen = len(self.D['coords'])
        self(queue,0,datalen, True, False)
        self(queue,0,datalen, False, False)
        return queue.get() + queue.get()

    def compute_balls(self, num_processes=cpu_count()):
        datalen = len(self.D['coords'])
        
        n = num_processes // 2  # we are spawning 2 processes (inner and outer ma) per n
        batchsize = datalen // n
        chunk_bounds = []
        end = 0
        for i in range(n - 1):
            start = end
            end = (i + 1) * batchsize - 1
            chunk_bounds.append((start, end))
        start, end = end, datalen
        chunk_bounds.append((start, end))

        print("chunking at:", chunk_bounds)

        jobs = []
        queue = self.manager.Queue()

        t1 = time()
        for s,e in chunk_bounds:
            p1 = Process(target=self, args=(queue,s,e, True, False))
            p1.start()
            jobs.append(p1)

            p2 = Process(target=self, args=(queue,s,e, False, False))
            p2.start()
            jobs.append(p2)
        
        result = []
        for j in jobs:
            j.join()
            res = queue.get()
            result.append(res)

        t2 = time()

        print "Finished ma computation in {} s".format(t2-t1)

        result.sort(key=lambda item: (item[1], item[0]))

        self.D['ma_coords_out'] = concatenate([ma_coords for start, inner, ma_coords, ma_f2 in result[:n] ])
        # self.D['ma_radii_out'] = concatenate([ma_radii for start, inner, ma_coords, ma_radii, ma_f2, shrinkhist in result[:n] ])
        self.D['ma_f2_out'] = concatenate([ma_f2 for start, inner, ma_coords, ma_f2 in result[:n] ])
        # self.D['ma_shrinkhist_out'] = list(chain(*[shrinkhist for start, inner, ma_coords, ma_radii, ma_f2, shrinkhist in result[:n] ]))
        
        self.D['ma_coords_in'] = concatenate([ma_coords for start, inner, ma_coords, ma_f2 in result[n:] ])
        # self.D['ma_radii_in'] = concatenate([ma_radii for start, inner, ma_coords, ma_radii, ma_f2, shrinkhist in result[n:] ])
        self.D['ma_f2_in'] = concatenate([ma_f2 for start, inner, ma_coords, ma_f2 in result[n:] ])
        # self.D['ma_shrinkhist_in'] = list(chain(*[shrinkhist for start, inner, ma_coords, ma_radii, ma_f2, shrinkhist in result[n:] ]))
        
        print "Finished datamerging in {} s".format(time()-t2)

    # @autojit
    # @profile
    def __call__(self, queue, start, end, inner=True, verbose=False):
        """Balls shrinking algorithm. Set `inner` to False when outer balls are wanted."""
        print('processing', start, end, "inner:", inner)  # , hex(id(self.kd_tree)), hex(id(self.D))
        m = end-start
        ma_coords = zeros((m, self.n), dtype=np.float32)
        ma_coords[:] = nan
        # ma_radii = zeros(m)
        # ma_radii[:] = nan
        ma_f2 = zeros(m, dtype=np.uint32)  # uint32 cannot hold NaN; entries default to 0
        if self.denoise is not None:
            self.denoise = (math.pi/180)*self.denoise
        if self.denoise_delta is not None:
            self.denoise_delta = (math.pi/180)*self.denoise_delta
        if self.detect_planar is not None:
            self.detect_planar = (math.pi/180)*self.detect_planar
        # q_history_list = []
        ZeroDivisionError_cnt = 0
        for i, pi in enumerate(range(start, end)):
            p, n = self.D['coords'][pi], self.D['normals'][pi]
            # print "for", p, n

            if not inner:
                n = -n
                        
            # use the previous point as initial estimate for q
            q=p
            # but, when approximating 1st point initialize q with random point not equal to p
            # if i==0:
            #     while equal(q,p):
            #         random_index = int(rand(1)*self.D['coords'].shape[0])
            #         q = self.D['coords'][random_index]
            #     r = compute_radius(p,n,q)
            # forget optimization of r:
            r=self.SuperR
            
            if verbose: print('initial r: ' + str(r))

            r_ = None
            c = None
            j = -1
            q_i = None
            q_history = []
            while True:
                j+=1
                # initialize r on last found radius
                if j>0:
                    r = r_
                elif j==0 and i>0:
                    r = r

                # compute ball center
                c = p - n*r

                q_i_previous = q_i

                # find closest point to c and assign to q
                dists, results = self.kd_tree.query(array([c]), k=2)
                candidate_c = self.D['coords'][results]
                q = candidate_c[0][0]
                q_i = results[0][0]

                # What to do if closest point is p itself?
                if equal(q,p):
                    # 1) if r==SuperR, apparently no other points on the halfspace spanned by -n => that's an infinite ball
                    if r == self.SuperR: break
                    # 2) otherwise just pick the second closest point
                    else: 
                        q = candidate_c[0][1]
                        q_i = results[0][1]
                
                # q_history.append(q_i)
                # compute new candidate radius r_
                try:
                    r_ = compute_radius(p,n,q)
                except ZeroDivisionError:
                    ZeroDivisionError_cnt += 1
                    r_ = self.SuperR+1

                # if r_ < 0 closest point was on the wrong side of plane with normal n => start over with SuperRadius on the right side of that plane
                if r_ < 0.: r_ = self.SuperR
                # if r_ > SuperR, stop now because otherwise in case of planar surface point configuration, we end up in an infinite loop
                elif r_ > self.SuperR:
                    r_ = self.SuperR
                    break
                if verbose: print('current ball: ' + str(i) + ' - ' + str(r_))

                c_ = p - n*r_
                if self.denoise is not None:
                    if math.acos(cos_angle(p-c, q-c)) < self.denoise and j>0 and r_>norm(q-p):
                        r_=r
                        break

                if self.denoise_delta is not None and j>0:
                    theta_now = math.acos(cos_angle(p-c_, q-c_))
                    q_previous = self.D['coords'][q_i_previous]
                    theta_prev = math.acos(cos_angle(p-c_, q_previous-c_))
                    
                    if theta_prev-theta_now > self.denoise_delta and r_>norm(q-p):
                        # keep previous radius:
                        r_=r
                        q_i = q_i_previous
                        break

                if self.detect_planar is not None:
                    if math.acos( cos_angle(q-p, -n) ) > self.detect_planar:
                        r_= self.SuperR
                        break

                # stop iteration if r has converged
                if abs(r - r_) < 0.01:
                    break

                # stop iteration if this looks like an infinite loop:
                if j > 30:
                    if verbose: print "breaking possible infinite loop at j=30"
                    break

            if r_ is None or r_ >= self.SuperR:
                pass
            else:
                # ma_radii[i] = r_
                ma_coords[i] = c
                ma_f2[i] = q_i
            # q_history_list.append(q_history[:-1])

        result = ( start, inner, ma_coords, ma_f2 )
        queue.put( result )

        print('{} ZeroDivisionErrors'.format(ZeroDivisionError_cnt))
        print("done!", start, inner, "len:", ma_coords.shape)
Example 47
class MASB(object):

    def __init__(self, datadict, max_r, denoise_absmin=None, denoise_delta=None, denoise_min=None, detect_planar=None):
        self.D = datadict # dict of numpy arrays

        self.pykdtree = KDTree(self.D['coords'])

        self.m, self.n = datadict['coords'].shape
        self.D['ma_coords_in'] = np.empty( (self.m,self.n), dtype=np.float32 )
        self.D['ma_coords_in'][:] = np.nan
        self.D['ma_coords_out'] = np.empty( (self.m,self.n), dtype=np.float32 )
        self.D['ma_coords_out'][:] = np.nan
        self.D['ma_q_in'] = np.zeros( (self.m), dtype=np.uint32 )   # uint32 cannot hold NaN; 0 marks unset
        self.D['ma_q_out'] = np.zeros( (self.m), dtype=np.uint32 )

        self.SuperR = max_r
        self.delta_convergence = 0.001
        self.iteration_limit = 30

        if denoise_absmin is None:
            self.denoise_absmin = None
        else:
            self.denoise_absmin = (math.pi/180)*denoise_absmin
        if denoise_delta is None:
            self.denoise_delta = None
        else:
            self.denoise_delta = (math.pi/180)*denoise_delta
        if denoise_min is None:
            self.denoise_min = None
        else:
            self.denoise_min = (math.pi/180)*denoise_min

        if detect_planar is None:
            self.detect_planar = None
        else:
            self.detect_planar = (math.pi/180)*detect_planar

    def compute_balls(self):
        for inner in [True, False]:
            self.compute_balls_oneside(inner)

    def compute_balls_oneside(self, inner=True):
        """Balls shrinking algorithm. Set `inner` to False when outer balls are wanted."""

        # iterate over all point-normal pairs
        for p_i in range(self.m):
            p, n = self.D['coords'][p_i], self.D['normals'][p_i]
            #-- p is the point along whose normal n we are shrinking a ball, its index is p_i
            
            if not inner:
                n = -n
            
            # initialize some helper variables:
            #-- q will represent the second point that defines a ball together with p and n
            q = None 
            #-- q_i is the index of q
            q_i = None
            #-- r represents the ball radius found in the current iteration (i.e. of the while loop below)
            r = None
            #-- r_previous represents the ball radius found in the previous iteration
            r_previous=self.SuperR
            #-- c is the ball's center point in the current iteration
            c = None
            #-- c_previous is the ball's center point in the previous iteration
            c_previous = None
            #-- j counts the iterations
            j = -1
            
            while True:
                # increment iteration counter
                j+=1
                # set r to last found radius if this isn't the first iteration
                if j>0:
                    r_previous = r

                # compute ball center
                c = p - n*r_previous
                
                # keep track of this for denoising purpose
                q_i_previous = q_i


                ### FINDING NEAREST NEIGHBOR OF c

                # find closest point to c and assign to q

                dists, indices = self.pykdtree.query(np.array([c]), k=2)

                try:
                    candidate_c = self.D['coords'][indices]
                except IndexError as detail:
                    print(detail, indices, dists)
                    import pdb; pdb.set_trace()
                    raise

                q = candidate_c[0][0]
                q_i = indices[0][0]
                
                # What to do if closest point is p itself?
                if equal(q,p):
                    # 1) if r_previous==SuperR, apparently no other points on the halfspace spanned by -n => that's an infinite ball
                    if r_previous == self.SuperR: 
                        r = r_previous
                        break
                    # 2) otherwise just pick the second closest point
                    else: 
                        q = candidate_c[0][1]
                        q_i = indices[0][1]

                ### END FINDING NEAREST NEIGHBOR OF c
                # compute new candidate radius r
                try:
                    r = compute_radius(p,n,q)
                except ZeroDivisionError:
                    # this happens on some rare occasions, we'll just skip the point
                    # print 'ZeroDivisionError: excepting p: {} with n={} and q={}'.format(p,n,q)
                    break


                ### EXCEPTIONAL CASES

                # if r < 0 closest point was on the wrong side of plane with normal n => start over with SuperRadius on the right side of that plane
                if r < 0: 
                    r = self.SuperR
                # if r > SuperR, stop now because otherwise in case of planar surface point configuration, we end up in an infinite loop
                elif r > self.SuperR:
                    r = self.SuperR
                    break

                ### END EXCEPTIONAL CASES


                ### DENOISING STUFF
                # i.e. terminate iteration early if certain conditions are satisfied based on (history of) ball metrics

                c_ = p - n*r
                # this seems to work well against noisy ma points.
                if self.denoise_absmin is not None:
                    if math.acos(cos_angle(p-c_, q-c_)) < self.denoise_absmin and j>0 and r>norm(q-p):
                        # keep previous radius:
                        r=r_previous
                        q_i = q_i_previous
                        break

                if self.denoise_delta is not None and j>0:
                    theta_now = math.acos(cos_angle(p-c_, q-c_))
                    q_previous = self.D['coords'][q_i_previous]
                    theta_prev = math.acos(cos_angle(p-c_, q_previous-c_))
                    
                    if theta_prev-theta_now > self.denoise_delta and theta_now < self.denoise_min and r>norm(q-p):
                        # keep previous radius:
                        r=r_previous
                        q_i = q_i_previous
                        break

                if self.detect_planar is not None:
                    if math.acos(cos_angle(q-p, -n)) > self.detect_planar and j<2:
                        r= self.SuperR
                        break

                ### END DENOISING STUFF


                # stop iteration if r has converged
                if abs(r_previous - r) < self.delta_convergence:
                    break

                # stop iteration if this looks like an infinite loop:
                if j > self.iteration_limit:
                    # print "breaking for possible infinite loop"
                    break

            # now store valid points in array (invalid points will be NaN)
            if inner: inout = 'in'
            else: inout = 'out'
            
            if r < self.SuperR:
                # self.D['ma_radii_'+inout][p_i] = r
                self.D['ma_coords_'+inout][p_i] = c
                self.D['ma_q_'+inout][p_i] = q_i
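The loop above relies on three small geometric helpers (compute_radius, cos_angle, equal) defined elsewhere in the module. A minimal sketch of what they must compute, assuming the standard shrinking-ball geometry (the ball is tangent to p, with its center on the line through p along -n), could be:

import numpy as np

def compute_radius(p, n, q):
    # Radius of the ball tangent to p (unit normal n) that passes through q.
    # From |c - q| = r with c = p - r*n it follows that
    # r = |p - q|**2 / (2 * (p - q) . n). A zero denominator means q lies
    # in the tangent plane at p; raise so the caller can skip the point.
    d = p - q
    denom = 2 * np.dot(d, n)
    if denom == 0:
        raise ZeroDivisionError
    return np.dot(d, d) / denom

def cos_angle(u, v):
    # Cosine of the angle between u and v, clipped so acos stays defined.
    return np.clip(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)), -1.0, 1.0)

def equal(a, b, tol=1e-9):
    # True if two points coincide to within a small tolerance.
    return np.linalg.norm(a - b) < tol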
Example n. 48
0
def make_map(e, n, t, d, dat_port, dat_star, data_R, pix_m, res, cs2cs_args, sonpath, p, mode, nn, numstdevs, c, dx, use_uncorrected, scalemax): #dogrid, influence,dowrite,

   thres=5

   trans =  pyproj.Proj(init=cs2cs_args)

   mp = np.nanmean(dat_port)
   ms = np.nanmean(dat_star)
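   # gain-match port and starboard: scale the dimmer channel by the ratio of channel means before stacking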
   if mp>ms:
      merge = np.vstack((dat_port,dat_star*(mp/ms)))      
   else:
      merge = np.vstack((dat_port*(ms/mp),dat_star))
   del dat_port, dat_star

   merge[np.isnan(merge)] = 0
   merge = merge[:,:len(n)]

   ## actual along-track resolution is this: dx times dy = Af
   tmp = data_R * dx * (c*0.007 / 2) #dx = np.arcsin(c/(1000*meta['t']*meta['f']))
   res_grid = np.sqrt(np.vstack((tmp, tmp)))
   del tmp
   res_grid = res_grid[:np.shape(merge)[0],:np.shape(merge)[1]]
   
   #if use_uncorrected != 1:
   #   merge = merge - 10*np.log10(res_grid)
   
   res_grid = res_grid.astype('float32')

   merge[np.isnan(merge)] = 0
   merge[merge<0] = 0

   merge = merge.astype('float32')

   merge = denoise_tv_chambolle(merge.copy(), weight=.2, multichannel=False).astype('float32')

   R = np.vstack((np.flipud(data_R),data_R))
   del data_R
   R = R[:np.shape(merge)[0],:np.shape(merge)[1]]

   # get number pixels in scan line
   extent = int(np.shape(merge)[0]/2)

   yvec = np.squeeze(np.linspace(np.squeeze(pix_m),extent*np.squeeze(pix_m),extent))

   X, Y, D, h, t  = getXY(e,n,yvec,np.squeeze(d),t,extent)

   X = X.astype('float32')
   Y = Y.astype('float32')
   D = D.astype('float32')
   h = h.astype('float32')
   t = t.astype('float32')

   D[np.isnan(D)] = 0
   h[np.isnan(h)] = 0
   t[np.isnan(t)] = 0

   # filter on Y: compute the valid index once, before any of the arrays are modified
   ind = np.where(np.logical_not(np.isnan(Y)))
   X = X[ind]
   merge = merge.flatten()[ind]
   res_grid = res_grid.flatten()[ind]
   R = R.flatten()[ind]
   D = D[ind]
   h = h[ind]
   t = t[ind]
   Y = Y[ind]

   # filter on X
   ind = np.where(np.logical_not(np.isnan(X)))
   Y = Y[ind]
   merge = merge[ind]
   res_grid = res_grid[ind]
   R = R[ind]
   D = D[ind]
   h = h[ind]
   t = t[ind]
   X = X[ind]

   # filter on merge: drop NaNs, then infs
   ind = np.where(np.logical_not(np.isnan(merge)))
   X = X[ind]
   Y = Y[ind]
   res_grid = res_grid[ind]
   R = R[ind]
   D = D[ind]
   h = h[ind]
   t = t[ind]
   merge = merge[ind]

   ind = np.where(np.logical_not(np.isinf(merge)))
   X = X[ind]
   Y = Y[ind]
   res_grid = res_grid[ind]
   R = R[ind]
   D = D[ind]
   h = h[ind]
   t = t[ind]
   merge = merge[ind]



   print("writing point cloud")
   #if dowrite==1:
   ## write raw bs to file
   outfile = os.path.normpath(os.path.join(sonpath,'x_y_ss_raw'+str(p)+'.asc'))
   ##write.txtwrite( outfile, np.hstack((humutils.ascol(X.flatten()),humutils.ascol(Y.flatten()), humutils.ascol(merge.flatten()), humutils.ascol(D.flatten()), humutils.ascol(R.flatten()), humutils.ascol(h.flatten()), humutils.ascol(t.flatten())  )) )
   np.savetxt(outfile, np.hstack((humutils.ascol(X.flatten()),humutils.ascol(Y.flatten()), humutils.ascol(merge.flatten()), humutils.ascol(D.flatten()), humutils.ascol(R.flatten()), humutils.ascol(h.flatten()), humutils.ascol(t.flatten())  )) , fmt="%8.6f %8.6f %8.6f %8.6f %8.6f %8.6f %8.6f") 

   del D, R, h, t

   sigmas = 0.1 #m
   eps = 2

   print("gridding ...")
   #if dogrid==1:
   if 2>1:

      if res==99: # 99 is a sentinel value: choose the grid resolution automatically from the data
         resg = np.min(res_grid[res_grid>0])/2
         print('Gridding at resolution of %s' % str(resg))
      else:
         resg = res

      tree = KDTree(np.c_[X.flatten(),Y.flatten()])
      complete=0
      while complete==0:
         try:
            grid_x, grid_y, res = getmesh(np.min(X), np.max(X), np.min(Y), np.max(Y), resg)
            longrid, latgrid = trans(grid_x, grid_y, inverse=True)
            longrid = longrid.astype('float32')
            latgrid = latgrid.astype('float32')
            shape = np.shape(grid_x)

            ## create mask for where the data is not
            if pykdtree==1:
               dist, _ = tree.query(np.c_[grid_x.ravel(), grid_y.ravel()], k=1)
            else:
               try:
                  dist, _ = tree.query(np.c_[grid_x.ravel(), grid_y.ravel()], k=1, n_jobs=cpu_count())
               except:
                  #print ".... update your scipy installation to use faster kd-tree queries"
                  dist, _ = tree.query(np.c_[grid_x.ravel(), grid_y.ravel()], k=1)

            dist = dist.reshape(grid_x.shape)

            targ_def = pyresample.geometry.SwathDefinition(lons=longrid.flatten(), lats=latgrid.flatten())
            del longrid, latgrid

            humlon, humlat = trans(X, Y, inverse=True)
            orig_def = pyresample.geometry.SwathDefinition(lons=humlon.flatten(), lats=humlat.flatten())
            del humlon, humlat
            if 'orig_def' in locals():
               complete=1
         except:
            print("memory error: trying grid resolution of %s" % (str(resg*2)))
            resg = resg*2

      if mode==1:

         complete=0
         while complete==0:
            try:
               try:
                  dat = pyresample.kd_tree.resample_nearest(orig_def, merge.flatten(), targ_def, radius_of_influence=res*20, fill_value=None, nprocs = cpu_count(), reduce_data=1)
               except:
                  dat = pyresample.kd_tree.resample_nearest(orig_def, merge.flatten(), targ_def, radius_of_influence=res*20, fill_value=None, nprocs = 1, reduce_data=1)

               try:
                  r_dat = pyresample.kd_tree.resample_nearest(orig_def, res_grid.flatten(), targ_def, radius_of_influence=res*20, fill_value=None, nprocs = cpu_count(), reduce_data=1)
               except:
                  r_dat = pyresample.kd_tree.resample_nearest(orig_def, res_grid.flatten(), targ_def, radius_of_influence=res*20, fill_value=None, nprocs = 1, reduce_data=1)

               stdev = None
               counts = None
               if 'dat' in locals():
                  complete=1
            except:
               del grid_x, grid_y, targ_def, orig_def

               wf = None
               humlon, humlat = trans(X, Y, inverse=True)
               dat, stdev, counts, resg, complete, shape = getgrid_lm(humlon, humlat, merge, res*10, min(X), max(X), min(Y), max(Y), resg*2, mode, trans, nn, wf, sigmas, eps)
               r_dat, stdev, counts, resg, complete, shape = getgrid_lm(humlon, humlat, res_grid, res*10, min(X), max(X), min(Y), max(Y), resg*2, mode, trans, nn, wf, sigmas, eps)
               del humlon, humlat

      elif mode==2:

         # custom inverse distance
         wf = lambda r: 1/r**2

         complete=0
         while complete==0:
            try:
               try:
                  dat, stdev, counts = pyresample.kd_tree.resample_custom(orig_def, merge.flatten(),targ_def, radius_of_influence=res*20, neighbours=nn, weight_funcs=wf, fill_value=None, with_uncert = True, nprocs = cpu_count(), reduce_data=1)
               except:
                  dat, stdev, counts = pyresample.kd_tree.resample_custom(orig_def, merge.flatten(),targ_def, radius_of_influence=res*20, neighbours=nn, weight_funcs=wf, fill_value=None, with_uncert = True, nprocs = 1, reduce_data=1)

               try:
                  r_dat = pyresample.kd_tree.resample_custom(orig_def, res_grid.flatten(), targ_def, radius_of_influence=res*20, neighbours=nn, weight_funcs=wf, fill_value=None, with_uncert = False, nprocs = cpu_count(), reduce_data=1)
               except:
                  r_dat = pyresample.kd_tree.resample_custom(orig_def, res_grid.flatten(), targ_def, radius_of_influence=res*20, neighbours=nn, weight_funcs=wf, fill_value=None, with_uncert = False, nprocs = 1, reduce_data=1)

               if 'dat' in locals():
                  complete=1
            except:
               del grid_x, grid_y, targ_def, orig_def
               humlon, humlat = trans(X, Y, inverse=True)
               dat, stdev, counts, resg, complete, shape = getgrid_lm(humlon, humlat, merge, res*2, min(X), max(X), min(Y), max(Y), resg*2, mode, trans, nn, wf, sigmas, eps)
               r_dat, stdev_null, counts_null, resg, complete, shape = getgrid_lm(humlon, humlat, res_grid, res*2, min(X), max(X), min(Y), max(Y), resg*2, mode, trans, nn, wf, sigmas, eps)
               del humlat, humlon
               del stdev_null, counts_null

      elif mode==3:
         wf = None

         complete=0
         while complete==0:
            try:
               try:
                  dat, stdev, counts = pyresample.kd_tree.resample_gauss(orig_def, merge.flatten(), targ_def, radius_of_influence=res*20, neighbours=nn, sigmas=sigmas, fill_value=None, with_uncert = True, nprocs = cpu_count(), epsilon = eps, reduce_data=1)
               except:
                  dat, stdev, counts = pyresample.kd_tree.resample_gauss(orig_def, merge.flatten(), targ_def, radius_of_influence=res*20, neighbours=nn, sigmas=sigmas, fill_value=None, with_uncert = True, nprocs = 1, epsilon = eps, reduce_data=1)

               try:
                  r_dat = pyresample.kd_tree.resample_gauss(orig_def, res_grid.flatten(), targ_def, radius_of_influence=res*20, neighbours=nn, sigmas=sigmas, fill_value=None, with_uncert = False, nprocs = cpu_count(), epsilon = eps, reduce_data=1)
               except:
                  r_dat = pyresample.kd_tree.resample_gauss(orig_def, res_grid.flatten(), targ_def, radius_of_influence=res*20, neighbours=nn, sigmas=sigmas, fill_value=None, with_uncert = False, nprocs = 1, epsilon = eps, reduce_data=1)

               if 'dat' in locals():
                  complete=1
            except:
               del grid_x, grid_y, targ_def, orig_def
               humlon, humlat = trans(X, Y, inverse=True)
               dat, stdev, counts, resg, complete, shape = getgrid_lm(humlon, humlat, merge, res*10, min(X), max(X), min(Y), max(Y), resg*2, mode, trans, nn, wf, sigmas, eps)
               r_dat, stdev_null, counts_null, resg, complete, shape = getgrid_lm(humlon, humlat, res_grid, res*10, min(X), max(X), min(Y), max(Y), resg*2, mode, trans, nn, wf, sigmas, eps)
               del humlat, humlon
               del stdev_null, counts_null

      humlon, humlat = trans(X, Y, inverse=True)
      del X, Y, res_grid, merge

      dat = dat.reshape(shape)

      dat[dist>res*30] = np.nan
      del dist

      r_dat = r_dat.reshape(shape)
      r_dat[r_dat<1] = 1
      r_dat[r_dat > 2*np.pi] = 1
      r_dat[np.isnan(dat)] = np.nan

      dat = dat + r_dat #np.sqrt(np.cos(np.deg2rad(r_dat))) #dat*np.sqrt(r_dat) + dat

      del r_dat

      if mode>1:
         stdev = stdev.reshape(shape)
         counts = counts.reshape(shape)

      mask = dat.mask.copy()

      dat[mask==1] = np.nan
      #dat[mask==1] = 0

      if mode>1:
         dat[(stdev>numstdevs) & (mask!=0)] = np.nan
         dat[(counts<nn) & (counts>0)] = np.nan


   #if dogrid==1:

   dat[dat==0] = np.nan
   dat[np.isinf(dat)] = np.nan

   dat[dat<thres] = np.nan

   datm = np.ma.masked_invalid(dat)

   glon, glat = trans(grid_x, grid_y, inverse=True)
   #del grid_x, grid_y

   try:
      from osgeo import gdal,ogr,osr
      proj = osr.SpatialReference()
      proj.ImportFromEPSG(int(cs2cs_args.split(':')[-1])) #26949)
      datout = np.squeeze(np.ma.filled(dat))#.astype('int16')
      datout[np.isnan(datout)] = -99
      driver = gdal.GetDriverByName('GTiff')
      #rows,cols = np.shape(datout)
      cols,rows = np.shape(datout)    
      outFile = os.path.normpath(os.path.join(sonpath,'geotiff_map'+str(p)+'.tif'))
      ds = driver.Create( outFile, rows, cols, 1, gdal.GDT_Float32, [ 'COMPRESS=LZW' ] )        
      if proj is not None:  
        ds.SetProjection(proj.ExportToWkt()) 

      xmin, ymin, xmax, ymax = [grid_x.min(), grid_y.min(), grid_x.max(), grid_y.max()]

      xres = (xmax - xmin) / float(rows)
      yres = (ymax - ymin) / float(cols)
      geotransform = (xmin, xres, 0, ymax, 0, -yres)

      ds.SetGeoTransform(geotransform)
      ss_band = ds.GetRasterBand(1)
      ss_band.WriteArray(np.flipud(datout)) #datout)
      ss_band.SetNoDataValue(-99)
      ss_band.FlushCache()
      ss_band.ComputeStatistics(False)
      del ds   
   
   except:
      print("error: geotiff could not be created... check your gdal/ogr install")


   try:

      # =========================================================
      print("creating kmz file ...")
      ## new way to create kml file
      pixels = 1024 * 10

      fig, ax = humutils.gearth_fig(llcrnrlon=glon.min(),
                     llcrnrlat=glat.min(),
                     urcrnrlon=glon.max(),
                     urcrnrlat=glat.max(),
                     pixels=pixels)
      cs = ax.pcolormesh(glon, glat, datm, vmax=scalemax, cmap='gray')
      ax.set_axis_off()
      fig.savefig(os.path.normpath(os.path.join(sonpath,'map'+str(p)+'.png')), transparent=True, format='png')
      del fig, ax

      # =========================================================
      fig = plt.figure(figsize=(1.0, 4.0), facecolor=None, frameon=False)
      ax = fig.add_axes([0.0, 0.05, 0.2, 0.9])
      cb = fig.colorbar(cs, cax=ax)
      cb.set_label('Intensity [dB W]', rotation=-90, color='k', labelpad=20)
      fig.savefig(os.path.normpath(os.path.join(sonpath,'legend'+str(p)+'.png')), transparent=False, format='png')
      del fig, ax, cs, cb

      # =========================================================
      humutils.make_kml(llcrnrlon=glon.min(), llcrnrlat=glat.min(),
         urcrnrlon=glon.max(), urcrnrlat=glat.max(),
         figs=[os.path.normpath(os.path.join(sonpath,'map'+str(p)+'.png'))],
         colorbar=os.path.normpath(os.path.join(sonpath,'legend'+str(p)+'.png')),
         kmzfile=os.path.normpath(os.path.join(sonpath,'GroundOverlay'+str(p)+'.kmz')),
         name='Sidescan Intensity')

   except:
      print("error: map could not be created...")


   #y1 = np.min(glat)-0.001
   #x1 = np.min(glon)-0.001
   #y2 = np.max(glat)+0.001
   #x2 = np.max(glon)+0.001

   print("drawing and printing map ...")
   fig = plt.figure(frameon=False)
   map = Basemap(projection='merc', epsg=cs2cs_args.split(':')[1],
    resolution = 'i', #h #f
    llcrnrlon=np.min(humlon)-0.001, llcrnrlat=np.min(glat)-0.001,
    urcrnrlon=np.max(humlon)+0.001, urcrnrlat=np.max(glat)+0.001)

   try:
      map.arcgisimage(server='http://server.arcgisonline.com/ArcGIS', service='World_Imagery', xpixels=1000, ypixels=None, dpi=300)
   except:
      map.arcgisimage(server='http://server.arcgisonline.com/ArcGIS', service='ESRI_Imagery_World_2D', xpixels=1000, ypixels=None, dpi=300)
   #finally:
   #   print "error: map could not be created..."

   #if dogrid==1:
   gx,gy = map.projtran(glon, glat)

   ax = plt.Axes(fig, [0., 0., 1., 1.], )
   ax.set_axis_off()
   fig.add_axes(ax)

   #if dogrid==1:
   if 2>1:
      if datm.size > 25000000:
         print("matrix size > 25,000,000 - decimating by factor of 5 for display")
         map.pcolormesh(gx[::5,::5], gy[::5,::5], datm[::5,::5], cmap='gray', vmin=np.nanmin(datm), vmax=scalemax) #vmax=np.nanmax(datm)
      else:
         map.pcolormesh(gx, gy, datm, cmap='gray', vmin=np.nanmin(datm), vmax=scalemax) #vmax=np.nanmax(datm)
      del datm, dat
   else:
      ## draw point cloud
      x,y = map.projtran(humlon, humlat)
      map.scatter(x.flatten(), y.flatten(), 0.5, merge.flatten(), cmap='gray', linewidth = '0')

   #map.drawmapscale(x1+0.001, y1+0.001, x1, y1, 200., units='m', barstyle='fancy', labelstyle='simple', fontcolor='k') #'#F8F8FF')
   #map.drawparallels(np.arange(y1-0.001, y2+0.001, 0.005),labels=[1,0,0,1], linewidth=0.0, rotation=30, fontsize=8)
   #map.drawmeridians(np.arange(x1, x2, 0.002),labels=[1,0,0,1], linewidth=0.0, rotation=30, fontsize=8)

   custom_save2(sonpath,'map_imagery'+str(p))
   del fig


   del humlat, humlon
   return res #return the new resolution
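The masking idiom used in make_map - build a KD-tree on the scattered soundings, query it from every grid node, and blank the cells that lie too far from any data - is worth isolating. A self-contained sketch using scipy's cKDTree (pykdtree's KDTree is a near drop-in replacement when it is installed):

import numpy as np
from scipy.spatial import cKDTree

def distance_mask(x, y, grid_x, grid_y, max_dist):
    # Boolean mask of grid cells farther than max_dist from any data point.
    tree = cKDTree(np.c_[x.ravel(), y.ravel()])
    dist, _ = tree.query(np.c_[grid_x.ravel(), grid_y.ravel()], k=1)
    return dist.reshape(grid_x.shape) > max_dist

# e.g. blank interpolated cells more than 30 grid resolutions from a sounding:
# dat[distance_mask(X, Y, grid_x, grid_y, res * 30)] = np.nan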
Example n. 49
0
def read(humfile, sonpath, cs2cs_args, c, draft, doplot, t, bedpick, flip_lr, model, calc_bearing, filt_bearing, chunk): #cog = 1,

    '''
    Read a .DAT and associated set of .SON files recorded by a Humminbird(R)
    instrument.

    Parse the data into a set of memory mapped files that will
    subsequently be used by the other functions of the PyHum module.

    Export time-series data and metadata in other formats.

    Create a kml file for visualising boat track

    Syntax
    ----------
    [] = PyHum.read(humfile, sonpath, cs2cs_args, c, draft, doplot, t, bedpick, flip_lr, model, calc_bearing, filt_bearing, chunk)

    Parameters
    ------------
    humfile : str
       path to the .DAT file
    sonpath : str
       path where the *.SON files are
    cs2cs_args : int, *optional* [Default="epsg:26949"]
       arguments to create coordinates in a projected coordinate system
       this argument gets given to pyproj to turn wgs84 (lat/lon) coordinates
       into any projection supported by the proj.4 libraries
    c : float, *optional* [Default=1450.0]
       speed of sound in water (m/s). Defaults to a value of freshwater
    draft : float, *optional* [Default=0.3]
       draft from water surface to transducer face (m)
    doplot : float, *optional* [Default=1]
       if 1, plots will be made
    t : float, *optional* [Default=0.108]
       length of transducer array (m).
       Default value is that of the 998 series Humminbird(R)
    bedpick : int, *optional* [Default=1]
       if 1, bedpicking will be carried out automatically
       if 0, user will be prompted to pick the bed location on screen
    flip_lr : int, *optional* [Default=0]
       if 1, port and starboard scans will be flipped
       (for situations where the transducer is flipped 180 degrees)
    model: int, *optional* [Default=998]
       A 3 or 4 number code indicating the model number
       Examples: 998, 997, 1198, 1199
    calc_bearing : float, *optional* [Default=0]
       if 1, bearing will be calculated from coordinates
    filt_bearing : float, *optional* [Default=0]
       if 1, bearing will be filtered
    chunk : str, *optional* [Default='d100' (distance, 100 m)]
       letter, followed by a number.
       The letter options are:
       'd' - parse chunks based on distance; the number is the distance in m
       'p' - parse chunks based on number of pings; the number is the number of pings
       'h' - parse chunks based on change in heading; the number is the change in heading in degrees
       '1' - process just 1 chunk

    Returns
    ---------
    sonpath+base+'_data_port.dat': memory-mapped file
        contains the raw echogram from the port side
        sidescan sonar (where present)

    sonpath+base+'_data_star.dat': memory-mapped file
        contains the raw echogram from the starboard side
        sidescan sonar (where present)

    sonpath+base+'_data_dwnhi.dat': memory-mapped file
        contains the raw echogram from the high-frequency
        echosounder (where present)

    sonpath+base+'_data_dwnlow.dat': memory-mapped file
        contains the raw echogram from the low-frequency
        echosounder (where present)

    sonpath+base+"trackline.kml": google-earth kml file
        contains the trackline of the vessel during data
        acquisition

    sonpath+base+'rawdat.csv': comma separated value file
        contains time-series data. columns corresponding to
        longitude
        latitude
        easting (m)
        northing (m)
        depth to bed (m)
        alongtrack cumulative distance (m)
        vessel heading (deg.)

    sonpath+base+'meta.mat': .mat file
        matlab format file containing a dictionary object
        holding metadata information. Fields are:
        e : ndarray, easting (m)
        n : ndarray, northing (m)
        es : ndarray, low-pass filtered easting (m)
        ns : ndarray, low-pass filtered northing (m)
        lat : ndarray, latitude
        lon : ndarray, longitude
        shape_port : tuple, shape of port scans in memory mapped file
        shape_star : tuple, shape of starboard scans in memory mapped file
        shape_hi : tuple, shape of high-freq. scans in memory mapped file
        shape_low : tuple, shape of low-freq. scans in memory mapped file
        dep_m : ndarray, depth to bed (m)
        dist_m : ndarray, distance along track (m)
        heading : ndarray, heading of vessel (deg. N)
        pix_m: float, size of 1 pixel in across-track dimension (m)
        bed : ndarray, depth to bed (m)
        c : float, speed of sound in water (m/s)
        t : float, length of sidescan transducer array (m)
        spd : ndarray, vessel speed (m/s)
        time_s : ndarray, time elapsed (s)
        caltime : ndarray, unix epoch time (s)
    '''

    # prompt user to supply file if no input file given
    if not humfile:
      print('An input file is required!!!!!!')
      Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
      humfile = askopenfilename(filetypes=[("DAT files","*.DAT")])

    # prompt user to supply directory if no input sonpath is given
    if not sonpath:
      print('A *.SON directory is required!!!!!!')
      Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
      sonpath = askdirectory()

    # print given arguments to screen and convert data type where necessary
    if humfile:
      print('Input file is %s' % (humfile))

    if sonpath:
      print('Son files are in %s' % (sonpath))

    if cs2cs_args:
      print('cs2cs arguments are %s' % (cs2cs_args))

    if draft:
      draft = float(draft)
      print('Draft: %s' % (str(draft)))

    if c:
      c = float(c)
      print('Celerity of sound: %s m/s' % (str(c)))

    if doplot:
      doplot = int(doplot)
      if doplot==0:
         print("Plots will not be made")

    if flip_lr:
      flip_lr = int(flip_lr)
      if flip_lr==1:
         print("Port and starboard will be flipped")

    if t:
      t = np.asarray(t,float)
      print('Transducer length is %s m' % (str(t)))

    if bedpick:
      bedpick = np.asarray(bedpick,int)
      if bedpick==1:
         print('Bed picking is auto')
      elif bedpick==0:
         print('Bed picking is manual')
      else:
         print('User will be prompted per chunk about bed picking method')

    if chunk:
       chunk = str(chunk)
       if chunk[0]=='d':
          chunkmode=1
          chunkval = int(chunk[1:])
          print('Chunks based on distance of %s m' % (str(chunkval)))
       elif chunk[0]=='p':
          chunkmode=2
          chunkval = int(chunk[1:])
          print('Chunks based on %s pings' % (str(chunkval)))
       elif chunk[0]=='h':
          chunkmode=3
          chunkval = int(chunk[1:])
          print('Chunks based on heading deviation of %s degrees' % (str(chunkval)))
       elif chunk[0]=='1':
          chunkmode=4
          chunkval = 1
          print('Only 1 chunk will be produced')
       else:
          print("Chunk mode not understood - should be 'd', 'p', 'h' or '1' - using defaults")
          chunkmode=1
          chunkval = 100
          print('Chunks based on distance of %s m' % (str(chunkval)))

    if model:
       try:
          model = int(model)
          print("Data is from the %s series"  % (str(model)))
       except:
          if model=='onix':
             model=0
             print("Data is from the ONIX series")
          elif model=='helix':
             model=1
             print("Data is from the HELIX series")
          elif model=='mega':
             model=2
             print("Data is from the MEGA series")
#    if cog:
#       cog = int(cog)
#       if cog==1:
#          print "Heading based on course-over-ground"

    if calc_bearing:
       calc_bearing = int(calc_bearing)
       if calc_bearing==1:
          print("Bearing will be calculated from coordinates")

    if filt_bearing:
       filt_bearing = int(filt_bearing)
       if filt_bearing==1:
          print("Bearing will be filtered")

    ## for debugging
    #humfile = r"test.DAT"; sonpath = "test_data"
    #cs2cs_args = "epsg:26949"; doplot = 1; draft = 0
    #c=1450; bedpick=1; fliplr=1; chunk = 'd100'
    #model=998; cog=1; calc_bearing=0; filt_bearing=0

    #if model==2:
    #   f = 1000
    #else:
    f = 455

    try:
       print("Checking the epsg code you have chosen for compatibility with Basemap ... ")
       from mpl_toolkits.basemap import Basemap
       m = Basemap(projection='merc', epsg=cs2cs_args.split(':')[1],
          resolution = 'i', llcrnrlon=10, llcrnrlat=10, urcrnrlon=30, urcrnrlat=30)
       del m
       print("... epsg code compatible")
    except ValueError:
       print("Error: the epsg code you have chosen is not compatible with Basemap")
       print("please choose a different epsg code (http://spatialreference.org/)")
       print("program will now close")
       sys.exit()

    # start timer (time.clock was removed in Python 3.8; perf_counter works on all platforms)
    start = time.perf_counter()

    # if son path name supplied has no separator at end, put one on
    if sonpath[-1]!=os.sep:
       sonpath = sonpath + os.sep

    # get the SON files from this directory
    sonfiles = glob.glob(sonpath+'*.SON')
    if not sonfiles:
        sonfiles = glob.glob(os.getcwd()+os.sep+sonpath+'*.SON')

    base = humfile.split('.DAT') # get base of file name for output
    base = base[0].split(os.sep)[-1]

    # remove underscores, negatives and spaces from basename
    base = humutils.strip_base(base)

    print("WARNING: Because files have to be read in byte by byte,")
    print("this could take a very long time ...")

    #reading each sonfile in parallel should be faster ...
    try:
       o = Parallel(n_jobs = np.min([len(sonfiles), cpu_count()]), verbose=0)(delayed(getscans)(sonfiles[k], humfile, c, model, cs2cs_args) for k in range(len(sonfiles)))
       X, Y, A, B = zip(*o)

       for k in range(len(Y)):
          if Y[k] == 'sidescan_port':
             dat = A[k] #data.gethumdat()
             metadat = B[k] #data.getmetadata()
             if flip_lr==0:
                data_port = X[k].astype('int16')
             else:
                data_star = X[k].astype('int16')

          elif Y[k] == 'sidescan_starboard':
             if flip_lr==0:
                data_star = X[k].astype('int16')
             else:
                data_port = X[k].astype('int16')

          elif Y[k] == 'down_lowfreq':
             data_dwnlow = X[k].astype('int16')

          elif Y[k] == 'down_highfreq':
             data_dwnhi = X[k].astype('int16')

          elif Y[k] == 'down_vhighfreq': #hopefully this only applies to mega systems
             data_dwnhi = X[k].astype('int16')

       del X, Y, A, B, o
       old_pyread = 0

       if 'data_port' not in locals():
          data_port = ''
          print("portside scan not available")

       if 'data_star' not in locals():
          data_star = ''
          print("starboardside scan not available")

       if 'data_dwnhi' not in locals():
          data_dwnhi = ''
          print("high-frq. downward scan not available")

       if 'data_dwnlow' not in locals():
          data_dwnlow = ''
          print("low-frq. downward scan not available")

    except: # revert to the older version if the parallelised version fails

       print("something went wrong with the parallelised version of pyread ...")

       try:
          import pyread
       except:
          from . import pyread

       data = pyread.pyread(sonfiles, humfile, c, model, cs2cs_args)

       dat = data.gethumdat()

       metadat = data.getmetadata()

       old_pyread = 1

    nrec = len(metadat['n'])

    metadat['instr_heading'] = metadat['heading'][:nrec]

    #metadat['heading'] = humutils.get_bearing(calc_bearing, filt_bearing, cog, metadat['lat'], metadat['lon'], metadat['instr_heading'])

    try:
       es = humutils.runningMeanFast(metadat['e'][:nrec],len(metadat['e'][:nrec])//100)
       ns = humutils.runningMeanFast(metadat['n'][:nrec],len(metadat['n'][:nrec])//100)
    except:
       es = metadat['e'][:nrec]
       ns = metadat['n'][:nrec]

    metadat['es'] = es
    metadat['ns'] = ns

    try:
       trans =  pyproj.Proj(init=cs2cs_args)
    except:
       trans =  pyproj.Proj(cs2cs_args.lstrip(), inverse=True)

    lon, lat = trans(es, ns, inverse=True)
    metadat['lon'] = lon
    metadat['lat'] = lat

    metadat['heading'] = humutils.get_bearing(calc_bearing, filt_bearing, metadat['lat'], metadat['lon'], metadat['instr_heading']) #cog

    dist_m = humutils.get_dist(lat, lon)
    metadat['dist_m'] = dist_m

    if calc_bearing==1: # recalculate speed, m/s
       ds=np.gradient(np.squeeze(metadat['time_s']))
       dx=np.gradient(np.squeeze(metadat['dist_m']))
       metadat['spd'] = dx[:nrec]/ds[:nrec]

    # theta at 3dB in the horizontal
    theta3dB = np.arcsin(c/(t*(f*1000)))
    #resolution of 1 sidescan pixel to nadir
    ft = (np.pi/2)*(1/theta3dB) #/ (f/455)

    dep_m = humutils.get_depth(metadat['dep_m'][:nrec])

    if old_pyread == 1: #older pyread version

       # port scan
       try:
          if flip_lr==0:
             data_port = data.getportscans().astype('int16')
          else:
             data_port = data.getstarscans().astype('int16')
       except:
          data_port = ''
          print("portside scan not available")

    if data_port!='':

       Zt, ind_port = makechunks_scan(chunkmode, chunkval, metadat, data_port, 0)
       del data_port

       ## create memory mapped file for Z
       shape_port = io.set_mmap_data(sonpath, base, '_data_port.dat', 'int16', Zt)

       ##we are only going to access the portion of memory required
       port_fp = io.get_mmap_data(sonpath, base, '_data_port.dat', 'int16', shape_port)

    if old_pyread == 1: #older pyread version
       # starboard scan
       try:
          if flip_lr==0:
             data_star = data.getstarscans().astype('int16')
          else:
             data_star = data.getportscans().astype('int16')
       except:
          data_star = ''
          print("starboardside scan not available")

    if data_star!='':

       Zt, ind_star = makechunks_scan(chunkmode, chunkval, metadat, data_star, 1)
       del data_star

       # create memory mapped file for Z
       shape_star = io.set_mmap_data(sonpath, base, '_data_star.dat', 'int16', Zt)

       star_fp = io.get_mmap_data(sonpath, base, '_data_star.dat', 'int16', shape_star)

    if 'star_fp' in locals() and 'port_fp' in locals():
       # check that port and starboard are same size
       # and trim if not
       if np.shape(star_fp)!=np.shape(port_fp):
          print("port and starboard scans are different sizes ... rectifying")
          if np.shape(port_fp[0])[1] > np.shape(star_fp[0])[1]:
             tmp = port_fp.copy()
             tmp2 = np.empty_like(star_fp)
             for k in range(len(tmp)):
                 tmp2[k] = tmp[k][:,:np.shape(star_fp[k])[1]]
             del tmp

             # create memory mapped file for Z
             shape_port = io.set_mmap_data(sonpath, base, '_data_port2.dat', 'int16', tmp2)
             #shape_star = shape_port.copy()
             shape_star = tuple(np.asarray(shape_port).copy())

             ##we are only going to access the portion of memory required
             port_fp = io.get_mmap_data(sonpath, base, '_data_port2.dat', 'int16', shape_port)

             ind_port = list(ind_port)
             ind_port[-1] = np.shape(star_fp[0])[1]
             ind_port = tuple(ind_port)

          elif np.shape(port_fp[0])[1] < np.shape(star_fp[0])[1]:
             tmp = star_fp.copy()
             tmp2 = np.empty_like(port_fp)
             for k in range(len(tmp)):
                 tmp2[k] = tmp[k][:,:np.shape(port_fp[k])[1]]
             del tmp

             # create memory mapped file for Z
             shape_port = io.set_mmap_data(sonpath, base, '_data_star2.dat', 'int16', tmp2)
             #shape_star = shape_port.copy()
             shape_star = tuple(np.asarray(shape_port).copy())

             #we are only going to access the portion of memory required
             star_fp = io.get_mmap_data(sonpath, base, '_data_star2.dat', 'int16', shape_star)

             ind_star = list(ind_star)
             ind_star[-1] = np.shape(port_fp[0])[1]
             ind_star = tuple(ind_star)

    if old_pyread == 1: #older pyread version
       # low-freq. sonar
       try:
          data_dwnlow = data.getlowscans().astype('int16')
       except:
          data_dwnlow = ''
          print("low-freq. scan not available")

    if data_dwnlow!='':

       Zt, ind_low = makechunks_scan(chunkmode, chunkval, metadat, data_dwnlow, 2)
       del data_dwnlow

       # create memory mapped file for Z
       shape_low = io.set_mmap_data(sonpath, base, '_data_dwnlow.dat', 'int16', Zt)

       ##we are only going to access the portion of memory required
       dwnlow_fp = io.get_mmap_data(sonpath, base, '_data_dwnlow.dat', 'int16', shape_low)

    if old_pyread == 1: #older pyread version
       # hi-freq. sonar
       try:
          data_dwnhi = data.gethiscans().astype('int16')
       except:
          data_dwnhi = ''
          print("high-freq. scan not available")

    if data_dwnhi!='':

       Zt, ind_hi = makechunks_scan(chunkmode, chunkval, metadat, data_dwnhi, 3)
       del data_dwnhi

       # create memory mapped file for Z
       shape_hi = io.set_mmap_data(sonpath, base, '_data_dwnhi.dat', 'int16', Zt)

       dwnhi_fp = io.get_mmap_data(sonpath, base, '_data_dwnhi.dat', 'int16', shape_hi)

    if 'dwnhi_fp' in locals() and 'dwnlow_fp' in locals():
       # check that low and high are same size
       # and trim if not
       if (np.shape(dwnhi_fp)!=np.shape(dwnlow_fp)) and (chunkmode!=4):
          print("dwnhi and dwnlow are different sizes ... rectifying")
          if np.shape(dwnhi_fp[0])[1] > np.shape(dwnlow_fp[0])[1]:
             tmp = dwnhi_fp.copy()
             tmp2 = np.empty_like(dwnlow_fp)
             for k in range(len(tmp)):
                 tmp2[k] = tmp[k][:,:np.shape(dwnlow_fp[k])[1]]
             del tmp

             # create memory mapped file for Z
             shape_low = io.set_mmap_data(sonpath, base, '_data_dwnhi2.dat', 'int16', tmp2)
             #shape_hi = shape_low.copy()
             shape_hi = tuple(np.asarray(shape_low).copy())

             ##we are only going to access the portion of memory required
             dwnhi_fp = io.get_mmap_data(sonpath, base, '_data_dwnhi2.dat', 'int16', shape_hi)

             ind_hi = list(ind_hi)
             ind_hi[-1] = np.shape(dwnlow_fp[0])[1]
             ind_hi = tuple(ind_hi)

          elif np.shape(dwnhi_fp[0])[1] < np.shape(dwnlow_fp[0])[1]:
             tmp = dwnlow_fp.copy()
             tmp2 = np.empty_like(dwnhi_fp)
             for k in range(len(tmp)):
                 tmp2[k] = tmp[k][:,:np.shape(dwnhi_fp[k])[1]]
             del tmp

             # create memory mapped file for Z
             shape_low = io.set_mmap_data(sonpath, base, '_data_dwnlow2.dat', 'int16', tmp2)
             #shape_hi = shape_low.copy()
             shape_hi = tuple(np.asarray(shape_low).copy())

             ##we are only going to access the portion of memory required
             dwnlow_fp = io.get_mmap_data(sonpath, base, '_data_dwnlow2.dat', 'int16', shape_low)

             ind_low = list(ind_low)
             ind_low[-1] = np.shape(dwnhi_fp[0])[1]
             ind_low = tuple(ind_low)

    if old_pyread == 1: #older pyread version
       del data

    if ('shape_port' in locals()) and (chunkmode!=4):
       metadat['shape_port'] = shape_port
       nrec = metadat['shape_port'][0] * metadat['shape_port'][2]
    elif ('shape_port' in locals()) and (chunkmode==4):
       metadat['shape_port'] = shape_port
       nrec = metadat['shape_port'][1]
    else:
       metadat['shape_port'] = ''

    if ('shape_star' in locals()) and (chunkmode!=4):
       metadat['shape_star'] = shape_star
       nrec = metadat['shape_star'][0] * metadat['shape_star'][2]
    elif ('shape_star' in locals()) and (chunkmode==4):
       metadat['shape_star'] = shape_star
       nrec = metadat['shape_star'][1]
    else:
       metadat['shape_star'] = ''

    if ('shape_hi' in locals()) and (chunkmode!=4):
       metadat['shape_hi'] = shape_hi
       #nrec = metadat['shape_hi'][0] * metadat['shape_hi'][2] * 2
    elif ('shape_hi' in locals()) and (chunkmode==4):
       metadat['shape_hi'] = shape_hi
    else:
       metadat['shape_hi'] = ''

    if ('shape_low' in locals()) and (chunkmode!=4):
       metadat['shape_low'] = shape_low
       #nrec = metadat['shape_low'][0] * metadat['shape_low'][2] * 2
    elif ('shape_low' in locals()) and (chunkmode==4):
       metadat['shape_low'] = shape_low
    else:
       metadat['shape_low'] = ''

    #make kml boat trackline
    humutils.make_trackline(lon,lat, sonpath, base)

    if 'port_fp' in locals() and 'star_fp' in locals():

       #if not os.path.isfile(os.path.normpath(os.path.join(sonpath,base+'meta.mat'))):
       if 2>1:
          if bedpick == 1: # auto

             x, bed = humutils.auto_bedpick(ft, dep_m, chunkmode, port_fp, c)

             if len(dist_m)<len(bed):
                dist_m = np.append(dist_m,dist_m[-1]*np.ones(len(bed)-len(dist_m)))

             if doplot==1:
                if chunkmode!=4:
                   for k in range(len(star_fp)):
                      plot_2bedpicks(port_fp[k], star_fp[k], bed[ind_port[-1]*k:ind_port[-1]*(k+1)], dist_m[ind_port[-1]*k:ind_port[-1]*(k+1)], x[ind_port[-1]*k:ind_port[-1]*(k+1)], ft, shape_port, sonpath, k, chunkmode)
                else:
                   plot_2bedpicks(port_fp, star_fp, bed, dist_m, x, ft, shape_port, sonpath, 0, chunkmode)

             # 'real' bed is estimated to be the minimum of the two
             bed = np.min(np.vstack((bed[:nrec],np.squeeze(x[:nrec]))),axis=0)
             bed = humutils.runningMeanFast(bed, 3)

          elif bedpick>1: # user prompt

             x, bed = humutils.auto_bedpick(ft, dep_m, chunkmode, port_fp, c)

             if len(dist_m)<len(bed):
                dist_m = np.append(dist_m,dist_m[-1]*np.ones(len(bed)-len(dist_m)))

             # 'real' bed is estimated to be the minimum of the two
             bed = np.min(np.vstack((bed[:nrec],np.squeeze(x[:nrec]))),axis=0)
             bed = humutils.runningMeanFast(bed, 3)

             # manually intervene
             fig = plt.figure()
             ax = plt.gca()
             if chunkmode !=4:
                im = ax.imshow(np.hstack(port_fp), cmap = 'gray', origin = 'upper')
             else:
                im = ax.imshow(port_fp, cmap = 'gray', origin = 'upper')
             plt.plot(bed,'r')
             plt.axis('normal'); plt.axis('tight')

             pts1 = plt.ginput(n=300, timeout=30) # wait for up to 300 clicks or 30 seconds
             x1 = [pt[0] for pt in pts1] # x-coordinates of the clicked points
             y1 = [pt[1] for pt in pts1] # y-coordinates of the clicked points
             plt.close()
             del fig

             if x1 != []: # if x1 is not empty
                tree = KDTree(list(zip(np.arange(1,len(bed)), bed)))
                try:
                   dist, inds = tree.query(list(zip(x1, y1)), k = 100, eps=5, n_jobs=-1)
                except:
                   dist, inds = tree.query(list(zip(x1, y1)), k = 100, eps=5)

                b = np.interp(inds,x1,y1)
                bed2 = bed.copy()
                bed2[inds] = b
                bed = bed2

             if doplot==1:
                if chunkmode!=4:
                   for k in range(len(star_fp)):
                      plot_2bedpicks(port_fp[k], star_fp[k], bed[ind_port[-1]*k:ind_port[-1]*(k+1)], dist_m[ind_port[-1]*k:ind_port[-1]*(k+1)], x[ind_port[-1]*k:ind_port[-1]*(k+1)], ft, shape_port, sonpath, k, chunkmode)
                else:
                   plot_2bedpicks(port_fp, star_fp, bed, dist_m, x, ft, shape_port, sonpath, 0, chunkmode)

          else: #manual

             beds=[]

             if chunkmode!=4:
                for k in range(len(port_fp)):
                   input("Bed picking "+str(k+1)+" of "+str(len(port_fp))+", are you ready? 30 seconds. Press Enter to continue...")
                   bed={}
                   fig = plt.figure()
                   ax = plt.gca()
                   im = ax.imshow(port_fp[k], cmap = 'gray', origin = 'upper')
                   pts1 = plt.ginput(n=300, timeout=30) # wait for up to 300 clicks or 30 seconds
                   x1 = [pt[0] for pt in pts1] # x-coordinates of the clicked points
                   y1 = [pt[1] for pt in pts1] # y-coordinates of the clicked points
                   bed = np.interp(np.r_[:ind_port[-1]],x1,y1)
                   plt.close()
                   del fig
                   beds.append(bed)
                   extent = np.shape(port_fp[k])[0]
                bed = np.asarray(np.hstack(beds),'float')
             else:
                input("Bed picking - are you ready? 30 seconds. Press Enter to continue...")
                bed={}
                fig = plt.figure()
                ax = plt.gca()
                im = ax.imshow(port_fp, cmap = 'gray', origin = 'upper')
                pts1 = plt.ginput(n=300, timeout=30) # wait for up to 300 clicks or 30 seconds
                x1 = [pt[0] for pt in pts1] # x-coordinates of the clicked points
                y1 = [pt[1] for pt in pts1] # y-coordinates of the clicked points
                bed = np.interp(np.r_[:ind_port[-1]],x1,y1)
                plt.close()
                del fig
                beds.append(bed)
                extent = np.shape(port_fp)[1]
                bed = np.asarray(np.hstack(beds),'float')

          # now revise the depth in metres
          dep_m = (1/ft)*bed

          if doplot==1:
             if chunkmode!=4:
                for k in range(len(star_fp)):
                   plot_bedpick(port_fp[k], star_fp[k], (1/ft)*bed[ind_port[-1]*k:ind_port[-1]*(k+1)], dist_m[ind_port[-1]*k:ind_port[-1]*(k+1)], ft, shape_port, sonpath, k, chunkmode)
             else:
                plot_bedpick(port_fp, star_fp, (1/ft)*bed, dist_m, ft, shape_port, sonpath, 0, chunkmode)

          metadat['bed'] = bed[:nrec]

    else:
       metadat['bed'] = dep_m[:nrec]*ft

    metadat['heading'] = metadat['heading'][:nrec]
    metadat['lon'] = lon[:nrec]
    metadat['lat'] = lat[:nrec]
    metadat['dist_m'] = dist_m[:nrec]
    metadat['dep_m'] = dep_m[:nrec]
    metadat['pix_m'] = 1/ft
    metadat['bed'] = metadat['bed'][:nrec]
    metadat['c'] = c
    metadat['t'] = t
    if model==2:
       metadat['f'] = f*2
    else:
       metadat['f'] = f

    metadat['spd'] = metadat['spd'][:nrec]
    metadat['time_s'] = metadat['time_s'][:nrec]
    metadat['e'] = metadat['e'][:nrec]
    metadat['n'] = metadat['n'][:nrec]
    metadat['es'] = metadat['es'][:nrec]
    metadat['ns'] = metadat['ns'][:nrec]
    try:
       metadat['caltime'] = metadat['caltime'][:nrec]
    except:
       metadat['caltime'] = metadat['caltime']

    savemat(os.path.normpath(os.path.join(sonpath,base+'meta.mat')), metadat ,oned_as='row')

    f = open(os.path.normpath(os.path.join(sonpath,base+'rawdat.csv')), 'wt')
    writer = csv.writer(f)
    writer.writerow( ('longitude', 'latitude', 'easting', 'northing', 'depth (m)', 'distance (m)', 'instr. heading (deg)', 'heading (deg.)' ) )
    for i in range(0, nrec):
       writer.writerow(( float(lon[i]),float(lat[i]),float(es[i]),float(ns[i]),float(dep_m[i]),float(dist_m[i]), float(metadat['instr_heading'][i]), float(metadat['heading'][i]) ))
    f.close()

    del lat, lon, dep_m #, dist_m

    if doplot==1:

       plot_pos(sonpath, metadat, es, ns)

       if 'dwnlow_fp' in locals():

          plot_dwnlow(dwnlow_fp, chunkmode, sonpath)

       if 'dwnhi_fp' in locals():

          plot_dwnhi(dwnhi_fp, chunkmode, sonpath)

    elapsed = (time.perf_counter() - start)
    print("Processing took " + str(elapsed) + " seconds to analyse")

    print("Done!")
    print("===================================================")
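The pixel-size arithmetic in read() is compact; with the default parameters it works out as follows (rounded values shown in the comments; this is just a numerical illustration of the lines above):

import numpy as np

c = 1450.0   # speed of sound in water, m/s (default)
t = 0.108    # transducer length, m (998-series default)
f = 455      # sidescan frequency, kHz

theta3dB = np.arcsin(c / (t * (f * 1000)))  # half-power beam width, ~0.0295 rad
ft = (np.pi / 2) * (1 / theta3dB)           # ~53.2 pixels per metre at nadir
pix_m = 1 / ft                              # ~0.019 m across-track per pixel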
Example n. 50
0
def map_texture(humfile, sonpath, cs2cs_args, res, mode, nn, numstdevs): #influence = 10, 
         
    '''
    Create plots of the texture lengthscale maps made in PyHum.texture module 
    using the algorithm detailed by Buscombe et al. (2015)
    This textural lengthscale is not a direct measure of grain size. Rather, it is a statistical 
    representation that integrates over many attributes of bed texture, of which grain size is the most important. 
    The technique is a physically based means to identify regions of texture within a sidescan echogram, 
    and could provide a basis for objective, automated riverbed sediment classification.

    Syntax
    ----------
    [] = PyHum.map_texture(humfile, sonpath, cs2cs_args, res, mode, nn, numstdevs)

    Parameters
    ----------
    humfile : str
       path to the .DAT file
    sonpath : str
       path where the *.SON files are
    cs2cs_args : int, *optional* [Default="epsg:26949"]
       arguments to create coordinates in a projected coordinate system
       this argument gets given to pyproj to turn wgs84 (lat/lon) coordinates
       into any projection supported by the proj.4 libraries
    res : float, *optional* [Default=0.5]
       grid resolution of output gridded texture map
    mode: int, *optional* [Default=3]
       gridding mode. 1 = nearest neighbour
                      2 = inverse weighted nearest neighbour
                      3 = Gaussian weighted nearest neighbour
    nn: int, *optional* [Default=64]
       number of nearest neighbours for gridding (used if mode > 1) 
    numstdevs: int, *optional* [Default = 4]
       Threshold number of standard deviations in texture lengthscale per grid cell up to which to accept 
           
    Returns
    -------
    sonpath+'x_y_class'+str(p)+'.asc' : text file
        contains the point cloud of easting, northing, and texture lengthscales
        of the pth chunk

    sonpath+'class_GroundOverlay'+str(p)+'.kml': kml file
        contains gridded (or point cloud) texture lengthscale map for importing into google earth
        of the pth chunk

    sonpath+'class_map'+str(p)+'.png' : 
        image overlay associated with the kml file

    sonpath+'class_map_imagery'+str(p)+'.png' : png image file
        gridded (or point cloud) texture lengthscale map
        overlain onto an image pulled from esri image server

    References
    ----------
      .. [1] Buscombe, D., Grams, P.E., and Smith, S.M.C., 2015, Automated riverbed sediment
       classification using low-cost sidescan sonar. Journal of Hydraulic Engineering 10.1061/(ASCE)HY.1943-7900.0001079, 06015019.
    '''

    # prompt user to supply file if no input file given
    if not humfile:
       print('An input file is required!!!!!!')
       Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
       humfile = askopenfilename(filetypes=[("DAT files","*.DAT")]) 

    # prompt user to supply directory if no input sonpath is given
    if not sonpath:
       print('A *.SON directory is required!!!!!!')
       Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
       sonpath = askdirectory() 

    # print given arguments to screen and convert data type where necessary
    if humfile:
       print('Input file is %s' % (humfile))

    if sonpath:
       print('Sonar file path is %s' % (sonpath))

    if cs2cs_args:
       print('cs2cs arguments are %s' % (cs2cs_args))

    if res:
       res = np.asarray(res,float)
       print('Gridding resolution: %s' % (str(res)))      

    if mode:
       mode = int(mode)
       print('Mode for gridding: %s' % (str(mode)))      

    if nn:
       nn = int(nn)
       print('Number of nearest neighbours for gridding: %s' % (str(nn)))            

    #if influence:
    #   influence = int(influence)
    #   print 'Radius of influence for gridding: %s (m)' % (str(influence))             

    if numstdevs:
       numstdevs = int(numstdevs)
       print('Threshold number of standard deviations in texture lengthscale per grid cell up to which to accept: %s' % (str(numstdevs)))         

    # start timer (time.clock was removed in Python 3.8; perf_counter works on all platforms)
    start = time.perf_counter()
       
    trans =  pyproj.Proj(init=cs2cs_args)

    # if son path name supplied has no separator at end, put one on
    if sonpath[-1]!=os.sep:
       sonpath = sonpath + os.sep

    base = humfile.split('.DAT') # get base of file name for output
    base = base[0].split(os.sep)[-1]

    # remove underscores, negatives and spaces from basename
    base = humutils.strip_base(base)

    meta = loadmat(os.path.normpath(os.path.join(sonpath,base+'meta.mat')))

    esi = np.squeeze(meta['e'])
    nsi = np.squeeze(meta['n']) 

    pix_m = np.squeeze(meta['pix_m'])*1.1
    dep_m = np.squeeze(meta['dep_m'])
    c = np.squeeze(meta['c'])
    #dist_m = np.squeeze(meta['dist_m'])

    theta = np.squeeze(meta['heading'])/(180/np.pi)

    # load memory mapped scans
    shape_port = np.squeeze(meta['shape_port'])
    if shape_port!='':
       if os.path.isfile(os.path.normpath(os.path.join(sonpath,base+'_data_port_lar.dat'))):
          port_fp = io.get_mmap_data(sonpath, base, '_data_port_lar.dat', 'float32', tuple(shape_port))
       else:
          port_fp = io.get_mmap_data(sonpath, base, '_data_port_la.dat', 'float32', tuple(shape_port))

    shape_star = np.squeeze(meta['shape_star'])
    if shape_star!='':
       if os.path.isfile(os.path.normpath(os.path.join(sonpath,base+'_data_star_lar.dat'))):
             star_fp = io.get_mmap_data(sonpath, base, '_data_star_lar.dat', 'float32', tuple(shape_star))
       else:
          star_fp = io.get_mmap_data(sonpath, base, '_data_star_la.dat', 'float32', tuple(shape_star))

    if len(shape_star)>2:    
       shape = shape_port.copy()
       shape[1] = shape_port[1] + shape_star[1]
       class_fp = io.get_mmap_data(sonpath, base, '_data_class.dat', 'float32', tuple(shape))
       #with open(os.path.normpath(os.path.join(sonpath,base+'_data_class.dat')), 'r') as ff:
       #   class_fp = np.memmap(ff, dtype='float32', mode='r', shape=tuple(shape))
    else:
       with open(os.path.normpath(os.path.join(sonpath,base+'_data_class.dat')), 'r') as ff:
          class_fp = np.load(ff)    


    tvg = ((8.5*10**-5)+(3/76923)+((8.5*10**-5)/4))*c
    dist_tvg = ((np.tan(np.radians(25)))*dep_m)-(tvg)

    if len(shape_star)>2:    
       for p in range(len(class_fp)):

          e = esi[shape_port[-1]*p:shape_port[-1]*(p+1)]
          n = nsi[shape_port[-1]*p:shape_port[-1]*(p+1)]
          t = theta[shape_port[-1]*p:shape_port[-1]*(p+1)]
          d = dist_tvg[shape_port[-1]*p:shape_port[-1]*(p+1)]

          len_n = len(n)
   
          merge = class_fp[p].copy()

          merge[np.isnan(merge)] = 0
          merge[np.isnan(np.vstack((np.flipud(port_fp[p]),star_fp[p])))] = 0

          extent = shape_port[1]
          R1 = merge[extent:,:]
          R2 = np.flipud(merge[:extent,:])

          merge = np.vstack((R2,R1))
          del R1, R2

          # get number pixels in scan line
          extent = int(np.shape(merge)[0]/2)

          yvec = np.linspace(pix_m,extent*pix_m,extent)

          X, Y  = getXY(e,n,yvec,d,t,extent)

          merge[merge==0] = np.nan

          if len(merge.flatten()) != len(X):
             merge = merge[:,:len_n]

          merge = merge.T.flatten()

          index = np.where(np.logical_not(np.isnan(merge)))[0]

          X, Y, merge = trim_xys(X, Y, merge, index)

          X = X.astype('float32')
          Y = Y.astype('float32')
          merge = merge.astype('float32')
          
          # write raw bs to file
          outfile = os.path.normpath(os.path.join(sonpath,'x_y_class'+str(p)+'.asc'))
          with open(outfile, 'w') as f:
             np.savetxt(f, np.hstack((humutils.ascol(X),humutils.ascol(Y), humutils.ascol(merge))), delimiter=' ', fmt="%8.6f %8.6f %8.6f")

          humlon, humlat = trans(X, Y, inverse=True)

          #if dogrid==1:

          orig_def, targ_def, grid_x, grid_y, res, shape = get_griddefs(np.min(X), np.max(X), np.min(Y), np.max(Y), res, humlon, humlat, trans)

          grid_x = grid_x.astype('float32')
          grid_y = grid_y.astype('float32')
                                      
          sigmas = 1 #m
          eps = 2
          dat, res = get_grid(mode, orig_def, targ_def, merge, res*10, np.min(X), np.max(X), np.min(Y), np.max(Y), res, nn, sigmas, eps, shape, numstdevs, trans, humlon, humlat)
          
          del merge
             
          dat[dat==0] = np.nan
          dat[np.isinf(dat)] = np.nan

          datm = np.ma.masked_invalid(dat)
          del dat

          glon, glat = trans(grid_x, grid_y, inverse=True)
          del grid_x, grid_y
          
          vmin=np.nanmin(datm)+0.1
          vmax=np.nanmax(datm)-0.1
          if vmin > vmax:
             vmin=np.nanmin(datm)
             vmax=np.nanmax(datm)            
          
          print_map(cs2cs_args, glon, glat, datm, sonpath, p, vmin=vmin, vmax=vmax)

    else: #just 1 chunk   
    
       e = esi
       n = nsi
       t = theta
       d = dist_tvg

       len_n = len(n)
   
       merge = class_fp.copy()

       merge[np.isnan(merge)] = 0
       merge[np.isnan(np.vstack((np.flipud(port_fp),star_fp)))] = 0

       extent = shape_port[0]
       R1 = merge[extent:,:]
       R2 = np.flipud(merge[:extent,:])

       merge = np.vstack((R2,R1))
       del R1, R2

       # get number pixels in scan line
       extent = int(np.shape(merge)[0]/2)

       yvec = np.linspace(pix_m,extent*pix_m,extent)

       X, Y  = getXY(e,n,yvec,d,t,extent)

       merge[merge==0] = np.nan

       if len(merge.flatten()) != len(X):
          merge = merge[:,:len_n]

       merge = merge.T.flatten()

       index = np.where(np.logical_not(np.isnan(merge)))[0]

       X, Y, merge = trim_xys(X, Y, merge, index)

       # write raw bs to file
       outfile = os.path.normpath(os.path.join(sonpath,'x_y_class'+str(0)+'.asc'))
       with open(outfile, 'w') as f:
          np.savetxt(f, np.hstack((humutils.ascol(X),humutils.ascol(Y), humutils.ascol(merge))), delimiter=' ', fmt="%8.6f %8.6f %8.6f")

       humlon, humlat = trans(X, Y, inverse=True)

       if True: # originally gated by "if dogrid==1"

          orig_def, targ_def, grid_x, grid_y, res, shape = get_griddefs(np.min(X), np.max(X), np.min(Y), np.max(Y), res, humlon, humlat, trans)

          ## create mask for where the data is not
          tree = KDTree(np.c_[X.flatten(),Y.flatten()])

          if pykdtree==1:
             dist, _ = tree.query(np.c_[grid_x.ravel(), grid_y.ravel()], k=1)
          else:
             try:
                dist, _ = tree.query(np.c_[grid_x.ravel(), grid_y.ravel()], k=1, n_jobs=cpu_count())
             except TypeError:
                # older scipy versions lack the n_jobs argument; fall back to a serial query
                dist, _ = tree.query(np.c_[grid_x.ravel(), grid_y.ravel()], k=1)

          dist = dist.reshape(grid_x.shape)
             
          sigmas = 1 #m
          eps = 2
          dat, res = get_grid(mode, orig_def, targ_def, merge, res*10, np.min(X), np.max(X), np.min(Y), np.max(Y), res, nn, sigmas, eps, shape, numstdevs, trans, humlon, humlat)
          
          del merge

       if True: # originally gated by "if dogrid==1"
          dat[dat==0] = np.nan
          dat[np.isinf(dat)] = np.nan
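          # blank grid cells farther than two grid resolutions from any raw sample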
          dat[dist>res*2] = np.nan
          del dist

          datm = np.ma.masked_invalid(dat)

          glon, glat = trans(grid_x, grid_y, inverse=True)
          del grid_x, grid_y

       vmin=np.nanmin(datm)+0.1
       vmax=np.nanmax(datm)-0.1
       if vmin > vmax:
         vmin=np.nanmin(datm)
         vmax=np.nanmax(datm)
       
       Parallel(n_jobs = 2, verbose=0)(delayed(doplots)(k, humlon, humlat, cs2cs_args, glon, glat, datm, sonpath, 0, vmin=vmin, vmax=vmax) for k in range(2)) 
       
       #print_map(cs2cs_args, glon, glat, datm, sonpath, 0, vmin=vmin, vmax=vmax)

       #print_contour_map(cs2cs_args, humlon, humlat, glon, glat, datm, sonpath, 0, vmin=vmin, vmax=vmax) 

    # time.clock() was removed in Python 3.8; time.time() works on all platforms
    elapsed = (time.time() - start)
    print("Processing took "+str(elapsed)+" seconds to analyse")

    print("Done!")
    print("===================================================")
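A note on the distance-mask step above: the single-chunk branch queries a KD-tree built from the raw (X, Y) sample coordinates and blanks any interpolated grid cell lying farther than twice the grid resolution from its nearest sample. Below is a minimal, self-contained sketch of that idea; the function name, the toy data, and the use of scipy's cKDTree are illustrative assumptions, not part of the code above.

import numpy as np
from scipy.spatial import cKDTree

def mask_far_cells(x, y, grid_x, grid_y, grid_vals, res):
    # Return a copy of grid_vals with cells farther than 2*res
    # from any raw (x, y) sample set to NaN.
    tree = cKDTree(np.c_[x.ravel(), y.ravel()])
    dist, _ = tree.query(np.c_[grid_x.ravel(), grid_y.ravel()], k=1)
    dist = dist.reshape(grid_x.shape)
    out = grid_vals.astype('float64')  # astype returns a copy
    out[dist > res * 2] = np.nan
    return out

# toy usage: 100 random samples masked onto a 50x50 grid with 0.05 spacing
rng = np.random.default_rng(0)
xs, ys = rng.random(100), rng.random(100)
gx, gy = np.meshgrid(np.linspace(0, 1, 50), np.linspace(0, 1, 50))
masked = mask_far_cells(xs, ys, gx, gy, np.ones_like(gx), res=0.05)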
Example n. 51
0
class Grid(object):

    def __init__(self, ncfile, latvarname, lonvarname):
        """Initialization function

        Arguments:
            latvar -- the netCDF latitude variable
            lonvar -- the netCDF longitude variable
        """
        self.latvar = ncfile.variables[latvarname]
        self.lonvar = ncfile.variables[lonvarname]

        self.time_var = utils.get_time_var(ncfile)

        self.kdt = _data_cache.get(ncfile.filepath())
        if self.kdt is None:
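            # Convert lat/lon to Cartesian coordinates on the unit sphere so
            # that Euclidean KD-tree distance increases monotonically with
            # great-circle distance (no longitude wrap-around artifacts).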
            rad_factor = pi / 180.0
            latvals = self.latvar[:] * rad_factor
            lonvals = self.lonvar[:] * rad_factor
            clat, clon = np.cos(latvals), np.cos(lonvals)
            slat, slon = np.sin(latvals), np.sin(lonvals)
            triples = np.array(list(zip(np.ravel(clat * clon),
                                        np.ravel(clat * slon),
                                        np.ravel(slat))))

            self.kdt = KDTree(triples)
            _data_cache[ncfile.filepath()] = self.kdt

        self._shape = ncfile.variables[latvarname].shape

    def find_index(self, lat0, lon0, n=1):
        """Finds the y,x indicies that are closest to a latitude, longitude
        pair.

        Arguments:
            lat0 -- the target latitude
            lon0 -- the target longitude
            n -- the number of indicies to return

        Returns:
            y, x indicies
        """
        if hasattr(lat0, "__len__"):
            lat0 = np.array(lat0)
            lon0 = np.array(lon0)
            multiple = True
        else:
            multiple = False
        rad_factor = pi / 180.0
        lat0_rad = lat0 * rad_factor
        lon0_rad = lon0 * rad_factor
        clat0, clon0 = np.cos(lat0_rad), np.cos(lon0_rad)
        slat0, slon0 = np.sin(lat0_rad), np.sin(lon0_rad)
        q = [clat0 * clon0, clat0 * slon0, slat0]
        if multiple:
            q = np.array(q).transpose()
        dist_sq_min, minindex_1d = self.kdt.query(np.float32(q), k=n)
        iy_min, ix_min = np.unravel_index(minindex_1d, self._shape)
        return iy_min, ix_min
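    # Usage sketch (hypothetical values): for a 2-D curvilinear grid,
    #   iy, ix = grid.find_index(47.5, -52.7)
    # returns the (y, x) indices of the nearest model cell; passing
    # sequences of latitudes/longitudes returns arrays of indices instead.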

    def bounding_box(self, lat, lon, n=10):
        y, x = self.find_index(np.array(lat).ravel(), np.array(lon).ravel(), n)

        def fix_limits(data, limit):
            # avoid shadowing the max/min builtins
            dmax = np.amax(data)
            dmin = np.amin(data)
            delta = dmax - dmin

            if delta < 2:
                dmin -= 2
                dmax += 2

            dmin = int(dmin - delta / 4.0)
            if dmin < 0:
                dmin = 0

            dmax = int(dmax + delta / 4.0)
            if dmax > limit:
                dmax = limit - 1

            return dmin, dmax

        miny, maxy = fix_limits(y, self.latvar.shape[0])
        minx, maxx = fix_limits(x, self.latvar.shape[1])

        return miny, maxy, minx, maxx

    def interpolation_radius(self, lat, lon):
        distance = VincentyDistance()
        d = distance.measure(
            (np.amin(lat), np.amin(lon)),
            (np.amax(lat), np.amax(lon))
        ) * 1000 / 8.0

        if d == 0:
            d = 50000

        d = np.clip(d, 20000, 50000)

        return d

    def _get_interpolation(self, interp, lat, lon):
        method = interp.get('method')
        neighbours = interp.get('neighbours')
        if neighbours < 1:
            neighbours = 1

        radius = self.interpolation_radius(np.median(lat),
                                           np.median(lon))

        return method, neighbours, radius

    def _transect(self, variable, points, timestep, depth=None, n=100,
                  interpolation={'method': 'inv_square', 'neighbours': 8}):
        distances, times, target_lat, target_lon, b = _path_to_points(
            points, n)

        miny, maxy, minx, maxx = self.bounding_box(target_lat, target_lon, 10)

        lat = self.latvar[miny:maxy, minx:maxx]
        lon = self.lonvar[miny:maxy, minx:maxx]

        if depth is None:
            data = variable[timestep, :, miny:maxy, minx:maxx]
            data = np.rollaxis(data, 0, 3)
        else:
            if len(variable.shape) == 4:
                data = variable[timestep, depth, miny:maxy, minx:maxx]
            else:
                data = variable[timestep, miny:maxy, minx:maxx]

            data = np.expand_dims(data, -1).view(np.ma.MaskedArray)

        _fill_invalid_shift(data)

        method, neighbours, radius = self._get_interpolation(interpolation,
                                                             target_lat,
                                                             target_lon)
        resampled = []
        for d in range(0, data.shape[-1]):
            resampled.append(
                resample(
                    lat,
                    lon,
                    np.array(target_lat),
                    np.array(target_lon),
                    data[:, :, d],
                    method=method,
                    neighbours=neighbours,
                    radius_of_influence=radius,
                    nprocs=4
                )
            )
        resampled = np.ma.vstack(resampled)

        return np.array([target_lat, target_lon]), distances, resampled, b

    def transect(self, variable, points, timestep, n=100,
                 interpolation={'method': 'inv_square', 'neighbours': 8}):
        latlon, distances, resampled, b = self._transect(
            variable, points, timestep, None, n, interpolation)
        return latlon, distances, resampled

    def surfacetransect(self, variable, points, timestep, n=100,
                        interpolation={
                            'method': 'inv_square',
                            'neighbours': 8
                        }):
        latlon, distances, resampled, b = self._transect(
            variable, points, timestep, 0, n, interpolation)
        return latlon, distances, resampled[0]

    def velocitytransect(self, variablex, variabley,
                         points, timestep, n=100,
                         interpolation={
                             'method': 'inv_square',
                             'neighbours': 8
                         }):

        latlon, distances, x, b = self._transect(variablex, points, timestep,
                                                 None, n, interpolation)
        latlon, distances, y, b = self._transect(variabley, points, timestep,
                                                 None, n, interpolation)

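        # Rotate the (x, y) velocity components into along-track (parallel)
        # and across-track (perpendicular) components using the path
        # bearing b returned by _transect.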
        r = np.radians(np.subtract(90, b))
        theta = np.arctan2(y, x) - r
        mag = np.sqrt(x ** 2 + y ** 2)

        parallel = mag * np.cos(theta)
        perpendicular = mag * np.sin(theta)

        return latlon, distances, parallel, perpendicular

    def path(self, variable, depth, points, times, n=100,
             interpolation={'method': 'inv_square', 'neighbours': 8}):

        target_lat = points[:, 0]
        target_lon = points[:, 1]

        miny, maxy, minx, maxx = self.bounding_box(target_lat, target_lon, 10)

        lat = self.latvar[miny:maxy, minx:maxx]
        lon = self.lonvar[miny:maxy, minx:maxx]

        method, neighbours, radius = self._get_interpolation(interpolation,
                                                             target_lat,
                                                             target_lon)

        ts = [
            t.replace(tzinfo=pytz.UTC)
            for t in
            netcdftime.utime(self.time_var.units).num2date(self.time_var[:])
        ]

        mintime, _ = _take_surrounding(ts, times[0])
        _, maxtime = _take_surrounding(ts, times[-1])
        maxtime += 1
        uniquetimes = range(mintime, maxtime + 1)

        combined = []
        for t in range(mintime, maxtime):
            if len(variable.shape) == 3:
                data = variable[t, miny:maxy, minx:maxx]
            else:
                data = variable[t, depth, miny:maxy, minx:maxx]
            _fill_invalid_shift(np.ma.array(data))
            combined.append(resample(lat,
                                     lon,
                                     np.array(target_lat),
                                     np.array(target_lon),
                                     data,
                                     method=method,
                                     neighbours=neighbours,
                                     radius_of_influence=radius,
                                     nprocs=4))
        combined = np.ma.array(combined)

        if mintime + 1 >= len(ts):
            result = combined[0]
        else:
            t0 = ts[mintime]
            td = (ts[mintime + 1] - t0).total_seconds()

            deltas = np.ma.masked_array([t.total_seconds() / td
                                        for t in np.subtract(times, t0)])

            model_td = ts[1] - ts[0]

            deltas[
                np.where(np.array(times) > ts[-1] + model_td / 2)
            ] = np.ma.masked
            deltas[
                np.where(np.array(times) < ts[0] - model_td / 2)
            ] = np.ma.masked

            # This is a slight modification on scipy's interp1d
            # https://github.com/scipy/scipy/blob/v0.17.1/scipy/interpolate/interpolate.py#L534-L561
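            # Bracket each requested time between the two surrounding model
            # timesteps, then linearly blend them using the fractional
            # offsets held in deltas.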
            x = np.array(range(0, len(uniquetimes) - 1))
            new_idx = np.searchsorted(x, deltas)
            new_idx = new_idx.clip(1, len(x) - 1).astype(int)
            low = new_idx - 1
            high = new_idx
            if (high >= len(x)).any():
                result = combined[0]
                # result[:, np.where(deltas.mask)] = np.ma.masked
                result[np.where(deltas.mask)] = np.ma.masked
            else:
                x_low = x[low]
                x_high = x[high]
                y_low = combined[low, range(0, len(times))]
                y_high = combined[high, range(0, len(times))]
                slope = (y_high - y_low) / (x_high - x_low)[None]
                y_new = slope * (deltas - x_low)[None] + y_low
                result = y_new[0]

        return result
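The Grid class above sidesteps longitude wrap-around by converting latitude/longitude to 3-D Cartesian coordinates on the unit sphere before building its KD-tree, so Euclidean distance in the tree grows monotonically with great-circle distance. A standalone sketch of the same trick follows; the function name, the toy grid, and the use of scipy's cKDTree are illustrative assumptions.

import numpy as np
from scipy.spatial import cKDTree

def build_sphere_tree(lats, lons):
    # KD-tree over unit-sphere Cartesian points; inputs in degrees.
    lat = np.radians(np.asarray(lats)).ravel()
    lon = np.radians(np.asarray(lons)).ravel()
    xyz = np.column_stack((np.cos(lat) * np.cos(lon),
                           np.cos(lat) * np.sin(lon),
                           np.sin(lat)))
    return cKDTree(xyz)

# toy usage: nearest cell of a 10x10 one-degree grid to a query point
glon, glat = np.meshgrid(np.arange(-60, -50), np.arange(40, 50))
tree = build_sphere_tree(glat, glon)
qlat, qlon = np.radians(44.2), np.radians(-55.7)
q = [np.cos(qlat) * np.cos(qlon), np.cos(qlat) * np.sin(qlon), np.sin(qlat)]
dist, idx = tree.query(q, k=1)
iy, ix = np.unravel_index(idx, glat.shape)  # indices of the nearest grid cell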