def prepare_normals(means, sigmas, weights, normalize, axis):
    isnan_mean = np.isnan(means)
    isnan_sigmas = np.isnan(sigmas)
    if np.any(isnan_mean != isnan_sigmas):
        raise ValueError('mean and sigma NaNs do not match')
    if np.any(sigmas == 0):
        raise ValueError('sigmas cannot be 0')
    if weights is None:
        weights = np.ones(means.shape)
    if normalize:
        weights = weights * (
            1 / np.nansum(weights * ~isnan_mean, axis=axis, keepdims=True))
    return isnan_mean, isnan_sigmas, weights
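# Sanity check (illustrative values only): with normalize=True, the weights of
# the non-NaN entries along `axis` are rescaled to sum to 1.
_m = np.array([[1.0, 2.0], [3.0, np.nan]])
_s = np.array([[0.5, 1.0], [2.0, np.nan]])
_, _, _w = prepare_normals(_m, _s, weights=None, normalize=True, axis=1)
# _w -> [[0.5, 0.5], [1.0, 1.0]]: the NaN column keeps its raw weight, but it
# is ignored downstream since its mean and sigma are NaN.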
def select_track_points(xy, images, polygon, dem, max_distance):
    """
    Return visibility mask for track points for a set of starting images.

    Arguments:
        xy (array): Candidate point coordinates (n, 2)
        images (list): Starting images (m, )
        polygon (array): Polygon in which points must lie
        dem (glimpse.Raster): Digital elevation model
        max_distance (float): Maximum distance from a camera

    Returns:
        array: Visibility mask for each point and image (n, m)
    """
    # In DEM, in polygon, and DEM not NaN
    z = dem.sample(xy, bounds_error=False, fill_value=np.nan)
    mask = ~np.isnan(z) & glimpse.helpers.points_in_polygon(xy, polygon)
    # Visible in one or more images
    xyz = np.column_stack((xy, z))[mask]
    visible = np.tile(mask.reshape(-1, 1), reps=(1, len(images)))
    for i, img in enumerate(images):
        uv = img.cam.project(xyz, correction=True)
        # In image frame
        visible[mask, i] &= img.cam.inframe(uv)
        # In range
        distance = np.linalg.norm(xyz[:, 0:2] - img.cam.xyz[0:2], axis=1)
        visible[mask, i] &= distance < max_distance
        # In DEM viewshed
        viewshed = glimpse.Raster(
            Z=dem.viewshed(img.cam.xyz), x=dem.xlim, y=dem.ylim)
        visible[mask, i] &= viewshed.sample(xyz[:, 0:2], order=1) > 0.99
        # Not in land mask
        land_mask = glimpse.Raster(load_masks([img])[0])
        visible[mask, i] &= land_mask.sample(
            uv, order=1, bounds_error=False, fill_value=1.0) == 0
    return visible
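# Usage sketch (variable names hypothetical): select candidate grid points
# visible in at least one of an Observer's starting images.
# xy = glimpse.helpers.grid_to_points((template.X, template.Y))
# visible = select_track_points(
#     xy, images=start_images, polygon=cg.Glacier(), dem=dem,
#     max_distance=10e3)
# track_points = xy[visible.any(axis=1)]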
def arc_distance_median(angles):
    """Return the angle with the smallest total arc distance to all others."""
    # https://github.com/scipy/scipy/issues/6644
    mask = ~np.isnan(angles)
    if not mask.any():
        return np.nan
    distances = angles[np.newaxis, mask] - angles[mask, np.newaxis]
    # Wrap pairwise differences to [-pi, pi)
    distances = (distances + np.pi) % (2 * np.pi) - np.pi
    sum_distances = np.abs(distances).sum(axis=1)
    return angles[mask][np.argmin(sum_distances)]
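# Sanity check (illustrative): the wrapped pairwise differences make the
# result invariant to where the circle is cut, unlike a naive np.median.
_a = np.array([0.1, 2 * np.pi - 0.1, 0.05, np.nan])
print(arc_distance_median(_a))  # -> 0.05 (always one of the input angles)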
def flatten_tracks_doug(runs):
    # Join together second forward and backward runs
    f, r = runs['fv'], runs['rv']
    means = np.column_stack((f.means[..., 3:], r.means[..., 3:]))
    sigmas = np.column_stack((f.sigmas[..., 3:], r.sigmas[..., 3:]))
    # Flatten joined runs
    # Mean: Inverse-variance weighted mean
    # Sigma: Linear combination of weighted correlated random variables
    # (approximation using the weighted mean of the variances)
    weights = sigmas**-2
    weights *= 1 / np.nansum(weights, axis=1, keepdims=True)
    allnan = np.isnan(means).all(axis=1, keepdims=True)
    means = np.nansum(weights * means, axis=1, keepdims=True)
    sigmas = np.sqrt(np.nansum(weights * sigmas**2, axis=1, keepdims=True))
    # np.nansum interprets a sum of all NaNs as 0, so restore NaN
    means[allnan] = np.nan
    sigmas[allnan] = np.nan
    return means.squeeze(axis=1), sigmas.squeeze(axis=1)
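# Numeric check of the fusion math above (made-up values): two estimates of
# the same quantity, 1.0 +/- 1.0 and 3.0 +/- 2.0.
_mu = np.array([[1.0, 3.0]])
_sd = np.array([[1.0, 2.0]])
_w = _sd**-2
_w *= 1 / np.nansum(_w, axis=1, keepdims=True)  # -> [[0.8, 0.2]]
print(np.nansum(_w * _mu, axis=1))              # -> [1.4]
print(np.sqrt(np.nansum(_w * _sd**2, axis=1)))  # -> [~1.265] = sqrt(1.6)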
ani = matplotlib.animation.FuncAnimation(
    fig, update_plot, frames=range(time_mask.sum()), blit=True, interval=200)
ani.save('speed_multi2.mp4')

# ---- Animate velocity vectors ----

# Plot vx, vy
fig = matplotlib.pyplot.figure(tight_layout=True, figsize=(12, 8))
ax = matplotlib.pyplot.gca()
ax.axis('off')
ax.set_aspect(1)
i = 0
scale = 15
mask = ~np.isnan(vx[indices][..., i])
quiver = matplotlib.pyplot.quiver(
    raster.X, raster.Y,
    vx[indices][..., i] * scale, vy[indices][..., i] * scale,
    # color='black',
    speeds[indices][..., i], clim=[0, 20],
    alpha=1, width=5, headaxislength=0, headwidth=1, minlength=0,
    pivot='tail', angles='xy')
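# For reference, a minimal blit-compatible update callback (a sketch; the
# actual update_plot defined earlier in the script may differ). With
# blit=True, the callback must return the artists it modified.
# def update_plot(frame):
#     quiver.set_UVC(
#         vx[indices][..., frame] * scale,
#         vy[indices][..., frame] * scale,
#         speeds[indices][..., frame])
#     return quiver,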
orthoZ[orthoZ == 0] = np.nan
# HACK: Clip dem and ortho to same size relative to x, y min
ij = np.minimum(dem.shape, orthoZ.shape[0:2])
dem = dem[(dem.shape[0] - ij[0]):, :ij[1]]
orthoZ = orthoZ[(orthoZ.shape[0] - ij[0]):, :ij[1], :]
# Compute mask
if viewshed_scale != 1:
    smdem = dem.copy()
    smdem.resize(viewshed_scale)
else:
    smdem = dem
mask = glimpse.Raster(Z=smdem.viewshed(img.cam.xyz), x=dem.xlim, y=dem.ylim)
if viewshed_scale != 1:
    mask.resample(dem)
mask.Z = mask.Z.astype(bool)
mask.Z &= ~np.isnan(orthoZ[:, :, 0])
# Copy to shared memory
if parallel:
    dem.Z = sharedmem.copy(dem.Z)
    orthoZ = sharedmem.copy(orthoZ)
    mask.Z = sharedmem.copy(mask.Z)
# Project onto image
aggregate = glimpse.helpers.merge_dicts(
    {i: np.mean for i in range(orthoZ.shape[2])},
    {orthoZ.shape[2]: [np.mean, np.std]})
I = img.cam.project_dem(
    dem=dem, values=orthoZ, mask=mask.Z, tile_size=(256, 256),
    tile_overlap=(1, 1), scale=scale, scale_limits=scale_limits,
    parallel=parallel, correction=True, return_depth=True,
    aggregate=aggregate)
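# For example, with a 3-band ortho (orthoZ.shape[2] == 3), the aggregate
# mapping above expands to
# {0: np.mean, 1: np.mean, 2: np.mean, 3: [np.mean, np.std]},
# reducing each color band by its mean and the extra slot at index 3
# (presumably the depths appended by return_depth=True) by mean and
# standard deviation.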
DEM_DIR = '/volumes/science-b/data/columbia/dem'
DATE_STR = '20070922'

# ---- Read DEM ----

dem_path = glob.glob(os.path.join(DEM_DIR, DATE_STR + '*.tif'))[-1]
dem = glimpse.Raster.read(dem_path)

# ---- Moraine lines ----

paths = glob.glob('geojson/moraines/' + DATE_STR + '.geojson')
for path in paths:
    geo = glimpse.helpers.read_geojson(path, crs=32606)
    glimpse.helpers.elevate_geojson(geo, elevation=dem)
    for coords in glimpse.helpers.geojson_itercoords(geo):
        if any(np.isnan(coords[:, 2])):
            print('Missing elevations in ' + path)
    geo2 = glimpse.helpers.ordered_geojson(geo)
    glimpse.helpers.write_geojson(
        geo2, path=os.path.splitext(path)[0] + '.geojson',
        decimals=(7, 7, 2), crs=32606)

# ---- Ground control points ----

GCP_DEM_PATH = '/volumes/science-b/data/columbia/_new/ArcticDEM/v2.0/tiles/merged_projected_clipped.tif'
dem_ref = glimpse.Raster.read(GCP_DEM_PATH)
geo = glimpse.helpers.read_geojson('geojson/gcp.geojson', crs=32606, key='id')
keys = [key for key in geo['features'] if re.findall('T' + DATE_STR, key)]
keys.sort()
for key in keys:
# ---- Load canonical bed ----

bed = glimpse.Raster.read('bed.tif')

# ---- Build track template ----

grid = glimpse.helpers.box_to_grid(
    dem_interpolant.means[0].box2d, step=grid_step, snap=(0, 0))
track_template = glimpse.Raster(
    np.ones(grid[0].shape, dtype=bool), x=grid[0], y=grid[1][::-1])
xy = glimpse.helpers.grid_to_points((track_template.X, track_template.Y))
selected = glimpse.helpers.points_in_polygon(xy, cg.Glacier())
# Filter by velocity availability
# NOTE: Use nearest to avoid NaN propagation (and on same grid anyway)
selected &= ~np.isnan(vx.sample(xy, order=0))
mask = selected.reshape(track_template.shape)
track_points = xy[mask.ravel()]
# Flat indices into the template grid (invertible via np.unravel_index)
track_ids = np.ravel_multi_index(np.nonzero(mask), track_template.shape)
# Write to file
track_template.Z &= mask
track_template.Z = track_template.Z.astype(np.uint8)
track_template.write(os.path.join(cartesian_path, 'template.tif'), crs=32606)
track_template.write(os.path.join(cylindrical_path, 'template.tif'), crs=32606)

# ---- For each observer ----

for obs in range(len(start_images)):
    print(obs)
    images = start_images[obs]
# Indices of images spaced too closely in time
negate = np.flatnonzero(diffs <= 1)
# Pop from the end so that earlier indices remain valid
for idx in sorted(negate, reverse=True):
    images.pop(idx)
images = images[:int(len(images) / 2)]
print("Image set {}\n".format(len(images)))
obs = glimpse.Observer(list(images), cache=False)
observers.append(obs)

# ---- Prepare DEM ----

path = glob.glob(join(DEM_DIR, "*.tif"))[0]
dem = glimpse.Raster.read(path, d=10)
print("DEM PATH: {}".format(path))
dem.crop(zlim=(0, np.inf))
dem.fill_crevasses(mask=~np.isnan(dem.Z), fill=True)

# ---- Prepare viewshed ----

for obs in observers:
    dem.fill_circle(obs.xyz, radius=100)
viewshed = dem.copy()
viewshed.Z = np.ones(dem.shape, dtype=bool)
for obs in observers:
    viewshed.Z &= dem.viewshed(obs.xyz)
print("\n***** Viewshed done *****\n")

# ---- Run Tracker ----

xy = []
xy0 = np.array((7361411.0, 533528.0))
# xy.append(xy0)
import cg
from cg import glimpse
from glimpse.imports import (datetime, np, os, collections)

root = '/volumes/science/data/columbia'
cg.IMAGE_PATH = os.path.join(root, 'timelapse')
base_dem_path = os.path.join(root, 'dem-ifsar-merged/data/ifsar.tif')
surface_sigma = 3  # m, surface roughness
grid_size = 20  # m
zlim = (1, np.inf)  # m
fill_crevasses_args = dict(
    maximum=dict(size=5),  # 100 m
    gaussian=dict(sigma=5),  # 200 m (68%)
    mask=lambda x: ~np.isnan(x),
    fill=True)
max_distance = 10e3  # m
dem_interpolant_path = 'dem_interpolant.pkl'

# ---- Build DEM template ----

json = glimpse.helpers.read_json(
    'observers.json', object_pairs_hook=collections.OrderedDict)
stations = set([station for x in json for station in x])
station_xy = np.vstack([
    f['geometry']['coordinates'][:, 0:2]
    for station, f in cg.Stations().items() if station in stations])
box = glimpse.helpers.bounding_box(cg.Glacier())
XY = glimpse.helpers.box_to_grid(
    box, step=(grid_size, grid_size), snap=(0, 0), mode='grid')
xy = glimpse.helpers.grid_to_points(XY)
distances = glimpse.helpers.pairwise_distance(
    xy, station_xy, metric='euclidean')
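# A plausible continuation (sketch; the next step is not shown in this
# excerpt): keep only grid points within max_distance of any station.
# selected = distances.min(axis=1) < max_distance
# xy = xy[selected]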
# Precompute spatial neighborhoods and masks
ncols = template.shape[1]
neighbor_ids = np.column_stack(
    (ids, ids - 1, ids + 1, ids - ncols, ids + ncols))
if diagonal_neighbors:
    neighbor_ids = np.column_stack(
        (neighbor_ids, ids - 1 - ncols, ids + 1 - ncols,
         ids - 1 + ncols, ids + 1 + ncols))
neighbor_rows = np.searchsorted(ids, neighbor_ids)
missing = ~np.isin(neighbor_ids, ids)
neighbor_rows[missing] = 0
few_cams = (
    (nobservers[neighbor_rows, :] < min_observers) &
    (nobservers[neighbor_rows, :].max(axis=1, keepdims=True) >= min_observers))
isnan = np.isnan(means)
# Apply spatial median filter
fmeans = np.full(means.shape, np.nan, dtype=np.float32)
fsigmas = fmeans.copy()
for dim in range(means.shape[2]):
    print(dim)
    # (point, neighbor, time)
    m = means[..., dim][neighbor_rows, :]
    s = sigmas[..., dim][neighbor_rows, :]
    # Mask out missing neighbors or neighbors with too few cameras
    m[missing] = np.nan
    m[few_cams] = np.nan
    if not fill_missing:
        # Mask out neighborhoods with missing centers
        m[np.tile(isnan[..., 0][:, None, :], (1, m.shape[1], 1))] = np.nan
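# Layout note (illustrative): on a grid with ncols == 3, the 4-connected
# neighborhood of flat id 4 (the center of a 3x3 grid) is
# np.column_stack((ids, ids - 1, ids + 1, ids - 3, ids + 3))[4]
# -> [4, 3, 5, 1, 7], i.e. [center, left, right, up, down]; ids that fall
# off the grid are caught by the `missing` mask above, not by the stacking.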
datetimes = np.array(t0)
vx = np.dstack(vx0)
vy = np.dstack(vy0)

# ---- Filter Landsat velocities ----

lmask = np.array([key[1] == 'landsat' for key in velocity_keys])
lvx = vx[..., lmask].copy()
lvy = vy[..., lmask].copy()
# Normalize vx, vy to the unit circle
theta = np.arctan2(lvy, lvx)
theta[theta < 0] += 2 * np.pi
uy = np.sin(theta)
ux = np.cos(theta)
# Compute moving-window median orientations
mask = ~np.isnan(ux)
mux = np.zeros(ux.shape, dtype=float)
muy = np.zeros(uy.shape, dtype=float)
for i in range(ux.shape[0]):
    for j in range(ux.shape[1]):
        mux[i, j, mask[i, j, :]] = scipy.ndimage.median_filter(
            ux[i, j, mask[i, j, :]], size=5)
        muy[i, j, mask[i, j, :]] = scipy.ndimage.median_filter(
            uy[i, j, mask[i, j, :]], size=5)
mtheta = np.arctan2(muy, mux)
# Remove outliers
mtheta[mtheta < 0] += 2 * np.pi
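# Why unit vectors (note on the step above, illustrative): filtering angles
# directly would smear values that straddle the 0/2*pi wrap, while their
# sines and cosines vary smoothly across it. E.g. 0.1 and 2*pi - 0.1 average
# to pi as raw angles, but to an orientation of 0 on the unit circle:
_t = np.array([0.1, 2 * np.pi - 0.1])
print(np.arctan2(np.sin(_t).mean(), np.cos(_t).mean()))  # -> 0.0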
img = glimpse.Image(basename + '.JPG', cam=cam)
I = img.read()
if I.ndim > 2:
    I = glimpse.helpers.rgb_to_gray(I).astype(np.uint8)
# Prepare synthetic image
cam = glimpse.helpers.read_json(basename + '-synth.json')
simg = glimpse.Image(basename + '-synth.JPG', cam=cam)
sI = simg.read()
if sI.ndim > 2:
    smask = (sI[:, :, 0] != 127).astype(np.uint8)
    sI = glimpse.helpers.rgb_to_gray(sI).astype(np.uint8)
else:
    smask = (sI != 127).astype(np.uint8)
depth = glimpse.Raster.read(basename + '-depth.tif')
depth_sigma = glimpse.Raster.read(basename + '-depth_stderr.tif')
depth_sigma.Z[np.isnan(depth_sigma.Z)] = 0
# # Equalize images
# clahe = cv2.createCLAHE(clipLimit=1.0, tileGridSize=(20, 20))
# I = clahe.apply(I)
# sI = clahe.apply(sI)
# Match keypoints
k = glimpse.optimize.detect_keypoints(I)
sk = glimpse.optimize.detect_keypoints(sI, mask=smask)
uv, suv, ratio = glimpse.optimize.match_keypoints(
    k, sk, max_ratio=0.75,
    max_distance=MAX_DISTANCE_SCALE * img.cam.imgsz[0],
    return_ratios=True)
print(len(uv), 'matches')
# matplotlib.pyplot.hist(ratio)
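# The max_ratio=0.75 argument applies the standard ratio test: a match is
# kept only if its best descriptor distance is less than 0.75x the
# second-best, which discards ambiguous matches. Plotting the returned
# ratios (the commented hist above) is a quick way to check whether 0.75 is
# a reasonable cutoff for this image pair.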