Example #1
def load_masks(images):
    """
    Return a list of boolean land masks.

    Images must all be from the same station.

    Arguments:
        images (iterable): Image objects
    """
    # All images must be from the same station (for now)
    station = parse_image_path(images[0].path)['station']
    pattern = re.compile(station + r'_[0-9]{8}_[0-9]{6}[^\/]*$')
    is_station = [pattern.search(img.path) is not None for img in images[1:]]
    assert all(is_station)
    # Find all station svg with 'land' markup
    imgsz = images[0].cam.imgsz
    svg_paths = glob.glob(os.path.join(CG_PATH, 'svg', station + '_*.svg'))
    markups = [glimpse.svg.parse_svg(path, imgsz=imgsz) for path in svg_paths]
    land_index = np.where(['land' in markup for markup in markups])[0]
    if len(land_index) == 0:
        raise ValueError('No land masks found for station ' + station)
    svg_paths = np.array(svg_paths)[land_index]
    land_markups = np.array(markups)[land_index]
    # Select svg files nearest to images, with preference within breaks
    svg_datetimes = paths_to_datetimes(svg_paths)
    svg_break_indices = np.array(
        [_station_break_index(path) for path in svg_paths])
    img_datetimes = [img.datetime for img in images]
    distances = glimpse.helpers.pairwise_distance_datetimes(
        img_datetimes, svg_datetimes)
    nearest_index = []
    for i, img in enumerate(images):
        break_index = _station_break_index(img.path)
        same_break = np.where(break_index == svg_break_indices)[0]
        if same_break.size > 0:
            i = same_break[np.argmin(distances[i][same_break])]
        else:
            # No mask within the same motion break; fall back to nearest overall
            i = np.argmin(distances[i])
        nearest_index.append(i)
    nearest_index = np.asarray(nearest_index)
    nearest = np.unique(nearest_index)
    # Make masks and expand per image without copying
    masks = [None] * len(images)
    image_sizes = np.array([img.cam.imgsz for img in images])
    sizes = np.unique(image_sizes, axis=0)
    for i in nearest:
        polygons = land_markups[i]['land'].values()
        is_nearest = nearest_index == i
        for size in sizes:
            scale = size / imgsz
            rpolygons = [polygon * scale for polygon in polygons]
            mask = glimpse.helpers.polygons_to_mask(rpolygons,
                                                    size=size).astype(np.uint8)
            mask = sharedmem.copy(mask)
            for j in np.where(is_nearest
                              & np.all(image_sizes == size, axis=1))[0]:
                masks[j] = mask
    return masks
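
A minimal usage sketch (the station and service identifiers below are placeholders): images from one station are loaded with load_images (Example #12) and passed to load_masks, which returns one mask per image.

# Hypothetical identifiers for illustration only
images = load_images(station='AK01', services=('20150615',))
masks = load_masks(images)
# masks[i] belongs to images[i]; identically sized images that share the same
# nearest land markup reference the same mask array (no copies are made).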
Example #2
def _station_break_index(path):
    """
    Return index of image in motion break sequence.

    Arguments:
        path (str): Image path

    Returns:
        int: 0 if the image precedes all motion breaks (original viewdir),
            otherwise i + 1 where break i is the latest break at or before
            the image
    """
    stations = Stations()
    ids = parse_image_path(path)
    station = stations[ids['station']]
    if 'breaks' not in station['properties']:
        return 0
    breaks = station['properties']['breaks']
    if not breaks:
        return 0
    break_images = np.array([x['start'] for x in breaks])
    idx = np.argsort(break_images)
    i = np.where(break_images[idx] <= ids['basename'])[0]
    if i.size > 0:
        return idx[i[-1]] + 1
    else:
        return 0
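
For illustration (the path below is hypothetical), the break index identifies which view-direction segment an image belongs to, which Example #1 uses to match masks to images within the same segment.

# Hypothetical image path; returns 0 before any motion break, i + 1 after break i
_station_break_index('AK01/AK01_20150615/AK01_20150615_120000.JPG')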
Example #3
def load_model(camera,
               svgs=None,
               keys=None,
               step=None,
               group_params=dict(),
               station_calib=False,
               camera_calib=False,
               fixed=None):
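    """
    Return images and a Cameras model for calibrating a camera.

    Gathers motion and svg control for the camera, resizes images and controls
    to a common size, and builds a `glimpse.optimize.Cameras` model. If the
    camera's station is not fixed, station position (xyz) is added to the
    group parameters.

    Returns:
        list: Images with motion control
        list: Images with svg control
        glimpse.optimize.Cameras: Optimization model
        str or None: Station identifier if xyz is free to optimize, else None
    """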
    # Gather motion control
    motion_images, motion_controls, motion_cam_params = cg.camera_motion_matches(
        camera, station_calib=station_calib, camera_calib=camera_calib)
    # Gather svg control
    svg_images, svg_controls, svg_cam_params = cg.camera_svg_controls(
        camera,
        keys=keys,
        svgs=svgs,
        correction=True,
        station_calib=station_calib,
        camera_calib=camera_calib,
        step=step)
    # Standardize image sizes
    imgszs = np.unique([img.cam.imgsz for img in (motion_images + svg_images)],
                       axis=0)
    if len(imgszs) > 1:
        i_max = np.argmax(imgszs[:, 0])
        print('Resizing images and controls to', imgszs[i_max])
        for control in motion_controls + svg_controls:
            control.resize(size=imgszs[i_max], force=True)
        # Set new imgsz as original camera imgsz
        for img in motion_images + svg_images:
            img.cam.original_vector[6:8] = imgszs[i_max]
    # Determine whether xyz can be optimized
    stations = [cg.parse_image_path(img.path)['station'] for img in svg_images]
    if fixed is None:
        if len(stations) > 0 and (np.array(stations) == stations[0]).all():
            fixed = cg.Stations()[stations[0]]['properties']['fixed']
        else:
            fixed = True
    station = None if fixed else stations[0]
    if station:
        group_params = glimpse.helpers.merge_dicts(group_params,
                                                   dict(xyz=True))
    model = glimpse.optimize.Cameras(
        cams=[img.cam for img in motion_images + svg_images],
        controls=motion_controls + svg_controls,
        cam_params=motion_cam_params + svg_cam_params,
        group_params=group_params)
    return motion_images, svg_images, model, station
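
A sketch of calling load_model (the camera identifier and keyword values are placeholders, not taken from the calibration data):

# Hypothetical camera identifier and settings
motion_images, svg_images, model, station = load_model(
    'nikon-d200-08-24mm', step=20, station_calib=True, camera_calib=True)
# `model` bundles the cameras, controls, and per-camera parameters;
# `station` is the station id when its position (xyz) is free to optimize,
# otherwise None.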
Example #4
def get_nearest_terminus(t):
    """
    Return the terminus nearest a datetime.
    """
    types = ('aerometric', 'arcticdem', 'ifsar', 'tandem', 'landsat-8',
             'landsat-7', 'terrasar')
    termini = [
        f for f in Termini() if len(f['properties']['date']) == 10
        and f['properties']['type'] in types
    ]
    termini.sort(key=lambda x: (x['properties']['date'],
                                types.index(x['properties']['type'])))
    datetimes = [
        datetime.datetime.strptime(f['properties']['date'] + '22',
                                   '%Y-%m-%d%H') for f in termini
    ]
    dt = np.abs(np.array(datetimes) - t)
    i = np.argmin(dt)
    return termini[i]['geometry']['coordinates']
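
For example, the terminus trace closest in time to an arbitrary date could be retrieved as follows:

# Coordinates of the terminus observation nearest 2015-06-15
coords = get_nearest_terminus(datetime.datetime(2015, 6, 15))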
Example #5
#==============================================

DATA_DIR = "/home/dunbar/Research/helheim/data/observations"
DEM_DIR = os.path.join(DATA_DIR, 'dem')
MAX_DEPTH = 30e3

# ---- Prepare Observers ----

observerpath = ['stardot1','stardot2']
observers = []
for observer in observerpath:
    path = join(DATA_DIR, observer)
    campaths = glob.glob(join(path, "*.JSON"))
    images = [glimpse.Image(path=campath.replace(".JSON", ".jpg"), cam=campath)
              for campath in campaths]
    images.sort(key=lambda img: img.datetime)
    datetimes = np.array([img.datetime for img in images])
    for n, delta in enumerate(np.diff(datetimes)):
        if delta <= datetime.timedelta(seconds=0):
            secs = datetime.timedelta(seconds=n % 5 + 1)
            images[n + 1].datetime = images[n + 1].datetime + secs
    diffs = np.array([dt.total_seconds() for dt in
                      np.diff(np.array([img.datetime for img in images]))])
    # Drop images within 1 second of the previous image (indices, not diff
    # values; pop in reverse so earlier indices stay valid)
    for k in sorted(np.where(diffs <= 1)[0] + 1, reverse=True):
        images.pop(k)

    # Use only the first half of the images
    images = images[:len(images) // 2]
    print("Image set size: {}".format(len(images)))
    obs = glimpse.Observer(images, cache=False)
    observers.append(obs)
#-------------------------
uv = observers[1].images[0].cam.project((7361411.0, 533528.0, 180))
observers[1].images[0].cam.plot()
Example #6
     # NOTE: Determine sigma programmatically?
     observer = glimpse.Observer(images, cache=True, correction=True, sigma=0.3)
     observers.append(observer)
 # ---- Load track points ----
 t = min([observer.datetimes[0] for observer in observers])
 datestr = t.strftime('%Y%m%d')
 basename = str(i_obs)
 # ids, xy, observer_mask, vrthz, vrthz_sigma, flotation
 params = glimpse.helpers.read_pickle(
     os.path.join(points_path, basename + '.pkl'))
 # ---- Load DEM ----
 # dem, dem_sigma
 dem, dem_sigma = dem_interpolant(t, return_sigma=True)
 # Crop DEM (for lower memory use)
 box = (glimpse.helpers.bounding_box(params['xy']) +
     np.array([-1, -1, 1, 1]) * dem_padding)
 dem.crop(xlim=box[0::2], ylim=box[1::2])
 dem_sigma.crop(xlim=box[0::2], ylim=box[1::2])
 dem.crop_to_data()
 dem_sigma.crop_to_data()
 # ---- Compute motion models ----
 cylindrical = 'vrthz' in params
 # motion_models
 time_unit = datetime.timedelta(days=1)
 m = len(params['xy'])
 if cylindrical:
     vrthz_sigmas = [compute_vrthz_sigma(
         params['vrthz_sigma'][i], params['flotation'][i]) for i in range(m)]
     arthz_sigmas = [compute_arthz_sigma(
         vrthz_sigmas[i], params['flotation'][i]) for i in range(m)]
     motion_models = [glimpse.tracker.CylindricalMotionModel(
Example #7
sequences = cg.Sequences()
station_ranges = dict()
station_iranges = dict()
station_images = dict()
station_datetimes = dict()
for station in stations:
    print(station)
    services = sequences[sequences.station == station].service.values
    images = cg.load_images(
        station=station, services=services, snap=snap,
        service_exif=True, anchors=False, viewdir=True, viewdir_as_anchor=True,
        file_errors=False)
    images = [img for img in images if img.anchor]
    images.sort(key=lambda x: x.datetime)
    datetimes = np.array([img.datetime for img in images])
    # Endpoints
    iranges = np.atleast_2d((0, len(images)))
    # Changes in image size
    width = [img.cam.imgsz[0] for img in images]
    cuts = [i + 1 for i in np.nonzero(np.diff(width))[0]]
    iranges = glimpse.helpers.cut_ranges(iranges, cuts)
    # Camera changes
    # HACK: Use k1 as a proxy
    k = [img.cam.k[0] for img in images]
    cuts = [i + 1 for i in np.nonzero(np.diff(k))[0]]
    iranges = glimpse.helpers.cut_ranges(iranges, cuts)
    # Gaps in coverage
    dt = np.diff(datetimes)
    cuts = [i + 1 for i in np.nonzero(dt > max_gap)[0]]
    iranges = glimpse.helpers.cut_ranges(iranges, cuts)
Example #8
    I = clahe.apply(I.astype(np.uint8))
    I_ori = clahe.apply(glimpse.helpers.rgb_to_gray(img.read()).astype(np.uint8))
    # Write synthetic image (reserve gray value 127 for missing pixels)
    I[I == 127] = 126
    I[nanI] = 127
    img.write(basename + '-synth.JPG', I, quality=95)
    # Write synthetic camera
    img.cam.write(
        path=basename + '-synth.json',
        attributes=('xyz', 'viewdir', 'fmm', 'cmm', 'k', 'p', 'sensorsz'),
        flat_arrays=True, indent=4)
    # Write copy of original image
    img.cam.resize()
    img.write(basename + '.JPG', I_ori, quality=95)
    # Write svg
    svg_path = basename + '.svg'
    if not os.path.isfile(svg_path):
        size = img.cam.imgsz
        svg_size = size * 0.25
        synth_size = np.array(I.shape[0:2][::-1])
        xml = glimpse.svg._svg(
            glimpse.svg._image(size=size, scale=svg_size / size, path=image + '.JPG'),
            glimpse.svg._image(size=synth_size, scale=svg_size / synth_size, path=image + '-synth.JPG'),
            id='svg', size=svg_size
        )
        glimpse.svg._write_svg(xml, path=svg_path, pretty_print=True)
    # Write depth map
    img.write(basename + '-depth.tif', depth)
    img.write(basename + '-depth_stderr.tif', depth_sigma)
    print(timeit.default_timer() - start)
Example #9
         img.anchor = False
 # Load matches for tile
 read_matches(matcher, imgs=indices[starts[tile]:ends[tile]])
 # Remove images with too few matches
 # NOTE: Repeat until no additional images are below threshold
 imgs = [None]
 while len(imgs):
     n = matcher.matches_per_image()
     imgs = np.where(n < MIN_MATCHES)[0]
     matcher.drop_images(imgs)
 # Check for breaks in remaining matches
 breaks = matcher.match_breaks()
 if len(breaks):
     raise ValueError('Match breaks at:', breaks)
 # Check for an anchor image
 is_anchor = np.array([img.anchor for img in matcher.images])
 anchors = np.where(is_anchor)[0]
 if not len(anchors):
     raise ValueError('No anchor image present')
 # Free up memory and convert matches to XY
 matcher.filter_matches(clear_weights=True)
 matcher.convert_matches(glimpse.optimize.RotationMatchesXY,
                         clear_uvs=True)
 # Orient cameras
 cams = [img.cam for img in matcher.images]
 controls = tuple(matcher.matches.data)
 cam_params = [
     dict() if img.anchor else dict(viewdir=True)
     for img in matcher.images
 ]
 model = glimpse.optimize.Cameras(cams, controls, cam_params=cam_params)
Example #10
# ---- Build DEM template ----

json = glimpse.helpers.read_json('observers.json',
    object_pairs_hook=collections.OrderedDict)
stations = set([station for x in json for station in x])
station_xy = np.vstack([f['geometry']['coordinates'][:, 0:2]
    for station, f in cg.Stations().items()
    if station in stations])
box = glimpse.helpers.bounding_box(cg.Glacier())
XY = glimpse.helpers.box_to_grid(box, step=(grid_size, grid_size),
    snap=(0, 0), mode='grid')
xy = glimpse.helpers.grid_to_points(XY)
distances = glimpse.helpers.pairwise_distance(xy, station_xy, metric='euclidean')
selected = distances.min(axis=1) < max_distance
box = glimpse.helpers.bounding_box(xy[selected]) + 0.5 * np.array([-1, -1, 1, 1]) * grid_size
shape = np.diff([box[1::2], box[0::2]], axis=1) / grid_size
dem_template = glimpse.Raster(np.ones(shape.astype(int).ravel(), dtype=bool),
    x=box[0::2], y=box[1::2][::-1])
dem_points = glimpse.helpers.grid_to_points((dem_template.X, dem_template.Y))

# ---- Select DEMs ----

dem_sigmas = {
    'aerometric': 1.5,
    'ifsar': 1.5 + 0.5, # additional time uncertainty
    'arcticdem': 3,
    'tandem': 3 # after bulk corrections
}
dem_keys = [
    ('20040618', 'aerometric'),
Example #11
    y = glimpse.Raster.read(basepath + '_vy.tif',
                            xlim=template.xlim,
                            ylim=template.ylim,
                            nan=nan)
    # NOTE: Avoiding faster grid sampling because of NaN
    vx0.append(
        x.sample(points, order=1, bounds_error=False).reshape(template.shape))
    vy0.append(
        y.sample(points, order=1, bounds_error=False).reshape(template.shape))
    datestr = re.findall(r'^([0-9]{8})', key[0])[0]
    t0.append(datetime.datetime.strptime(datestr, '%Y%m%d'))
# Remove unread keys
for i in dropped:
    velocity_keys.pop(i)
# Stack results
datetimes = np.array(t0)
vx = np.dstack(vx0)
vy = np.dstack(vy0)

# ---- Filter Landsat velocities ----

lmask = np.array([key[1] == 'landsat' for key in velocity_keys])
lvx = vx[..., lmask].copy()
lvy = vy[..., lmask].copy()
# Normalize vx, vy to the unit circle
theta = np.arctan2(lvy, lvx)
theta[theta < 0] += 2 * np.pi
uy = np.sin(theta)
ux = np.cos(theta)
# Compute moving-window median orientations
mask = ~np.isnan(ux)
Example #12
def load_images(station,
                services,
                use_exif=False,
                service_exif=False,
                anchors=False,
                viewdir=True,
                viewdir_as_anchor=False,
                file_errors=True,
                **kwargs):
    """
    Return list of calibrated Image objects.

    Any available station, camera, image, and viewdir calibrations are loaded
    and images with image calibrations are marked as anchors.

    Arguments:
        station (str): Station identifier
        services (iterable): Service identifiers
        use_exif (bool): Whether to parse image datetimes from EXIF (slower)
            rather than from paths (faster)
        service_exif (bool): Whether to extract EXIF from only the first image
            of each service (faster) rather than from every image (slower).
            If `True`, `Image.datetime` is parsed from the path.
            Always `False` if `use_exif=True`.
        anchors (bool): Whether to include anchor images even if
            filtered out by `kwargs['snap']`
        viewdir (bool): Whether to load view direction (viewdir) calibrations
        viewdir_as_anchor (bool): Whether to treat images with a viewdir
            calibration as anchors
        file_errors (bool): Whether to raise an error on missing camera
            calibration files
        **kwargs: Arguments to `glimpse.helpers.select_datetimes()`
    """
    if use_exif:
        service_exif = False
    # Sort services in time
    if isinstance(services, str):
        services = (services,)
    services = np.sort(services)
    # Parse datetimes of all candidate images
    paths_service = [
        glob.glob(
            os.path.join(IMAGE_PATH, station, station + '_' + service,
                         '*.JPG')) for service in services
    ]
    paths = np.hstack(paths_service)
    basenames = [glimpse.helpers.strip_path(path) for path in paths]
    if use_exif:
        exifs = [glimpse.Exif(path) for path in paths]
        datetimes = np.array([exif.datetime for exif in exifs])
    else:
        datetimes = paths_to_datetimes(basenames)
    # Select images based on datetimes
    indices = glimpse.helpers.select_datetimes(datetimes, **kwargs)
    if anchors:
        # Add anchors
        # HACK: Ignore any <image>-<suffix>.json files
        anchor_paths = glob.glob(
            os.path.join(CG_PATH, 'images', station + '_*[0-9].json'))
        anchor_basenames = [
            glimpse.helpers.strip_path(path) for path in anchor_paths
        ]
        if 'start' in kwargs or 'end' in kwargs:
            # Filter by start, end
            anchor_datetimes = np.asarray(paths_to_datetimes(anchor_basenames))
            inrange = glimpse.helpers.select_datetimes(
                anchor_datetimes,
                **glimpse.helpers.merge_dicts(kwargs, dict(snap=None)))
            anchor_basenames = np.asarray(anchor_basenames)[inrange]
        anchor_indices = np.where(np.isin(basenames, anchor_basenames))[0]
        indices = np.unique(np.hstack((indices, anchor_indices)))
    service_breaks = np.hstack((0, np.cumsum([len(x) for x in paths_service])))
    station_calibration = load_calibrations(station_estimate=station,
                                            station=station,
                                            merge=True,
                                            file_errors=False)
    images = []
    for i, service in enumerate(services):
        index = indices[(indices >= service_breaks[i])
                        & (indices < service_breaks[i + 1])]
        if not index.size:
            continue
        service_calibration = glimpse.helpers.merge_dicts(
            station_calibration,
            load_calibrations(path=paths[index[0]],
                              camera=True,
                              merge=True,
                              file_errors=file_errors))
        if service_exif:
            exif = glimpse.Exif(paths[index[0]])
        for j in index:
            basename = basenames[j]
            calibrations = load_calibrations(
                image=basename,
                viewdir=basename if viewdir else False,
                station_estimate=station,
                merge=False,
                file_errors=False)
            if calibrations['image']:
                calibration = glimpse.helpers.merge_dicts(
                    service_calibration, calibrations['image'])
                anchor = True
            else:
                calibration = glimpse.helpers.merge_dicts(
                    service_calibration,
                    dict(viewdir=calibrations['station_estimate']['viewdir']))
                anchor = False
            if viewdir and calibrations['viewdir']:
                calibration = glimpse.helpers.merge_dicts(
                    calibration, calibrations['viewdir'])
                if viewdir_as_anchor:
                    anchor = True
            if use_exif:
                exif = exifs[j]
            elif not service_exif:
                exif = None
            if KEYPOINT_PATH:
                keypoint_path = os.path.join(KEYPOINT_PATH, basename + '.pkl')
            else:
                keypoint_path = None
            image = glimpse.Image(path=paths[j],
                                  cam=calibration,
                                  anchor=anchor,
                                  exif=exif,
                                  datetime=None if use_exif else datetimes[j],
                                  keypoints_path=keypoint_path)
            images.append(image)
    return images
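
A hedged usage sketch (station, services, and the snap interval are placeholders); extra keyword arguments such as `snap` pass through to glimpse.helpers.select_datetimes():

# Hypothetical identifiers for illustration only
images = load_images(
    station='AK01', services=('20150615', '20150816'),
    snap=datetime.timedelta(hours=2), anchors=True)
anchor_images = [img for img in images if img.anchor]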