Example #1
import numpy as np
import shapely.geometry
import shapely.ops


def clip_glacier_with_terminus(line):
    """
    Clip glacier extent with a terminus.
    """
    gpoly = shapely.geometry.Polygon(shell=Glacier())
    # Extend western edge past polygon boundary
    wpoint = shapely.geometry.Point(line[0])
    west_snaps = shapely.ops.nearest_points(wpoint, gpoly.exterior)
    if west_snaps[0] != west_snaps[1]:
        new_west = np.asarray(west_snaps[1].coords[0])
        d = new_west - line[0]
        d /= np.linalg.norm(d)
        line = np.row_stack((new_west + d, line))
    # Extend eastern edge past polygon boundary
    epoint = shapely.geometry.Point(line[-1])
    east_snaps = shapely.ops.nearest_points(epoint, gpoly.exterior)
    if east_snaps[0] != east_snaps[1]:
        new_east = np.asarray(east_snaps[1].coords[0])
        d = new_east - line[-1]
        d /= np.linalg.norm(d)
        line = np.row_stack((line, new_east + d))
    tline = shapely.geometry.LineString(line)
    # Split glacier at terminus
    splits = shapely.ops.split(gpoly, tline)
    if len(splits) < 2:
        raise ValueError('Glacier polygon not split by terminus')
    else:
        areas = [split.area for split in splits]
        return np.asarray(splits[np.argmax(areas)].exterior.coords)
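
# A minimal sketch (toy geometry, not project data) of the shapely.ops.split
# mechanic that clip_glacier_with_terminus() relies on. Iterating and
# indexing the result assumes shapely 1.x (use `.geoms` in shapely 2.x):
square = shapely.geometry.Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
cut = shapely.geometry.LineString([(-0.5, 0.25), (1.5, 0.25)])
pieces = shapely.ops.split(square, cut)
areas = [piece.area for piece in pieces]
largest = np.asarray(pieces[np.argmax(areas)].exterior.coords)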
Example #2
import numpy as np
import shapely.geometry


def load_forebay_polygon(glacier):
    """
    Return the forebay extent for a given glacier extent.
    """
    gpoly = shapely.geometry.Polygon(shell=glacier)
    fpoly = shapely.geometry.Polygon(shell=Forebay())
    diff = fpoly.difference(gpoly)
    return np.asarray(diff.exterior.coords)
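
# Note: if the glacier splits the forebay into several parts, the difference
# is a MultiPolygon and `.exterior` above would fail. A hedged guard
# (shapely 1.x iteration semantics) could keep the largest part instead:
#
#     if diff.geom_type == 'MultiPolygon':
#         diff = max(diff, key=lambda p: p.area)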
Example #3
import numpy as np


def numpy_dropdims(a, axis=None, keepdims=False):
    """
    Drop singleton dimensions from an array.

    Returns a scalar if `a` has exactly one element, squeezes `axis` if it
    has length 1, and otherwise returns `a` unchanged. If `keepdims=True`,
    `a` is always returned unchanged.
    """
    a = np.asarray(a)
    if keepdims:
        return a
    elif a.size == 1:
        return a.item()
    elif axis is not None and a.shape[axis] == 1:
        return a.squeeze(axis=axis)
    else:
        return a
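
# Usage sketch:
numpy_dropdims(np.array([[1.0]]))               # -> 1.0 (Python scalar)
numpy_dropdims(np.ones((3, 1)), axis=1)         # -> shape (3,)
numpy_dropdims(np.ones((3, 1)), keepdims=True)  # -> unchanged, shape (3, 1)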
Example #4
import numpy as np
import shapely.geometry


def intersect_polygons(polygons):
    """
    Return intersection of polygons.
    """
    shapes = [shapely.geometry.Polygon(shell=xy) for xy in polygons]
    shape = shapes[0]
    for other in shapes[1:]:
        shape = shape.intersection(other)
    # If the intersection has multiple parts, keep the largest
    if np.iterable(shape):
        i = np.argmax([poly.area for poly in shape])
        shape = shape[i]
    return np.asarray(shape.exterior.coords)
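
# Usage sketch with two overlapping squares (toy coordinates):
a = [(0, 0), (2, 0), (2, 2), (0, 2)]
b = [(1, 1), (3, 1), (3, 3), (1, 3)]
xy = intersect_polygons([a, b])  # exterior coords of the 1x1 overlap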
Example #5
import numpy as np
import shapely.geometry
import shapely.ops


def clip_terminus_with_coast(line):
    """
    Clip a terminus with the west and east coastlines.
    """
    # Convert to shapely format
    coast = Coast()
    cline_west = shapely.geometry.LineString(coast['west'])
    cline_east = shapely.geometry.LineString(coast['east'])
    tline = shapely.geometry.LineString(line)
    # Cut at the west coastline, keeping the part east of it
    tline = shapely.ops.split(tline, cline_west.buffer(distance=100))[-1]
    # Cut at the east coastline, keeping the part west of it
    tline = shapely.ops.split(tline, cline_east.buffer(distance=100))[0]
    return np.asarray(tline.coords)
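
# Mechanic sketch (toy coordinates, shapely 1.x indexing): splitting a line
# by a buffered splitter cuts it at the buffer boundary; indexing then picks
# which side to keep.
toy_line = shapely.geometry.LineString([(0, 0), (10, 0)])
toy_cut = shapely.geometry.LineString([(3, -1), (3, 1)]).buffer(distance=0.5)
parts = shapely.ops.split(toy_line, toy_cut)
east_part = parts[-1]  # everything east of the buffered cut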
Example #6
import datetime

import numpy as np
import requests


def tide_height(t):
    """
    Return tide heights at Valdez for the given datetimes.

    Queries hourly tide heights (m above MSL, UTC) from the NOAA CO-OPS API
    and linearly interpolates them to the requested times.
    """
    if isinstance(t, datetime.datetime):
        t = [t]
    t = np.asarray(t)
    dt = datetime.timedelta(hours=1.5)
    t_begin = np.nanmin(t).replace(minute=0, second=0, microsecond=0)
    t_end = np.nanmax(t) + dt
    # https://tidesandcurrents.noaa.gov/api/
    params = dict(
        format='json',
        units='metric',
        time_zone='gmt',
        datum='MSL',
        product='hourly_height',
        station=9454240,  # Valdez
        begin_date=t_begin.strftime('%Y%m%d %H:%M'),
        end_date=t_end.strftime('%Y%m%d %H:%M'))
    r = requests.get('https://tidesandcurrents.noaa.gov/api/datagetter',
                     params=params)
    v = [float(item['v']) for item in r.json()['data']]
    return np.interp(
        [dti.total_seconds() for dti in t - t_begin],
        np.linspace(0, 3600 * (len(v) - 1), len(v)), v)
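
# Usage sketch (queries the live NOAA CO-OPS API; times are illustrative):
t = [datetime.datetime(2013, 6, 1, 12, 0), datetime.datetime(2013, 6, 1, 18, 30)]
heights = tide_height(t)  # heights (m, MSL) at Valdez, interpolated to t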
Example #7
    starts = np.array([r[0] + ni * step_dtS for ni in range(n)])
    ranges = np.column_stack((starts, starts + dt))
    ranges = np.vectorize(datetime.datetime.fromtimestamp)(ranges)
    observer_ranges.append(ranges)

# Build Observer image lists
observers = []
for i, ranges in enumerate(observer_ranges):
    temp_basenames = [dict() for _ in range(len(ranges))]
    for station in stations:
        crop = station_coverage[station][i]
        selected = ((crop[0] <= station_datetimes[station]) &
            (crop[1] >= station_datetimes[station]))
        if not np.any(selected):
            continue
        images = np.asarray(station_images[station])[selected]
        datetimes = np.asarray(station_datetimes[station])[selected]
        for j, r in enumerate(ranges):
            selected = (r[0] <= datetimes) & (r[1] >= datetimes)
            n = np.count_nonzero(selected)
            if n < min_images:
                continue
            basenames = [glimpse.helpers.strip_path(img.path)
                for img in images[selected]]
            temp_basenames[j][station] = basenames
    observers += temp_basenames

# Remove small Observers
observers = [obs for obs in observers if len(obs) >= min_observers]
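
# Toy sketch (illustrative floats; the real values are datetime objects) of
# the vectorized window-membership test used above:
toy_datetimes = np.array([1.0, 2.5, 3.0, 7.0])
toy_range = (2.0, 5.0)
selected = (toy_range[0] <= toy_datetimes) & (toy_range[1] >= toy_datetimes)
# selected -> array([False, True, True, False]); count_nonzero(selected) -> 2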
Example #8
for image in images:
    print(image)
    start = timeit.default_timer()
    basename = os.path.join('svg-synth', image)
    if os.path.isfile(basename + '-synth.JPG'):
        continue
    # Load image
    img_path = cg.find_image(image)
    cam_args = cg.load_calibrations(image,
        station_estimate=True, station=True, merge=True, file_errors=False)
    img = glimpse.Image(img_path, cam=cam_args)
    img.cam.resize(img_size)
    # Select nearest dem and ortho
    img_date = datetime.datetime.strptime(
        cg.parse_image_path(image)['date_str'], '%Y%m%d')
    i_dem = np.argmin(np.abs(np.asarray(dem_dates) - img_date))
    i_ortho = np.argmin(np.abs(np.asarray(ortho_dates) - img_date))
    dem_path = dem_paths[i_dem]
    ortho_path = ortho_paths[i_ortho]
    # Load raster metadata
    dem_grid = glimpse.Grid.read(dem_path, d=grid_size)
    ortho_grid = glimpse.Grid.read(ortho_path, d=grid_size)
    # Intersect bounding boxes
    cam_box = img.cam.viewbox(50e3)[[0, 1, 3, 4]]  # drop z limits, keep x and y
    box = glimpse.helpers.intersect_boxes(np.row_stack((
        cam_box, dem_grid.box2d, ortho_grid.box2d)))
    # Read dem and ortho
    dem = glimpse.Raster.read(dem_path, xlim=box[0::2], ylim=box[1::2], d=grid_size)
    dem.crop(zlim=(0.1, np.inf))
    radius = circle_radius.get(image, circle_radius_default)
    dem.fill_circle(center=img.cam.xyz, radius=radius)
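
# Toy sketch (illustrative dates) of the nearest-date lookup used above:
toy_dem_dates = [datetime.datetime(2004, 7, 1), datetime.datetime(2005, 6, 15)]
toy_img_date = datetime.datetime(2005, 5, 1)
i_nearest = np.argmin(np.abs(np.asarray(toy_dem_dates) - toy_img_date))  # -> 1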
Example #9
def load_images(station,
                services,
                use_exif=False,
                service_exif=False,
                anchors=False,
                viewdir=True,
                viewdir_as_anchor=False,
                file_errors=True,
                **kwargs):
    """
    Return list of calibrated Image objects.

    Any available station, camera, image, and viewdir calibrations are loaded
    and images with image calibrations are marked as anchors.

    Arguments:
        station (str): Station identifier
        services (iterable): Service identifiers
        use_exif (bool): Whether to parse image datetimes from EXIF (slower)
            rather than from paths (faster)
        service_exif (bool): Whether to read EXIF from only the first image
            in each service (faster) rather than from every image (slower).
            If `True`, `Image.datetime` is parsed from the path.
            Forced to `False` if `use_exif=True`.
        anchors (bool): Whether to include anchor images even if
            filtered out by `kwargs['snap']`
        viewdir (bool): Whether to load viewdir calibrations
        viewdir_as_anchor (bool): Whether images with a viewdir calibration
            are also marked as anchors
        file_errors (bool): Whether to raise an error if a camera
            calibration file is not found
        **kwargs: Arguments to `glimpse.helpers.select_datetimes()`
    """
    if use_exif:
        service_exif = False
    # Sort services in time
    if isinstance(services, str):
        services = (services,)
    services = np.sort(services)
    # Parse datetimes of all candidate images
    paths_service = [
        glob.glob(
            os.path.join(IMAGE_PATH, station, station + '_' + service,
                         '*.JPG')) for service in services
    ]
    paths = np.hstack(paths_service)
    basenames = [glimpse.helpers.strip_path(path) for path in paths]
    if use_exif:
        exifs = [glimpse.Exif(path) for path in paths]
        datetimes = np.array([exif.datetime for exif in exifs])
    else:
        datetimes = paths_to_datetimes(basenames)
    # Select images based on datetimes
    indices = glimpse.helpers.select_datetimes(datetimes, **kwargs)
    if anchors:
        # Add anchors
        # HACK: Ignore any <image>-<suffix>.json files
        anchor_paths = glob.glob(
            os.path.join(CG_PATH, 'images', station + '_*[0-9].json'))
        anchor_basenames = [
            glimpse.helpers.strip_path(path) for path in anchor_paths
        ]
        if 'start' in kwargs or 'end' in kwargs:
            # Filter by start, end
            anchor_datetimes = np.asarray(paths_to_datetimes(anchor_basenames))
            inrange = glimpse.helpers.select_datetimes(
                anchor_datetimes,
                **glimpse.helpers.merge_dicts(kwargs, dict(snap=None)))
            anchor_basenames = np.asarray(anchor_basenames)[inrange]
        anchor_indices = np.where(np.isin(basenames, anchor_basenames))[0]
        indices = np.unique(np.hstack((indices, anchor_indices)))
    service_breaks = np.hstack((0, np.cumsum([len(x) for x in paths_service])))
    station_calibration = load_calibrations(station_estimate=station,
                                            station=station,
                                            merge=True,
                                            file_errors=False)
    images = []
    for i, service in enumerate(services):
        index = indices[(indices >= service_breaks[i])
                        & (indices < service_breaks[i + 1])]
        if not index.size:
            continue
        service_calibration = glimpse.helpers.merge_dicts(
            station_calibration,
            load_calibrations(path=paths[index[0]],
                              camera=True,
                              merge=True,
                              file_errors=file_errors))
        if service_exif:
            exif = glimpse.Exif(paths[index[0]])
        for j in index:
            basename = basenames[j]
            calibrations = load_calibrations(
                image=basename,
                viewdir=basename if viewdir else False,
                station_estimate=station,
                merge=False,
                file_errors=False)
            if calibrations['image']:
                calibration = glimpse.helpers.merge_dicts(
                    service_calibration, calibrations['image'])
                anchor = True
            else:
                calibration = glimpse.helpers.merge_dicts(
                    service_calibration,
                    dict(viewdir=calibrations['station_estimate']['viewdir']))
                anchor = False
            if viewdir and calibrations['viewdir']:
                calibration = glimpse.helpers.merge_dicts(
                    calibration, calibrations['viewdir'])
                if viewdir_as_anchor:
                    anchor = True
            if use_exif:
                exif = exifs[j]
            elif not service_exif:
                exif = None
            if KEYPOINT_PATH:
                keypoint_path = os.path.join(KEYPOINT_PATH, basename + '.pkl')
            else:
                keypoint_path = None
            image = glimpse.Image(path=paths[j],
                                  cam=calibration,
                                  anchor=anchor,
                                  exif=exif,
                                  datetime=None if use_exif else datetimes[j],
                                  keypoints_path=keypoint_path)
            images.append(image)
    return images
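
# Hypothetical call (station and service identifiers are made up; `snap` is
# forwarded to glimpse.helpers.select_datetimes()):
images = load_images('CG04', services=('20130601',),
                     snap=datetime.timedelta(hours=2))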
Example #10
import cg
from cg import (glimpse, glob)
from glimpse.imports import (os, re, np, matplotlib)
cg.IMAGE_PATH = '/volumes/science-b/data/columbia/timelapse'

IMG_SIZE = 0.5
FIGURE_SIZE = 0.25
MAX_RATIO = 0.5

# For each motion sequence...
motion = glimpse.helpers.read_json('motion.json')
for d in motion:
    paths = np.asarray(d['paths'])
    # Skip if all files already exist
    basenames = [os.path.join('motion', paths[i] + '-' + paths[i + 1])
        for i in range(len(paths) - 1)]
    nexists = np.sum([os.path.isfile(basename + '.pkl') for basename in basenames])
    if nexists == len(paths) - 1:
        continue
    # Load images
    images = [glimpse.Image(cg.find_image(path)) for path in paths]
    # Compute sequential matches
    for img in images:
        img.cam.resize(IMG_SIZE)
    matches = cg.build_sequential_matches(images, match=dict(max_ratio=MAX_RATIO))
    # For each motion pair...
    for i, control in enumerate(matches):
        # Skip if file exists
        if os.path.isfile(basenames[i] + '.pkl'):
            continue
        print(basenames[i])