Code Example #1
def terminus_lines(img, markup, correction=True, step=None):
    """
    Return terminus Lines object for an Image.

    Arguments:
        img (Image): Image object
        markup (dict): Parsed SVG layer
        correction: Whether Lines should use elevation correction (bool),
            or arguments to `glimpse.helpers.elevation_corrections()`
        step: Passed through to `glimpse.optimize.Lines()`
    """
    luv = tuple(markup.values())
    # HACK: Select terminus with matching date and preferred type
    termini = Termini()
    date_str = img.datetime.strftime('%Y-%m-%d')
    features = [(feature, feature['properties']['type']) for feature in termini
                if feature['properties']['date'] == date_str]
    type_order = ('aerometric', 'worldview', 'landsat-8', 'landsat-7',
                  'terrasar', 'tandem', 'arcticdem', 'landsat-5')
    order = [type_order.index(f[1]) for f in features]
    xy = features[np.argmin(order)][0]['geometry']['coordinates']
    xyz = np.hstack((xy, sea_height(xy, t=img.datetime)))
    return glimpse.optimize.Lines(img.cam,
                                  luv, [xyz],
                                  correction=correction,
                                  step=step)
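
The selection above ranks candidate termini by source type and keeps the most preferred one. A minimal standalone sketch of that pattern, using placeholder feature dicts rather than data from the source:

import numpy as np

# Standalone illustration of the type-preference selection used above.
# The feature dicts below are placeholders, not data from the source.
type_order = ('aerometric', 'worldview', 'landsat-8', 'landsat-7',
              'terrasar', 'tandem', 'arcticdem', 'landsat-5')
features = [
    ({'geometry': {'coordinates': [[0.0, 0.0], [1.0, 1.0]]}}, 'landsat-8'),
    ({'geometry': {'coordinates': [[0.0, 0.0], [2.0, 2.0]]}}, 'worldview'),
]
# Rank each feature by its position in type_order; a lower rank is preferred
order = [type_order.index(ftype) for _, ftype in features]
best_feature = features[np.argmin(order)][0]
xy = best_feature['geometry']['coordinates']  # the 'worldview' terminus wins
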
Code Example #2
def coast_lines(img, markup, correction=True, step=None):
    """
    Return coast Lines object for an Image.

    Arguments:
        img (Image): Image object
        markup (dict): Parsed SVG layer
        correction (bool): Whether Lines should use elevation correction
        step: Passed through to `glimpse.optimize.Lines()`
    """
    luv = tuple(markup.values())
    geo = glimpse.helpers.read_geojson(os.path.join(CG_PATH, 'geojson',
                                                    'coast.geojson'),
                                       crs=32606)
    lxy = [feature['geometry']['coordinates'] for feature in geo['features']]
    lxyz = [np.hstack((xy, sea_height(xy, t=img.datetime))) for xy in lxy]
    return glimpse.optimize.Lines(img.cam,
                                  luv,
                                  lxyz,
                                  correction=correction,
                                  step=step)
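
For reference, the `np.hstack((xy, sea_height(xy, t=...)))` step above appends a z column to 2-D line vertices. A small sketch with placeholder coordinates and a constant height standing in for `sea_height()`:

import numpy as np

# Placeholder line vertices (projected coordinates, not from the source)
xy = np.array([[492000.0, 6771000.0],
               [492050.0, 6771000.0]])
# Stand-in for sea_height(xy, t=...): one height value per vertex, as a column
z = np.full((len(xy), 1), 0.2)
xyz = np.hstack((xy, z))  # (n, 3) array of line vertices
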
Code Example #3
def compute_arthz_sigma(vrthz_sigma, flotation):
    return np.hstack((
        vrthz_sigma[0] * ar_sigma_scale,
        dtheta_sigma,
        np.maximum(flotation * flotation_az_sigma, min_az_sigma)
    ))
Code Example #4
def compute_vrthz_sigma(vrthz_sigma, flotation):
    return np.hstack((
        vrthz_sigma[0] * (1 + short_vr_sigma),
        vrthz_sigma[1],
        vrthz_sigma[2] + np.maximum(flotation * flotation_vz_sigma, min_vz_sigma)
    ))
Code Example #5
def compute_axyz_sigma(vxyz_sigma, flotation):
    return np.hstack((
        vxyz_sigma[0:2] * axy_sigma_scale,
        np.maximum(flotation * flotation_az_sigma, min_az_sigma)
    ))
Code Example #6
def compute_vxyz_sigma(vxyz_sigma, flotation):
    return np.hstack((
        vxyz_sigma[0:2] * (1 + short_vxy_sigma),
        vxyz_sigma[2] + np.maximum(flotation * flotation_vz_sigma, min_vz_sigma)
    ))
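
Code Examples #3-#6 all assemble a sigma vector with `np.hstack`, scaling the horizontal terms and flooring the vertical term at a minimum that grows as the ice approaches flotation. A worked sketch of the Cartesian pair; the constants below are illustrative placeholders, not values from the source:

import numpy as np

# Illustrative placeholder constants (not values from the source)
axy_sigma_scale = 0.75      # velocity sigma -> acceleration sigma scale
short_vxy_sigma = 0.25      # extra fractional sigma on horizontal velocity
flotation_vz_sigma = 2.0    # vertical velocity sigma at full flotation
flotation_az_sigma = 12.0   # vertical acceleration sigma at full flotation
min_vz_sigma = 0.02         # floor on vertical velocity sigma
min_az_sigma = 0.2          # floor on vertical acceleration sigma

vxyz_sigma = np.array([2.0, 2.0, 0.2])
flotation = 0.5

# Same construction as compute_vxyz_sigma and compute_axyz_sigma above
vxyz_sigma_out = np.hstack((
    vxyz_sigma[0:2] * (1 + short_vxy_sigma),
    vxyz_sigma[2] + np.maximum(flotation * flotation_vz_sigma, min_vz_sigma)))
axyz_sigma_out = np.hstack((
    vxyz_sigma[0:2] * axy_sigma_scale,
    np.maximum(flotation * flotation_az_sigma, min_az_sigma)))
print(vxyz_sigma_out)  # [2.5 2.5 1.2]
print(axyz_sigma_out)  # [1.5 1.5 6. ]
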
Code Example #7
         last_vxy = tracks[i].vxyz[mask, last, 0:2]
         last_vxy_sigma = tracks[i].vxyz_sigma[mask, last, 0:2]
         vxy_motion_models = [copy.copy(model) for model in motion_models]
         if cylindrical:
             for j, model in enumerate(np.array(vxy_motion_models)[mask]):
                 # Estimate cylindrical priors from cartesian results
                 vxy = last_vxy[j] + last_vxy_sigma[j] * np.random.randn(100, 2)
                 speed = np.hypot(vxy[:, 0], vxy[:, 1])
                 vr, vr_sigma = speed.mean(), speed.std()
                 thetas = np.arctan2(vxy[:, 1], vxy[:, 0])
                 unit_yx = (
                     np.sin(thetas).mean(),
                     np.cos(thetas).mean())
                 theta = np.arctan2(*unit_yx)
                 theta_sigma = np.sqrt(-2 * np.log(np.hypot(*unit_yx)))
                 model.vrthz = np.hstack((vr, theta, model.vrthz[2]))
                 model.vrthz_sigma = np.hstack((vr_sigma, theta_sigma,
                     model.vrthz_sigma[2]))
         else:
             for j, model in enumerate(np.array(vxy_motion_models)[mask]):
                 model.vxyz = np.hstack((last_vxy[j], model.vxyz[2]))
                 model.vxyz_sigma = np.hstack((last_vxy_sigma[j], model.vxyz_sigma[2]))
         # Repeat track
         tracks[i + 1] = tracker.track(motion_models=vxy_motion_models,
             observer_mask=params['observer_mask'], tile_size=tile_size,
             parallel=parallel, datetimes=tracker.datetimes[::directions[i + 1]])
 # ---- Clean up tracks ----
 # Clean up tracker, since saved in Tracks.tracker
 tracker.reset()
 # Clear cached images, since saved in Tracks.tracker.observers
 for observer in observers:
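
The cylindrical-prior block in Code Example #7 summarizes sampled velocities with circular statistics: the mean direction comes from the mean unit vector, and its spread from the mean resultant length R via sqrt(-2 ln R). A self-contained sketch of just that step, with placeholder inputs:

import numpy as np

# Placeholder prior horizontal velocity and its sigma (not from the source)
last_vxy = np.array([4.0, -1.5])
last_vxy_sigma = np.array([0.8, 0.6])

# Sample candidate velocities, then summarize speed and direction
vxy = last_vxy + last_vxy_sigma * np.random.randn(1000, 2)
speed = np.hypot(vxy[:, 0], vxy[:, 1])
vr, vr_sigma = speed.mean(), speed.std()

# Circular mean direction from the mean unit vector; circular spread
# from the mean resultant length R as sqrt(-2 * ln(R))
thetas = np.arctan2(vxy[:, 1], vxy[:, 0])
unit_yx = (np.sin(thetas).mean(), np.cos(thetas).mean())
theta = np.arctan2(*unit_yx)
theta_sigma = np.sqrt(-2 * np.log(np.hypot(*unit_yx)))
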
Code Example #8
def load_images(station,
                services,
                use_exif=False,
                service_exif=False,
                anchors=False,
                viewdir=True,
                viewdir_as_anchor=False,
                file_errors=True,
                **kwargs):
    """
    Return list of calibrated Image objects.

    Any available station, camera, image, and viewdir calibrations are loaded
    and images with image calibrations are marked as anchors.

    Arguments:
        station (str): Station identifier
        services (iterable): Service identifiers
        use_exif (bool): Whether to parse image datetimes from EXIF (slower)
            rather than from paths (faster)
        service_exif (bool): Whether to read EXIF from only the first image
            (faster) or from all images (slower) in each service.
            If `True`, `Image.datetime` is parsed from the path.
            Always `False` if `use_exif=True`.
        anchors (bool): Whether to include anchor images even if
            filtered out by `kwargs['snap']`
        viewdir (bool): Whether to load and apply viewdir calibrations
        viewdir_as_anchor (bool): Whether to also mark images with a viewdir
            calibration as anchors
        file_errors (bool): Passed to `load_calibrations()` when loading the
            camera calibration
        **kwargs: Arguments to `glimpse.helpers.select_datetimes()`
    """
    if use_exif:
        service_exif = False
    # Sort services in time
    if isinstance(services, str):
        services = (services,)
    services = np.sort(services)
    # Parse datetimes of all candidate images
    paths_service = [
        glob.glob(
            os.path.join(IMAGE_PATH, station, station + '_' + service,
                         '*.JPG')) for service in services
    ]
    paths = np.hstack(paths_service)
    basenames = [glimpse.helpers.strip_path(path) for path in paths]
    if use_exif:
        exifs = [glimpse.Exif(path) for path in paths]
        datetimes = np.array([exif.datetime for exif in exifs])
    else:
        datetimes = paths_to_datetimes(basenames)
    # Select images based on datetimes
    indices = glimpse.helpers.select_datetimes(datetimes, **kwargs)
    if anchors:
        # Add anchors
        # HACK: Ignore any <image>-<suffix>.json files
        anchor_paths = glob.glob(
            os.path.join(CG_PATH, 'images', station + '_*[0-9].json'))
        anchor_basenames = [
            glimpse.helpers.strip_path(path) for path in anchor_paths
        ]
        if 'start' in kwargs or 'end' in kwargs:
            # Filter by start, end
            anchor_datetimes = np.asarray(paths_to_datetimes(anchor_basenames))
            inrange = glimpse.helpers.select_datetimes(
                anchor_datetimes,
                **glimpse.helpers.merge_dicts(kwargs, dict(snap=None)))
            anchor_basenames = np.asarray(anchor_basenames)[inrange]
        anchor_indices = np.where(np.isin(basenames, anchor_basenames))[0]
        indices = np.unique(np.hstack((indices, anchor_indices)))
    service_breaks = np.hstack((0, np.cumsum([len(x) for x in paths_service])))
    station_calibration = load_calibrations(station_estimate=station,
                                            station=station,
                                            merge=True,
                                            file_errors=False)
    images = []
    for i, service in enumerate(services):
        index = indices[(indices >= service_breaks[i])
                        & (indices < service_breaks[i + 1])]
        if not index.size:
            continue
        service_calibration = glimpse.helpers.merge_dicts(
            station_calibration,
            load_calibrations(path=paths[index[0]],
                              camera=True,
                              merge=True,
                              file_errors=file_errors))
        if service_exif:
            exif = glimpse.Exif(paths[index[0]])
        for j in index:
            basename = basenames[j]
            calibrations = load_calibrations(
                image=basename,
                viewdir=basename if viewdir else False,
                station_estimate=station,
                merge=False,
                file_errors=False)
            if calibrations['image']:
                calibration = glimpse.helpers.merge_dicts(
                    service_calibration, calibrations['image'])
                anchor = True
            else:
                calibration = glimpse.helpers.merge_dicts(
                    service_calibration,
                    dict(viewdir=calibrations['station_estimate']['viewdir']))
                anchor = False
            if viewdir and calibrations['viewdir']:
                calibration = glimpse.helpers.merge_dicts(
                    calibration, calibrations['viewdir'])
                if viewdir_as_anchor:
                    anchor = True
            if use_exif:
                exif = exifs[j]
            elif not service_exif:
                exif = None
            if KEYPOINT_PATH:
                keypoint_path = os.path.join(KEYPOINT_PATH, basename + '.pkl')
            else:
                keypoint_path = None
            image = glimpse.Image(path=paths[j],
                                  cam=calibration,
                                  anchor=anchor,
                                  exif=exif,
                                  datetime=None if use_exif else datetimes[j],
                                  keypoints_path=keypoint_path)
            images.append(image)
    return images
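
A hypothetical call sketch for `load_images`, assuming the surrounding module (IMAGE_PATH, CG_PATH, calibration files) is configured. The station and service identifiers are placeholders, and the `start`/`end`/`snap` keywords are assumed to be forwarded to `glimpse.helpers.select_datetimes()` with `snap` as a `datetime.timedelta`:

import datetime

# Placeholder station and service identifiers (not from the source)
images = load_images(
    station='AK01',
    services=('AK01A', 'AK01B'),
    anchors=True,
    viewdir=True,
    viewdir_as_anchor=True,
    start=datetime.datetime(2013, 6, 1),
    end=datetime.datetime(2013, 7, 1),
    snap=datetime.timedelta(hours=3))
print(len(images), 'calibrated images loaded')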