Code example #1
    def create_overhead_image_object(self, lon, lat, alt, roll, pitch, yaw):
        point_calc = self._generate_camera_point_calc(lon, lat, alt, roll,
                                                      pitch, yaw)

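        # build a full pixel grid for the simulated sensor and project it to the ground at zero altitude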
        pixel_grid = image_utils.create_pixel_grid(self._npix_x, self._npix_y)
        pass1_lons, pass1_lats = point_calc.pixel_x_y_alt_to_lon_lat(
            pixel_grid[0], pixel_grid[1], pixel_grid[0] * 0)

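        # map the projected ground coordinates into the source GeoTIFF's pixel space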
        gtiff_x_vals, gtiff_y_vals = self._gtiff_image_object.get_point_calculator(
        ).lon_lat_alt_to_pixel_x_y(pass1_lons, pass1_lats,
                                   numpy.zeros_like(pass1_lons))

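        # lazily read the GeoTIFF band from disk the first time this method is called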
        if self._gtiff_image_data is None:
            self._gtiff_image_data = self._gtiff_image_object.read_band_from_disk(
                0)

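        # resample the GeoTIFF at the computed (y, x) locations to build the simulated band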
        simulated_image_band = map_coordinates(self._gtiff_image_data, [
            image_utils.flatten_image_band(gtiff_y_vals),
            image_utils.flatten_image_band(gtiff_x_vals)
        ])

        simulated_image_band = image_utils.unflatten_image_band(
            simulated_image_band, self._npix_x, self._npix_y)
        simulated_image_data = numpy.reshape(simulated_image_band,
                                             (self._npix_y, self._npix_x, 1))
        metadata = PhysicalCameraMetadata()
        metadata.set_npix_x(self._npix_x)
        metadata.set_npix_y(self._npix_y)
        metadata.set_n_bands(1)

        simulated_image_obj = ImageFactory.physical_camera.from_numpy_array_metadata_and_single_point_calc(
            simulated_image_data, metadata, point_calc)
        return simulated_image_obj
Code example #2
def pinhole_timings():

    point_calc = PinholeCamera()
    point_calc.init_pinhole_from_coeffs(0.0, 0.0, 1000.0, 0.0, 0.0, 0.0, 50.0)

    lon_center = 0
    lat_center = 0
    d_lon = 500
    d_lat = 500

    nx = 2000
    ny = 2000

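    # build a dense lon/lat grid on the ground and flatten it for batch projection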
    ground_grid = photogrammetry_utils.create_ground_grid(
        lon_center - d_lon, lon_center + d_lon, lat_center - d_lat,
        lat_center + d_lat, nx, ny)

    lons = image_utils.flatten_image_band(ground_grid[0])
    lats = image_utils.flatten_image_band(ground_grid[1])
    alts = np.zeros_like(lats)

    n_loops = 4

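    # time repeated world-to-image-plane projections over the full grid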
    tic = time.time()
    for n in range(n_loops):
        point_calc.world_to_image_plane(lons, lats, alts)
    toc = time.time()
    print("calculated " + str(n_loops * nx * ny) + " pixels in " +
          str(toc - tic) + " seconds.")
    print(
        str(n_loops * nx * ny / (toc - tic) / 1e6) + " Megapixels per second")
Code example #3
def compute_cov_eq_linear_transform_matrix(
    input_scene,  # type: ndarray
    scene_to_match,  # type: ndarray
    input_scene_mask=None,  # type: ndarray
    scene_to_match_mask=None  # type: ndarray
):  # type: (...) -> ndarray
    flattened_1 = spectral_tools.flatten_image_cube(input_scene)
    flattened_2 = spectral_tools.flatten_image_cube(scene_to_match)
    flattened_mask_1 = image_utils.flatten_image_band(input_scene_mask)
    flattened_mask_2 = image_utils.flatten_image_band(scene_to_match_mask)
    big_l = spectral_image_processing_1d.compute_cov_eq_linear_transform_matrix(
        flattened_1, flattened_2, flattened_mask_1, flattened_mask_2)
    return big_l
Code example #4
    def test_masked_mean_1d(self):
        print("")
        print("MASKED MEAN TEST 1D")
        image_cube = image_utils.create_uniform_image_data(nx,
                                                           ny,
                                                           nbands,
                                                           values=0,
                                                           dtype=np.float32)
        image_cube[y_loc, x_loc, :] = np.arange(0, nbands)
        flattened_image = spectral_utils.flatten_image_cube(image_cube)

        image_mask = np.zeros((ny, nx))
        image_mask[y_loc, x_loc] = 1
        flattened_mask = image_utils.flatten_image_band(image_mask)

        raw_mean = sp1d.compute_image_cube_spectral_mean(flattened_image)
        masked_mean = sp1d.compute_image_cube_spectral_mean(
            flattened_image, flattened_mask)

        assert len(masked_mean) == nbands
        assert len(raw_mean) == nbands
        logging.debug("length of means equals number of spectral bands")

        assert masked_mean.max() == 0
        assert raw_mean.max() != 0
        logging.debug(
            "1d image cube raw and masked means have different results")
        logging.debug("masked_mean_1d test passed")
        print("MASKED MEAN TEST 1D - TEST PASSED")
Code example #5
    def test_masked_covar_1d(self):
        print("")
        print("MASKED COVARIANCE TEST 1D")
        image_cube = image_utils.create_uniform_image_data(nx,
                                                           ny,
                                                           nbands,
                                                           values=0,
                                                           dtype=np.float32)
        image_cube[y_loc, x_loc, :] = np.arange(0, nbands)
        flattened_image = spectral_utils.flatten_image_cube(image_cube)

        image_mask = np.zeros((ny, nx))
        image_mask[y_loc, x_loc] = 1
        flattened_mask = image_utils.flatten_image_band(image_mask)

        raw_covar = sp1d.compute_image_cube_spectral_covariance(
            flattened_image)
        masked_covar = sp1d.compute_image_cube_spectral_covariance(
            flattened_image, flattened_mask)

        assert masked_covar.shape == (nbands, nbands)
        assert raw_covar.shape == (nbands, nbands)
        logging.debug("covariance shape is (nbands x nbands)")

        assert masked_covar.max() == 0
        assert raw_covar.max() != 0
        logging.debug(
            "1d image cube raw and masked covariances have different results")
        print("MASKED_COVAR_1D TEST PASSED")
Code example #6
def compute_image_cube_spectral_covariance(
    spectral_image,  # type: ndarray
    image_mask=None  # type: ndarray
):  # type: (...) -> ndarray
    spectral_image = spectral_tools.flatten_image_cube(spectral_image)
    if image_mask is not None:
        image_mask = image_utils.flatten_image_band(image_mask)
    masked_covariance = spectral_image_processing_1d.compute_image_cube_spectral_covariance(
        spectral_image, image_mask)
    return masked_covariance
Code example #7
def covariance_equalization_mean_centered(
    input_scene_demeaned,  # type: ndarray
    scene_to_match_demeaned,  # type: ndarray
    input_scene_mask=None,  # type: ndarray
    scene_to_match_mask=None  # type: ndarray
):  # type: (...) -> ndarray
    nx, ny, nbands = spectral_tools.get_2d_cube_nx_ny_nbands(
        input_scene_demeaned)
    flattened_1 = spectral_tools.flatten_image_cube(input_scene_demeaned)
    flattened_2 = spectral_tools.flatten_image_cube(scene_to_match_demeaned)
    if input_scene_mask is not None:
        input_scene_mask = image_utils.flatten_image_band(input_scene_mask)
    if scene_to_match_mask is not None:
        scene_to_match_mask = image_utils.flatten_image_band(
            scene_to_match_mask)
    scene_1_cov_equalized = spectral_image_processing_1d.covariance_equalization_mean_centered(
        flattened_1, flattened_2, input_scene_mask, scene_to_match_mask)
    scene_1_cov_equalized = spectral_tools.unflatten_image_cube(
        scene_1_cov_equalized, nx, ny)
    return scene_1_cov_equalized
Code example #8
def demean_image_data(
    spectral_image,  # type: ndarray
    image_mask=None  # type: ndarray
):  # type: (...) -> ndarray
    nx, ny, nbands = spectral_tools.get_2d_cube_nx_ny_nbands(spectral_image)
    spectral_image = spectral_tools.flatten_image_cube(spectral_image)
    if image_mask is not None:
        image_mask = image_utils.flatten_image_band(image_mask)
    demeaned_image_data = spectral_image_processing_1d.demean_image_data(
        spectral_image, image_mask)
    demeaned_image_data = spectral_tools.unflatten_image_cube(
        demeaned_image_data, nx, ny)
    return demeaned_image_data
Code example #9
def ace(
        spectral_image,  # type: ndarray
        target_spectra,  # type: ndarray
        spectral_mean=None,  # type: ndarray
        inverse_covariance=None,  # type: ndarray
        image_mask=None,  # type: ndarray
):  # type: (...) -> ndarray
    nx, ny, nbands = spectral_tools.get_2d_cube_nx_ny_nbands(spectral_image)
    spectral_image = spectral_tools.flatten_image_cube(spectral_image)
    if image_mask is not None:
        image_mask = image_utils.flatten_image_band(image_mask)
    ace_result = spectral_image_processing_1d.ace(spectral_image,
                                                  target_spectra,
                                                  spectral_mean,
                                                  inverse_covariance,
                                                  image_mask)
    ace_result = image_utils.unflatten_image_band(ace_result, nx, ny)
    return ace_result
Code example #10
    def test_rx_1d(self):
        print("")
        print("RX ANAMOLY TEST 1D")
        image_cube = np.random.random((ny, nx, nbands))
        signal_x_axis = np.arange(0, nbands) / nbands * 2 * np.pi
        signal_to_embed = np.sin(signal_x_axis) * 100
        image_cube[y_loc, x_loc, :] = signal_to_embed
        flattened_image = spectral_utils.flatten_image_cube(image_cube)
        image_mask = np.zeros((ny, nx))
        image_mask[y_loc, x_loc] = 1
        flattened_mask = image_utils.flatten_image_band(image_mask)

        masked_mean = sp1d.compute_image_cube_spectral_mean(
            flattened_image, flattened_mask)
        masked_covar = sp1d.compute_image_cube_spectral_covariance(
            flattened_image, flattened_mask)
        masked_inv_cov = np.linalg.inv(masked_covar)

        detection_result = sp1d.rx_anomaly_detector(flattened_image,
                                                    masked_mean,
                                                    masked_inv_cov)
        detection_result_2d = image_utils.unflatten_image_band(
            detection_result, nx, ny)
        detection_max_locs_y_x = np.where(
            detection_result_2d == detection_result_2d.max())
        detection_max_y = detection_max_locs_y_x[0][0]
        detection_max_x = detection_max_locs_y_x[1][0]

        assert detection_max_y == y_loc
        assert detection_max_x == x_loc
        assert len(detection_result.shape) == 1
        assert detection_result.shape[0] == nx * ny

        logging.debug("location of highest detection return matches x/y "
                      "location of embedded signal")
        print("1d rx passed ")
        print("")
Code example #11
    def _pixel_x_y_to_lon_lat_ray_caster_native(
            self,
            pixels_x,  # type: ndarray
            pixels_y,  # type: ndarray
            dem,  # type: AbstractDem
            dem_sample_distance,  # type: float
            dem_highest_alt=None,  # type: float
            dem_lowest_alt=None,  # type: float
            band=None,  # type: int
    ):  # type: (...) -> (ndarray, ndarray, ndarray)
        """
        Protected method that solves for lon, lat, and alt at the given pixel x, y locations by casting rays onto a DEM.
        This is used when the pixel altitudes are not already known, and only a method for
        _lon_lat_alt_to_pixel_x_y_native is available.
        :param pixels_x: x pixels, as a numpy ndarray
        :param pixels_y: y pixels, as a numpy ndarray
        :param dem: digital elevation model, as concrete implementation of an AbstractDem object
        :param dem_sample_distance: resolution at which to sample the DEM, in meters.  If no value is provided
        this value will default to 5 meters.
        :param dem_highest_alt: Highest DEM altitude.  This will be calculated using the full DEM if it is not provided
        :param dem_lowest_alt: Lowest DEM altitude.  This will be calculated using the full DEM if it is not provided
        :param band: image band, as an int, or None if all the image bands are coregistered.
        :return: (longitude, latitude, altitude) in the point calculator's native projection, and the input DEM's
        elevation reference datum
        """

        # TODO put stuff in here to make sure nx and ny are same size
        # TODO put something here to check that the DEM projection and image projection are the same
        ny = None
        nx = None
        is2d = np.ndim(pixels_x) == 2
        if is2d:
            ny, nx = np.shape(pixels_x)
            pixels_x = image_utils.flatten_image_band(pixels_x)
            pixels_y = image_utils.flatten_image_band(pixels_y)

        n_pixels_to_project = len(pixels_x)

        max_alt = dem_highest_alt
        min_alt = dem_lowest_alt

        if max_alt is None:
            max_alt = dem.get_highest_alt()
        if min_alt is None:
            min_alt = dem.get_lowest_alt()
        alt_range = max_alt - min_alt

        # pad the max and min alts by 1 percent of the altitude range above and below the values returned by the DEM
        max_alt = max_alt + alt_range * 0.01
        min_alt = min_alt - alt_range * 0.01

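        # project every pixel to the ground at the top and bottom of the altitude range to define each ray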
        lons_max_alt, lats_max_alt = self.pixel_x_y_alt_to_lon_lat(pixels_x,
                                                                   pixels_y,
                                                                   max_alt,
                                                                   band=band)
        lons_min_alt, lats_min_alt = self.pixel_x_y_alt_to_lon_lat(pixels_x,
                                                                   pixels_y,
                                                                   min_alt,
                                                                   band=band)

        # TODO this operation becomes very expensive at very fine DEM resolutions
        # TODO create implementation for a raster DEM that works faster
        # TODO the time consuming operations are obtaining lon/lats for many points as the DEM resolution becomes finer

        ray_horizontal_lens = np.sqrt(
            np.square(lons_max_alt - lons_min_alt) +
            np.square(lats_max_alt - lats_min_alt))
        n_steps_per_ray = int(
            np.ceil(np.max(ray_horizontal_lens) / dem_sample_distance) + 1)

        lons_matrix = np.zeros(
            (n_pixels_to_project, n_steps_per_ray)) + np.linspace(
                0, 1, n_steps_per_ray)
        lats_matrix = np.zeros(
            (n_pixels_to_project, n_steps_per_ray)) + np.linspace(
                0, 1, n_steps_per_ray)

        lons_matrix = np.tile((lons_min_alt - lons_max_alt), (n_steps_per_ray, 1)).transpose() * \
                      lons_matrix + np.tile(lons_max_alt, (n_steps_per_ray, 1)).transpose()
        lats_matrix = np.tile((lats_min_alt - lats_max_alt), (n_steps_per_ray, 1)).transpose() * \
                      lats_matrix + np.tile(lats_max_alt, (n_steps_per_ray, 1)).transpose()

        all_elevations = dem.get_elevations(np.array(lons_matrix),
                                            np.array(lats_matrix),
                                            world_proj=self.get_projection())

        ray = np.linspace(max_alt, min_alt, n_steps_per_ray)
        first_ray_intersect_indices = np.zeros(n_pixels_to_project,
                                               dtype=int)
        ray_step_indices = list(range(n_steps_per_ray))
        ray_step_indices.reverse()
        for i in ray_step_indices:
            does_ray_intersect = all_elevations[:, i] > ray[i]
            first_ray_intersect_indices[np.where(does_ray_intersect)] = i

        all_pixel_indices = np.arange(0, n_pixels_to_project, dtype=int)

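        # bracket the terrain crossing between the last ray step above the DEM and the first step below it,
        # then linearly interpolate lon, lat, and altitude at the crossing point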
        first_ray_intersect_indices = first_ray_intersect_indices - 1
        second_ray_intersect_indices = first_ray_intersect_indices + 1
        b_rays = ray[first_ray_intersect_indices]
        b_alts = all_elevations[all_pixel_indices, first_ray_intersect_indices]

        m_rays = ray[1] - ray[0]
        m_alts = all_elevations[all_pixel_indices,
                                second_ray_intersect_indices] - b_alts

        xs = (b_alts - b_rays) / (m_rays - m_alts)
        intersected_lons = (lons_matrix[all_pixel_indices, second_ray_intersect_indices] -
                            lons_matrix[all_pixel_indices, first_ray_intersect_indices]) * xs + \
                           lons_matrix[all_pixel_indices, first_ray_intersect_indices]
        intersected_lats = (lats_matrix[all_pixel_indices, second_ray_intersect_indices] -
                            lats_matrix[all_pixel_indices, first_ray_intersect_indices]) * xs + \
                           lats_matrix[all_pixel_indices, first_ray_intersect_indices]
        intersected_alts = (all_elevations[all_pixel_indices, second_ray_intersect_indices] -
                            all_elevations[all_pixel_indices, first_ray_intersect_indices]) * xs + \
                           all_elevations[all_pixel_indices, first_ray_intersect_indices]

        if is2d:
            intersected_lons = image_utils.unflatten_image_band(
                intersected_lons, nx, ny)
            intersected_lats = image_utils.unflatten_image_band(
                intersected_lats, nx, ny)
            intersected_alts = image_utils.unflatten_image_band(
                intersected_alts, nx, ny)

        return intersected_lons, intersected_lats, intersected_alts
Code example #12
def rpc_timings():
    samp_num_coeff = [
        -2.401488e-03, 1.014755e+00, 1.773499e-02, 2.048626e-02, -4.609470e-05,
        4.830748e-04, -2.015272e-04, 1.212827e-03, 5.065720e-06, 3.740396e-05,
        -1.582743e-07, 1.437278e-06, 3.620892e-08, 2.144755e-07, -1.333671e-07,
        0.000000e+00, -5.229308e-08, 1.111695e-06, -1.337535e-07, 0.000000e+00
    ]

    samp_den_coeff = [
        1.000000e+00, 1.197118e-03, 9.340466e-05, -4.381989e-04, 3.359669e-08,
        0.000000e+00, 2.959469e-08, 1.412447e-06, 8.398708e-08, -1.782544e-07,
        0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
        0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00
    ]
    samp_scale = 1283.0
    samp_off = 1009.0
    height_off = 42.0
    height_scale = 501.0
    lat_off = 32.8902
    lat_scale = 0.0142

    line_num_coeff = [
        3.448567e-03, 1.975650e-02, -1.147937e+00, 1.622923e-01, 5.249710e-05,
        4.231537e-05, 3.559660e-04, -9.052539e-05, -1.932047e-03,
        -2.341649e-05, -1.025999e-06, 0.000000e+00, -1.116629e-07,
        1.635365e-07, -1.704056e-07, -3.139215e-06, 1.607936e-06, 1.281797e-07,
        1.412060e-06, -2.638793e-07
    ]

    line_den_coeff = [
        1.000000e+00, -1.294475e-05, 1.682046e-03, -1.481430e-04, 1.503771e-07,
        7.529185e-07, -4.596928e-07, 0.000000e+00, 2.736755e-06, -1.609702e-06,
        3.466453e-08, 1.066716e-08, 0.000000e+00, 0.000000e+00, 0.000000e+00,
        0.000000e+00, -7.020142e-08, 1.766701e-08, 2.536018e-07, 0.000000e+00
    ]

    line_off = 856.0
    line_scale = 1143.0
    lon_off = 13.1706
    lon_scale = 0.0167

    point_calc = RPCPointCalc.init_from_coeffs(samp_num_coeff, samp_den_coeff,
                                               samp_scale, samp_off,
                                               line_num_coeff, line_den_coeff,
                                               line_scale, line_off, lat_scale,
                                               lat_off, lon_scale, lon_off,
                                               height_scale, height_off)

    lon_center = lon_off
    lat_center = lat_off
    d_lon = 0.001
    d_lat = 0.001

    nx = 2000
    ny = 2000

    ground_grid = photogrammetry_utils.create_ground_grid(
        lon_center - d_lon, lon_center + d_lon, lat_center - d_lat,
        lat_center + d_lat, nx, ny)

    lons = image_utils.flatten_image_band(ground_grid[0])
    lats = image_utils.flatten_image_band(ground_grid[1])
    alts = np.zeros_like(lats)

    n_loops = 4

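    # time repeated evaluations of the RPC polynomial (sample numerator coefficients) over the flattened ground grid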
    tic = time.time()
    for n in range(n_loops):
        point_calc.compute_p(samp_num_coeff, lats, lons, alts)
    toc = time.time()
    print("calculated " + str(n_loops * nx * ny) + " pixels in " +
          str(toc - tic) + " seconds.")
    print(
        str(n_loops * nx * ny / (toc - tic) / 1e6) + " Megapixels per second")
Code example #13
def create_ortho_gtiff_image_world_to_sensor(
        overhead_image,  # type: AbstractEarthOverheadImage
        ortho_nx_pix,  # type: int
        ortho_ny_pix,  # type: int
        world_polygon,  # type: Polygon
        world_proj=crs_defs.PROJ_4326,  # type: Proj
        dem=None,  # type: AbstractDem
        bands=None,  # type: List[int]
        nodata_val=0,  # type: float
        output_fname=None,  # type: str
        spline_order=0,  # type: int
        mask_no_data_region=False,  # type: bool
):  # type:  (...) -> GeotiffImage

    envelope = world_polygon.envelope
    minx, miny, maxx, maxy = envelope.bounds
    image_ground_grid_x, image_ground_grid_y = create_ground_grid(
        minx, maxx, miny, maxy, ortho_nx_pix, ortho_ny_pix)
    geo_t = world_poly_to_geo_t(envelope, ortho_nx_pix, ortho_ny_pix)

    if dem is None:
        dem = DemFactory.constant_elevation(0)
        dem.set_projection(crs_defs.PROJ_4326)
    alts = dem.get_elevations(image_ground_grid_x, image_ground_grid_y,
                              world_proj)

    if bands is None:
        bands = list(range(overhead_image.get_metadata().get_n_bands()))

    images = []
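    # when the bands are not coregistered, each band needs its own ground-to-pixel projection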
    if overhead_image.get_point_calculator().bands_coregistered() is not True:
        for band in bands:
            pixels_x, pixels_y = overhead_image.get_point_calculator(). \
                lon_lat_alt_to_pixel_x_y(image_ground_grid_x, image_ground_grid_y, alts, band=band, world_proj=world_proj)
            image_data = overhead_image.read_band_from_disk(band)
            im_tp = image_data.dtype

            regridded = map_coordinates(image_data, [
                image_utils.flatten_image_band(pixels_y),
                image_utils.flatten_image_band(pixels_x)
            ],
                                        order=spline_order)

            regridded = image_utils.unflatten_image_band(
                regridded, ortho_nx_pix, ortho_ny_pix)
            regridded = regridded.astype(im_tp)
            images.append(regridded)
    else:
        pixels_x, pixels_y = overhead_image.get_point_calculator(). \
            lon_lat_alt_to_pixel_x_y(image_ground_grid_x, image_ground_grid_y, alts, band=0, world_proj=world_proj)
        for band in bands:
            image_data = overhead_image.get_image_band(band)
            if image_data is None:
                image_data = overhead_image.read_band_from_disk(band)
            im_tp = image_data.dtype

            regridded = map_coordinates(image_data, [
                image_utils.flatten_image_band(pixels_y),
                image_utils.flatten_image_band(pixels_x)
            ],
                                        order=spline_order)

            regridded = image_utils.unflatten_image_band(
                regridded, ortho_nx_pix, ortho_ny_pix)

            regridded = regridded.astype(im_tp)

            regridded[np.where(pixels_x <= 0)] = nodata_val
            regridded[np.where(
                pixels_x >= overhead_image.metadata.get_npix_x() -
                2)] = nodata_val

            regridded[np.where(pixels_y <= 0)] = nodata_val
            regridded[np.where(
                pixels_y >= overhead_image.metadata.get_npix_y() -
                2)] = nodata_val
            images.append(regridded)

    orthorectified_image = np.stack(images, axis=2)

    if mask_no_data_region:
        orthorectified_image = mask_image(orthorectified_image, nodata_val)

    gtiff_image = GeotiffImageFactory.from_numpy_array(orthorectified_image,
                                                       geo_t, world_proj)
    gtiff_image.get_metadata().set_nodata_val(nodata_val)
    if output_fname is not None:
        gtiff_image.write_to_disk(output_fname)

    return gtiff_image
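
All of the examples above follow the same round trip: two-dimensional bands and cubes are flattened with flatten_image_band / flatten_image_cube, handed to the 1-D spectral routines, and single-band results are restored with unflatten_image_band. The sketch below condenses that pattern into one function. It is a minimal illustration, not library code: it only reuses calls that appear in the examples above, assumes the same module aliases as the test examples (image_utils, spectral_utils, sp1d, np), and the function name masked_rx_sketch and its input shapes are made up for this example.

def masked_rx_sketch(image_cube, image_mask):
    # assumed shapes: image_cube is (ny, nx, nbands), image_mask is (ny, nx)
    ny, nx, nbands = image_cube.shape

    # flatten the cube and the mask so the 1-D routines can operate on them
    flattened_image = spectral_utils.flatten_image_cube(image_cube)
    flattened_mask = image_utils.flatten_image_band(image_mask)

    # masked statistics computed over the flattened arrays
    masked_mean = sp1d.compute_image_cube_spectral_mean(flattened_image,
                                                        flattened_mask)
    masked_covar = sp1d.compute_image_cube_spectral_covariance(
        flattened_image, flattened_mask)

    # run the anomaly detector and restore the single-band result to 2-D
    detection_result = sp1d.rx_anomaly_detector(flattened_image, masked_mean,
                                                np.linalg.inv(masked_covar))
    return image_utils.unflatten_image_band(detection_result, nx, ny)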