Example #1
    def footprint(self):
        if not hasattr(self, '_footprint'):
            try:
                polygon_pvl = find_in_dict(self.metadata, 'Polygon')
                start_polygon_byte = find_in_dict(polygon_pvl, 'StartByte')
                num_polygon_bytes = find_in_dict(polygon_pvl, 'Bytes')

                # I too dislike the additional open here, but am not sure
                # of a better option.
                with open(self.file_name, 'rb') as f:
                    f.seek(start_polygon_byte - 1)
                    # Decode bytes to str because GDAL chokes on non-str input
                    stream = f.read(num_polygon_bytes).decode()
                    self._footprint = ogr.CreateGeometryFromWkt(stream)
            except Exception:
                # I dislike that this is copied from latlon_extent, but am
                # unsure how to avoid the cyclic footprint <-> latlon_extent
                # property calls.
                xy_extent = self.xy_extent
                lowerlat, lowerlon = self.pixel_to_latlon(xy_extent[0][0], xy_extent[0][1])
                upperlat, upperlon = self.pixel_to_latlon(xy_extent[1][0], xy_extent[1][1])
                geom = {"type": "Polygon", "coordinates": [[[lowerlat, lowerlon],
                                                           [lowerlat, upperlon],
                                                           [upperlat, upperlon],
                                                           [upperlat, lowerlon],
                                                           [lowerlat, lowerlon]]]}
                self._footprint = ogr.CreateGeometryFromJson(json.dumps(geom))

        return self._footprint
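
All of these examples lean on the same helper, find_in_dict, which does a
recursive, depth-first search for a key anywhere inside a nested dict-like
structure such as a parsed PVL label. As a reference point, here is a minimal
sketch of that behavior; it illustrates the assumed semantics and is not the
actual plio/autocnet implementation.

def find_in_dict(obj, key):
    """Recursively search a nested dict-like object for key.

    Returns the first value found by depth-first search, or None if the
    key is absent.  Sketch only; PVL modules are treated as plain dicts.
    """
    if key in obj:
        return obj[key]
    for value in obj.values():
        # Recurse into any nested dict-like value
        if hasattr(value, 'values'):
            found = find_in_dict(value, key)
            if found is not None:
                return found
    return None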
Example #2
    def test_provenance(self):
        image = self.node.get_array()
        self.node.extract_features(image, extractor_parameters={'nfeatures': 10})
        self.node.extract_features(image, extractor_parameters={'nfeatures': 15})
        p0 = self.node.provenance[0]
        p1 = self.node.provenance[1]
        print(self.node.provenance)
        self.assertEqual(len(self.node.provenance.keys()), 2)
        self.assertNotEqual(find_in_dict(p0, 'nfeatures'),
                            find_in_dict(p1, 'nfeatures'))
Example #3
    def test_create_pvl_header(self):
        pvl_header = pvl.load('test.net')

        npoints = find_in_dict(pvl_header, 'NumberOfPoints')
        self.assertEqual(5, npoints)

        mpoints = find_in_dict(pvl_header, 'NumberOfMeasures')
        self.assertEqual(10, mpoints)

        points_bytes = find_in_dict(pvl_header, 'PointsBytes')
        self.assertEqual(675, points_bytes)

        points_start_byte = find_in_dict(pvl_header, 'PointsStartByte')
        self.assertEqual(65634, points_start_byte)
Example #4
    def test_create_pvl_header(self):
        pvl_header = pvl.load('test.net')

        npoints = find_in_dict(pvl_header, 'NumberOfPoints')
        self.assertEqual(2, npoints)

        mpoints = find_in_dict(pvl_header, 'NumberOfMeasures')
        self.assertEqual(5, mpoints)

        points_bytes = find_in_dict(pvl_header, 'PointsBytes')
        self.assertEqual(330, points_bytes)

        points_start_byte = find_in_dict(pvl_header, 'PointsStartByte')
        self.assertEqual(65621, points_start_byte)
Example #5
    def test_create_pvl_header(self):
        pvl_header = pvl.load("test.net")

        npoints = find_in_dict(pvl_header, "NumberOfPoints")
        self.assertEqual(2, npoints)

        mpoints = find_in_dict(pvl_header, "NumberOfMeasures")
        self.assertEqual(5, mpoints)

        points_bytes = find_in_dict(pvl_header, "PointsBytes")
        self.assertEqual(334, points_bytes)

        points_start_byte = find_in_dict(pvl_header, "PointsStartByte")
        self.assertEqual(65621, points_start_byte)
Example #6
    def footprint(self):
        if not hasattr(self, '_footprint'):
            try:
                polygon_pvl = find_in_dict(self.metadata, 'Polygon')
                start_polygon_byte = find_in_dict(polygon_pvl, 'StartByte')
                num_polygon_bytes = find_in_dict(polygon_pvl, 'Bytes')

                # I too dislike the additional open here, but am not sure
                # of a better option.
                with open(self.file_name, 'rb') as f:
                    f.seek(start_polygon_byte - 1)
                    # Decode bytes to str because GDAL chokes on non-str input
                    stream = f.read(num_polygon_bytes).decode()
                    self._footprint = ogr.CreateGeometryFromWkt(stream)
            except Exception:
                self._footprint = None

        return self._footprint
Example #7
def get_isis_translation(label):
    """
    Retrieve the ISIS serial-number translation for a given image
    using the input cube or the label extracted from the cube.

    Parameters
    ----------
    label : dict or str
            A PVL dict object or file name to extract
            the PVL object from

    Returns
    -------
    translation : dict
                  A PVLModule object containing the extracted
                  translation file
    """
    # Instantiate a DB session if not already instantiated
    if not hasattr(autocnet, 'data_session'):
        autocnet.data_session = setup_db_session(get_data('data.db'))

    # Load the label if it is not already read
    if not isinstance(label, PVLModule):
        label = pvl.load(label)

    # Grab the spacecraft name and run it through the ISIS lookup
    spacecraft_name = find_in_dict(label, 'SpacecraftName')

    for row in autocnet.data_session.query(StringToMission).filter(
            StringToMission.key == spacecraft_name):
        spacecraft_name = row.value.lower()

    # Try to pull an instrument identifier
    try:
        instrumentid = find_in_dict(label, 'InstrumentId').capitalize()
    except Exception:
        instrumentid = None

    # Grab the translation PVL object using the lookup
    for row in autocnet.data_session.query(Translations).filter(
            Translations.mission == spacecraft_name,
            Translations.instrument == instrumentid):
        # Convert the JSON back to a PVL object
        translation = PVLModule(row.translation)

    return translation
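
A hedged usage sketch for the function above; the cube path is a placeholder,
not a file from the source:

import pvl

label = pvl.load('my_image.cub')           # parse the cube's PVL label...
translation = get_isis_translation(label)  # ...or pass the path directly
print(translation)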
Example #8
def get_isis_translation(label):
    """
    Retrieve the ISIS serial-number translation for a given image
    using the input cube or the label extracted from the cube.

    Parameters
    ----------
    label : dict or str
            A PVL dict object or file name to extract
            the PVL object from

    Returns
    -------
    translation : dict
                  A PVLModule object containing the extracted
                  translation file
    """
    # Instantiate a DB session if not already instantiated
    if not hasattr(autocnet, 'data_session'):
        autocnet.data_session = setup_db_session(get_data('data.db'))

    # Load the label if it is not already read
    if not isinstance(label, PVLModule):
        label = pvl.load(label)

    # Grab the spacecraft name and run it through the ISIS lookup
    spacecraft_name = find_in_dict(label, 'SpacecraftName')

    for row in autocnet.data_session.query(StringToMission).filter(StringToMission.key==spacecraft_name):
        spacecraft_name = row.value.lower()

    # Try to pull an instrument identifier
    try:
        instrumentid = find_in_dict(label, 'InstrumentId').capitalize()
    except Exception:
        instrumentid = None

    # Grab the translation PVL object using the lookup
    for row in autocnet.data_session.query(Translations).filter(Translations.mission==spacecraft_name,
                                                                Translations.instrument==instrumentid):
        # Convert the JSON back to a PVL object
        translation = PVLModule(row.translation)

    return translation
Example #9
    def __init__(self, input_data, cleaned=True, qa_threshold=2000):
        """
        Read the .spc file, parse the label, and extract the spectra

        Parameters
        ----------

        input_data : string
                     The PATH to the input .spc file
        cleaned : boolean
                  If True, mask the data based on the QA array.
        """

        label_dtype_map = {
            'IEEE_REAL': 'f',
            'MSB_INTEGER': 'i',
            'MSB_UNSIGNED_INTEGER': 'u'
        }

        label = pvl.load(input_data)
        self._label = label
        with open(input_data, 'rb') as indata:

            # Get the offsets
            ancillary_data_offset = find_in_dict(
                label, "^ANCILLARY_AND_SUPPLEMENT_DATA").value
            wavelength_offset = find_in_dict(label, "^SP_SPECTRUM_WAV").value
            raw_offset = find_in_dict(label, "^SP_SPECTRUM_RAW").value
            ref2_offset = find_in_dict(label, "^SP_SPECTRUM_REF2").value
            radiance_offset = find_in_dict(label, "^SP_SPECTRUM_RAD").value
            ref1_offset = find_in_dict(label, "^SP_SPECTRUM_REF1").value
            qa_offset = find_in_dict(label, "^SP_SPECTRUM_QA").value
            l2d_offset = find_in_dict(label, "^L2D_RESULT_ARRAY").value

            ancillary_data = find_in_dict(label,
                                          "ANCILLARY_AND_SUPPLEMENT_DATA")
            nrows = ancillary_data['ROWS']
            ncols = ancillary_data['COLUMNS']
            rowbytes = ancillary_data['ROW_BYTES']

            columns = []
            bytelengths = []
            datatypes = []

            index = np.arange(nrows)

            indata.seek(ancillary_data_offset - 1)

            for i in ancillary_data.items():
                if i[0] == 'COLUMN':
                    entry = i[1]
                    columns.append(str(entry['NAME']))
                    datatypes.append(label_dtype_map[entry['DATA_TYPE']])
                    bytelengths.append(entry['BYTES'])
            strbytes = map(str, bytelengths)
            rowdtype = list(
                zip(columns,
                    map(''.join, zip(['>'] * ncols, datatypes, strbytes))))
            d = np.fromstring(indata.read(rowbytes * nrows),
                              dtype=rowdtype,
                              count=nrows)
            self.ancillary_data = pd.DataFrame(d,
                                               columns=columns,
                                               index=np.arange(nrows))
            """
            print len(columns)
            for i in range(nrows):
                d = np.fromstring(indata.read(rowbytes), dtype=rowdtype, count=1)
                self.ancillary_data.iloc[i] = d[0]
            """

            assert (ncols == len(columns))

            keys = [
                "SP_SPECTRUM_WAV", "SP_SPECTRUM_RAW", "SP_SPECTRUM_REF1",
                "SP_SPECTRUM_REF2", "SP_SPECTRUM_RAD", "SP_SPECTRUM_QA"
            ]
            array_offsets = [
                wavelength_offset, raw_offset, ref1_offset, ref2_offset,
                radiance_offset, qa_offset
            ]
            offsets = dict(zip(keys, array_offsets))
            arrays = {}
            for k, offset in offsets.items():
                indata.seek(offset - 1)
                newk = k.split('_')[-1]

                d = find_in_dict(label, k)
                unit = d['UNIT']
                lines = d['LINES']
                scaling_factor = d['SCALING_FACTOR']

                arr = np.fromstring(indata.read(lines * 296 * 2),
                                    dtype='>H').astype(np.float64)
                arr = arr.reshape(lines, -1)

                if isinstance(scaling_factor, float):
                    arr *= scaling_factor
                arrays[newk] = arr

            self.wavelengths = pd.Series(arrays['WAV'][0])

            self.spectra = {}
            for i in range(nrows):
                self.spectra[i] = pd.DataFrame(
                    index=self.wavelengths,
                    columns=["Raw", "Highlands", "Mare", "Radiance", "QA"])

                self.spectra[i]['Raw'] = arrays['RAW'][i]
                self.spectra[i]['Highlands'] = arrays['REF1'][i]
                self.spectra[i]['Mare'] = arrays['REF2'][i]
                self.spectra[i]['Radiance'] = arrays['RAD'][i]
                self.spectra[i]['QA'] = arrays['QA'][i]
                #self.spectra[i] = pd.concat([raw, high, self.mare, rad, qa], axis=1)

                if cleaned:
                    self.spectra[i] = self.spectra[i][
                        self.spectra[i]['QA'] < qa_threshold]

                self.spectra[i] = Spectra(self.spectra[i])
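
A hypothetical usage sketch, assuming the __init__ above belongs to a reader
class; SpcFile and the input path are placeholder names for illustration:

spc = SpcFile('observation.spc', cleaned=True, qa_threshold=2000)
print(spc.wavelengths.head())       # the shared wavelength axis
print(spc.spectra[0]['Radiance'])   # radiance column of the first spectrum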
Example #10
    def __init__(self, input_data, cleaned=True, qa_threshold=2000):
        """
        Read the .spc file, parse the label, and extract the spectra

        Parameters
        ----------

        input_data : string
                     The PATH to the input .spc file

        cleaned : boolean
                  If True, mask the data based on the QA array.
        """

        label_dtype_map = {'IEEE_REAL':'f',
                        'MSB_INTEGER':'i',
                        'MSB_UNSIGNED_INTEGER':'u'}

        label = pvl.load(input_data)
        self._label = label 
        with open(input_data, 'rb') as indata:

            # Get the offsets
            ancillary_data_offset = find_in_dict(label, 
                                    "^ANCILLARY_AND_SUPPLEMENT_DATA").value
            wavelength_offset = find_in_dict(label, "^SP_SPECTRUM_WAV").value
            raw_offset = find_in_dict(label, "^SP_SPECTRUM_RAW").value
            ref2_offset = find_in_dict(label, "^SP_SPECTRUM_REF2").value
            radiance_offset = find_in_dict(label, "^SP_SPECTRUM_RAD").value
            ref1_offset = find_in_dict(label, "^SP_SPECTRUM_REF1").value
            qa_offset = find_in_dict(label, "^SP_SPECTRUM_QA").value
            l2d_offset = find_in_dict(label, "^L2D_RESULT_ARRAY").value

            ancillary_data = find_in_dict(label, "ANCILLARY_AND_SUPPLEMENT_DATA")
            nrows = ancillary_data['ROWS']
            ncols = ancillary_data['COLUMNS']
            rowbytes = ancillary_data['ROW_BYTES']

            columns = []
            bytelengths = []
            datatypes = []

            index = np.arange(nrows)

            indata.seek(ancillary_data_offset - 1)

            for i in ancillary_data.items():
                if i[0] == 'COLUMN':
                    entry = i[1]
                    columns.append(str(entry['NAME']))
                    datatypes.append(label_dtype_map[entry['DATA_TYPE']])
                    bytelengths.append(entry['BYTES'])
            strbytes = map(str, bytelengths)
            rowdtype = list(zip(columns, map(''.join, zip(['>'] * ncols, datatypes, strbytes))))
            d = np.fromstring(indata.read(rowbytes * nrows), dtype=rowdtype,
                              count=nrows)
            self.ancillary_data = pd.DataFrame(d, columns=columns,
                                               index=np.arange(nrows))

            """
            print len(columns)
            for i in range(nrows):
                d = np.fromstring(indata.read(rowbytes), dtype=rowdtype, count=1)
                self.ancillary_data.iloc[i] = d[0]
            """

            assert(ncols == len(columns))

            keys = ["SP_SPECTRUM_WAV","SP_SPECTRUM_RAW", "SP_SPECTRUM_REF1",
                    "SP_SPECTRUM_REF2", "SP_SPECTRUM_RAD", "SP_SPECTRUM_QA"]
            array_offsets = [wavelength_offset, raw_offset, ref1_offset,
                            ref2_offset, radiance_offset, qa_offset]
            offsets = dict(zip(keys, array_offsets))
            arrays = {}
            for k, offset in offsets.items():
                indata.seek(offset - 1)
                newk = k.split('_')[-1]
                
                d = find_in_dict(label, k)
                unit = d['UNIT']
                lines = d['LINES']
                scaling_factor = d['SCALING_FACTOR']
                
                arr = np.fromstring(indata.read(lines * 296*2), dtype='>H').astype(np.float64)
                arr = arr.reshape(lines, -1)
                
                if isinstance(scaling_factor, float):
                    arr *= scaling_factor
                arrays[newk] = arr

            self.wavelengths = pd.Series(arrays['WAV'][0])

            self.spectra = {}
            for i in range(nrows):
                self.spectra[i] = pd.DataFrame(index = self.wavelengths,
                                        columns = ["Raw", "Highlands",
                                                 "Mare", "Radiance", "QA"])
                 
                self.spectra[i]['Raw'] = arrays['RAW'][i]
                self.spectra[i]['Highlands'] = arrays['REF1'][i]
                self.spectra[i]['Mare'] = arrays['REF2'][i]
                self.spectra[i]['Radiance'] = arrays['RAD'][i]
                self.spectra[i]['QA'] = arrays['QA'][i]
                #self.spectra[i] = pd.concat([raw, high, self.mare, rad, qa], axis=1)
           
                if cleaned:
                    self.spectra[i] = self.spectra[i][self.spectra[i]['QA'] < qa_threshold]
            
                self.spectra[i] = Spectra(self.spectra[i])
Example #11
def match_images(args, config_dict):
    # Match the images in the input file using various candidate graph
    # methods; produces two files usable in ISIS.

    try:
        cg = CandidateGraph.from_adjacency(find_in_dict(config_dict, 'inputfile_path') +
                                           args.input_file, basepath=find_in_dict(config_dict, 'basepath'))
    except Exception:
        cg = CandidateGraph.from_filelist(find_in_dict(config_dict, 'inputfile_path') + args.input_file)

    # Apply SIFT to extract features
    cg.extract_features(method=config_dict['extract_features']['method'],
                        extractor_parameters=find_in_dict(config_dict, 'extractor_parameters'))

    # Match
    cg.match_features(k=config_dict['match_features']['k'])

    # Apply outlier detection
    cg.apply_func_to_edges('symmetry_check')
    cg.apply_func_to_edges('ratio_check',
                    ratio=find_in_dict(config_dict, 'ratio'),
                    mask_name=find_in_dict(config_dict, 'mask_name'),
                    single=find_in_dict(config_dict, 'single'))

    # Compute a homography and apply RANSAC
    cg.apply_func_to_edges('compute_fundamental_matrix', clean_keys=find_in_dict(config_dict, 'fundamental_matrices')['clean_keys'],
                                    method=find_in_dict(config_dict, 'fundamental_matrices')['method'],
                                    reproj_threshold=find_in_dict(config_dict, 'reproj_threshold'),
                                    confidence=find_in_dict(config_dict, 'confidence'))

    cg.apply_func_to_edges('subpixel_register', clean_keys=find_in_dict(config_dict, 'subpixel_register')['clean_keys'],
                         template_size=find_in_dict(config_dict, 'template_size'),
                         threshold=find_in_dict(config_dict, 'threshold'),
                         search_size=find_in_dict(config_dict, 'search_size'),
                         max_x_shift=find_in_dict(config_dict, 'max_x_shift'),
                         max_y_shift=find_in_dict(config_dict, 'max_y_shift'),
                         tiled=find_in_dict(config_dict, 'tiled'),
                         upsampling=find_in_dict(config_dict, 'upsampling'),
                         error_check=find_in_dict(config_dict, 'error_check'))

    cg.apply_func_to_edges('suppress', clean_keys=find_in_dict(config_dict, 'suppress')['clean_keys'],
                k=find_in_dict(config_dict, 'suppress')['k'],
                min_radius=find_in_dict(config_dict, 'min_radius'),
                error_k=find_in_dict(config_dict, 'error_k'))

    cnet = cg.to_cnet(clean_keys=find_in_dict(config_dict, 'cnet_conversion')['clean_keys'],
                      isis_serials=True)

    filelist = cg.to_filelist()

    write_filelist(filelist, find_in_dict(config_dict, 'outputfile_path') + args.output_file + '.lis')

    to_isis(find_in_dict(config_dict, 'outputfile_path') + args.output_file + '.net', cnet,
            mode='wb',
            networkid=find_in_dict(config_dict, 'networkid'),
            targetname=find_in_dict(config_dict, 'targetname'),
            description=find_in_dict(config_dict, 'description'),
            username=find_in_dict(config_dict, 'username'))
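
Because match_images pulls every parameter with find_in_dict, config_dict can
nest its keys however the caller likes, as long as each name is present
somewhere. Below is a minimal sketch of one shape that satisfies the lookups
above; all values are illustrative placeholders, not settings from the source:

config_dict = {
    'inputfile_path': '/data/in/',
    'outputfile_path': '/data/out/',
    'basepath': '/data/images/',
    'extract_features': {'method': 'sift',
                         'extractor_parameters': {'nfeatures': 500}},
    'match_features': {'k': 2},
    # Outlier detection
    'ratio': 0.8,
    'mask_name': 'ratio',
    'single': False,
    # Fundamental matrix estimation
    'fundamental_matrices': {'clean_keys': ['symmetry', 'ratio'],
                             'method': 'ransac'},
    'reproj_threshold': 2.0,
    'confidence': 0.99,
    # Subpixel registration
    'subpixel_register': {'clean_keys': ['fundamental']},
    'template_size': 19,
    'threshold': 0.8,
    'search_size': 53,
    'max_x_shift': 1.0,
    'max_y_shift': 1.0,
    'tiled': False,
    'upsampling': 10,
    'error_check': False,
    # Suppression and output
    'suppress': {'clean_keys': ['subpixel'], 'k': 25},
    'min_radius': 5,
    'error_k': 0.05,
    'cnet_conversion': {'clean_keys': ['suppression']},
    'networkid': 'example_network',
    'targetname': 'Moon',
    'description': 'illustrative configuration',
    'username': 'analyst',
}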
Example #12
def match_images(args, config_dict):
    # Match the images in the input file using various candidate graph
    # methods; produces two files usable in ISIS.

    try:
        cg = CandidateGraph.from_adjacency(
            find_in_dict(config_dict, 'inputfile_path') + args.input_file,
            basepath=find_in_dict(config_dict, 'basepath'))
    except Exception:
        cg = CandidateGraph.from_filelist(
            find_in_dict(config_dict, 'inputfile_path') + args.input_file)

    # Apply SIFT to extract features
    cg.extract_features(method=config_dict['extract_features']['method'],
                        extractor_parameters=find_in_dict(
                            config_dict, 'extractor_parameters'))

    # Match
    cg.match_features(k=config_dict['match_features']['k'])

    # Apply outlier detection
    cg.apply_func_to_edges('symmetry_check')
    cg.apply_func_to_edges('ratio_check',
                           ratio=find_in_dict(config_dict, 'ratio'),
                           mask_name=find_in_dict(config_dict, 'mask_name'),
                           single=find_in_dict(config_dict, 'single'))

    # Compute a homography and apply RANSAC
    cg.apply_func_to_edges(
        'compute_fundamental_matrix',
        clean_keys=find_in_dict(config_dict,
                                'fundamental_matrices')['clean_keys'],
        method=find_in_dict(config_dict, 'fundamental_matrices')['method'],
        reproj_threshold=find_in_dict(config_dict, 'reproj_threshold'),
        confidence=find_in_dict(config_dict, 'confidence'))

    cg.apply_func_to_edges(
        'subpixel_register',
        clean_keys=find_in_dict(config_dict,
                                'subpixel_register')['clean_keys'],
        template_size=find_in_dict(config_dict, 'template_size'),
        threshold=find_in_dict(config_dict, 'threshold'),
        search_size=find_in_dict(config_dict, 'search_size'),
        max_x_shift=find_in_dict(config_dict, 'max_x_shift'),
        max_y_shift=find_in_dict(config_dict, 'max_y_shift'),
        tiled=find_in_dict(config_dict, 'tiled'),
        upsampling=find_in_dict(config_dict, 'upsampling'),
        error_check=find_in_dict(config_dict, 'error_check'))

    cg.apply_func_to_edges('suppress',
                           clean_keys=find_in_dict(config_dict,
                                                   'suppress')['clean_keys'],
                           k=find_in_dict(config_dict, 'suppress')['k'],
                           min_radius=find_in_dict(config_dict, 'min_radius'),
                           error_k=find_in_dict(config_dict, 'error_k'))

    cnet = cg.to_cnet(clean_keys=find_in_dict(config_dict,
                                              'cnet_conversion')['clean_keys'],
                      isis_serials=True)

    filelist = cg.to_filelist()

    write_filelist(
        filelist,
        find_in_dict(config_dict, 'outputfile_path') + args.output_file +
        '.lis')

    to_isis(find_in_dict(config_dict, 'outputfile_path') + args.output_file +
            '.net',
            cnet,
            mode='wb',
            networkid=find_in_dict(config_dict, 'networkid'),
            targetname=find_in_dict(config_dict, 'targetname'),
            description=find_in_dict(config_dict, 'description'),
            username=find_in_dict(config_dict, 'username'))