Example 1
    def load_features(cm, _cxs=None, force_recomp=False):
        if _cxs is None:
            cxs = cm.get_valid_cxs()
        elif type(_cxs) is types.ListType:
            cxs = np.array(_cxs)
        elif type(_cxs) in [types.IntType, types.LongType, np.uint32]:
            cxs = np.array([_cxs])
        else:
            cxs = _cxs
        count_feat = 0
        is_dirty = np.bitwise_or(cm.cx2_dirty_bit[cxs], force_recomp)
        num_samp = cxs.size
        num_dirty = np.sum(is_dirty)
        # HACKS
        if not np.iterable(is_dirty):
            is_dirty = np.array([is_dirty])
        if not np.iterable(cxs):
            cxs = np.array([cxs])
        load_cx = cxs[is_dirty]

        num_clean = num_samp - num_dirty
        #logdbg('Loading Features: Dirty=%d ; #Clean=%d' % (num_dirty, num_clean))
        if num_dirty == 0:
            return
        logio('Loading %d Feature Reps' % num_dirty)
        am = cm.hs.am
        for cx in iter(load_cx):
            cid = cm.cx2_cid[cx]
            if cid <= 0:
                logwarn('WARNING: IX=' + str(cx) + ' is invalid')
                continue
            chiprep_fpath = cm.hs.iom.get_chiprep_fpath(cid)
            # Ensure that the features exist
            if force_recomp or not os.path.exists(chiprep_fpath):
                logio('Computing and saving features of cid=' + str(cid))
                hotspotter.ChipFunctions.precompute_chipreps(
                    cm.hs, [cx], num_procs=1, force_recompute=force_recomp)
            # Load the features
            logdbg('Loading features in ' + chiprep_fpath)
            npz = np.load(chiprep_fpath)
            fpts = npz['arr_0']
            fdsc = npz['arr_1']
            npz.close()
            cm.cx2_fpts[cx] = fpts
            cm.cx2_fdsc[cx] = fdsc
            cm.cx2_dirty_bit[cx] = False
            count_feat += len(fpts)

        logdbg('* Loaded ' + str(count_feat) + ' keypoints and descriptors')
        return True
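The keys 'arr_0' and 'arr_1' read above are NumPy's defaults for arrays passed positionally to np.savez. A minimal sketch of that save/load round trip, with placeholder arrays and a hypothetical file name (not HotSpotter's real chiprep path):

    import numpy as np

    # Placeholder keypoints and descriptors standing in for a real chip representation.
    fpts = np.random.rand(10, 5)
    fdsc = (np.random.rand(10, 128) * 255).astype(np.uint8)

    # Positional arguments to np.savez are stored as 'arr_0', 'arr_1', ...
    np.savez('chiprep_cid1.npz', fpts, fdsc)

    npz = np.load('chiprep_cid1.npz')
    loaded_fpts = npz['arr_0']
    loaded_fdsc = npz['arr_1']
    npz.close()

    assert np.allclose(fpts, loaded_fpts)
    assert (fdsc == loaded_fdsc).all()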
Example 2
 def save_model(vm):
     # See if the model is savable
     if not vm.model_prefs.save_load_model:
         logdbg('Can NOT save the visual model due to preferences')
         return False
     if vm.isDirty:
         raise Exception('Can NOT save the visual model due to dirty index')
     if vm.flann is None:
         raise Exception('Can NOT save the visual model without a flann index')
     logdbg('Building dictionary to save')
     # TODO: This dictionary should just exist and not be 
     # directly tied to this class.
     # Build a dictionary of savable model terms
     to_save_dict = {key : vm.__dict__[key] \
                     for key in vm.savable_model_fields }
     # Get the save paths
     model_fpath = vm.hs.iom.get_model_fpath()
     flann_index_fpath = vm.hs.iom.get_flann_index_fpath()
     # Save the Model
     logio('Saving model to: '+model_fpath)
     np.savez(model_fpath, **to_save_dict)
     # Save the Index
     logio('Saving index to: '+flann_index_fpath)
     vm.flann.save_index(flann_index_fpath)
     logio('Model save was successful')
     return True
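save_model relies on np.savez(model_fpath, **to_save_dict), where each dictionary key becomes a named array in the .npz archive. A minimal sketch of that pattern, including reading the dictionary back, using hypothetical field names rather than the real savable_model_fields:

    import numpy as np

    # Hypothetical savable fields; the real names live in vm.savable_model_fields.
    to_save_dict = {
        'sample_ids': np.arange(10),
        'weights': np.ones((10, 4)),
    }

    # Keyword arguments to np.savez become named arrays in the archive.
    np.savez('model.npz', **to_save_dict)

    npz = np.load('model.npz')
    restored = {key: npz[key] for key in npz.files}
    npz.close()

    assert sorted(restored.keys()) == sorted(to_save_dict.keys())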
Example 3
    def _load_table(iom, csv_fpath, table_name, alloc_func, csv_func):
        """
        Reads csv files. Must pass in a table name a memory allocation function 
        and a csv_func: function which parses the fields read by _load_table
        """
        logio("Loading " + table_name + " Table: " + csv_fpath)
        if not exists(csv_fpath):
            logio('"' + csv_fpath + '" Does Not Exist')
            return False
        fid = file(csv_fpath, "r")
        csv_headers = None
        line = fid.readline()
        num_line_prefix = "# NumData"
        # Foreach line in the CSV file
        while line != "":
            line = line.strip()
            # BLANK LINE: skip to the next line
            if line == "":
                line = fid.readline()
                continue
            # COMMENT LINE: Check for metadata
            elif line[0] == "#":
                # CHECK Preallocation
                if line.find(num_line_prefix) > -1:
                    # Parse out the number of lines to allocate
                    # and use the given allocation function
                    num_lines = int(line.replace(num_line_prefix, "").replace(" ", ""))
                    alloc_func(num_lines)
                # CHECK Data Headers: StripeSpotter
                elif line.find("#imgindex") > -1:
                    logmsg("Loading a Legacy StripeSpotter File")
                    csv_headers = line[1:].split(",")
                # CHECK Data Headers: Legacy HotSpotter
                elif line.find("#01)") > -1:
                    logmsg("Loading a Legacy HotSpotter File")
                    csv_headers = []
                    while line != "":
                        line = line[:-1]
                        if len(line) < 4 or line[3] != ")":
                            break
                        parnstr = "#\\d\\d\\) "
                        head_field = re.sub(parnstr, "", line)
                        head_field = re.sub(" - .*", "", head_field)
                        csv_headers += [head_field]
                        line = fid.readline()
                # CHECK Data Headers: Hotspotter
                elif any([line.find(field) >= 0 for field in ["ChipID", "NameID", "ImageID"]]):
                    csv_headers = [field.strip() for field in line[1:].split(",")]
                    # HACK: Change the fields to the ones it actually expects
                    import hotspotter.other.AbstractPrintable

                    _lbl2_header = hotspotter.other.AbstractPrintable._lbl2_header
                    _header2_lbl = {v: k for k, v in _lbl2_header.iteritems()}
                    csv_headers = [
                        _header2_lbl[field] if field in _header2_lbl.keys() else field for field in csv_headers
                    ]

            # DATA LINE: Read it
            else:
                csv_data = [data_field.strip() for data_field in line.split(",")]
                csv_func(csv_data, csv_headers)
            # Next Line
            line = fid.readline()
        # Finish reading the table
        fid.close()
        logio("Loaded " + table_name + " Table")
        return True
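The docstring spells out a callback contract: alloc_func receives the row count parsed from the '# NumData' comment, and csv_func receives each data row together with the parsed column headers. A hypothetical usage sketch of that contract; the callback names, the CSV path, and the commented-out call are illustrative, not part of HotSpotter's actual API:

    chip_rows = []

    def alloc_chip_table(num_rows):
        # Preallocation hook; a real implementation would size its arrays here.
        print('expecting %d chip rows' % num_rows)

    def add_chip_row(csv_data, csv_headers):
        # csv_headers stays None if no header comment preceded the data lines.
        if csv_headers is not None:
            chip_rows.append(dict(zip(csv_headers, csv_data)))
        else:
            chip_rows.append(csv_data)

    # iom would be the IOManager instance that owns _load_table; the path is a placeholder.
    # iom._load_table('chip_table.csv', 'Chip', alloc_chip_table, add_chip_row)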