Example #1
0
def main(msg):
    """Compute fundamental matrices for all matched image pairs in *msg*,
    keep only matches inside the overlap footprint, spatially suppress
    them, and push the surviving points through to `deepen`.

    Parameters
    ----------
    msg : dict
          Message with keys 'poly' (WKT footprint), 'files' (image id ->
          keypoint file path), 'matches' (stringified edge tuple -> JSON
          dataframe), 'overlaps', and 'oid'.

    Returns
    -------
     : object
       Whatever `deepen` returns for the suppressed subset of points.
    """
    import ast  # local import: used only for safely parsing edge keys

    fp = shapely.wkt.loads(msg['poly'])

    files = msg['files']
    # Compute the fundamental matrices
    fundamentals = {}
    matches = []
    for k, v in msg['matches'].items():
        # Keys are string-encoded edge tuples, e.g. "(0, 1)".  Use
        # literal_eval instead of eval: the message likely arrives from an
        # external queue and eval would execute arbitrary code.
        edge = ast.literal_eval(k)
        print(edge)
        match = pd.read_json(v)
        source_path = files[str(edge[0])]
        destination_path = files[str(edge[1])]

        x1 = from_hdf(source_path, index=match.source_idx.values, descriptors=False)
        x2 = from_hdf(destination_path, index=match.destination_idx.values, descriptors=False)

        x1 = make_homogeneous(x1[['x', 'y']].values)
        x2 = make_homogeneous(x2[['x', 'y']].values)
        f, fmask = compute_fundamental_matrix(x1, x2, method='ransac', reproj_threshold=20)
        fundamentals[edge] = f
        match['strength'] = compute_reprojection_error(f, x1, x2)
        matches.append(match)

    matches = pd.concat(matches)

    # Of the concatenated matches only a subset intersect the geometry for this overlap, pull these

    def check_in(r, poly):
        p = shapely.geometry.Point(r.lon, r.lat)
        return p.within(poly)

    intersects = matches.apply(check_in, args=(fp,), axis=1)
    matches = matches[intersects]
    matches = matches.reset_index(drop=True)

    # Apply the spatial suppression; clamp the target count to [3, 25].
    bounds = fp.bounds
    k = max(3, min(25, fp.area / 0.005))
    subset = spatial_suppression(matches, bounds, k=k)

    # Push the points through
    overlaps = msg['overlaps']
    oid = msg['oid']
    pts = deepen(subset, fundamentals, overlaps, oid)

    return pts
Example #2
0
    def get_keypoints(self,
                      index=None,
                      format='hdf',
                      overlap=False,
                      homogeneous=False,
                      **kwargs):
        """
        Return the keypoints for the node.  If index is passed, return
        the appropriate subset.

        Parameters
        ----------
        index : iterable
                indices of the keypoints to return

        format : {'npy', 'hdf'}
                 Storage format of the keypoint file.  Default: 'hdf'.

        Returns
        -------
         : dataframe
           A pandas dataframe of keypoints, restricted to the 'x' and
           'y' columns.

        Raises
        ------
        ValueError
            If format is not one of 'npy' or 'hdf'.
        """
        path = self.keypoint_file

        if format == 'npy':
            kps = io_keypoints.from_npy(path)
        elif format == 'hdf':
            kps = io_keypoints.from_hdf(path,
                                        index=index,
                                        descriptors=False,
                                        **kwargs)
        else:
            # Previously an unknown format fell through to an opaque
            # UnboundLocalError on `kps`; fail loudly instead.
            raise ValueError("format must be 'npy' or 'hdf', got {!r}".format(format))

        kps = kps[['x', 'y']]  # Added for fundamental

        if homogeneous:
            # TODO: Make the kps homogeneous
            pass

        return kps
Example #3
0
File: node.py  Project: fossabot/autocnet
    def load_features(self, in_path, format='npy', **kwargs):
        """
        Load keypoints and descriptors for the given image
        from an NPY or HDF file and store them on the node.

        Parameters
        ----------
        in_path : str or object
                  PATH to the hdf file or a HDFDataset object handle

        format : {'npy', 'hdf'}
                 The format that the features are stored in.  Default: npy.

        Raises
        ------
        ValueError
            If format is not one of 'npy' or 'hdf'.
        """
        if format == 'npy':
            keypoints, descriptors = io_keypoints.from_npy(in_path)
        elif format == 'hdf':
            keypoints, descriptors = io_keypoints.from_hdf(in_path, **kwargs)
        else:
            # Previously an unknown format fell through to an opaque
            # UnboundLocalError; fail loudly with a clear message.
            raise ValueError("format must be 'npy' or 'hdf', got {!r}".format(format))

        self.keypoints = keypoints
        self.descriptors = descriptors
Example #4
0
File: node.py  Project: Kelvinrr/autocnet
    def load_features(self, in_path, format='npy', **kwargs):
        """
        Load keypoints and descriptors for the given image
        from an NPY or HDF file and store them on the node.

        Parameters
        ----------
        in_path : str or object
                  PATH to the hdf file or a HDFDataset object handle

        format : {'npy', 'hdf'}
                 The format that the features are stored in.  Default: npy.

        Raises
        ------
        ValueError
            If format is not one of 'npy' or 'hdf'.
        """
        if format == 'npy':
            keypoints, descriptors = io_keypoints.from_npy(in_path)
        elif format == 'hdf':
            keypoints, descriptors = io_keypoints.from_hdf(in_path, **kwargs)
        else:
            # Previously an unknown format fell through to an opaque
            # UnboundLocalError; fail loudly with a clear message.
            raise ValueError("format must be 'npy' or 'hdf', got {!r}".format(format))

        self.keypoints = keypoints
        self.descriptors = descriptors
Example #5
0
 def descriptors(self):
     """Return the descriptors stored in the node's keypoint file.

     Best-effort: returns None if the file is missing or unreadable.
     """
     try:
         return io_keypoints.from_hdf(self.keypoint_file, keypoints=False)
     except Exception:
         # A bare `except:` would also swallow KeyboardInterrupt and
         # SystemExit; catch Exception only.
         return None
Example #6
0
 def keypoints(self):
     """Return the keypoints stored in the node's keypoint file.

     Best-effort: returns an empty DataFrame if the file is missing
     or unreadable.
     """
     try:
         return io_keypoints.from_hdf(self.keypoint_file, descriptors=False)
     except Exception:
         # A bare `except:` would also swallow KeyboardInterrupt and
         # SystemExit; catch Exception only.
         return pd.DataFrame()
Example #7
0
def match(msg, args):
    """Ring-match the keypoints of a source/destination image pair,
    densify the result, and return the matched index pairs.

    Parameters
    ----------
    msg : dict
          Message with keys 'spath'/'dpath' (keypoint HDF files) and
          'sidx'/'didx' (source/destination image ids).

    args : object
           Namespace with ringradius, maxradius, targetpoints and
           tolerance attributes controlling the ring matcher.

    Returns
    -------
    data : dict
           Result message.  'success' is False when no ring solution was
           found; on success 'sidx'/'didx' hold the matched keypoint
           index arrays and 'ring' the distance ring.
    """
    ref_kps, ref_desc = from_hdf(msg['spath'])
    tar_kps, tar_desc = from_hdf(msg['dpath'])

    # Default (failure) message; filled in on success below.
    data = {
        'success': False,
        'source': msg['sidx'],
        'destin': msg['didx'],
        'callback': 'ring_matcher_callback'
    }

    # TODO: geom-based prefiltering of keypoints was removed here — the
    # ring matcher can handle the full keypoint sets since all kps are
    # already read.

    ref_feats = ref_kps[['x', 'y', 'xm', 'ym', 'zm']].values
    tar_feats = tar_kps[['x', 'y', 'xm', 'ym', 'zm']].values

    _, _, pidx, ring = ring_match(ref_feats,
                                  tar_feats,
                                  ref_desc,
                                  tar_desc,
                                  ring_radius=args.ringradius,
                                  max_radius=args.maxradius,
                                  target_points=args.targetpoints,
                                  tolerance_val=args.tolerance)

    if pidx is None:
        print('Unable to find a solution.')
        return data

    # Now densify the matches if a ring has been found
    print('Initial Pass Resulted in {} matches'.format(len(pidx)))
    print('Distance ring: {}'.format(ring))

    # All reference points [those selected by the ring matcher][x, y coords].
    in_feats = ref_feats[pidx[:, 0]][:, :2]
    # NOTE(review): xextent is computed from column 1 and yextent from
    # column 0, which looks transposed relative to the x/y column order
    # of in_feats — preserved as-is; confirm against add_correspondences.
    xextent = (np.min(in_feats[:, 1]), np.max(in_feats[:, 1]))
    yextent = (np.min(in_feats[:, 0]), np.max(in_feats[:, 0]))
    refs_to_add = add_correspondences(in_feats,
                                      ref_feats,
                                      tar_feats,
                                      ref_desc,
                                      tar_desc,
                                      xextent,
                                      yextent,
                                      ring,
                                      8,
                                      8,
                                      target_points=15,
                                      search_radius=int(args.ringradius / 3),
                                      max_search_radius=args.ringradius)
    refs_to_add = [i for i in refs_to_add if len(i)]

    if refs_to_add:
        print('Adding {} correspondences'.format(len(refs_to_add)))
        pidx = np.vstack((pidx, np.vstack(refs_to_add)))
        # Get the unique rows: https://stackoverflow.com/questions/31097247/remove-duplicate-rows-of-a-numpy-array
        # Perform lex sort and get sorted data
        sorted_idx = np.lexsort(pidx.T)
        sorted_data = pidx[sorted_idx, :]
        # Get unique row mask
        row_mask = np.append([True], np.any(np.diff(sorted_data, axis=0), 1))
        # Get unique rows
        pidx = sorted_data[row_mask]
    else:
        print('no additional references to add')

    # Drop any row whose destination index appears more than once.
    # np.unique with return_counts replaces the previous O(n^2)
    # list.count() scan while keeping row order.
    _, inverse, counts = np.unique(pidx[:, 1],
                                   return_inverse=True,
                                   return_counts=True)
    pidx = pidx[counts[inverse] == 1]

    # Package the data to round trip to the server
    data['success'] = True
    data['sidx'] = pidx[:, 0]
    data['didx'] = pidx[:, 1]
    data['ring'] = ring

    print(data)

    return data