def costs(self, v):
    to_db_add = []
    # Get the query obj
    session = Session()
    q = session.query(Costs)
    # Need the new instance here to avoid __setattr__ issues
    df = pd.DataFrame(v)
    for idx, row in df.iterrows():
        # Now invert the expanded dict back into a single JSONB column for storage
        res = q.filter(Costs.match_id == idx).first()
        if res:
            # Update the JSON blob; use val so the argument v is not shadowed
            costs_new_or_updated = row.to_dict()
            for k, val in costs_new_or_updated.items():
                if val is None:
                    continue
                elif np.isnan(val):
                    val = None
                res._cost[k] = val
            # In-place mutation of a JSONB-mapped dict is invisible to the ORM,
            # so the attribute has to be flagged dirty explicitly
            sqlalchemy.orm.attributes.flag_modified(res, '_cost')
            session.add(res)
            session.commit()
        else:
            row = row.to_dict()
            costs = row.pop('_costs', {})
            for k, val in row.items():
                # Guard against None before the NaN check; np.isnan(None) raises
                if val is not None and np.isnan(val):
                    val = None
                costs[k] = val
            cost = Costs(match_id=idx, _cost=costs)
            to_db_add.append(cost)
    if to_db_add:
        session.bulk_save_objects(to_db_add)
        session.commit()
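# A minimal standalone sketch of why flag_modified is needed above: SQLAlchemy
# does not detect in-place mutation of a plain JSON/JSONB-mapped dict, so
# assigning res._cost[k] alone would never emit an UPDATE. update_json_field
# and its obj/session arguments are hypothetical stand-ins, not part of this
# module.
from sqlalchemy.orm.attributes import flag_modified

def update_json_field(session, obj, key, value):
    obj._cost[key] = value       # in-place change, invisible to the ORM
    flag_modified(obj, '_cost')  # mark the attribute dirty explicitly
    session.add(obj)
    session.commit()

# An alternative design is to map the column with
# sqlalchemy.ext.mutable.MutableDict.as_mutable(JSONB), which tracks key-level
# mutation automatically and makes the explicit flag unnecessary.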
def keypoints(self, kps):
    session = Session()
    io_keypoints.to_hdf(self.keypoint_file, keypoints=kps)
    res = session.query(Keypoints).filter(Keypoints.image_id == self['node_id']).first()
    if res is None:
        # Touch keypoint_file so the Keypoints row is created, then reload it
        _ = self.keypoint_file
        res = self._from_db(Keypoints)
    res.nkeypoints = len(kps)
    session.commit()
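# A hypothetical sketch of the HDF round trip the setter above relies on,
# using pandas' own HDF5 writer; io_keypoints.to_hdf in the real project may
# store additional metadata. write_keypoints/read_nkeypoints are illustrative
# names, not the module's API.
import pandas as pd

def write_keypoints(path, kps):
    # Table format so the row count can be read back without loading the data
    kps.to_hdf(path, key='keypoints', mode='w', format='table')

def read_nkeypoints(path):
    with pd.HDFStore(path, mode='r') as store:
        return store.get_storer('keypoints').nrows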
def fundamental_matrix(self, v):
    session = Session()
    res = session.query(Edges).\
        filter(Edges.source == self.source['node_id']).\
        filter(Edges.destination == self.destination['node_id']).first()
    if res:
        res.fundamental = v
    else:
        edge = Edges(source=self.source['node_id'],
                     destination=self.destination['node_id'],
                     fundamental=v)
        session.add(edge)
    session.commit()
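# The query-then-insert pattern above is not safe under concurrent writers.
# If the edges table had a unique constraint on (source, destination), the
# same upsert could be pushed to PostgreSQL in one statement; this is a
# sketch under that assumed constraint, not the module's actual schema.
from sqlalchemy.dialects.postgresql import insert

def upsert_fundamental(session, source_id, destination_id, f):
    stmt = insert(Edges.__table__).values(source=source_id,
                                          destination=destination_id,
                                          fundamental=f)
    stmt = stmt.on_conflict_do_update(index_elements=['source', 'destination'],
                                      set_={'fundamental': f})
    session.execute(stmt)
    session.commit()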
def ring(self, ring):
    # Setters need a single session and so should not make use of the
    # syntax sugar _from_db
    session = Session()
    res = session.query(Edges).\
        filter(Edges.source == self.source['node_id']).\
        filter(Edges.destination == self.destination['node_id']).first()
    if res:
        res.ring = ring
    else:
        edge = Edges(source=self.source['node_id'],
                     destination=self.destination['node_id'],
                     ring=ring)
        session.add(edge)
    session.commit()
def matches(self, v):
    to_db_add = []
    to_db_update = []
    df = pd.DataFrame(v)
    df.index.name = v.index.name
    # Get the query obj
    session = Session()
    q = session.query(Matches)
    for idx, row in df.iterrows():
        # Determine if this is an update or the addition of a new row
        if hasattr(row, 'id'):
            res = q.filter(Matches.id == row.id).first()
            match_id = row.id
        elif v.index.name == 'id':
            res = q.filter(Matches.id == row.name).first()
            match_id = row.name
        else:
            res = None
        if res:
            # Update: coerce numpy scalars to builtins so they bind cleanly.
            # np.int/np.float were removed from NumPy, so test the abstract types
            mapping = {}
            mapping['id'] = match_id
            for index in row.index:
                row_val = row[index]
                if isinstance(row_val, np.integer):
                    row_val = int(row_val)
                elif isinstance(row_val, np.floating):
                    row_val = float(row_val)
                elif isinstance(row_val, WKBElement):
                    continue
                mapping[index] = row_val
            to_db_update.append(mapping)
        else:
            match = Matches(source=int(row.source),
                            source_idx=int(row.source_idx),
                            destination=int(row.destination),
                            destination_idx=int(row.destination_idx))
            to_db_add.append(match)
    if to_db_add:
        session.bulk_save_objects(to_db_add)
    if to_db_update:
        session.bulk_update_mappings(Matches, to_db_update)
    session.commit()
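# A small sketch generalizing the scalar coercion above: every numpy scalar
# derives from np.generic, and .item() converts it to the nearest builtin,
# which avoids enumerating np.integer/np.floating cases by hand. to_native is
# an illustrative helper, not part of this module.
import numpy as np

def to_native(value):
    if isinstance(value, np.generic):
        return value.item()
    return value

assert type(to_native(np.int64(3))) is int
assert type(to_native(np.float32(1.5))) is float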
def __init__(self, *args, parent=None, **kwargs):
    # If this is the first time that the image is seen, add it to the DB
    if parent is None:
        self.parent = Parent(config)
    else:
        self.parent = parent

    # Create a session to work in
    session = Session()

    # For now, just use the PATH to determine if the node/image is in the DB
    res = session.query(Images).filter(Images.path == kwargs['image_path']).first()
    exists = False
    if res:
        exists = True
        kwargs['node_id'] = res.id
    session.close()

    super(NetworkNode, self).__init__(*args, **kwargs)

    if exists is False:
        # Create the camera entry; fall back to a null camera if the model
        # cannot be instantiated
        try:
            self._camera = create_camera(self.geodata)
            serialized_camera = self._camera.getModelState()
            cam = Cameras(camera=serialized_camera)
        except Exception:
            cam = None
        kpspath = io_keypoints.create_output_path(self.geodata.file_name)
        # Create the keypoints entry
        kps = Keypoints(path=kpspath, nkeypoints=0)
        # Create the image
        i = Images(name=kwargs['image_name'],
                   path=kwargs['image_path'],
                   footprint_latlon=self.footprint,
                   cameras=cam,
                   keypoints=kps)
        session = Session()
        session.add(i)
        session.commit()
        session.close()
    self.job_status = defaultdict(dict)
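# The exists-check in __init__ is a query-then-create idiom; factored out it
# looks like the sketch below. get_or_create is an illustrative helper and,
# like the code above, is only race-free if the filter columns carry a unique
# constraint.
def get_or_create(session, model, defaults=None, **filters):
    instance = session.query(model).filter_by(**filters).first()
    if instance is not None:
        return instance, False
    instance = model(**{**filters, **(defaults or {})})
    session.add(instance)
    session.commit()
    return instance, True

# e.g. image, created = get_or_create(session, Images, path=image_path)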
def compute_overlaps(self):
    query = """
    SELECT ST_AsEWKB(geom) AS geom FROM
        ST_Dump((
            SELECT ST_Polygonize(the_geom) AS the_geom FROM (
                SELECT ST_Union(the_geom) AS the_geom FROM (
                    SELECT ST_ExteriorRing(footprint_latlon) AS the_geom
                    FROM images) AS lines
            ) AS noded_lines
        ))"""
    session = Session()
    oquery = session.query(Overlay)
    iquery = session.query(Images)

    rows = []
    for q in self._engine.execute(query).fetchall():
        overlaps = []
        b = bytes(q['geom'])
        qgeom = shapely.wkb.loads(b)
        res = iquery.filter(Images.footprint_latlon.ST_Intersects(from_shape(qgeom, srid=949900)))
        for i in res:
            fgeom = to_shape(i.footprint_latlon)
            area = qgeom.intersection(fgeom).area
            # Reject sliver intersections caused by numerical noise
            if area < 1e-6:
                continue
            overlaps.append(i.id)
        o = Overlay(geom='SRID=949900;{}'.format(qgeom.wkt), overlaps=overlaps)
        res = oquery.filter(Overlay.overlaps == o.overlaps).first()
        if res is None:
            rows.append(o)
    session.bulk_save_objects(rows)
    session.commit()

    # Remove any overlaps that touch at most one image
    res = oquery.filter(sqlalchemy.func.cardinality(Overlay.overlaps) <= 1)
    res.delete(synchronize_session=False)
    session.commit()
    session.close()
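# A shapely-only sketch of the decomposition the SQL above performs in
# PostGIS: union the footprint exterior rings into noded linework, then
# polygonize the result into non-overlapping faces. The toy boxes stand in
# for real image footprints.
from shapely.geometry import box
from shapely.ops import unary_union, polygonize

footprints = [box(0, 0, 2, 2), box(1, 1, 3, 3)]
noded = unary_union([f.exterior for f in footprints])
faces = list(polygonize(noded))
# Faces covered by two or more footprints are the overlap regions
overlaps = [f for f in faces
            if sum(f.representative_point().within(fp) for fp in footprints) > 1]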
def masks(self, v):
    def dict_check(d):
        # Recursively replace NaN with None so the dict is JSON serializable
        for k, val in d.items():
            if isinstance(val, dict):
                dict_check(val)
            elif val is None:
                continue
            elif np.isnan(val):
                d[k] = None

    df = pd.DataFrame(v)
    session = Session()
    res = session.query(Edges).\
        filter(Edges.source == self.source['node_id']).\
        filter(Edges.destination == self.destination['node_id']).first()
    if res:
        as_dict = df.to_dict()
        dict_check(as_dict)
        # Update the masks
        res.masks = as_dict
        session.add(res)
        session.commit()
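# An equivalent, non-recursive way to build the NaN-free dict above when the
# input is a flat DataFrame: cast to object first so None survives instead of
# being coerced back to NaN. A sketch, assuming no nested dict values.
import numpy as np
import pandas as pd

df = pd.DataFrame({'a': [1.0, np.nan], 'b': [np.nan, 2.0]})
as_dict = df.astype(object).where(pd.notnull(df), None).to_dict()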
def matches(self):
    session = Session()
    session.query(Matches).filter(Matches.source == self.source['node_id'],
                                  Matches.destination == self.destination['node_id']).delete()
    session.commit()
    session.close()