def __init__(self, scale=1.0, fov=97.62815):
    """Set up the six axis-aligned cube-face projections.

    The default fov of 97.62815 deg equals 2*atan(8/7), which makes the
    inner 90-degree image span 7/8 of the full dimension.  (106.2602 deg,
    i.e. 2*atan(4/3), would give 3/4 instead.)
    """
    self.scale = scale
    # (xrot, yrot) per face; dict order matches the original construction.
    rotations = {
        'forward': (0.0, 0.0),
        'left': (0.0, -90.0),
        'right': (0.0, 90.0),
        'up': (-90.0, 0.0),
        'down': (90.0, 0.0),
    }
    self.proj = {
        face: Projection(xrot=xr, yrot=yr, fov=fov, aspect=1.0)
        for face, (xr, yr) in rotations.items()
    }
def add_projection(self, movie_id, type, date, time):
    """Insert a new Projection row for a movie and commit immediately."""
    new_row = Projection(movie_id=movie_id, type=type, date=date, time=time)
    self.session.add(new_row)
    self.session.commit()
def find_projection(self, proj_id, movie_id):
    """Return the projection with id *proj_id* belonging to movie *movie_id*.

    Falls back to a blank Projection() when no match exists.  If several
    projections share the id, the last one wins (matching the original
    last-assignment behaviour).
    """
    movie = self.find_movie(movie_id)
    matches = [p for p in movie.projections if p.id == proj_id]
    return matches[-1] if matches else Projection()
def add_projections(self, data_for_projections):
    """Build Projection objects from (type, dateTime, movie_id) triples.

    NOTE(review): the assembled ``projections`` list is never added to a
    session nor returned, so these objects are discarded when the function
    ends — confirm whether a ``session.add_all``/commit is missing here.
    """
    projections = []
    for projection in data_for_projections:
        # Each entry is positional: [0]=type, [1]=dateTime, [2]=movie_id
        projections.append(
            Projection(type=projection[0],
                       dateTime=projection[1],
                       movie_id=projection[2]))
def add_projection(self):
    """Prompt for projection details on stdin and commit a new row.

    Note: the movie_id prompt has no trailing '>' in the original; the
    prompt strings are preserved exactly.
    """
    # Keyword evaluation order matches the original sequence of input() calls.
    new_projection = Projection(
        type_=input('type>'),
        date_=input('date>'),
        time_=input('time>'),
        movie_id=int(input('movie_id')),
    )
    self.__session.add(new_projection)
    self.__session.commit()
def select_clusters(img, clusters, origin):
    """Return 4 borderlines that bound the table.

    Tries every 4-combination of line clusters, keeps candidates whose
    corners exist and whose horizon slope is small, then walks each side's
    lines outward-in until the colour statistics inside/outside the border
    separate cleanly.  Returns (clusters, borders) on success, otherwise
    (clusters, None).
    """
    if len(clusters) < 4:
        return clusters, None
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    in_frame = lambda x, y: 0 <= x < img_hsv.shape[1] and 0 <= y < img_hsv.shape[0]
    attempt = 1
    for quadruple in combinations(clusters, 4):
        # BUG FIX: combinations() yields tuples, which have no .sort();
        # use sorted() to get an ordered, indexable list instead.
        quadruple = sorted(quadruple)
        borders = [lines[0] for lines in quadruple]
        corners = get_corners(borders, origin)
        slope = horizon_slope(borders)
        if corners and slope < 0.3:
            # average color in the central area
            mean_color, stddev = mean_deviation(img_hsv, corners,
                                                green_mask(img))
            for i in range(4):
                for line in reversed(quadruple[i]):
                    borders[i] = line
                    corners = get_corners(borders, origin)
                    if not all(in_frame(*c) for c in corners):
                        continue
                    pr = Projection(corners, origin)
                    inner_strip, outer_strip = pr.border_neighbourhoods(i - 1)
                    inner_color, inner_dev = mean_deviation(
                        img_hsv, inner_strip)
                    outer_color, outer_dev = mean_deviation(
                        img_hsv, outer_strip)
                    inner_diff = np.abs(inner_color - mean_color)
                    outer_diff = np.abs(outer_color - mean_color)
                    if TEST:
                        imgt = img.copy()
                        for line in borders:
                            draw_line(imgt, line, origin)
                        draw_polygon(imgt, inner_strip)
                        draw_polygon(imgt, outer_strip)
                        cv2.imwrite(
                            'data/lines/steps/%s_%02d.jpg' %
                            (filename.replace('.', ''), attempt), imgt)
                        attempt += 1
                        print('in %s out %s ind %s outd %s' %
                              (inner_diff, outer_diff, inner_dev, outer_dev))
                    # accept the line when the inside matches the table
                    # colour and the outside clearly does not
                    if (inner_diff[0] < 10 and inner_dev[0] < 20
                            and max(inner_diff) < 100
                            and (outer_diff[0] > 10 or outer_dev[0] > 20)):
                        if TEST:
                            print('v')
                        break
                else:  # loop finished without break, no i'th borderline found
                    break  # stop search
            else:  # search was not stopped, all borderlines found
                clusters = [[line for line in lines if line[1] < border[1]]
                            for lines, border in zip(quadruple, borders)]
                return clusters, borders
    return clusters, None
def read_from_file(self, filename):
    """Open a raster file with GDAL and record its metadata on self.

    Sets keywords, name, filename, projection, geotransform, dimensions
    and the first (and only allowed) raster band.  Raises Exception when
    the file cannot be opened, contains more than one band, or the band
    cannot be read; RuntimeError when an .asc file lacks its .prj file.
    """
    # Open data file for reading
    # File must be kept open, otherwise GDAL methods segfault.
    fid = self.fid = gdal.Open(filename, gdal.GA_ReadOnly)
    if fid is None:
        msg = 'Could not open file %s' % filename
        raise Exception(msg)

    # Record raster metadata from file
    basename, ext = os.path.splitext(filename)

    # If file is ASCII, check that projection is around.
    # GDAL does not check this nicely, so it is worth an
    # error message
    if ext == '.asc':
        try:
            open(basename + '.prj')
        except IOError:
            msg = ('Projection file not found for %s. You must supply '
                   'a projection file with extension .prj' % filename)
            raise RuntimeError(msg)

    # Look for any keywords
    self.keywords = read_keywords(basename + '.keywords')

    # Determine name
    if 'title' in self.keywords:
        rastername = self.keywords['title']
    else:
        # Use basename without leading directories as name
        rastername = os.path.split(basename)[-1]

    self.name = rastername
    self.filename = filename

    self.projection = Projection(self.fid.GetProjection())
    self.geotransform = self.fid.GetGeoTransform()
    self.columns = fid.RasterXSize
    self.rows = fid.RasterYSize
    self.number_of_bands = fid.RasterCount

    # Assume that file contains all data in one band
    msg = 'Only one raster band currently allowed'
    if self.number_of_bands > 1:
        msg = ('WARNING: Number of bands in %s are %i. '
               'Only the first band will currently be '
               'used.' % (filename, self.number_of_bands))
        # FIXME(Ole): Let us use python warnings here
        raise Exception(msg)

    # Get first band.
    band = self.band = fid.GetRasterBand(1)
    if band is None:
        msg = 'Could not read raster band from %s' % filename
        raise Exception(msg)
def __init__(self, id, name, rating):
    """Initialise a movie record and attach its projections.

    Fetches every row of the projections table through the shared class
    cursor and keeps the rows whose second column (the movie id) matches
    this movie.
    """
    rows = Movie.cursor.execute("SELECT * FROM projections")
    self.id = id
    self.name = name
    self.rating = rating
    self.projections = [
        Projection(row[0], row[1], row[2], row[3], row[4])
        for row in rows
        if row[1] == self.id
    ]
def main():
    """Create the cinema database schema and seed it with sample data."""
    engine = create_engine("sqlite:///cinema_database.db")
    Base.metadata.create_all(engine)
    session = Session(bind=engine)

    movie_rows = [
        ("The Hunger Games: Catching Fire", 7.9),
        ("Wreck-It Ralph", 7.8),
        ("Her", 8.3),
    ]
    session.add_all([Movie(name=n, rating=r) for n, r in movie_rows])

    projection_rows = [
        (1, "3D", "2014-04-01", "19:10"),
        (1, "2D", "2014-04-01", "19:00"),
        (1, "4DX", "2014-04-02", "21:00"),
        (3, "2D", "2014-04-05", "20:20"),
        (2, "3D", "2014-04-02", "22:00"),
        (2, "2D", "2014-04-02", "19:30"),
    ]
    session.add_all([
        Projection(movie_id=m, type=t, date=d, time=h)
        for m, t, d, h in projection_rows
    ])

    reservation_rows = [
        (1, 2, 1), (1, 3, 5), (1, 7, 8),
        (3, 1, 1), (3, 1, 2),
        (5, 2, 3), (5, 2, 4),
    ]
    session.add_all([
        Reservation(username="******", projection_id=p, row=r, col=c)
        for p, r, c in reservation_rows
    ])

    session.commit()
def create_projection():
    """HTTP handler: create a projected collection from a parent dataset.

    Reads database settings from the environment and the parent filename,
    projection filename and field list from the request JSON; validates
    the request, then delegates to Projection.create.  Returns the result
    URI as JSON with HTTP 201, or the validator's error response.
    """
    database_url = os.environ[DATABASE_URL]
    database_replica_set = os.environ[DATABASE_REPLICA_SET]
    database_name = os.environ[DATABASE_NAME]
    parent_filename = request.json[PARENT_FILENAME_NAME]
    projection_filename = request.json[PROJECTION_FILENAME_NAME]
    projection_fields = request.json[FIELDS_NAME]

    database = Database(
        database_url,
        database_replica_set,
        os.environ[DATABASE_PORT],
        database_name,
    )
    request_validator = UserRequest(database)

    # Bail out early with the validator's own error response, if any.
    request_errors = analyse_request_errors(
        request_validator,
        parent_filename,
        projection_filename,
        projection_fields)
    if request_errors is not None:
        return request_errors

    # Source (parent) and destination (projection) collection URLs.
    database_url_input = Database.collection_database_url(
        database_url,
        database_name,
        parent_filename,
        database_replica_set,
    )
    database_url_output = Database.collection_database_url(
        database_url,
        database_name,
        projection_filename,
        database_replica_set,
    )

    metadata_creator = Metadata(database)
    projection = Projection(metadata_creator, database_url_input,
                            database_url_output)
    projection.create(parent_filename, projection_filename,
                      projection_fields)

    return (
        jsonify({
            MESSAGE_RESULT:
            MICROSERVICE_URI_GET +
            projection_filename +
            MICROSERVICE_URI_GET_PARAMS
        }),
        HTTP_STATUS_CODE_SUCCESS_CREATED,
    )
def __init__(self, w, h):
    """Initialise the render window, clock, camera and projection."""
    self.w = w
    self.h = h
    self.res = (w, h)
    self.screen = pg.display.set_mode((w, h))
    self.clock = pg.time.Clock()
    self.fps = 60
    self.objects = []
    # Camera starts at the origin, yawed 190 degrees.
    self.camera = Camera(self, (0, 0, 0))
    self.camera.camera_yaw(radians(190))
    self.projection = Projection(self)
def add_projection(self):
    """Interactively add a projection for a chosen movie.

    Prompts for the movie id, projection type, date and time, then
    inserts and commits a new Projection row.
    """
    movie_id = input("Type the movie_id: ")
    movie_name = self.get_movie_title_by_id(movie_id)
    print("Adding projection for " + movie_name)
    # Renamed from ``type`` to avoid shadowing the builtin.
    projection_type = input("Enter the type of the projection: ")
    date = self.obtain_date()
    time = self.obtain_time()
    projection = Projection(type=projection_type,
                            date=date,
                            time=time,
                            movie_id=movie_id)
    self.__session.add(projection)
    self.__session.commit()
def create_table_projcetions():
    """Populate the projections table with six hard-coded sample rows.

    (The misspelt function name is kept for compatibility with callers.)
    """
    rows = [
        (1, "3D", datetime(2014, 4, 1, 19, 10)),
        (1, "2D", datetime(2014, 4, 1, 19, 0)),
        (1, "4DX", datetime(2014, 4, 2, 21, 0)),
        (3, "2D", datetime(2014, 4, 5, 20, 20)),
        (2, "3D", datetime(2014, 4, 2, 22, 0)),
        (2, "2D", datetime(2014, 4, 2, 19, 30)),
    ]
    projections = []
    for movie_id, kind, when in rows:
        projection = Projection()
        projection.movie_id = movie_id
        projection.type = kind
        projection.datetime = when
        projections.append(projection)
    session.add_all(projections)
def loadModules(self): self.modules['poi']['rss'] = geoRss(self.modules, os.path.join(os.path.dirname(__file__), 'Setup', 'feeds.txt')) #self.modules['poi']['geonames'] = geonames(self.modules) #self.modules['poi']['waypoints'] = waypointsModule(self.modules, "data/waypoints.gpx") self.modules['poi']['osm'] = osmPoiModule(self.modules) self.modules['overlay'] = guiOverlay(self.modules) self.modules['position'] = geoPosition() self.modules['tiles'] = tileHandler(self.modules) self.modules['data'] = DataStore(self.modules) self.modules['events'] = pyrouteEvents(self.modules) self.modules['sketch'] = sketching(self.modules) self.modules['osmdata'] = osmData(self.modules) self.modules['projection'] = Projection() self.modules['tracklog'] = tracklog(self.modules) self.modules['meta'] = moduleInfo(self.modules) self.modules['route'] = RouteOrDirect(self.modules['osmdata'].data)
def calc_brightness(self):
    """Project the 3D emission profile into a 2D surface brightness profile.

    Requires the cooling function profile to be loaded; fits its spline
    on demand, combines it with the electron density to get flux per unit
    volume, and projects that flux along the line of sight.

    Raises
    ------
    ValueError
        If the cooling function profile is missing.
    """
    if self.cf_radius is None or self.cf_value is None:
        raise ValueError("cooling function profile missing")
    if self.cf_spline is None:
        self.fit_spline(spline="cooling_function", log10=[])

    electron_density = self.calc_density_electron()
    cooling = self.eval_spline(spline="cooling_function", x=self.r)
    # flux per unit volume
    volume_flux = cooling * electron_density ** 2 / AstroParams.ratio_ne_np
    # outer shell radii, converted from kpc to cm, for the projector
    outer_radii = (self.r + self.r_err) * au.kpc.to(au.cm)
    return Projection(outer_radii).project(volume_flux)
def check0(self, ch, w, debug=False):
    """Project choreography *ch* in world *w* and discharge all VCs.

    Returns True when every verification condition holds; otherwise
    prints the failing condition (and its model, if one exists) and
    returns False.
    """
    env = Env(w, [])
    projector = Projection()
    projector.execute(ch, env, debug)
    chor = projector.choreography
    vectorize(chor, w)

    checker = CompatibilityCheck(chor, w)
    checker.localChoiceChecks()
    checker.generateTotalGuardsChecks()
    checker.computePreds(debug)
    checker.generateCompatibilityChecks(debug)

    for index, vc in enumerate(checker.vcs):
        if not vc.discharge(debug=debug):
            print(index, "inFP", vc.title)
            if vc.hasModel():
                print(vc.modelStr())
            return False
    return True
def __init__(self, name=None, projection=None, keywords=None, style_info=None, sublayer=None): """Common constructor for all types of layers See docstrings for class Raster and class Vector for details. """ # Name msg = ('Specified name must be a string or None. ' 'I got %s with type %s' % (name, str(type(name))[1:-1])) verify(isinstance(name, basestring) or name is None, msg) self.name = name # Projection self.projection = Projection(projection) # Keywords if keywords is None: self.keywords = {} else: msg = ('Specified keywords must be either None or a ' 'dictionary. I got %s' % keywords) verify(isinstance(keywords, dict), msg) self.keywords = keywords # Style info if style_info is None: self.style_info = {} else: msg = ('Specified style_info must be either None or a ' 'dictionary. I got %s' % style_info) verify(isinstance(style_info, dict), msg) self.style_info = style_info # Defaults self.sublayer = sublayer self.filename = None self.data = None
def make_reservation(self):
    """Interactive reservation workflow (name -> movie -> projection -> seats).

    The user can type "give_up" at any prompt to abort.  Seats are only
    persisted after the user types "finalize" at the last step.
    """
    while True:
        # Step 1: user name and ticket count
        name = input("Step 1 (User) Choose name> ")
        if name == "give_up":
            break
        num_tickets = input("Step 1 (User) Choose the number of tickets> ")
        if num_tickets == "give_up":
            break
        num_tickets = int(num_tickets)

        # Step 2: pick a movie
        print("Current movies:")
        self.show_movies()
        chosen_movie = None
        chosen_proj = Projection()  # placeholder until a projection is picked
        movie_id = input("Step 2 (Movie) Choose a movie> ")
        if movie_id == "give_up":
            break
        movie_id = int(movie_id)
        chosen_movie = self.find_movie(movie_id)
        # Load existing reservations so seat availability is up to date
        for projection in chosen_movie.projections:
            projection.load_reservations(projection.id)

        # Step 3: pick a projection of that movie
        print("Projections for movie {}".format(chosen_movie.name))
        self.show_projection(movie_id)
        proj_id = input("Step 3 (Projection) Choose projection> ")
        if proj_id == "give_up":
            break
        proj_id = int(proj_id)
        print("Available seats (marked with a dot):")
        chosen_proj = self.find_projection(proj_id, movie_id)
        print(chosen_proj.id)
        chosen_proj.show_seats()

        # Step 4: pick seats (choose_seats returns False on give-up)
        seats = []
        seats = self.choose_seats(num_tickets, chosen_proj)
        if seats is False:
            break
        chosen_proj.show_seats()

        # Step 5: confirm and persist
        self.print_reservation_details(chosen_movie, chosen_proj, seats)
        command = input("To finalize type <finalize>: ")
        if command == "finalize":
            for seat in seats:
                manage_tables.add_reservations(name, chosen_proj.id, seat[0], seat[1])
def create_projection():
    """HTTP handler: create a projected collection from a parent dataset.

    Variant that relies on module-level ``database``, ``database_url``,
    ``database_name``, ``database_replica_set`` and ``request_validator``
    objects rather than reading the environment per request.  Returns the
    result URI as JSON with HTTP 201, or the validator's error response.
    """
    parent_filename = request.json[PARENT_FILENAME_NAME]
    projection_filename = request.json[PROJECTION_FILENAME_NAME]
    projection_fields = request.json[FIELDS_NAME]

    # Bail out early with the validator's own error response, if any.
    request_errors = analyse_request_errors(
        request_validator,
        parent_filename,
        projection_filename,
        projection_fields)
    if request_errors is not None:
        return request_errors

    # Source (parent) and destination (projection) collection URLs.
    database_url_input = Database.collection_database_url(
        database_url,
        database_name,
        parent_filename,
        database_replica_set,
    )
    database_url_output = Database.collection_database_url(
        database_url,
        database_name,
        projection_filename,
        database_replica_set,
    )

    metadata_creator = Metadata(database)
    projection = Projection(metadata_creator, database_url_input,
                            database_url_output)
    projection.create(parent_filename, projection_filename,
                      projection_fields)

    return (
        jsonify({
            MESSAGE_RESULT:
            f'{MICROSERVICE_URI_GET}{projection_filename}'
            f'{MICROSERVICE_URI_GET_PARAMS}'
        }),
        HTTP_STATUS_CODE_SUCCESS_CREATED,
    )
def main(): """The main function in which everything you run should start.""" # Make sure that the output/ directory exists, or create it otherwise. output_dir = pathlib.Path.cwd() / "output" if not output_dir.is_dir(): output_dir.mkdir() #DAY 1 print("DAY 1 \nSquare Trial") rectangle_1 = Rectangle(-1, 0, -1, 0, 1) rectangle_2 = Rectangle(0, 1, 0, 1, .5) circle_1 = Circle(.8, .1, .05, 1) #attenuation for a rectangle print('The attenuation at your point for the is:', rectangle_1.attenuation(7, 0)) collection = ObjectCollection() #collection.append(rectangle_1) #collection.append(rectangle_2) collection.append(circle_1) #print(Projection.theta(theta_idx)) #DAY 2 #attenutation of a circle because this would be useful for changing the #coordinates of eta and xi print('\nDAY 2 \nCircle Trial') print('The attenuation at (1.2 , 1.2):', circle_1.attenuation(1.2, 1.2)) print('Integrated attenuation for a given eta:', circle_1.project_attenuation(np.pi, 0, (-2, 2))) myproj = Projection([0, np.pi], 100, [-2, 2], 100) myproj.add_object(collection, (-2, 2)) array_to_img( collection.to_array(np.linspace(-2, 2, 100), np.linspace( -2, 2, 100))).save(output_dir / "myproj1(.8, .1, .05, 1).png") array_to_img(myproj.data).save(output_dir / "sin(.8, .1, .05, 1).png")
def calc_electron_density(self):
    """
    Deproject the surface brightness profile to derive the 3D
    electron number density (and then gas mass density) profile
    by incorporating the cooling function profile.

    unit: [ cm^-3 ] if the units converted for input data

    Also caches the result on ``self.ne`` before returning it.
    """
    # Fit the required splines on demand
    if self.s_spline is None:
        self.fit_spline(spline="brightness", log10=["x", "y"])
    if self.cf_spline is None:
        self.fit_spline(spline="cooling_function", log10=[])
    #
    # Evaluate both splines on the common radial grid
    s_new = self.eval_spline(spline="brightness", x=self.r)
    cf_new = self.eval_spline(spline="cooling_function", x=self.r)
    #
    # Deproject brightness using shell outer radii r + r_err
    projector = Projection(rout=self.r + self.r_err)
    s_deproj = projector.deproject(s_new)
    # emission measure per unit volume
    em_v = s_deproj / cf_new
    ne = np.sqrt(em_v * AstroParams.ratio_ne_np)
    self.ne = ne
    return ne
def check(self, P1, P2):
    """Build a two-process world, project choreo(), and discharge all VCs.

    Returns True when every verification condition holds; otherwise
    prints the failing condition (and its model, if one exists) and
    returns False.
    """
    w = World()
    # Constructors register the processes with the world.
    p1 = P1(w, 0)
    p2 = P2(w, 1)
    env = Env(w, [])
    ch = choreo()

    projector = Projection()
    projector.execute(ch, env)
    chor = projector.choreography
    vectorize(chor, w)

    checker = CompatibilityCheck(chor, w)
    checker.localChoiceChecks()
    checker.generateTotalGuardsChecks()
    checker.computePreds()
    checker.generateCompatibilityChecks()

    for index, vc in enumerate(checker.vcs):
        if not vc.discharge():
            print(index, "inFP", vc.title)
            if vc.hasModel():
                print(vc.modelStr())
            return False
    return True
def read_from_file(self, filename):
    """ Read and unpack vector data.

    It is assumed that the file contains only one layer with the
    pertinent features. Further it is assumed for the moment that
    all geometries are points.

    * A feature is a geometry and a set of attributes.
    * A geometry refers to location and can be point, line, polygon or
      combinations thereof.
    * The attributes or obtained through GetField()

    The full OGR architecture is documented at
    * http://www.gdal.org/ogr/ogr_arch.html
    * http://www.gdal.org/ogr/ogr_apitut.html

    Examples are at
    * danieljlewis.org/files/2010/09/basicpythonmap.pdf
    * http://invisibleroads.com/tutorials/gdal-shapefile-points-save.html
    * http://www.packtpub.com/article/geospatial-data-python-geometry
    """
    basename, _ = os.path.splitext(filename)

    # Look for any keywords
    self.keywords = read_keywords(basename + '.keywords')

    # FIXME (Ole): Should also look for style file to populate style_info

    # Determine name
    if 'title' in self.keywords:
        vectorname = self.keywords['title']
    else:
        # Use basename without leading directories as name
        vectorname = os.path.split(basename)[-1]

    self.name = vectorname
    self.filename = filename
    self.geometry_type = None  # In case there are no features

    fid = ogr.Open(filename)
    if fid is None:
        msg = 'Could not open %s' % filename
        raise IOError(msg)

    # Assume that file contains all data in one layer
    msg = 'Only one vector layer currently allowed'
    if fid.GetLayerCount() > 1:
        msg = ('WARNING: Number of layers in %s are %i. '
               'Only the first layer will currently be '
               'used.' % (filename, fid.GetLayerCount()))
        raise Exception(msg)

    layer = fid.GetLayerByIndex(0)

    # Get spatial extent
    self.extent = layer.GetExtent()

    # Get projection
    p = layer.GetSpatialRef()
    self.projection = Projection(p)

    # Get number of features
    N = layer.GetFeatureCount()

    # Extract coordinates and attributes for all features
    geometry = []
    data = []
    for i in range(N):
        feature = layer.GetFeature(i)
        if feature is None:
            msg = 'Could not get feature %i from %s' % (i, filename)
            raise Exception(msg)

        # Record coordinates ordered as Longitude, Latitude
        G = feature.GetGeometryRef()
        if G is None:
            msg = ('Geometry was None in filename %s ' % filename)
            raise Exception(msg)
        else:
            self.geometry_type = G.GetGeometryType()
            if self.geometry_type == ogr.wkbPoint:
                geometry.append((G.GetX(), G.GetY()))
            elif self.geometry_type == ogr.wkbLineString:
                M = G.GetPointCount()
                coordinates = []
                for j in range(M):
                    coordinates.append((G.GetX(j), G.GetY(j)))

                # Record entire line as an Mx2 numpy array
                geometry.append(
                    numpy.array(coordinates, dtype='d', copy=False))
            elif self.geometry_type == ogr.wkbPolygon:
                # Only the outer ring (index 0) is read; holes are ignored
                ring = G.GetGeometryRef(0)
                M = ring.GetPointCount()
                coordinates = []
                for j in range(M):
                    coordinates.append((ring.GetX(j), ring.GetY(j)))

                # Record entire polygon ring as an Mx2 numpy array
                geometry.append(
                    numpy.array(coordinates, dtype='d', copy=False))
            #elif self.geometry_type == ogr.wkbMultiPolygon:
            #    # FIXME: Unpact multiple polygons to simple polygons
            #    # For hints on how to unpack see
            #http://osgeo-org.1803224.n2.nabble.com/
            #gdal-dev-Shapefile-Multipolygon-with-interior-rings-td5391090.html

            #    ring = G.GetGeometryRef(0)
            #    M = ring.GetPointCount()
            #    coordinates = []
            #    for j in range(M):
            #        coordinates.append((ring.GetX(j), ring.GetY(j)))

            #    # Record entire polygon ring as an Mx2 numpy array
            #    geometry.append(numpy.array(coordinates,
            #                                dtype='d',
            #                                copy=False))
            else:
                msg = ('Only point, line and polygon geometries are '
                       'supported. '
                       'Geometry type in filename %s '
                       'was %s.' % (filename, self.geometry_type))
                raise Exception(msg)

        # Record attributes by name
        number_of_fields = feature.GetFieldCount()
        fields = {}
        for j in range(number_of_fields):
            name = feature.GetFieldDefnRef(j).GetName()

            # FIXME (Ole): Ascertain the type of each field?
            #              We need to cast each appropriately?
            #              This is issue #66
            #              (https://github.com/AIFDR/riab/issues/66)
            #feature_type = feature.GetFieldDefnRef(j).GetType()
            fields[name] = feature.GetField(j)
            #print 'Field', name, feature_type, j, fields[name]

        data.append(fields)

    # Store geometry coordinates as a compact numeric array
    self.geometry = geometry
    self.data = data
def __init__(self, data=None, projection=None, geometry=None,
             geometry_type=None, name='', keywords=None, style_info=None):
    """Initialise object with either geometry or filename

    Input
        data: Can be either
            * a filename of a vector file format known to GDAL
            * List of dictionaries of fields associated with
              point coordinates
            * None
        projection: Geospatial reference in WKT format.
                    Only used if geometry is provide as a numeric array,
        geometry: A list of either point coordinates or polygons/lines
                  (see note below)
        geometry_type: Desired interpretation of geometry.
                       Valid options are 'point', 'line', 'polygon' or
                       the ogr types: 1, 2, 3
                       If None, a geometry_type will be inferred
        name: Optional name for layer.
              Only used if geometry is provide as a numeric array
        keywords: Optional dictionary with keywords that describe the
                  layer. When the layer is stored, these keywords will
                  be written into an associated file with extension
                  .keywords.

                  Keywords can for example be used to display text
                  about the layer in a web application.

    Notes

        If data is a filename, all other arguments are ignored
        as they will be inferred from the file.

        The geometry type will be inferred from the dimensions of geometry.
        If each entry is one set of coordinates the type will be
        ogr.wkbPoint, if it is an array of coordinates the type will be
        ogr.wkbPolygon.

        Each polygon or line feature take the form of an Nx2 array
        representing vertices where line segments are joined
    """
    if data is None and projection is None and geometry is None:
        # Instantiate empty object
        self.name = name
        self.projection = None
        self.geometry = None
        self.geometry_type = None
        self.filename = None
        self.data = None
        self.extent = None
        self.keywords = {}
        self.style_info = {}
        return

    if isinstance(data, basestring):
        # A filename: everything else is read from the file
        self.read_from_file(data)
    else:
        # Assume that data is provided as sequences provided as
        # arguments to the Vector constructor
        # with extra keyword arguments supplying metadata
        self.name = name
        self.filename = None

        if keywords is None:
            self.keywords = {}
        else:
            msg = ('Specified keywords must be either None or a '
                   'dictionary. I got %s' % keywords)
            verify(isinstance(keywords, dict), msg)
            self.keywords = keywords

        if style_info is None:
            self.style_info = {}
        else:
            msg = ('Specified style_info must be either None or a '
                   'dictionary. I got %s' % style_info)
            verify(isinstance(style_info, dict), msg)
            self.style_info = style_info

        msg = 'Geometry must be specified'
        verify(geometry is not None, msg)

        msg = 'Geometry must be a sequence'
        verify(is_sequence(geometry), msg)
        self.geometry = geometry

        self.geometry_type = get_geometry_type(geometry, geometry_type)

        #msg = 'Projection must be specified'
        #verify(projection is not None, msg)
        self.projection = Projection(projection)

        if data is None:
            # Generate default attribute as OGR will do that anyway
            # when writing
            data = []
            for i in range(len(geometry)):
                data.append({'ID': i})

        # Check data
        self.data = data
        if data is not None:
            msg = 'Data must be a sequence'
            verify(is_sequence(data), msg)

            msg = ('The number of entries in geometry and data '
                   'must be the same')
            verify(len(geometry) == len(data), msg)
def add_projection(self, projection_type, projection_datetime, movie_id):
    """Insert a single Projection row and commit the session."""
    row = Projection(type=projection_type,
                     dateTime=projection_datetime,
                     movie_id=movie_id)
    self.__session.add(row)
    self.__session.commit()
def read_from_file(self, filename):
    """Read and unpack raster data

    Opens *filename* with GDAL, records metadata (keywords, name,
    projection, geotransform, dimensions), reads the single allowed
    band into a float64 numpy array, and replaces nodata values with
    NaN.  Raises ReadLayerError on any failure.
    """
    # Open data file for reading
    # File must be kept open, otherwise GDAL methods segfault.
    fid = self.fid = gdal.Open(filename, gdal.GA_ReadOnly)
    if fid is None:
        # As gdal doesn't return to us what the problem is we have to
        # figure it out ourselves. Maybe capture stderr?
        if not os.path.exists(filename):
            msg = 'Could not find file %s' % filename
        else:
            msg = ('File %s exists, but could not be read. '
                   'Please check if the file can be opened with '
                   'e.g. qgis or gdalinfo' % filename)
        raise ReadLayerError(msg)

    # Record raster metadata from file
    basename, ext = os.path.splitext(filename)

    # If file is ASCII, check that projection is around.
    # GDAL does not check this nicely, so it is worth an
    # error message
    if ext == '.asc':
        try:
            open(basename + '.prj')
        except IOError:
            msg = ('Projection file not found for %s. You must supply '
                   'a projection file with extension .prj' % filename)
            raise ReadLayerError(msg)

    # Look for any keywords
    self.keywords = read_keywords(basename + '.keywords')

    # Determine name
    if 'title' in self.keywords:
        title = self.keywords['title']

        # Lookup internationalised title if available
        title = tr(title)

        rastername = title
    else:
        # Use basename without leading directories as name
        rastername = os.path.split(basename)[-1]

    if self.name is None:
        self.name = rastername
    self.filename = filename

    self.projection = Projection(self.fid.GetProjection())
    self.geotransform = self.fid.GetGeoTransform()
    self.columns = fid.RasterXSize
    self.rows = fid.RasterYSize
    self.number_of_bands = fid.RasterCount

    # Assume that file contains all data in one band
    msg = 'Only one raster band currently allowed'
    if self.number_of_bands > 1:
        msg = ('WARNING: Number of bands in %s are %i. '
               'Only the first band will currently be '
               'used.' % (filename, self.number_of_bands))
        # FIXME(Ole): Let us use python warnings here
        raise ReadLayerError(msg)

    # Get first band.
    band = self.band = fid.GetRasterBand(1)
    if band is None:
        msg = 'Could not read raster band from %s' % filename
        raise ReadLayerError(msg)

    # Force garbage collection to free up any memory we can (TS)
    gc.collect()

    # Read from raster file
    data = band.ReadAsArray()

    # Convert to double precision (issue #75)
    data = numpy.array(data, dtype=numpy.float64)

    # Self check
    M, N = data.shape
    msg = ('Dimensions of raster array do not match those of '
           'raster file %s' % self.filename)
    verify(M == self.rows, msg)
    verify(N == self.columns, msg)

    nodata = self.band.GetNoDataValue()
    if nodata is None:
        nodata = -9999

    # Replace nodata cells with NaN so downstream maths ignores them
    if nodata is not numpy.nan:
        NaN = numpy.ones((M, N), numpy.float64) * numpy.nan
        data = numpy.where(data == nodata, NaN, data)

    self.data = data
def read_from_file(self, filename):
    """Read and unpack vector data.

    It is assumed that the file contains only one layer with the
    pertinent features. Further it is assumed for the moment that
    all geometries are points.

    * A feature is a geometry and a set of attributes.
    * A geometry refers to location and can be point, line, polygon or
      combinations thereof.
    * The attributes or obtained through GetField()

    The full OGR architecture is documented at
    * http://www.gdal.org/ogr/ogr_arch.html
    * http://www.gdal.org/ogr/ogr_apitut.html

    Examples are at
    * danieljlewis.org/files/2010/09/basicpythonmap.pdf
    * http://invisibleroads.com/tutorials/gdal-shapefile-points-save.html
    * http://www.packtpub.com/article/geospatial-data-python-geometry

    Limitation of the Shapefile are documented in
    http://resources.esri.com/help/9.3/ArcGISDesktop/com/Gp_ToolRef/
    geoprocessing_tool_reference/
    geoprocessing_considerations_for_shapefile_output.htm

    :param filename: a fully qualified location to the file
    :type filename: str

    :raises: ReadLayerError
    """
    base_name = os.path.splitext(filename)[0]

    # Look for any keywords
    self.keywords = read_keywords(base_name + '.keywords')

    # FIXME (Ole): Should also look for style file to populate style_info

    # Determine name
    if 'title' in self.keywords:
        title = self.keywords['title']

        # Lookup internationalised title if available
        title = safe_tr(title)

        vector_name = title
    else:
        # Use base_name without leading directories as name
        vector_name = os.path.split(base_name)[-1]

    if self.name is None:
        self.name = vector_name
    self.filename = filename

    self.geometry_type = None  # In case there are no features

    fid = ogr.Open(filename)
    if fid is None:
        msg = 'Could not open %s' % filename
        raise ReadLayerError(msg)

    # Assume that file contains all data in one layer
    msg = 'Only one vector layer currently allowed'
    if fid.GetLayerCount() > 1 and self.sublayer is None:
        msg = ('WARNING: Number of layers in %s are %i. '
               'Only the first layer will currently be '
               'used. Specify sublayer when creating '
               'the Vector if you wish to use a different layer.'
               % (filename, fid.GetLayerCount()))
        LOGGER.warn(msg)
        # Why do we raise an exception if it is only a warning? TS
        raise ReadLayerError(msg)

    if self.sublayer is not None:
        layer = fid.GetLayerByName(self.sublayer)
    else:
        layer = fid.GetLayerByIndex(0)

    # Get spatial extent
    self.extent = layer.GetExtent()

    # Get projection
    p = layer.GetSpatialRef()
    self.projection = Projection(p)

    layer.ResetReading()

    # Extract coordinates and attributes for all features
    geometry = []
    data = []

    # Use feature iterator
    for feature in layer:
        # Record coordinates ordered as Longitude, Latitude
        G = feature.GetGeometryRef()
        if G is None:
            msg = ('Geometry was None in filename %s ' % filename)
            raise ReadLayerError(msg)
        else:
            self.geometry_type = G.GetGeometryType()
            if self.is_point_data:
                geometry.append((G.GetX(), G.GetY()))
            elif self.is_line_data:
                ring = get_ring_data(G)
                geometry.append(ring)
            elif self.is_polygon_data:
                polygon = get_polygon_data(G)
                geometry.append(polygon)
            elif self.is_multi_polygon_data:
                # Attempt singlepart conversion; give actionable advice
                # when it fails
                try:
                    G = ogr.ForceToPolygon(G)
                except:
                    msg = ('Got geometry type Multipolygon (%s) for '
                           'filename %s and could not convert it to '
                           'singlepart. However, you can use QGIS '
                           'functionality to convert multipart vector '
                           'data to singlepart (Vector -> Geometry Tools '
                           '-> Multipart to Singleparts and use the '
                           'resulting dataset.'
                           % (ogr.wkbMultiPolygon, filename))
                    raise ReadLayerError(msg)
                else:
                    # Read polygon data as single part
                    self.geometry_type = ogr.wkbPolygon
                    polygon = get_polygon_data(G)
                    geometry.append(polygon)
            else:
                msg = ('Only point, line and polygon geometries are '
                       'supported. '
                       'Geometry type in filename %s '
                       'was %s.' % (filename, self.geometry_type))
                raise ReadLayerError(msg)

        # Record attributes by name
        number_of_fields = feature.GetFieldCount()
        fields = {}
        for j in range(number_of_fields):
            name = feature.GetFieldDefnRef(j).GetName()

            # FIXME (Ole): Ascertain the type of each field?
            #              We need to cast each appropriately?
            #              This is issue #66
            #              (https://github.com/AIFDR/riab/issues/66)
            #feature_type = feature.GetFieldDefnRef(j).GetType()
            fields[name] = feature.GetField(j)

            # We do this because there is NaN problem on windows
            # NaN value must be converted to _pseudo_in to solve the
            # problem. But, when InaSAFE read the file, it'll be
            # converted back to NaN value, so that NaN in InaSAFE is a
            # numpy.nan
            # please check https://github.com/AIFDR/inasafe/issues/269
            # for more information
            if fields[name] == _pseudo_inf:
                fields[name] = float('nan')

            #print 'Field', name, feature_type, j, fields[name]

        data.append(fields)

    # Store geometry coordinates as a compact numeric array
    self.geometry = geometry
    self.data = data
def read_from_file(self, filename):
    """Read and unpack raster data

    Opens *filename* with GDAL, records metadata (keywords, name,
    projection, geotransform, dimensions) and the first (and only
    allowed) raster band.  Raises ReadLayerError on any failure.
    """
    # Open data file for reading
    # File must be kept open, otherwise GDAL methods segfault.
    fid = self.fid = gdal.Open(filename, gdal.GA_ReadOnly)
    if fid is None:
        # As gdal doesn't return to us what the problem is we have to
        # figure it out ourselves. Maybe capture stderr?
        if not os.path.exists(filename):
            msg = 'Could not find file %s' % filename
        else:
            msg = ('File %s exists, but could not be read. '
                   'Please check if the file can be opened with '
                   'e.g. qgis or gdalinfo' % filename)
        raise ReadLayerError(msg)

    # Record raster metadata from file
    basename, ext = os.path.splitext(filename)

    # If file is ASCII, check that projection is around.
    # GDAL does not check this nicely, so it is worth an
    # error message
    if ext == '.asc':
        try:
            open(basename + '.prj')
        except IOError:
            msg = ('Projection file not found for %s. You must supply '
                   'a projection file with extension .prj' % filename)
            raise ReadLayerError(msg)

    # Look for any keywords
    self.keywords = read_keywords(basename + '.keywords')

    # Determine name
    if 'title' in self.keywords:
        title = self.keywords['title']

        # Lookup internationalised title if available
        title = safe_tr(title)

        rastername = title
    else:
        # Use basename without leading directories as name
        rastername = os.path.split(basename)[-1]

    if self.name is None:
        self.name = rastername
    self.filename = filename

    self.projection = Projection(self.fid.GetProjection())
    self.geotransform = self.fid.GetGeoTransform()
    self.columns = fid.RasterXSize
    self.rows = fid.RasterYSize
    self.number_of_bands = fid.RasterCount

    # Assume that file contains all data in one band
    msg = 'Only one raster band currently allowed'
    if self.number_of_bands > 1:
        msg = ('WARNING: Number of bands in %s are %i. '
               'Only the first band will currently be '
               'used.' % (filename, self.number_of_bands))
        # FIXME(Ole): Let us use python warnings here
        raise ReadLayerError(msg)

    # Get first band.
    band = self.band = fid.GetRasterBand(1)
    if band is None:
        msg = 'Could not read raster band from %s' % filename
        raise ReadLayerError(msg)
# NOTE(review): this is a fragment from inside an enclosing loop (the bare
# ``continue`` statements and the counter ``i`` rely on that loop, which is
# not visible here).  ``captures``, ``view_num``, ``cfg``, ``seg``, ``pose``,
# ``calibration`` and ``fitter`` are also defined outside this fragment.

# Grab one frame per camera view, resized and converted to RGB.
images = []
for view in range(view_num):
    ret, frame = captures[view].read()
    if ret == True:
        frame = cv2.resize(frame, (1200, 900))
        # Mirror the frame for right-handed capture setups.
        if cfg.HAND == 'RIGHT':
            frame = cv2.flip(frame, cfg.AXIS)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        images.append(frame)
    else:
        continue
# Skip this iteration when any view failed to deliver a frame.
if len(images) < view_num:
    i = i + 1
    continue
# Person segmentation on all views at once.
detected_images, masks, boxes = seg.run(images, cfg.AXIS)
# Undo the earlier mirroring for downstream processing.
for c in range(len(images)):
    images[c] = cv2.flip(images[c], cfg.AXIS)
# The HMR trainer is created once on the first frame and reused after.
if i == 0:
    trainer = pose.HMRTrainer()
trainer.images = detected_images
trainer.frame = i
skeletons = trainer.test()
#check(images, masks, skeletons, boxes, detected_images)
# Triangulate 2D skeletons from all views into 3D points, then fit.
project_points = Projection(calibration, images, skeletons, boxes)
points_3d = project_points.run()
fitter.skel_fit(points_3d)
fitter.seg_fit(masks)
fitter.save_images(i, images)
i = i + 1
def __init_projection(self, args):
    """Build self.projection from the first three positional arguments."""
    first, second, third = args[:3]
    self.projection = Projection(first, second, third)