def phases(self, min, max):
    """
    Go through station channels to retrieve all arrival phases.
    """
    self.logger.debug("Events():phases(%s,%s) " % (min, max))

    phases = defaultdict(lambda: defaultdict(dict))

    assoc = False
    arrival = False

    dbname = self.dbcentral(min)
    self.logger.debug('Events():phases(%s,%s) db:(%s)' % (min, max, dbname))

    if not dbname:
        return phases

    try:
        db = datascope.dbopen(dbname, 'r')
        db = db.lookup(table='arrival')
        db = db.join('assoc')
        nrecs = db.query(datascope.dbRECORD_COUNT)
    except Exception:
        try:
            db = datascope.dbopen(dbname, 'r')
            db = db.lookup(table='arrival')
            nrecs = db.query(datascope.dbRECORD_COUNT)
        except Exception as e:
            self.logger.exception("Events: Exception %s on phases(): %s"
                                  % (e, phases))
            return phases
def test_method_open(self):
    db = antdb.dbopen(self.testdbname, 'r')
    self.assertTrue(db[0] >= 0)
    self.assertEqual(db[1], -501)
    self.assertEqual(db[2], -501)
    self.assertEqual(db[3], -501)
    db.close()
def _main():
    import ant_tools
    from core_tools import Locator, parse_cfg
    from antelope.datascope import closing, dbopen

    args = _parse_command_line()
    if args.pf:
        ant_tools.pf_2_cfg(args.pf, 'pyloceq')
    else:
        ant_tools.pf_2_cfg('pyloceq', 'pyloceq')
    cfg_dict = parse_cfg('pyloceq.cfg')
    locator = Locator(cfg_dict)
    with closing(dbopen(args.db, 'r+')) as db:
        tbl_event = db.schema_tables['event']
        if args.subset:
            view = tbl_event.join('origin')
            view = view.subset(args.subset)
            tbl_event = view.separate('event')
        for record in tbl_event.iter_record():
            evid = record.getv('evid')[0]
            view = tbl_event.subset('evid == %d' % evid)
            event_list = ant_tools.create_event_list(view)
            for event in event_list:
                origin = event.preferred_origin
                origin = locator.locate_eq(origin)
                if origin is None:
                    continue
                origin.update_predarr_times(cfg_dict)
                ant_tools.write_origin(origin, db)
    return 0
def _main():
    import ant_tools
    from core_tools import Locator, parse_cfg
    from antelope.datascope import closing, dbopen

    args = _parse_command_line()
    if args.pf:
        ant_tools.pf_2_cfg(args.pf, "pyloceq")
    else:
        ant_tools.pf_2_cfg("pyloceq", "pyloceq")
    cfg_dict = parse_cfg("pyloceq.cfg")
    locator = Locator(cfg_dict)
    with closing(dbopen(args.db, "r+")) as db:
        tbl_event = db.schema_tables["event"]
        if args.subset:
            view = tbl_event.join("origin")
            view = view.subset(args.subset)
            tbl_event = view.separate("event")
        for record in tbl_event.iter_record():
            evid = record.getv("evid")[0]
            view = tbl_event.subset("evid == %d" % evid)
            event_list = ant_tools.create_event_list(view)
            for event in event_list:
                origin = event.preferred_origin
                origin = locator.locate_eq(origin)
                if origin is None:
                    continue
                origin.update_predarr_times(cfg_dict)
                ant_tools.write_origin(origin, db)
    return 0
def _test_db(self, time, endtime, dbname):
    """
    Verify that the db is valid before saving the value.

    Skips databases that don't match the criteria specified.
    If self.verifytables is set, extra tests are performed to ensure
    that the database contains the requested tables, and that the
    tables contain data.
    """
    self.logger.debug("Test for time=%s =>> %s" % (time, dbname))

    # if os.path.isfile(dbname):
    #     self.dbs[dbname] = {'times': [time, endtime]}
    #     return
    # if self.glob("%s.*" % dbname):
    #     self.dbs[dbname] = {'times': [time, endtime]}
    #     self.logger.warning("No descriptor file for (%s)." % dbname)
    #     return

    try:
        db = datascope.dbopen(dbname, 'r')
    except datascope.DatascopeError:
        self.logger.error('Cannot dbopen %s, skipping.' % dbname)
        return False
def test_table(dbname, tbl, verbose=False): """ Verify that we can work with table. Returns path if valid and we see data. """ logging = getLogger() path = False try: with datascope.closing(datascope.dbopen(dbname, "r")) as db: db = db.lookup(table=tbl) if not db.query(datascope.dbTABLE_PRESENT): logging.warning("No dbTABLE_PRESENT on %s" % dbname) return False if not db.record_count: logging.warning("No %s.record_count" % dbname) path = db.query("dbTABLE_FILENAME") except Exception, e: logging.warning("Prolembs with db[%s]: %s" % (dbname, e)) return False
def dbgetorigins(dbpath, subset_expr):
    # open the origin table, join to event table, subset for preferred origins
    db = datascope.dbopen(dbpath, 'r')
    dborigin = db.lookup(table='origin')
    dborigin = dborigin.join('event')
    dborigin = dborigin.subset("orid == prefor")

    # apply the optional subset expression if there is one, order by time,
    # and display the number of events
    dborigin = dborigin.subset(subset_expr)
    dborigin = dborigin.sort('time')
    n = dborigin.nrecs()
    print("- number of events = {}".format(n))

    # if the size of the arrays is already known, preallocation is much
    # faster than recreating each time with append
    dictorigin = dict()
    origin_id = np.empty(n)
    origin_ml = np.empty(n)
    origin_epoch = np.empty(n)

    # load origins from database and store them in a dictionary
    for dborigin[3] in range(n):
        (origin_id[dborigin[3]], origin_ml[dborigin[3]],
         origin_epoch[dborigin[3]]) = dborigin.getv('orid', 'ml', 'time')
    dictorigin['id'] = origin_id
    dictorigin['ml'] = origin_ml
    dictorigin['time'] = mpl.dates.epoch2num(origin_epoch)

    # close the database and free the memory. It seems that db.close and
    # db.free both close the database, and closing twice produces an error
    db.free()

    return dictorigin, n
def test_table(dbname, tbl, verbose=False): """Verify that we can work with table. Args: dbname (string): name of the Datascope database. tbl (string): name of the database table. verbose (bool): be more verbose in output. Returns: string: path if valid and we see data. False: if table is invalid for any reason. """ path = False try: with datascope.closing(datascope.dbopen(dbname, "r")) as db: db = db.lookup(table=tbl) if not db.query(datascope.dbTABLE_PRESENT): logger.warning("No dbTABLE_PRESENT on %s" % dbname) return False if not db.record_count: logger.warning("No %s.record_count" % dbname) path = db.query("dbTABLE_FILENAME") except Exception as e: logger.warning("Prolembs with db[%s]: %s" % (dbname, e)) return False return path
def _test_db(self, time, endtime, dbname):
    """
    Verify that the db is valid before saving the value.

    Skips databases that don't match the criteria specified.
    If self.verifytables is set, extra tests are performed to ensure
    that the database contains the requested tables, and that the
    tables contain data.
    """
    self.logger.debug("Test for time=%s =>> %s" % (time, dbname))

    # if os.path.isfile(dbname):
    #     self.dbs[dbname] = {'times': [time, endtime]}
    #     return
    # if self.glob("%s.*" % dbname):
    #     self.dbs[dbname] = {'times': [time, endtime]}
    #     self.logger.warning("No descriptor file for (%s)." % dbname)
    #     return

    try:
        db = datascope.dbopen(dbname, 'r')
    except datascope.DatascopeError:
        self.logger.error('Cannot dbopen %s, skipping.' % dbname)
        return False
def _get_events(self): """Update all orids/evids from the database.""" self.cache = [] # Test if we have event table with datascope.closing(datascope.dbopen(self.db, "r")) as db: dbtable = db.lookup(table="event") if dbtable.query(datascope.dbTABLE_PRESENT): steps = ["dbopen event"] steps.extend(["dbjoin origin"]) steps.extend(["dbsubset origin.orid != NULL"]) steps.extend(["dbsubset origin.orid == prefor"]) fields = ["evid"] else: steps = ["dbopen origin"] steps.extend(["dbsubset orid != NULL"]) fields = [] fields.extend([ "orid", "time", "lat", "lon", "depth", "auth", "nass", "ndef", "review" ]) for v in extract_from_db(self.db, steps, fields, self.db_subset): if "evid" not in v: v["evid"] = v["orid"] self.logger.debug("Events(): new event #%s" % v["evid"]) v["allmags"] = [] v["magnitude"] = "-" v["maglddate"] = 0 v["srname"] = "-" v["grname"] = "-" v["time"] = parse_sta_time(v["time"]) v["strtime"] = readable_time(v["time"], self.timeformat, self.timezone) try: v["srname"] = stock.srname(v["lat"], v["lon"]) except Exception as e: self.logger.warning("Problems with srname for orid %s: %s" % (v["orid"], v["lat"], v["lon"], e)) try: v["grname"] = stock.grname(v["lat"], v["lon"]) except Exception as e: self.logger.warning("Problems with grname for orid %s: %s" % (v["orid"], v["lat"], v["lon"], e)) orid = v["orid"] if orid in self.mags: for o in self.mags[orid]: v["allmags"].append(self.mags[orid][o]) if self.mags[orid][o]["lddate"] > v["maglddate"]: v["magnitude"] = self.mags[orid][o]["strmag"] v["maglddate"] = self.mags[orid][o]["lddate"] self.cache.append(v)
def test_method_lookup_clusters(self):
    db = antdb.dbopen(self.testdbname, 'r')
    db = db.lookup(table='clusters')
    self.assertTrue(db[0] >= 0)
    self.assertEqual(db[1], 0)
    self.assertEqual(db[2], -501)
    self.assertEqual(db[3], -501)
    db.close()
def test_method_paths(self):
    db = antdb.dbopen(self.testdbname, 'r')
    db = db.lookup(table='clusters')
    db = db.subset('clustername =~ /%s/' % self.testclustername)
    db.record = 0
    dbcentral = DbCentral(self.testdbname, self.testclustername, False)
    paths = dbcentral.namelist()
    self.assertTrue(len(paths) > 0)
    db.close()
def _get_open_dbmaster_pointer(self):
    """Get a reference to an open database pointer to the dbmaster.

    If the database pointer is closed, open it.
    """
    if self._dbmaster_pointer is None:
        self._dbmaster_pointer = datascope.dbopen(self.dbmaster, "r")

    return self._dbmaster_pointer
def test_method_lookup_clustername(self):
    db = antdb.dbopen(self.testdbname, 'r')
    db = db.lookup(table='clusters')
    db = db.subset('clustername =~ /%s/' % self.testclustername)
    self.assertTrue(db.query('dbRECORD_COUNT') >= 0)
    self.assertTrue(db.table >= 0)
    self.assertEqual(db[2], -501)
    self.assertEqual(db[3], -501)
    db.close()
def _test_db(self, time, endtime, dbname):
    """
    Verify that the db is valid before saving the value.

    Skips databases that don't match the criteria specified.
    If self.verifytables is set, extra tests are performed to ensure
    that the database contains the requested tables, and that the
    tables contain data.
    """
    self.logger.debug("Test for time=%s =>> %s" % (time, dbname))

    # if os.path.isfile(dbname):
    #     self.dbs[dbname] = {'times': [time, endtime]}
    #     return
    # if self.glob("%s.*" % dbname):
    #     self.dbs[dbname] = {'times': [time, endtime]}
    #     self.logger.warning("No descriptor file for (%s)." % dbname)
    #     return

    try:
        db = datascope.dbopen(dbname, "r")
    except datascope.DatascopeError:
        self.logger.error("Cannot dbopen %s, skipping." % dbname)
        return False

    # self.logger.debug(db.query(datascope.dbDATABASE_COUNT))
    # self.logger.debug(db.query(datascope.dbDATABASE_FILENAME))
    # self.logger.debug(db.query(datascope.dbDATABASE_FILES))

    if not db.query(datascope.dbDATABASE_FILENAME):
        self.logger.error("Cannot list files on %s, skipping." % dbname)
        return False

    for table in self.required_tables:
        try:
            dbtbl = db.lookup(table=table)
            try:
                records = dbtbl.query(datascope.dbRECORD_COUNT)
                if not records:
                    self.logger.error(
                        "%s.%s is an empty table. Skipping db."
                        % (dbname, table))
                    return False
            except datascope.DatascopeError:
                self.logger.error(
                    "Table %s.%s is not present. Skipping db."
                    % (dbname, table))
                return False
        finally:
            dbtbl.free()

    db.close()

    # If we get here, the database passes our tests. Add to list
    return True
def _get_stachan_cache(self): """ private function to load data """ records = 0 self.logger.info("Stations(): update cache") for dbname in self.dbcentral.list(): self.logger.debug('Station(): dbname: %s' % dbname) dates = {} query_start_time = time.time() try: self.logger.debug("Dbopen " + dbname) db = datascope.dbopen(dbname, 'r') table = 'wfdisc' field = 'time' self.logger.debug("Dblookup table=%s field=%s" % (table, field)) dbwfdisc = db.lookup(table=table, field=field) self.logger.debug("Getting record count of " + table) records = dbwfdisc.query(datascope.dbRECORD_COUNT) self.mintime = dbwfdisc.ex_eval('min(time)') self.maxtime = dbwfdisc.ex_eval('max(endtime)') except Exception, e: self.logger.exception('Problem with wfdisc table. %s: %s' % (Exception, e)) sys.exit(reactor.stop()) elapsed_time = time.time() - query_start_time self.logger.debug( "Intial dbquery and wfdisc record count took %d seconds" % elapsed_time) if self.maxtime > stock.now() or self.maxtime > (stock.now() - 3600): self.maxtime = -1 self.logger.debug("Starting wfdisc processing of %d records" % records) prog = ProgressLogger("Stations: processing wfdisc record ", records, logger=self.logger) for j in range(records): prog.tick() dbwfdisc.record = j try: sta, chan, dbtime = dbwfdisc.getv('sta', 'chan', 'time') self.wfdisc_stachan[sta].add(chan) self.wfdates[stock.yearday(dbtime)] = 1 except datascope.DatascopeException, e: self.logger.exception('(%s=>%s)' % (Exception, e))
def make_instrument(inv, dbdir, dbnam, respdir):
    flds = ('inid', 'insname', 'instype', 'samprate', 'dir', 'dfile')
    datemin = UTCDateTime(2011, 1, 1)

    # What if the instrument table already exists?
    instabfile = dbdir + dbnam + ".instrument"
    if os.path.isfile(instabfile):
        kok = 0
        while kok == 0:
            yn = raw_input("Instrument table already seems to exist. "
                           "Overwrite/append/cancel? [o/a/c] ")
            if yn == "o":
                print("Ok, deleting and overwriting...")
                os.remove(instabfile)
                kok = 1
            elif yn == "a":
                print("Ok, appending...")
                kok = 1
            elif yn == "c":
                raise SystemExit("Ok, not deleting... aborting")

    with closing(dbopen(dbdir + dbnam, 'r+')) as db:
        dbins = db.lookup(table='instrument')
        inid = dbins.record_count

        insnams_all = []
        for inn in range(0, len(inv.networks)):
            network = inv[inn]
            for iss in range(0, len(network.stations)):
                station = inv[inn][iss]
                for icc in range(0, len(station.channels)):
                    channel = inv[inn][iss][icc]
                    if ((channel.code[1] == 'H' or channel.code[1:3] == 'DH')
                            and channel.start_date > datemin):
                        insnam = channel.sensor.description
                        if not list(set(insnams_all) & set([insnam])):
                            inid = inid + 1
                            insnams_all.append(insnam)
                            respfile = find_respfile(respdir, station.code,
                                                     channel.code)
                            # if respfile is not found, write our own respfile
                            # with write_sac_response_file
                            if not respfile:
                                respfile = write_sac_response_file(
                                    channel, "junk_respfile",
                                    network.code, station.code)
                            cprespfile = ("SAC_PZs__CAdb_instr"
                                          + str(inid).zfill(2))
                            shutil.copy(respdir + respfile,
                                        respdir + cprespfile)
                            if network.code == '7D':
                                instype = 'O_' + channel.code[0:2] + "x"
                            else:
                                instype = 'L_' + channel.code[0:2] + "x"
                            print("Adding instrument " + str(inid) + ", "
                                  + insnam)
                            vals = (inid, insnam, instype,
                                    channel.sample_rate, respdir, cprespfile)
                            fldvals = zip(flds, vals)
                            dbaddrec = dbins.addv(*fldvals)
def _test_db(self, time, dbname):
    """
    Verify that the db is valid before saving the value.

    Skips databases that don't match the criteria specified.
    If self.verifytables is set, extra tests are performed to ensure
    that the database contains the requested tables, and that the
    tables contain data.
    """
    self.logger.debug("Test for time=%s =>> %s" % (time, dbname))

    # if os.path.isfile(dbname):
    #     self.dbs[dbname] = {'times': [time, endtime]}
    #     return
    # if self.glob("%s.*" % dbname):
    #     self.dbs[dbname] = {'times': [time, endtime]}
    #     self.logger_instance.warning("No descriptor file for (%s)." % dbname)
    #     return

    db = None
    dbtbl = None
    try:
        db = datascope.dbopen(dbname, "r")
    except datascope.DatascopeError:
        self.logger.error("Cannot dbopen %s, skipping." % dbname)
        return False
    else:
        for table in self.required_tables:
            try:
                dbtbl = db.lookup(table=table)
                try:
                    dbtbl.query(datascope.dbTABLE_PRESENT)
                    records = dbtbl.query(datascope.dbRECORD_COUNT)
                    if not records:
                        self.logger.error(
                            "%s.%s is an empty table. Skipping db."
                            % (dbname, table)
                        )
                        return False
                except datascope.DatascopeError:
                    self.logger.error(
                        "The table %s.%s is not present. Skipping db."
                        % (dbname, table)
                    )
                    return False
            finally:
                if dbtbl is not None:
                    datascope.dbfree(dbtbl)
    finally:
        if db is not None:
            datascope.dbclose(db)

    # If we get here, the database passes our tests. Add it to the
    # station_patterns
    return True
def extract_all_events(ev, qml, db_path, output_dir):
    with ds.closing(ds.dbopen(db_path, 'r')) as db:
        with ds.freeing(db.process(['dbopen event', 'dbsort evid'])) as view:
            for row in view.iter_record():
                log.info('Processing event ' +
                         ' '.join([str(row.getv(x)[0]) for x in EVENT_FIELDS]))
                event_id = row.getv(EVENT_FIELDS[0])[0]
                event_xml(event_id=event_id, event=ev, quakeml=qml,
                          output_file=os.path.join(output_dir, str(event_id)))
def _get_stachan_cache(self):
    """
    private function to load data
    """
    records = 0

    self.logger.info("Stations(): update cache")

    for dbname in self.dbcentral.list():
        self.logger.debug('Station(): dbname: %s' % dbname)

        dates = {}

        query_start_time = time.time()

        try:
            self.logger.debug("Dbopen " + dbname)
            db = datascope.dbopen(dbname, 'r')
            table = 'wfdisc'
            field = 'time'
            self.logger.debug("Dblookup table=%s field=%s" % (table, field))
            dbwfdisc = db.lookup(table=table, field=field)
            self.logger.debug("Getting record count of " + table)
            records = dbwfdisc.query(datascope.dbRECORD_COUNT)
            self.mintime = dbwfdisc.ex_eval('min(time)')
            self.maxtime = dbwfdisc.ex_eval('max(endtime)')
        except Exception as e:
            self.logger.exception('Problem with wfdisc table. %s: %s'
                                  % (Exception, e))
            sys.exit(reactor.stop())

        elapsed_time = time.time() - query_start_time
        self.logger.debug(
            "Initial dbquery and wfdisc record count took %d seconds"
            % elapsed_time)

        if self.maxtime > stock.now() or self.maxtime > (stock.now() - 3600):
            self.maxtime = -1

        self.logger.debug("Starting wfdisc processing of %d records" % records)

        prog = ProgressLogger("Stations: processing wfdisc record ", records,
                              logger=self.logger)
        for j in range(records):
            prog.tick()
            dbwfdisc.record = j

            try:
                sta, chan, dbtime = dbwfdisc.getv('sta', 'chan', 'time')
                self.wfdisc_stachan[sta].add(chan)
                self.wfdates[stock.yearday(dbtime)] = 1
            except datascope.DatascopeException as e:
                self.logger.exception('(%s=>%s)' % (Exception, e))
def verify_table(db, tablename):
    '''
    Open a database (or database pointer) and verify a table.

    On multiple instances we perform the same process of verifying the
    presence of a table before we get to interact with it. This makes that
    process easy, since you can get to that point either from a database
    name or from a database pointer. The function will return the database
    pointer that you are responsible for cleaning later. The local view of
    the table will be freed.

    Remember to free the returned pointer later!!!
    '''
    import inspect

    import antelope.datascope as datascope
    from logging_helper import getLogger

    logging = getLogger(inspect.stack()[0][3])
    logging.debug('Verify table [%s]' % tablename)

    # Verify if we have a string or a pointer DB object
    if isinstance(db, datascope.Dbptr):
        dbview = db
    else:
        logging.debug('dbopen( %s )' % db)
        dbview = datascope.dbopen(db, "r+")

    # Verify if we have a table or if we should open it
    try:
        if dbview.query(datascope.dbTABLE_PRESENT):
            tableview = dbview
        else:
            raise
    except Exception:
        logging.debug('Lookup table: %s' % tablename)
        tableview = dbview.lookup(table=tablename)
        logging.debug('dbTABLE_PRESENT => %s'
                      % tableview.query(datascope.dbTABLE_PRESENT))

    # Check if we don't have anything to continue
    if not tableview.query(datascope.dbTABLE_PRESENT):
        logging.warning('Missing table [%s] in db view.' % tablename)
        return False

    if not tableview.record_count:
        logging.warning('EMPTY table %s' % tablename)

    # Return valid view if table is present
    return tableview
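# Hedged usage sketch for verify_table() above (hypothetical database path;
# assumes an Antelope environment). Per the docstring, the caller owns the
# returned pointer and must free it when done; this is an illustration, not
# part of the original code.
def example_verify_table_usage():
    view = verify_table('/opt/antelope/demo/db', 'origin')
    if view:
        print('origin table has %d records' % view.record_count)
        view.free()  # caller is responsible for cleanup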
def read_volcanoes():
    # THIS IS A PREVIOUS VERSION I WROTE FOR GETTING VOLCANOES FROM PLACESDB
    # THAT SEEMS TO WORK
    dbplacespath = 'places/volcanoes'
    dbhandle = datascope.dbopen(dbplacespath, 'r')
    dbptr = dbhandle.lookup(table='places')
    n = dbptr.nrecs()
    dictplaces = dict()
    for dbptr[3] in range(n):
        thisrecord = {
            'place': "%s" % dbptr.getv('place'),
            'lat': "%s" % dbptr.getv('lat'),
            'lon': "%s" % dbptr.getv('lon'),
        }
        dictplaces[dbptr[3]] = thisrecord
    dbhandle.free()
    dbhandle.close()
    return dictplaces
def read_volcanoes():
    dbplacespath = 'volcanoes'
    dbhandle = datascope.dbopen(dbplacespath, 'r')
    dbptr = dbhandle.lookup(table='places')
    n = dbptr.nrecs()
    dictplaces = dict()
    for dbptr[3] in range(n):
        thisrecord = {
            'place': "%s" % dbptr.getv('place'),
            'lat': "%s" % dbptr.getv('lat'),
            'lon': "%s" % dbptr.getv('lon'),
        }
        dictplaces[dbptr[3]] = thisrecord
    dbhandle.free()
    dbhandle.close()
    return dictplaces
def _get_main_list(self): self.logging.debug("_get_main_list()") # Default is with no snetsta steps = ["dbopen site", "dbsort sta"] fields = ["sta", "ondate", "offdate", "lat", "lon", "elev", "staname", "statype", "dnorth", "deast"] # Test if we have snetsta table with datascope.closing(datascope.dbopen(self.db, "r")) as db: dbtable = db.lookup(table="snetsta") if dbtable.query(datascope.dbTABLE_PRESENT): steps = ["dbopen site", "dbjoin -o snetsta", "dbsort sta"] fields = [ "snet", "sta", "ondate", "offdate", "lat", "lon", "elev", "staname", "statype", "dnorth", "deast", ] for v in extract_from_db(self.db, steps, fields, self.db_subset): sta = v["sta"] if "snet" in v: snet = v["snet"] else: snet = "-" self.logging.debug("_get_main_list(%s_%s)" % (snet, sta)) # Fix values of time and endtime v["time"] = parse_sta_date(v["ondate"], epoch=True) v["endtime"] = parse_sta_date(v["offdate"], epoch=True) # Readable times v["strtime"] = readable_time(v["time"], self.timeformat, self.timezone) v["strendtime"] = readable_time(v["endtime"], self.timeformat, self.timezone) # Need lat and lon with 2 decimals only v["latlat"] = v["lat"] v["lonlon"] = v["lon"] v["lat"] = round(v["lat"], 2) v["lon"] = round(v["lon"], 2) self._verify_cache(snet, sta, primary=True) self.cache[snet][sta] = v
def _init_db(self, db):
    """
    Initialize station list using a CSS3.0 database as input.
    """
    with closing(dbopen(db, 'r')) as db:
        tbl_site = db.schema_tables['site']
        # The following line will be taken out
        tbl_site = tbl_site.subset('lon >= -117.80 && lat >= 32.5 && '
                                   'lon <= -115.4456 && lat <= 34.5475')
        tbl_site = tbl_site.sort('sta', unique=True)
        for record in tbl_site.iter_record():
            sta, lat, lon, elev = record.getv('sta', 'lat', 'lon', 'elev')
            self.append(Station(sta, lat, lon, elev))
def _generate_tt_maps(db, write_binary=True):
    logger = logging.getLogger(sys.argv[0])
    logger.debug('Begin travel-time map generation.')
    with closing(dbopen(db, 'r')) as db:
        tbl_site = db.schema_tables['site']
        for record in tbl_site.iter_record():
            sta, lat, lon, elev = record.getv('sta', 'lat', 'lon', 'elev')
            logger.debug('Begin travel-time map generation for station %s'
                         % sta)
            _write_sources_file(0.10, lat, lon)
            os.system(tt_calculator)
            logger.debug('End travel-time map generation for station %s' % sta)
def _get_nulls(self): """ Private function to load values from dbs. Go through the tables on the database and return dictionary with NULL values for each field. """ # We will assume all databases have the same schema. # Get the first only. dbname = self.dbcentral.list()[0] try: db = datascope.dbopen(dbname, "r") except Exception as e: logger.exception("dbopen(%s)=>(%s)" % (dbname, e)) sys.exit(twisted.internet.reactor.stop()) self.logger.debug("Looking for tables: %s" % self.tables) # Loop over all tables for table in db.query(datascope.dbSCHEMA_TABLES): if len(self.tables) > 0 and table not in self.tables: continue self.logger.debug("Test table: [%s]" % table) db = db.lookup("", table, "", "dbNULL") # Test every field try: db.query(datascope.dbTABLE_FIELDS) except Exception: pass else: for field in db.query(datascope.dbTABLE_FIELDS): self.null_vals[field] = db.getv(field)[0] self.logger.debug( "table:[%s] field(%s):[%s]" % (table, field, self.null_vals[field]) ) try: db.close() except Exception: pass
def open_db_or_string(database, perm='r'):
    '''
    Check if a variable is a valid db or a string.

    Returns a pointer to an open db or throws an error.
    '''
    opened = False
    if isinstance(database, Dbptr):
        ptr = Dbptr(database)
    elif isinstance(database, str):
        ptr = dbopen(database, perm)
        opened = True
    else:
        raise TypeError("Input must be a Dbptr or string of a valid "
                        "database path")
    return ptr, opened
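# Hedged usage sketch for open_db_or_string() above: the returned `opened`
# flag records whether the function created the pointer, so the caller only
# closes pointers it owns. The database path is hypothetical; assumes
# `from antelope.datascope import Dbptr, dbopen` as the snippet implies.
def example_open_db_or_string_usage():
    ptr, opened = open_db_or_string('/opt/antelope/demo/db', perm='r')
    try:
        pass  # ... work with ptr here ...
    finally:
        if opened:
            ptr.close()  # only close a pointer this call opened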
def _open(database, perm='r', **kwargs):
    """
    Return a pointer to an open database from a string or Dbptr.

    Any keyword arguments not for dbopen are passed to dblookup.
    """
    if isinstance(database, Dbptr):
        db = Dbptr(database)
    elif isinstance(database, str):
        db = dbopen(database, perm=perm)
    else:
        raise TypeError("Input pointer or string of valid database")
    if kwargs:
        db = dblookup(db, **kwargs)
    return db
def _get_events(self): """ Read all orids/evids from the database and update local dict with the info. """ self.cache = [] # Test if we have event table with datascope.closing(datascope.dbopen(self.db, 'r')) as db: dbtable = db.lookup(table='event') if dbtable.query(datascope.dbTABLE_PRESENT): steps = ['dbopen event'] steps.extend(['dbjoin origin']) steps.extend(['dbsubset origin.orid != NULL']) steps.extend(['dbsubset origin.orid == prefor']) fields = ['evid'] else: steps = ['dbopen origin'] steps.extend(['dbsubset orid != NULL']) fields = [] fields.extend(['orid','time','lat','lon','depth','auth','nass', 'ndef','review']) for v in extract_from_db(self.db, steps, fields, self.db_subset): if not 'evid' in v: v['evid'] = v['orid'] self.logging.debug( "Events(): new event #%s" % v['evid'] ) v['allmags'] = [] v['magnitude'] = '-' v['maglddate'] = 0 v['srname'] = '-' v['grname'] = '-' v['time'] = parse_sta_time(v['time']) v['strtime'] = readable_time(v['time'], self.timeformat, self.timezone) try: v['srname'] = stock.srname(v['lat'],v['lon']) except Exception,e: warninig('Problems with srname for orid %s: %s' % (v['orid'], v['lat'],v['lon'],e) ) try: v['grname'] = stock.grname(v['lat'],v['lon']) except Exception,e: warninig('Problems with grname for orid %s: %s' % (v['orid'], v['lat'], v['lon'],e) )
def phases(self, min, max): """ Go through station channels to retrieve all arrival phases """ self.logger.debug("Events():phases(%s,%s) " % (min, max)) phases = defaultdict(lambda: defaultdict(dict)) assoc = False arrival = False dbname = self.dbcentral(min) self.logger.debug('Events():phases(%s,%s) db:(%s)' % (min, max, dbname)) if not dbname: return phases try: db = datascope.dbopen(dbname, 'r') db = db.lookup(table='arrival') db = db.join('assoc') nrecs = db.query(datascope.dbRECORD_COUNT) except: try: db = datascope.dbopen(dbname, 'r') db = db.lookup(table='arrival') nrecs = db.query(datascope.dbRECORD_COUNT) except Exception, e: self.logger.exception("Events: Exception %s on phases(): %s" % (e, phases)) return phases
def extract_event(db_path, ev_file):
    """
    :param db_path: database location
    :param ev_file: events file name, csv file
    """
    ev_file = ev_file if ev_file else DEFAULT_EVENT_FILE
    with ds.closing(ds.dbopen(db_path, 'r')) as db:
        with ds.freeing(db.process(['dbopen event', 'dbsort evid'])) as view:
            with open(ev_file, 'w') as csv_file:
                writer = csv.writer(csv_file, delimiter=',')
                csv_file.write(','.join(EVENT_FIELDS) + '\n')
                for row in view.iter_record():
                    writer.writerow([str(row.getv(x)[0])
                                     for x in EVENT_FIELDS])
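# Hedged usage sketch for extract_event() above (hypothetical database path
# and file name; assumes antelope.datascope imported as `ds` and the
# EVENT_FIELDS / DEFAULT_EVENT_FILE globals defined as in the snippet).
def example_extract_event_usage():
    # Passing a falsy ev_file falls back to DEFAULT_EVENT_FILE inside
    # extract_event(); here we name the output explicitly.
    extract_event('/opt/antelope/demo/db', 'demo_events.csv')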
def open_db_or_string(database, perm='r'):
    '''
    Check if a variable is a valid db or a string.

    Returns a pointer to an open db or throws an error.
    '''
    opened = False
    if isinstance(database, Dbptr):
        ptr = Dbptr(database)
    elif isinstance(database, str):
        ptr = dbopen(database, perm)
        opened = True
    else:
        raise TypeError(
            "Input must be a Dbptr or string of a valid database path")
    return ptr, opened
def read_volcanoes():
    # THIS IS A PREVIOUS VERSION I WROTE FOR GETTING VOLCANOES FROM PLACESDB
    # THAT SEEMS TO WORK
    dbplacespath = "places/volcanoes"
    dbhandle = datascope.dbopen(dbplacespath, "r")
    dbptr = dbhandle.lookup(table="places")
    n = dbptr.nrecs()
    dictplaces = dict()
    for dbptr[3] in range(n):
        thisrecord = {
            "place": "%s" % dbptr.getv("place"),
            "lat": "%s" % dbptr.getv("lat"),
            "lon": "%s" % dbptr.getv("lon"),
        }
        dictplaces[dbptr[3]] = thisrecord
    dbhandle.free()
    dbhandle.close()
    return dictplaces
def dbgetorigins(dbpath, subset_expr):
    # open the origin table, join to event table, subset for preferred origins
    db = datascope.dbopen(dbpath, "r")
    dborigin = db.lookup(table="origin")
    dborigin = dborigin.join("event")
    dbnetmag = db.lookup(table="netmag")
    dborigin = dborigin.subset("orid == prefor")

    # apply the optional subset expression if there is one, order by time,
    # and display the number of events
    if subset_expr:
        dborigin = dborigin.subset(subset_expr)
    dborigin = dborigin.sort("time")
    n = dborigin.nrecs()
    # print "- number of events = {}".format(n)

    # if the size of the arrays is already known, preallocation is much
    # faster than recreating each time with append
    dictorigin = dict()
    origin_id = np.empty(n)
    origin_ml = np.empty(n)
    origin_epoch = np.empty(n)

    # load origins from database and store them in a dictionary
    for dborigin[3] in range(n):
        (origin_id[dborigin[3]], origin_ml[dborigin[3]],
         origin_epoch[dborigin[3]]) = dborigin.getv("orid", "ml", "time")
        if origin_ml[dborigin[3]] < -1.0:
            db2 = dbnetmag.subset("orid == %d" % origin_id[dborigin[3]])
            maxmag = -1.0
            n_netmag = db2.nrecs()
            if n_netmag > 0:
                for db2[3] in range(n_netmag):
                    (magtype, magnitude) = db2.getv("magtype", "magnitude")
                    if magnitude > maxmag:
                        maxmag = magnitude
                origin_ml[dborigin[3]] = maxmag

    dictorigin["id"] = origin_id
    dictorigin["ml"] = origin_ml
    dictorigin["time"] = mpl.dates.epoch2num(origin_epoch)

    # close the database and free the memory. It seems that db.close and
    # db.free both close the database, and closing twice produces an error
    db.free()

    return dictorigin, n
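# Hedged usage sketch for dbgetorigins() above (hypothetical path and subset
# expression; assumes numpy as np, matplotlib as mpl, and antelope.datascope
# are imported as in the snippet). dictorigin holds parallel numpy arrays
# keyed by "id", "ml", and matplotlib date numbers under "time".
def example_dbgetorigins_usage():
    dictorigin, n = dbgetorigins("/opt/antelope/demo/db", "ml >= 2.0")
    if n > 0:
        print("%d origins, largest ml %.1f" % (n, dictorigin["ml"].max()))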
def _init_from_db(self, db, evid):
    """
    Initialize Event object using a CSS3.0 database as input.
    """
    if evid is None:
        raise Exception('No \'evid\' supplied. Could not '
                        'initialize Event object from CSS3.0 database.')
    with closing(dbopen(db, 'r')) as db:
        view = db.schema_tables['event']
        view = view.join('origin')
        view = view.subset('evid == %s' % evid)
        view = view.subset('orid == prefor')
        # If for some reason this subset is empty, just take the first
        # solution as preferred, e.g. the prefor field is uninitialized.
        if view.record_count == 0:
            view = db.schema_tables['origin']
            view = db.schema_tables['event']
            view = view.join('origin')
            view = view.subset('evid == %s' % evid)
        view = view.join('netmag', outer=True)
        view.record = 0
        evid, time, lat, lon, depth, mag, magtype = view.getv(
            'evid', 'time', 'lat', 'lon', 'depth', 'magnitude', 'magtype')
        self.evid = evid
        self.time = time
        self.lat = lat
        self.lon = lon
        self.depth = depth
        self.mag = mag
        self.magtype = magtype
        self.year = int(epoch2str(time, '%Y'))
        self.month = int(epoch2str(time, '%m'))
        self.day = int(epoch2str(time, '%d'))
        self.hour = int(epoch2str(time, '%H'))
        self.minute = int(epoch2str(time, '%M'))
        self.second = float(epoch2str(time, '%S.%s'))
        view = view.join('assoc')
        view = view.join('arrival')
        arrivals = [record.getv('sta', 'arrival.time', 'phase') + (None,)
                    for record in view.iter_record()]
        self.arrivals = [Phase(sta, time, phase, qual)
                         for sta, time, phase, qual in arrivals]
def _get_main_list(self): self.logging.debug( "_get_main_list()" ) # Default is with no snetsta steps = [ 'dbopen site', 'dbsort sta'] fields = ['sta','ondate','offdate','lat','lon','elev','staname','statype', 'dnorth','deast'] # Test if we have snetsta table with datascope.closing(datascope.dbopen(self.db, 'r')) as db: dbtable = db.lookup(table='snetsta') if dbtable.query(datascope.dbTABLE_PRESENT): steps = [ 'dbopen site', 'dbjoin -o snetsta', 'dbsort sta'] fields = ['snet','sta','ondate','offdate','lat','lon','elev','staname','statype', 'dnorth','deast'] for v in extract_from_db(self.db, steps, fields, self.db_subset): sta = v['sta'] if 'snet' in v: snet = v['snet'] else: snet = '-' self.logging.debug( "_get_main_list(%s_%s)" % (snet,sta) ) # Fix values of time and endtime v['time'] = parse_sta_date( v['ondate'],epoch=True ) v['endtime'] = parse_sta_date( v['offdate'],epoch=True ) # Readable times v['strtime'] = readable_time(v['time'], self.timeformat, self.timezone) v['strendtime'] = readable_time(v['endtime'], self.timeformat, self.timezone) # Need lat and lon with 2 decimals only v['latlat'] = v['lat'] v['lonlon'] = v['lon'] v['lat'] = round(v['lat'],2) v['lon'] = round(v['lon'],2) self._verify_cache(snet,sta,primary=True) self.cache[snet][sta] = v
def __init__(self, database=None, **kwargs):
    """
    Sets the pointer.

    :type database: antelope.datascope.Dbptr
    :param database: Open pointer to an Antelope database view or table
    """
    super(AttribDbptr, self).__init__()
    if isinstance(database, Dbptr):
        self.Ptr = Dbptr(database)
    elif isinstance(database, str):
        db = dbopen(database, 'r')
        self.Ptr = db
        self._opened = True
    else:
        raise TypeError("Input pointer or string of valid database")
    if kwargs:
        self.Ptr = dblookup(self.Ptr, **kwargs)
def __init__(self, database=None, dbpointer=None, table=None):
    '''
    Either a database descriptor or a database pointer must be provided.

    Note
    ----
    Checking for existence of tables is done when the view is constructed.
    The table property is effectively purely a label.
    '''
    self.logger = logging.getLogger(self.__class__.__name__)
    if (not isinstance(dbpointer, datascope.Dbptr)
            and isinstance(database, basestring)):
        dbpointer = datascope.dbopen(database)
    self.documents = {}
    self.db = dbpointer
    self.table = table
def _get_nulls(self): """ Private function to load values from dbs Go through the tables on the database and return dictionary with NULL values for each field. """ # We will assume all databases have the same schema. # Get the first only. dbname = self.dbcentral.list()[0] try: db = datascope.dbopen(dbname, "r") except Exception, e: logger.exception('dbopen(%s)=>(%s)' % (dbname, e)) sys.exit(twisted.internet.reactor.stop())
def _get_nulls(self): """ Private function to load values from dbs Go through the tables on the database and return dictionary with NULL values for each field. """ # We will assume all databases have the same schema. # Get the first only. dbname = self.dbcentral.list()[0] try: db = datascope.dbopen( dbname , "r" ) except Exception, e: logger.exception('dbopen(%s)=>(%s)' % (dbname,e)) sys.exit(twisted.internet.reactor.stop())
def _run_tests(params):
    import sys
    import os

    sys.path.append("%s/data/python" % os.environ["ANTELOPE"])
    from antelope.datascope import dbopen
    sys.path.remove("%s/data/python" % os.environ["ANTELOPE"])

    db = dbopen(params["dbin"], "r")
    vw_wfmeas = db.lookup(table="wfmeas")
    vw_wfmeas = vw_wfmeas.subset(
        "time == _%f_ && endtime == _%f_" % (params["tstart"], params["tend"])
    )
    vw_wfmeas = vw_wfmeas.sort("sta")
    vw_wfmeas = vw_wfmeas.group("sta")
    for i in range(vw_wfmeas.nrecs()):
        vw_wfmeas_sta = vw_wfmeas.list2subset(i)
        vw_wfmeas_sta = vw_wfmeas_sta.ungroup()
        vw_wfmeas_sta.record = 0
        sta = vw_wfmeas_sta.getv("sta")[0]
        vw_wfmeas_sta = vw_wfmeas_sta.sort("chan")
        vw_wfmeas_sta = vw_wfmeas_sta.group("chan")
        for j in range(vw_wfmeas_sta.nrecs()):
            vw_wfmeas_stachan = vw_wfmeas_sta.list2subset(j)
            vw_wfmeas_stachan = vw_wfmeas_stachan.ungroup()
            vw_wfmeas_stachan.record = 0
            chan = vw_wfmeas_stachan.getv("chan")[0]
            vw_wfmeas_stachan = vw_wfmeas_stachan.sort("meastype")
            for vw_wfmeas_stachan.record in range(vw_wfmeas_stachan.nrecs()):
                meastype, val1 = vw_wfmeas_stachan.getv("meastype", "val1")
                val1, val2 = None, None
                if not vw_wfmeas_stachan.getv("units1")[0] == "-":
                    val1 = vw_wfmeas_stachan.getv("val1")[0]
                if not vw_wfmeas_stachan.getv("units2")[0] == "-":
                    val2 = vw_wfmeas_stachan.getv("val2")[0]
                thresholds = _get_thresholds(
                    sta, meastype, params["thresholds"],
                    params["thresholds_per_sta"]
                )
                message = _check_thresholds(meastype, val1, val2, thresholds)
                if message:
                    params["qc_network_report"].add_issue(
                        _QC_issue({"sta": sta, "chan": chan,
                                   "message": message})
                    )
def namelist(self):
    """Get the list of resolved paths."""
    if not os.path.exists(self.path) and not os.path.isfile(self.path):
        antelog.die("%s %s *die*: Dbpath '%s' does not exist."
                    % (self.errstr, self.base, self.path))

    db = antdb.dbopen(self.path, 'r')
    db = db.lookup(table='clusters')

    # db = db.lookup(record='dbNULL')
    # try:
    #     null_time, null_endtime = db.getv('time', 'endtime')
    # except Exception as e:
    #     antelog.die("%s %s *die*: %s" % (self.errstr, self.base, e))
    # db = db.lookup(record='dbALL')

    try:
        db = db.subset("clustername =~ /%s/" % self.clustername)
    except Exception as e:
        antelog.die(e)
def verify_table(table=False, database=False, dbpointer=False):
    '''
    Open a database or database pointer and verify a table.

    On multiple objects (classes) we perform the same process of verifying
    the presence of a table before we get to interact with it. This makes
    that process easy, since you can get to that point either from a
    database name or from a database pointer. The function will return the
    database pointer that you are responsible for cleaning later. The local
    view of the table will be freed.
    '''
    logging = getLogger()

    # Get db ready
    if not database and not dbpointer:
        logging.warning(
            'export_events.verify_table: Need database or dbpointer')
        return False

    if not dbpointer:
        logging.debug('dbopen( %s )' % database)
        dbpointer = datascope.dbopen(database, "r")

    if table:
        # Test if we have some table first.
        logging.debug('db.lookup( %s )' % table)
        view = dbpointer.lookup(table=table)

        if not view.query(datascope.dbTABLE_PRESENT):
            logging.warning(
                'export_events.verify_table: Missing [%s] table in database'
                % table)
            return False
        else:
            logging.debug('db.query(dbTABLE_PRESENT ) => %s'
                          % view.query(datascope.dbTABLE_PRESENT))

    return dbpointer
def time(self, orid_time, window=5):
    """
    Look for an event id close to a value of epoch time, + or - the window
    time in seconds. If no window time is provided the default is 5 seconds.
    """
    results = {}

    #
    # If running in simple mode we don't have access to the tables we need
    #
    if self.config.simple:
        return results

    orid_time = _isNumber(orid_time)

    if not orid_time:
        self.logger.error("Not a valid number in function call: %s"
                          % orid_time)
        return

    start = float(orid_time) - float(window)
    end = float(orid_time) + float(window)

    dbname = self.dbcentral(orid_time)

    if not dbname:
        self.logger.error(
            "No match for orid_time in dbcentral object: (%s,%s)"
            % (orid_time, self.dbcentral(orid_time)))
        return

    try:
        db = datascope.dbopen(dbname, 'r')
        db = db.lookup(table='origin')
        db.query(datascope.dbTABLE_PRESENT)
    except Exception as e:
        self.logger.error(
            'Exception on Events() time(%s): Error on db pointer %s [%s]'
            % (orid_time, dbname, e))
        return
def time(self, orid_time, window=5):
    """
    Look for an event id close to a value of epoch time, + or - the window
    time in seconds. If no window time is provided the default is 5 seconds.
    """
    results = {}

    #
    # If running in simple mode we don't have access to the tables we need
    #
    if self.config.simple:
        return results

    orid_time = _isNumber(orid_time)

    if not orid_time:
        self.logger.error("Not a valid number in function call: %s"
                          % orid_time)
        return

    start = float(orid_time) - float(window)
    end = float(orid_time) + float(window)

    dbname = self.dbcentral(orid_time)

    if not dbname:
        self.logger.error(
            "No match for orid_time in dbcentral object: (%s,%s)"
            % (orid_time, self.dbcentral(orid_time)))
        return

    try:
        db = datascope.dbopen(dbname, 'r')
        db = db.lookup(table='origin')
        db.query(datascope.dbTABLE_PRESENT)
    except Exception as e:
        self.logger.error(
            'Exception on Events() time(%s): Error on db pointer %s [%s]'
            % (orid_time, dbname, e))
        return
def store(net, sta, ondate, lon, lat, elev):
    lddate = datetime.now()
    row = list(zip(
        fields,
        [sta, ondate.strftime("%Y%j"), lat, lon, elev / 1000.0,
         convtime(lddate)],
    ))
    db = dbopen(dbpath, "r+")
    with closing(db):
        snetsta_table = db.lookup(table="snetsta")
        snetsta_view = snetsta_table.subset("sta == '{}'".format(sta))
        log.debug("snetsta_view %s", snetsta_view)
        with freeing(snetsta_view):
            try:
                rowptr = next(snetsta_view.iter_record())
            except StopIteration:
                snetsta_table.addv(
                    *zip(snetsta_fields, [net, sta, sta, convtime(lddate)]))
                log.info("added snetsta record")
        site_table = db.lookup(table="site")
        site_view = site_table.subset("sta == '{}'".format(sta))
        log.debug("site_view %s", site_view)
        with freeing(site_view):
            try:
                rowptr = next(site_view.iter_record())
            except StopIteration:
                site_table.addv(*row)
                log.info("added record %s", row)
            else:
                log.debug("rowptr %s", rowptr)
                old_row = dict(zip(fields, rowptr.getv(*fields)))
                if float(convtime(lddate)) > float(old_row["lddate"]):
                    rowptr.putv(*row)
                    log.info("updated record %s %s", old_row, row)
                    return old_row
def verify_db(db): """Verify a Datascope database can be opened.""" logger.debug("Verify database: [%s]" % (db)) name = False if isinstance(db, str): with datascope.closing(datascope.dbopen(db, "r")) as pointer: if pointer.query(datascope.dbDATABASE_COUNT): logger.debug(pointer.query(datascope.dbDATABASE_NAME)) name = pointer.query(datascope.dbDATABASE_NAME) logger.info("%s => valid" % name) else: logger.warning("PROBLEMS OPENING DB: %s" % db) else: logger.error("Not a valid parameter for db: [%s]" % db) return name
def extract_from_db(db, steps, fields, subset=""): logging = getLogger() if subset: steps.extend(["dbsubset %s" % subset]) logging.debug("Extract from db: " + ", ".join(steps)) results = [] with datascope.closing(datascope.dbopen(db, "r")) as dbview: dbview = dbview.process(steps) logging.debug("Records in new view: %s" % dbview.record_count) if not dbview.record_count: logging.warning("No records after deployment-site join %s" % dbview.query(datascope.dbDATABASE_NAME)) return None for temp in dbview.iter_record(): results.append(dict(zip(fields, temp.getv(*fields)))) return results
def verify_db(db):
    logging = getLogger()

    logging.debug("Verify database: [%s]" % (db))

    name = False

    if isinstance(db, str):
        with datascope.closing(datascope.dbopen(db, "r")) as pointer:
            if pointer.query(datascope.dbDATABASE_COUNT):
                logging.debug(pointer.query(datascope.dbDATABASE_NAME))
                name = pointer.query(datascope.dbDATABASE_NAME)
                logging.info("%s => valid" % name)
            else:
                logging.warning("PROBLEMS OPENING DB: %s" % db)
    else:
        logging.error("Not a valid parameter for db: [%s]" % db)

    return name
def extract_from_db(db, steps, fields, subset=""): """Retrieve data from a datascope database.""" logger = getLogger() if subset: steps.extend(["dbsubset %s" % subset]) logger.debug("Extract from db: " + ", ".join(steps)) results = [] with datascope.closing(datascope.dbopen(db, "r")) as dbview: dbview = dbview.process(steps) logger.debug("Records in new view: %s" % dbview.record_count) if not dbview.record_count: logger.warning("No records after deployment-site join %s" % dbview.query(datascope.dbDATABASE_NAME)) return None for temp in dbview.iter_record(): results.append(dict(zip(fields, temp.getv(*fields)))) return results
import sys

import misc_tools
from mtools import *
from numpy import arange, asarray
from antelope.stock import pfread, pfin
from antelope.datascope import closing, dbopen

params = pfin('eqloc3d.pf')
loc_params = params['location_parameters']
# These should go in the parameter file.
# nr = int(loc_params['nr'])
# nlat = int(loc_params['nlat'])
# nlon = int(loc_params['nlon'])
# nx, ny, nz = nlon, nlat, nr
earth_rad = 6371

# Load events
print('Reading db')
with closing(dbopen('/Users/mcwhite/staging/dbs/anza_sub/anza')) as db:
    tbl_event = db.schema_tables['event']
    tbl_event = tbl_event.join('origin')
    tbl_event = tbl_event.subset('time >= _2013319 00:00:00_')
    tbl_event = tbl_event.separate('event')
    event_list = misc_tools.create_event_list(tbl_event, 'CSS3.0')
print('Done reading db')

for ev in event_list:
    origin = ev.preferred_origin
    if (origin.lon < -117 or origin.lon > -116
            or origin.lat < 33.0 or origin.lat > 34.0):
        continue
    misc_tools.locate_eq(origin)
sys.exit()
def _get_main_list(self):
    self.logger.debug("_get_main_list()")

    # Default is with no snetsta
    steps = ["dbopen site", "dbsort sta"]
    fields = [
        "sta",
        "ondate",
        "offdate",
        "lat",
        "lon",
        "elev",
        "staname",
        "statype",
        "dnorth",
        "deast",
    ]

    # Test if we have snetsta table
    with datascope.closing(datascope.dbopen(self.db, "r")) as db:
        dbtable = db.lookup(table="snetsta")
        if dbtable.query(datascope.dbTABLE_PRESENT):
            steps = ["dbopen site", "dbjoin -o snetsta", "dbsort sta"]
            fields = [
                "snet",
                "sta",
                "ondate",
                "offdate",
                "lat",
                "lon",
                "elev",
                "staname",
                "statype",
                "dnorth",
                "deast",
            ]

    for v in extract_from_db(self.db, steps, fields, self.db_subset):
        sta = v["sta"]
        if "snet" in v:
            snet = v["snet"]
        else:
            snet = "-"

        self.logger.debug("_get_main_list(%s_%s)" % (snet, sta))

        # Fix values of time and endtime
        v["time"] = parse_sta_date(v["ondate"], epoch=True)
        v["endtime"] = parse_sta_date(v["offdate"], epoch=True)

        # Readable times
        v["strtime"] = readable_time(v["time"], self.timeformat,
                                     self.timezone)
        v["strendtime"] = readable_time(
            v["endtime"], self.timeformat, self.timezone
        )

        # Need lat and lon with 2 decimals only
        v["latlat"] = v["lat"]
        v["lonlon"] = v["lon"]
        v["lat"] = round(v["lat"], 2)
        v["lon"] = round(v["lon"], 2)

        self._verify_cache(snet, sta, primary=True)

        self.cache[snet][sta] = v