def _get_chanperf(self):
    """Load recent channel-performance values into the cache.

    Reads rows newer than ``perf_days_back`` days from the ``chanperf``
    table (joined to ``snetsta`` to recover the snet code) and stores
    each value in ``self.cache[snet][sta]['chanperf'][chan]`` keyed by
    integer epoch time.
    """
    self.logger.debug("_get_chanperf()")

    # Midnight of today: yearday() truncates the epoch to the day boundary.
    today = stock.str2epoch(str(stock.yearday(stock.now())))
    lastmonth = today - (86400 * int(self.perf_days_back))

    fields = ["snet", "sta", "chan", "time", "perf"]
    steps = [
        "dbopen chanperf",
        "dbjoin -o snetsta",
        "dbsubset time >= %s" % lastmonth,
    ]
    if self.perf_subset:
        steps.append("dbsubset %s" % self.perf_subset)

    for v in extract_from_db(self.perf_db, steps, fields, self.db_subset):
        snet = v.pop("snet")
        sta = v.pop("sta")
        chan = v.pop("chan")

        self.logger.debug("_get_chanperf(%s_%s)" % (snet, sta))

        if self._verify_cache(snet, sta, "chanperf"):
            # Start a fresh per-channel dict when the channel entry is
            # missing or empty. (Replaces the previous bare-raise /
            # broad-except control-flow hack, which raised a RuntimeError
            # on purpose just to reach the handler.)
            if not self.cache[snet][sta]["chanperf"].get(chan):
                self.cache[snet][sta]["chanperf"][chan] = {}

            # Times are stored as integer epochs so they can be dict keys.
            v["time"] = int(v["time"])
            self.cache[snet][sta]["chanperf"][chan][v["time"]] = v["perf"]
def _get_chanperf(self):
    """Load recent channel-performance values into the cache.

    Reads rows newer than ``perf_days_back`` days from the ``chanperf``
    table (joined to ``snetsta`` for the snet code) and stores each
    value in ``self.cache[snet][sta]['chanperf'][chan]`` keyed by
    integer epoch time.
    """
    self.logging.debug("_get_chanperf()")

    # Midnight of today: yearday() truncates the epoch to the day boundary.
    today = stock.str2epoch(str(stock.yearday(stock.now())))
    lastmonth = today - (86400 * int(self.perf_days_back))

    fields = ["snet", "sta", "chan", "time", "perf"]
    steps = [
        "dbopen chanperf",
        "dbjoin -o snetsta",
        "dbsubset time >= %s" % lastmonth,
    ]
    if self.perf_subset:
        steps.append("dbsubset %s" % self.perf_subset)

    # Removed unused locals (month, week, fullname) that were never read.
    for v in extract_from_db(self.perf_db, steps, fields, self.db_subset):
        snet = v.pop("snet")
        sta = v.pop("sta")
        chan = v.pop("chan")

        self.logging.debug("_get_chanperf(%s_%s)" % (snet, sta))

        if self._verify_cache(snet, sta, "chanperf"):
            try:
                # Bare raise (RuntimeError) forces re-initialization of
                # an empty channel entry via the handler below.
                if len(self.cache[snet][sta]["chanperf"][chan]) < 1:
                    raise
            # Narrowed from a bare 'except:', which would also swallow
            # KeyboardInterrupt and SystemExit.
            except Exception:
                self.cache[snet][sta]["chanperf"][chan] = {}

            # Times are stored as integer epochs so they can be dict keys.
            v["time"] = int(v["time"])
            self.cache[snet][sta]["chanperf"][chan][v["time"]] = v["perf"]
def _get_chanperf(self):
    """Load recent channel-performance values into the cache.

    Reads rows newer than ``perf_days_back`` days from the ``chanperf``
    table (joined to ``snetsta`` for the snet code) and stores each
    value in ``self.cache[snet][sta]['chanperf'][chan]`` keyed by
    integer epoch time.
    """
    self.logging.debug('_get_chanperf()')

    # Midnight of today: yearday() truncates the epoch to the day boundary.
    today = stock.str2epoch(str(stock.yearday(stock.now())))
    lastmonth = today - (86400 * int(self.perf_days_back))

    fields = ['snet', 'sta', 'chan', 'time', 'perf']
    steps = [
        'dbopen chanperf',
        'dbjoin -o snetsta',
        'dbsubset time >= %s' % lastmonth,
    ]
    if self.perf_subset:
        steps.append('dbsubset %s' % self.perf_subset)

    # Removed unused locals (month, week, fullname) that were never read.
    for v in extract_from_db(self.perf_db, steps, fields, self.db_subset):
        snet = v.pop('snet')
        sta = v.pop('sta')
        chan = v.pop('chan')

        self.logging.debug('_get_chanperf(%s_%s)' % (snet, sta))

        if self._verify_cache(snet, sta, 'chanperf'):
            try:
                # Bare raise (RuntimeError) forces re-initialization of
                # an empty channel entry via the handler below.
                if len(self.cache[snet][sta]['chanperf'][chan]) < 1:
                    raise
            # Narrowed from a bare 'except:', which would also swallow
            # KeyboardInterrupt and SystemExit.
            except Exception:
                self.cache[snet][sta]['chanperf'][chan] = {}

            # Times are stored as integer epochs so they can be dict keys.
            v['time'] = int(v['time'])
            self.cache[snet][sta]['chanperf'][chan][v['time']] = v['perf']
def _get_stachan_cache(self):
    """Load wfdisc station/channel data from each dbcentral database.

    For every database returned by self.dbcentral.list(), scans the
    wfdisc table: records the channels seen per station in
    self.wfdisc_stachan, the year-days with data in self.wfdates, and
    the overall min/max data times in self.mintime / self.maxtime.
    Exits the reactor on unrecoverable table errors.
    """
    records = 0

    self.logger.info("Stations(): update cache")

    for dbname in self.dbcentral.list():
        self.logger.debug('Station(): dbname: %s' % dbname)

        dates = {}

        query_start_time = time.time()
        try:
            self.logger.debug("Dbopen " + dbname)
            db = datascope.dbopen(dbname, 'r')
            table = 'wfdisc'
            field = 'time'
            self.logger.debug("Dblookup table=%s field=%s" % (table, field))
            dbwfdisc = db.lookup(table=table, field=field)
            self.logger.debug("Getting record count of " + table)
            records = dbwfdisc.query(datascope.dbRECORD_COUNT)
            self.mintime = dbwfdisc.ex_eval('min(time)')
            self.maxtime = dbwfdisc.ex_eval('max(endtime)')
        # 'except X as e' replaces the Python-2-only 'except X, e' form;
        # it is valid on Python 2.6+ and required on Python 3.
        except Exception as e:
            self.logger.exception('Problem with wfdisc table. %s: %s' % (Exception, e))
            sys.exit(reactor.stop())

        elapsed_time = time.time() - query_start_time
        self.logger.debug(
            "Intial dbquery and wfdisc record count took %d seconds" % elapsed_time)

        # A max endtime in the future (or within the last hour) means the
        # database is still being written to; flag it with -1.
        if self.maxtime > stock.now() or self.maxtime > (stock.now() - 3600):
            self.maxtime = -1

        self.logger.debug("Starting wfdisc processing of %d records" % records)

        prog = ProgressLogger("Stations: processing wfdisc record ", records,
                              logger=self.logger)
        for j in range(records):
            prog.tick()
            dbwfdisc.record = j
            try:
                sta, chan, dbtime = dbwfdisc.getv('sta', 'chan', 'time')
                self.wfdisc_stachan[sta].add(chan)
                self.wfdates[stock.yearday(dbtime)] = 1
            except datascope.DatascopeException as e:
                self.logger.exception('(%s=>%s)' % (Exception, e))
def _get_stachan_cache(self):
    """Load wfdisc station/channel data from each dbcentral database.

    Scans the wfdisc table of every database in self.dbcentral.list(),
    filling self.wfdisc_stachan (channels per station), self.wfdates
    (year-days with data) and self.mintime / self.maxtime. Exits the
    reactor on unrecoverable table errors.
    """
    records = 0

    self.logger.info("Stations(): update cache")

    for dbname in self.dbcentral.list():
        self.logger.debug('Station(): dbname: %s' % dbname)

        dates = {}

        query_start_time = time.time()
        try:
            self.logger.debug("Dbopen " + dbname)
            db = datascope.dbopen(dbname, 'r')
            table = 'wfdisc'
            field = 'time'
            self.logger.debug("Dblookup table=%s field=%s" % (table, field))
            dbwfdisc = db.lookup(table=table, field=field)
            self.logger.debug("Getting record count of " + table)
            records = dbwfdisc.query(datascope.dbRECORD_COUNT)
            self.mintime = dbwfdisc.ex_eval('min(time)')
            self.maxtime = dbwfdisc.ex_eval('max(endtime)')
        # Modernized from the Python-2-only 'except Exception, e' form;
        # 'as e' works on Python 2.6+ and Python 3.
        except Exception as e:
            self.logger.exception('Problem with wfdisc table. %s: %s' % (Exception, e))
            sys.exit(reactor.stop())

        elapsed_time = time.time() - query_start_time
        self.logger.debug(
            "Intial dbquery and wfdisc record count took %d seconds" % elapsed_time)

        # Endtime in the future (or within the last hour) => still
        # recording; flag with -1.
        if self.maxtime > stock.now() or self.maxtime > (stock.now() - 3600):
            self.maxtime = -1

        self.logger.debug("Starting wfdisc processing of %d records" % records)

        prog = ProgressLogger("Stations: processing wfdisc record ", records,
                              logger=self.logger)
        for j in range(records):
            prog.tick()
            dbwfdisc.record = j
            try:
                sta, chan, dbtime = dbwfdisc.getv('sta', 'chan', 'time')
                self.wfdisc_stachan[sta].add(chan)
                self.wfdates[stock.yearday(dbtime)] = 1
            except datascope.DatascopeException as e:
                self.logger.exception('(%s=>%s)' % (Exception, e))
voltime = stock.str2epoch("%d/1/%d" % (vol_month,vol_year) ) if vol_month < 12: vol_month = vol_month + 1 else: vol_year = vol_year + 1 vol_month = 1 volendtime = stock.str2epoch("%d/1/%d" % (vol_endmonth,vol_endyear) ) - 1 dbname = stock.epoch2str(int(voltime), dbname_template) self._test_db(voltime,volendtime,dbname) elif volumes == 'day': start_day = int(stock.yearday(time)) end_day = int(stock.yearday(endtime)) vol_day = start_day while vol_day <= end_day: voltime = stock.epoch(vol_day) volendtime = voltime + 86399 # one second less than a full day dbname = stock.epoch2str(voltime, dbname_template) if self._test_db(voltime,volendtime,dbname): self.dbs[dbname] = {'times': [time,endtime]} vol_day = stock.yearday((stock.epoch(vol_day)+86400))
def _get_list(self):
    """Populate self.dbs from the dbcentral clusters table.

    If self.path has no clusters table, register it as a single
    "masquerade" database covering all time and return. Otherwise
    subset the clusters table on self.nickname and expand each cluster
    row into per-volume database paths (single / year / month / day),
    verifying each with self._test_db().

    Raises:
        DbcentralException: on any database access problem or when no
            cluster rows match the nickname.
        ValueError: if self.nickname is unset for a dbcentral database.
        UnknownVolumeTypeException: for an unrecognized volumes value.
    """
    try:
        db = datascope.dbopen(self.path, "r")
    except Exception as e:
        raise DbcentralException("Cannot open database %s (%s)" % (self.path, e))

    try:
        db = db.lookup("", "clusters", "", "")
    except datascope.DblookupFieldError:
        # No clusters table: treat the path as one plain database.
        self.type = "masquerade"
        self.nickname = None
        self.dbs[self.path] = {"times": [-10000000000.0, 10000000000.0]}
        self.logger.info("Not a dbcentral database. Set single database.")
        return
    else:
        self.type = "dbcentral"

    if self.nickname is None:
        raise ValueError("Need nickname for Dbcentral clustername regex.")

    try:
        db = db.lookup("", "clusters", "", "dbNULL")
        null_time, null_endtime = db.getv("time", "endtime")
    except Exception as e:
        raise DbcentralException(
            "Cannot look up null values in clusters table. (%s)" % e
        )

    expr = "clustername =='%s'" % self.nickname
    try:
        db = db.subset(expr)
    except Exception as e:
        raise DbcentralException("Cannot subset on clustername. %s" % e)

    try:
        db = db.sort("time")
        nclusters = db.record_count
    except Exception as e:
        raise DbcentralException("Cannot sort on 'time' . %s" % e)

    if nclusters < 1:
        raise DbcentralException('No matches for nickname "%s".' % self.nickname)

    self.logger.debug("Records=%s" % nclusters)

    for i in range(nclusters):
        self.logger.debug("db.record=%s" % i)
        db.record = i

        try:
            dbname_template = db.extfile()[-1]
        except Exception as e:
            raise DbcentralException("Cannot run db.extfile(). %s" % e)

        self.logger.debug("dbname_template=%s" % dbname_template)

        # Locals renamed from 'time'/'endtime' to 'cl_time'/'cl_endtime':
        # 'time' shadowed the stdlib 'time' module used elsewhere in
        # this file.
        try:
            volumes, net, cl_time, cl_endtime = db.getv(
                "volumes", "net", "time", "endtime"
            )
        except Exception as e:
            raise DbcentralException(
                "Problems with db.getv('volumes','net',"
                + "'time','endtime'). (%s)\n" % e
            )

        self.logger.debug("volumes=%s" % volumes)
        self.logger.debug("net=%s" % net)
        self.logger.debug("time=%s" % cl_time)
        self.logger.debug("endtime=%s" % cl_endtime)

        if cl_endtime == null_endtime:
            # This will be problematic with realtime systems
            cl_endtime = stock.now()

        self.logger.debug("endtime=%s" % cl_endtime)

        start_year = int(stock.epoch2str(cl_time, "%Y"))
        end_year = int(stock.epoch2str(cl_endtime, "%Y"))
        start_month = int(stock.epoch2str(cl_time, "%L"))
        end_month = int(stock.epoch2str(cl_endtime, "%L"))

        if volumes == "single":
            dbname = stock.epoch2str(cl_time, dbname_template)
            self._test_db(cl_time, cl_endtime, dbname)

        elif volumes == "year":
            for y in range(start_year, end_year + 1):
                voltime = stock.str2epoch("1/1/%s 00:00:00" % y)
                volendtime = stock.str2epoch("12/31/%s 23:59:59" % y)
                dbname = stock.epoch2str(voltime, dbname_template)
                self._test_db(voltime, volendtime, dbname)

        elif volumes == "month":
            vol_month = start_month
            vol_year = start_year
            vol_endmonth = end_month
            vol_endyear = end_year

            while vol_year < end_year or (
                vol_year == end_year and vol_month <= end_month
            ):
                voltime = stock.str2epoch("%d/1/%d" % (vol_month, vol_year))

                # Advance to the next calendar month.
                if vol_month < 12:
                    vol_month = vol_month + 1
                else:
                    vol_year = vol_year + 1
                    vol_month = 1

                # NOTE(review): volendtime is derived from the *overall*
                # end month/year, so every monthly volume gets the same
                # end time; a per-volume end (start of next month - 1)
                # may have been intended — confirm before changing.
                volendtime = (
                    stock.str2epoch("%d/1/%d" % (vol_endmonth, vol_endyear)) - 1
                )

                dbname = stock.epoch2str(int(voltime), dbname_template)
                self._test_db(voltime, volendtime, dbname)

        elif volumes == "day":
            start_day = int(stock.yearday(cl_time))
            end_day = int(stock.yearday(cl_endtime))
            vol_day = start_day
            while vol_day <= end_day:
                voltime = stock.epoch(vol_day)
                volendtime = voltime + 86399  # full day -1 sec
                dbname = stock.epoch2str(voltime, dbname_template)
                if self._test_db(voltime, volendtime, dbname):
                    self.dbs[dbname] = {"times": [cl_time, cl_endtime]}
                vol_day = stock.yearday((stock.epoch(vol_day) + 86400))

        else:
            raise UnknownVolumeTypeException(volumes)

    self.logger.debug("DBS=%s" % self.dbs.keys())
def _get_stachan_cache(self):
    """Load data into cache.

    For each database in self.dbcentral.list(): scans the wfdisc table
    to fill self.wfdisc_stachan, self.wfdates and self.mintime /
    self.maxtime, then scans the sitechan table to record on/off date
    ranges for every station-channel pair that appeared in the wfdisc.
    Exits the reactor on unrecoverable table errors.
    """
    records = 0

    self.logger.info("Stations(): update cache")

    for dbname in self.dbcentral.list():
        self.logger.debug("Station(): dbname: %s" % dbname)

        dates = {}

        query_start_time = time.time()
        try:
            self.logger.debug("Dbopen " + dbname)
            db = datascope.dbopen(dbname, "r")
            table = "wfdisc"
            field = "time"
            self.logger.debug("Dblookup table=%s field=%s" % (table, field))
            dbwfdisc = db.lookup(table=table, field=field)
            self.logger.debug("Getting record count of " + table)
            records = dbwfdisc.query(datascope.dbRECORD_COUNT)
            self.mintime = dbwfdisc.ex_eval("min(time)")
            self.maxtime = dbwfdisc.ex_eval("max(endtime)")
        except Exception as e:
            self.logger.exception(
                "Problem with wfdisc table. %s: %s" % (Exception, e)
            )
            sys.exit(reactor.stop())

        elapsed_time = time.time() - query_start_time
        self.logger.debug(
            "Intial dbquery and wfdisc record count took %d seconds" % elapsed_time
        )

        # A max endtime in the future (or within the last hour) means the
        # database is still being written to; flag it with -1.
        if self.maxtime > stock.now() or self.maxtime > (stock.now() - 3600):
            self.maxtime = -1

        self.logger.debug("Starting wfdisc processing of %d records" % records)

        prog = ProgressLogger(
            "Stations: processing wfdisc record ", records, logger=self.logger
        )
        for j in range(records):
            prog.tick()
            dbwfdisc.record = j
            try:
                sta, chan, dbtime = dbwfdisc.getv("sta", "chan", "time")
                self.wfdisc_stachan[sta].add(chan)
                self.wfdates[stock.yearday(dbtime)] = 1
            except datascope.DatascopeException as e:
                self.logger.exception("(%s=>%s)" % (Exception, e))

        prog.finish()

        self.logger.debug("Stations(): maxtime: %s" % self.maxtime)
        self.logger.debug("Stations(): mintime: %s" % self.mintime)
        self.logger.debug("Stations(): dates: %s" % dates.keys())

        try:
            dbsitechan = db.lookup(table="sitechan")
            ssc = dbsitechan.sort(["sta", "chan"])
            records = ssc.query(datascope.dbRECORD_COUNT)
        except Exception as e:
            self.logger.exception(
                "Stations(): Problems with sitechan table %s: %s" % (Exception, e)
            )
            sys.exit(reactor.stop())

        if not records:
            self.logger.critical("Stations(): No records after sitechan sort.")
            sys.exit(reactor.stop())

        prog = ProgressLogger(
            "Stations: processing stachan record ", records, logger=self.logger
        )
        for j in range(records):
            prog.tick()
            ssc.record = j
            try:
                sta, chan, ondate, offdate = ssc.getv(
                    "sta", "chan", "ondate", "offdate"
                )
            except Exception as e:
                self.logger.exception("Station(): (%s=>%s)" % (Exception, e))
                # BUGFIX: skip this record. Previously execution fell
                # through and used sta/chan/ondate/offdate, which are
                # undefined on the first record (NameError) and stale
                # from the prior record afterwards.
                continue

            ondate = stock.str2epoch(str(ondate))

            if chan in self.wfdisc_stachan[sta]:
                # offdate of -1 means the channel is still active.
                if offdate != -1:
                    offdate = stock.str2epoch(str(offdate))

                self.stachan_cache[sta][chan]["dates"].extend([[ondate, offdate]])
                self.logger.debug(
                    "Station(): %s.%s dates: %s"
                    % (sta, chan, self.stachan_cache[sta][chan]["dates"])
                )
            else:
                self.logger.debug(
                    "Station(): %s.%s was not in the wfdisc. Skipping" % (sta, chan)
                )

        # Best-effort cleanup of database handles.
        try:
            ssc.free()
            db.close()
        except Exception:
            pass

        prog.finish(level=logging.INFO)

    self.logger.info(
        "Stations(): Done updating cache (%s) sta-chan pairs."
        % len(self.stachan_cache)
    )
volendtime = stock.str2epoch( "%d/1/%d" % (temp_vol_endmonth, temp_vol_endyear)) - 1 dbname = stock.epoch2str(int(voltime), dbname_template) self._test_db(voltime, volendtime, dbname) if vol_month < 12: vol_month = vol_month + 1 else: vol_year = vol_year + 1 vol_month = 1 elif self.volumes == 'day': start_day = int(stock.yearday(time)) end_day = int(stock.yearday(endtime)) vol_day = start_day while vol_day <= end_day: voltime = stock.epoch(vol_day) volendtime = voltime + 86399 # one second less than a full day dbname = stock.epoch2str(voltime, dbname_template) self._test_db(voltime, volendtime, dbname) vol_day = stock.yearday((stock.epoch(vol_day) + 86400)) else: