def smap_load():
    p = get_parser()
    opts, args = p.parse_args()
    if len(args) < 1:
        p.error("conf file is a required argument")

    log.startLogging(sys.stdout)

    sections = map(util.norm_path, args[1:])
    inst = loader.load(args[0], sections=sections)

    for dpath, driver in inst.drivers.iteritems():
        if len(sections) > 1 and not dpath in sections:
            continue
        if not hasattr(driver, "load"):
            log.err('Error: driver does not have "load" method')
            sys.exit(1)
        if hasattr(driver, 'reset') and callable(driver.reset) and opts.reset:
            log.msg("Resetting driver")
            driver.reset()

    try:
        # find the date range for loading...
        st, et = None, None
        now = dtutil.now(tzstr=opts.timezone)
        if opts.start_time == "now_minus_1hour":
            st = now - datetime.timedelta(hours=1)
        else:
            st = dtutil.strptime_tz(opts.start_time, opts.timefmt, opts.timezone)
        if opts.end_time == "now":
            et = now
        else:
            et = dtutil.strptime_tz(opts.end_time, opts.timefmt, opts.timezone)
    except:
        # log parse failures instead of silently swallowing them
        log.err()

    dl = []
    for dpath, driver in inst.drivers.iteritems():
        if len(sections) > 1 and not dpath in sections:
            continue
        dl.append(defer.maybeDeferred(driver.load, st, et, cache=opts.cache))

    dl = defer.DeferredList(dl, consumeErrors=True)
    dl.addCallback(lambda x: inst._flush())
    dl.addCallbacks(lambda x: reactor.callFromThread(reactor.stop))
    reactor.run()

def parse_time(ts):
    for pat in TIMEZONE_PATTERNS:
        try:
            return dtutil.strptime_tz(ts, pat, tzstr='America/Los_Angeles')
        except ValueError:
            continue
    raise ValueError("Invalid time string: " + ts)

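# Hedged usage sketch -- TIMEZONE_PATTERNS is not shown in the source; it is
# assumed to be a module-level list of strptime format strings that parse_time
# tries in order, returning the first tz-aware parse (America/Los_Angeles).
TIMEZONE_PATTERNS = [
    "%m/%d/%Y %H:%M:%S",
    "%Y-%m-%d %H:%M:%S",
]
print parse_time("06/12/2013 08:00:00")
print parse_time("2013-06-12 08:00:00")
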
def _update(self, val):
    print "update using", val
    try:
        point, value, quality, time = val
        if quality != 'Good':
            log.msg("bad quality on point " + point + ": " + quality)
            return
        # parse the timestamp in the timezone of the server
        if self.use_opc_timestamps == 'true':
            ts = dtutil.strptime_tz(time, self.opc_timefmt, self.opc_timezone)
            ts = dtutil.dt2ts(ts)
        else:
            ts = dtutil.dt2ts(dtutil.now())
        path = self.make_path(point)
        series = self.get_timeseries(path)
        if series:
            if series['Properties']['ReadingType'] == 'double':
                series._add(ts, float(value))
            else:
                series._add(ts, int(value))
    except:
        log.err()

def test_fill_missing(self):
    now = dtutil.strptime_tz("1 1 2000 0", "%m %d %Y %H",
                             tzstr="America/Los_Angeles")
    now = dtutil.dt2ts(now)
    self.setUp(now)

    # check that we fill the end correctly
    op = grouping.GroupByDatetimeField(self.inputs, arithmetic.first,
                                       field='hour', width=1,
                                       skip_empty=False)
    now *= 1000
    rv = op(operators.DataChunk((now, now + ((self.hours + 5) * 3600 * 1000)),
                                True, True, [self.testdata]))
    self.assertEqual(len(rv[0]), self.hours + 5)
    self.assertEqual(np.sum(np.isnan(rv[0][-5:, 1])), 5)
    self.assertEqual(np.sum(np.isnan(rv[0][:-5, 1])), 0)

    # and the beginning
    op = grouping.GroupByDatetimeField(self.inputs, arithmetic.first,
                                       field='hour', width=1,
                                       skip_empty=False)
    rv = op(operators.DataChunk((now - (5 * 3600 * 1000),
                                 now + (self.hours * 3600 * 1000)),
                                True, True, [self.testdata]))
    self.assertEqual(len(rv[0]), self.hours + 5)
    self.assertEqual(np.sum(np.isnan(rv[0][:5, 1])), 5)
    self.assertEqual(np.sum(np.isnan(rv[0][5:, 1])), 0)

def read(self):
    object_ = {}
    print 'read running'
    try:
        # get the text from the url
        wa = urllib2.urlopen(
            'http://transmission.bpa.gov/business/operations/wind/baltwg.txt')
        data = [line for line in wa.readlines()[7:] if len(line.split()) > 3]
        # parse the most recent data
        rawTime = " ".join(data[-1].split()[:2])
        currentTime = int(dtutil.dt2ts(
            dtutil.strptime_tz(rawTime, "%m/%d/%Y %H:%M", 'US/Pacific')))
        object_["Wind"] = data[-1].split()[3]
        object_["Hydro"] = data[-1].split()[4]
        object_["Thermal"] = data[-1].split()[5]
        object_["Load"] = data[-1].split()[2]
    except Exception as e:
        logging.exception(type(e))
        print e
    else:
        if currentTime != self.previousTime:
            self.w.add(currentTime, int(object_["Wind"]))
            self.h.add(currentTime, int(object_["Hydro"]))
            self.t.add(currentTime, int(object_["Thermal"]))
            self.l.add(currentTime, int(object_["Load"]))
            self.previousTime = currentTime
        wa.close()

def test_inclusive(self):
    now = dtutil.strptime_tz("1 1 2000 0", "%m %d %Y %H",
                             tzstr="America/Los_Angeles")
    now = dtutil.dt2ts(now)
    self.setUp(now)

    # startshape = self.testdata.shape
    # startdata = np.copy(self.testdata)
    # op = grouping.GroupByDatetimeField(self.inputs, oputils.NullOperator, field='day')
    # rv = op([self.testdata[:30, :]])
    # self.assertEquals(rv[0].shape, (24, 2))
    # # check for mutations
    # self.assertEquals(self.testdata.shape, startshape)
    # self.assertEquals(np.sum(startdata - self.testdata), 0)

    op2 = grouping.GroupByDatetimeField(self.inputs, oputils.NullOperator,
                                        field='day', inclusive=(True, True),
                                        snap_times=False)
    rv = op2([self.testdata[0:30, :]])
    self.assertEquals(rv[0].shape, (25, 2))
    self.assertEquals(rv[0][0, 0], self.testdata[0, 0])
    self.assertEquals(rv[0][24, 0], self.testdata[24, 0])

def _update(self):
    vals = self.opc.read(group="smap-points-group")
    for point, value, quality, time in vals:
        # parse the timestamp in the timezone of the server
        ts = dtutil.strptime_tz(time, self.opc_timefmt, self.opc_timezone)
        ts = dtutil.dt2ts(ts)
        self._add(self.make_path(point), ts, value)

def test_offset(self):
    now = dtutil.strptime_tz("1 1 2000 0", "%m %d %Y %H",
                             tzstr="America/Los_Angeles")
    now = dtutil.dt2ts(now)
    self.setUp(now)

    op = grouping.GroupByDatetimeField(self.inputs, oputils.NullOperator,
                                       field='day')
    for i in xrange(0, 24):
        rv = op([self.testdata[i:25 + i, :]])
        self.assertEquals(rv[0].shape, (24 - i, 2))
        op.reset()

def test_snap_times(self):
    now = dtutil.strptime_tz("1 1 2000 0", "%m %d %Y %H",
                             tzstr="America/Los_Angeles")
    now = dtutil.dt2ts(now)
    self.setUp(now)

    op = grouping.GroupByDatetimeField(self.inputs, arithmetic.first,
                                       field='day', snap_times=True)
    rv = op([self.testdata[10:30]])
    self.assertEquals(rv[0][0, 0], self.testdata[0, 0])

def test_slide(self):
    now = dtutil.strptime_tz("1 1 2000 0", "%m %d %Y %H",
                             tzstr="America/Los_Angeles")
    now = dtutil.dt2ts(now)
    self.setUp(now)

    op = grouping.GroupByDatetimeField(self.inputs, arithmetic.first,
                                       field='hour', width=4, slide=2)
    rv = op([self.testdata])
    self.assertEquals(np.sum(rv[0][:, 0] - self.testdata[:-2:2, 0]), 0)
    self.assertEquals(np.sum(rv[0][:, 1] - self.testdata[:-2:2, 1]), 0)

def setUp(self):
    now = dtutil.strptime_tz("1 1 2000 0", "%m %d %Y %H",
                             tzstr="America/Los_Angeles")
    now = dtutil.dt2ts(now)

    self.testdata = np.ones((self.hours, 2))
    for i in xrange(0, self.hours):
        self.testdata[i, :] = i
    self.testdata[:, 0] *= 3600
    self.testdata[:, 0] += now

    self.ma = grouping.MaskedDTList(self.testdata[:, 0],
                                    dtutil.gettz("America/Los_Angeles"))
    self.width = datetime.timedelta(days=1)

def _update(self):
    vals = self.opc.read(group="smap-points-group")
    for point, value, quality, time in vals:
        # parse the timestamp in the timezone of the server
        if time is not None:
            ts = dtutil.strptime_tz(time, self.opc_timefmt, self.opc_timezone)
            ts = dtutil.dt2ts(ts)
        else:
            ts = dtutil.now(self.opc_timezone)
            ts = dtutil.dt2ts(ts)
        if self.get_timeseries(self.make_path(point)) and value is not None:
            if isinstance(value, bool):
                value = int(value)
            self._add(self.make_path(point), ts, float(value))

def process(self, doc):
    doc = bs(doc)
    now = doc.livedata.gatewaytime
    now = dtutil.strptime_tz("%s %s %s %s %s %s" % (now.month.contents[0],
                                                    now.day.contents[0],
                                                    now.year.contents[0],
                                                    now.hour.contents[0],
                                                    now.minute.contents[0],
                                                    now.maxsecond.contents[0]),
                             "%m %d %y %H %M %S", tzstr=self.timezone)
    now = dtutil.dt2ts(now)
    self.add('/voltage', now,
             int(doc.livedata.voltage.total.voltagenow.contents[0]))
    self.add('/real_power', now,
             int(doc.livedata.power.total.powernow.contents[0]))
    self.add('/apparent_power', now,
             int(doc.livedata.power.total.kva.contents[0]))

def test_flush(self):
    now = dtutil.strptime_tz("1 1 2000 0", "%m %d %Y %H",
                             tzstr="America/Los_Angeles")
    now = dtutil.dt2ts(now)
    self.setUp(now)

    op = grouping.GroupByDatetimeField(self.inputs, arithmetic.first,
                                       field='hour', width=1)
    rv = op(operators.DataChunk((now * 1000,
                                 now * 1000 + (self.hours * 3600 * 1000)),
                                True, True, [self.testdata]))
    # if we don't properly flush the last hour, we should only get hours - 1 results
    self.assertEquals((rv[0][-1, 0] - (now * 1000)) / (3600 * 1000),
                      self.hours - 1)

def OnMultipleItemsChanged(self, sender, args):
    for v in args.ArgsArray:
        driver = OPC_DRIVERS[v.State]
        if v.Exception:
            continue
        print v.ItemDescriptor.ItemId,
        print v.Vtq.Timestamp, v.Vtq.Value
        ts = dtutil.strptime_tz(str(v.Vtq.Timestamp), "%m/%d/%y %H:%M:%S",
                                tzstr=driver.opc_timezone)
        path = driver.make_path(v.ItemDescriptor.ItemId)
        try:
            driver._add(path, int(dtutil.dt2ts(ts)), v.Vtq.Value)
        except Exception, e:
            log.err("Error adding data: " + str(e))

def test_oneatatime(self):
    now = dtutil.strptime_tz("1 1 2000 0", "%m %d %Y %H",
                             tzstr="America/Los_Angeles")
    now = dtutil.dt2ts(now)
    self.setUp(now)

    op = grouping.GroupByDatetimeField(self.inputs, oputils.NullOperator,
                                       field='day')
    for i in xrange(0, 24):
        rv = op([self.testdata[i:i + 1, :]])
        self.assertEquals(rv[0].shape, operators.null.shape)

    rv = op([self.testdata[24:25, :]])
    self.assertEquals(rv[0].shape, (24, 2))
    # make sure we snapped
    self.assertEquals(np.sum(rv[0][:, 0] - self.testdata[0, 0]), 0)
    # and got back the right data
    self.assertEquals(np.sum(rv[0][:, 1] - self.testdata[:24, 1]), 0)

def test_increment(self):
    now = dtutil.strptime_tz("1 1 2000 0", "%m %d %Y %H",
                             tzstr="America/Los_Angeles")
    now = dtutil.dt2ts(now)
    self.setUp(now)

    for incr in [2, 4, 6, 8, 12, 24]:
        op = grouping.GroupByDatetimeField(self.inputs, arithmetic.first,
                                           field='hour', width=incr)
        rv = op([self.testdata[:25, :]])
        # check the shape
        self.assertEquals(len(rv[0]), 24 / incr)
        for i in xrange(0, 24 / incr):
            # the timestamps
            self.assertEquals(rv[0][i, 0], self.testdata[i * incr, 0])
            # and the values
            self.assertEquals(rv[0][i, 1], i * incr)
        del op

def process(self):
    readcnt = 0
    data = []
    if self.reader == None:
        return data
    try:
        for r in self.reader:
            ts = dtutil.strptime_tz(r[0], TIMEFMT, tzstr='UTC')
            if ts > self.push_hist:
                self.push_hist = ts
            ts = dtutil.dt2ts(ts)
            data.append((ts, zip(self.field_map, r)))
            readcnt += 1
            if readcnt > 100:
                return data
    except Exception, e:
        self.fp.close()
        self.reader = None
        raise e

def process(self, body):
    reader = csv.reader(StringIO.StringIO(body), dialect='excel-tab')
    header = reader.next()
    if len(header) == 0:
        print "Warning: no data from", self.url
        raise core.SmapException("no data!")

    try:
        self.field_map, self.map = make_field_idxs(self.meter_type, header,
                                                   location=self.location)
    except:
        traceback.print_exc()

    if not self.added:
        self.added = True
        for channel in self.map['sensors'] + self.map['meters']:
            try:
                self.add_timeseries('/%s/%s' % channel[2:4], channel[4],
                                    data_type='double')
                self.set_metadata('/%s/%s' % channel[2:4], {
                    'Extra/ChannelName': re.sub('\(.*\)', '', channel[0]).strip(),
                })
            except:
                traceback.print_exc()

    # add all the values
    for r in reader:
        ts = dtutil.strptime_tz(r[0], TIMEFMT, tzstr='UTC')
        if ts > self.push_hist:
            self.push_hist = ts
        ts = dtutil.dt2ts(ts)
        for descr, val in zip(self.field_map, r):
            if descr == None:
                continue
            try:
                self._add('/' + '/'.join(descr), ts, float(val))
            except ValueError:
                pass

def get_data(self, uuid, start, end, limit=-1):
    startTime = dtutil.dt2ts(dtutil.strptime_tz(start, "%m/%d/%Y %H:%M:%S %p"))
    endTime = dtutil.dt2ts(dtutil.strptime_tz(end, "%m/%d/%Y %H:%M:%S %p"))
    return self.c.data_uuid(uuid, startTime, endTime, True, limit)

def get_readings(self, market, start_date, stop_date):
    readings = {'total_price': [], 'loss': [],
                'energy': [], 'congestion': []}
    print "get_readings", market
    if market == 'DAM':
        q = 'PRC_LMP'
        m = 'DAM'
    elif market == 'HASP':
        q = 'PRC_HASP_LMP'
        m = 'HASP'
    elif market == 'RTM':
        q = 'PRC_INTVL_LMP'
        m = 'RTM'
    else:
        raise Exception("Invalid market: " + market)

    url = 'http://oasis.caiso.com/mrtu-oasis/SingleZip?'
    url += 'queryname=' + q
    url += '&startdate=' + dtutil.strftime_tz(start_date, '%Y%m%d', 'US/Pacific')
    url += '&enddate=' + dtutil.strftime_tz(stop_date, '%Y%m%d', 'US/Pacific')
    url += '&market_run_id=' + m
    url += '&node=' + self.location

    logging.info("Get url %s" % url)
    h = None
    for d in [5, 20, 60]:
        try:
            h = urllib2.urlopen(url, timeout=50)
            break
        except urllib2.URLError:
            logging.warn("urlopen failed.")
        time.sleep(d)
    if h == None:
        raise Exception("Failed to open url: %s" % url)

    z = zipfile.ZipFile(StringIO.StringIO(h.read()))
    xml = z.read(z.namelist()[0])
    b = BeautifulSoup.BeautifulSoup(xml)

    sec_per_int = int(b.find('m:sec_per_interval').contents[0])
    rows = b.findAll('m:report_data')
    for d in rows:
        res = d.find('m:resource_name').contents[0]
        item = d.find('m:data_item').contents[0]
        day = d.find('m:opr_date').contents[0]
        inter = int(d.find('m:interval_num').contents[0])
        val = float(d.find('m:value').contents[0])

        secs = (inter - 1) * sec_per_int
        dt = dtutil.strptime_tz(day, '%Y-%m-%d', 'US/Pacific') + \
            datetime.timedelta(seconds=secs)
        timestamp = dtutil.dt2ts(dt)

        key = None
        if item == 'LMP_PRC':
            key = 'total_price'
        elif item == 'LMP_LOSS_PRC':
            key = 'loss'
        elif item == 'LMP_ENE_PRC':
            key = 'energy'
        elif item == 'LMP_CONG_PRC':
            key = 'congestion'
        else:
            continue
        readings[key].append((timestamp, val))

    num_readings = len(readings[readings.keys()[0]])
    for k in readings.keys():
        if len(readings[k]) != num_readings:
            raise Exception('Missing readings')
        readings[k] = sorted(readings[k], key=lambda (t, v): t)
    return readings

def data_acquisition():
    ###########################################################################
    day0 = '6-1-2014'
    day1 = '9-9-2015'
    timestep = 15  # timestep in minutes
    ###########################################################################

    # make a client
    client = SmapClient("http://www.openbms.org/backend")

    # start and end values are Unix timestamps
    start = dtutil.dt2ts(dtutil.strptime_tz(day0, "%m-%d-%Y"))
    end = dtutil.dt2ts(dtutil.strptime_tz(day1, "%m-%d-%Y"))

    print 'Download start..'

    # perform temperature data download
    T_tags1 = client.tags("uuid = '" + T_uuid1 + "'")[0]
    T_data1 = client.data_uuid([T_uuid1], start, end, cache=True)[0]
    T_tags2 = client.tags("uuid = '" + T_uuid2 + "'")[0]
    T_data2 = client.data_uuid([T_uuid2], start, end, cache=True)[0]

    # perform humidity data download
    Hum_tags3 = client.tags("uuid = '" + Hum_uuid3 + "'")[0]
    Hum_data3 = client.data_uuid([Hum_uuid3], start, end, cache=True)[0]
    Hum_tags4 = client.tags("uuid = '" + Hum_uuid4 + "'")[0]
    Hum_data4 = client.data_uuid([Hum_uuid4], start, end, cache=True)[0]

    # perform co2 data download
    co2_tags5 = client.tags("uuid = '" + co2_uuid5 + "'")[0]
    co2_data5 = client.data_uuid([co2_uuid5], start, end, cache=True)[0]
    co2_tags6 = client.tags("uuid = '" + co2_uuid6 + "'")[0]
    co2_data6 = client.data_uuid([co2_uuid6], start, end, cache=True)[0]

    # perform outdoor temperature download and correction
    T_outdoor_tags7 = client.tags("uuid = '" + T_outdoor_uuid7 + "'")[0]
    T_outdoor_data7 = client.data_uuid([T_outdoor_uuid7], start, end, cache=True)[0]
    for i in range(len(T_outdoor_data7)):
        if T_outdoor_data7[i][1] > 0:
            T_outdoor_data7[i][1] = (T_outdoor_data7[i][1] - 32) * 5 / 9
        else:
            T_outdoor_data7[i][1] = T_outdoor_data7[i - 1][1]
        if T_outdoor_data7[i][0] < 10000:
            T_outdoor_data7[i][0] = T_outdoor_data7[i - 1][0] + 32000
    T_outdoor_tags7 = client.tags("uuid = '" + T_outdoor_uuid7 + "'")[0]

    # perform power data download
    light_data = client.data_uuid([light_uuid1], start, end, cache=True)[0]
    recep_data = client.data_uuid([recep_uuid1], start, end, cache=True)[0]
    fan_power_total_data = client.data_uuid([fan_power_total_uuid1], start, end, cache=True)[0]
    air_a_sat_data = client.data_uuid([air_a_sat_uuid1], start, end, cache=True)[0]
    air_b_sat_data = client.data_uuid([air_b_sat_uuid1], start, end, cache=True)[0]
    air_a_mat_data = client.data_uuid([air_a_mat_uuid1], start, end, cache=True)[0]
    air_b_mat_data = client.data_uuid([air_b_mat_uuid1], start, end, cache=True)[0]
    for data in [air_a_sat_data, air_a_mat_data, air_b_sat_data, air_b_mat_data]:
        for i in range(len(data)):
            data[i][1] = (data[i][1] - 32) * 5 / 9
    air_a_flow_data = client.data_uuid([air_a_flow_uuid1], start, end, cache=True)[0]
    air_b_flow_data = client.data_uuid([air_b_flow_uuid1], start, end, cache=True)[0]

    vav_data = []
    for i in VAV_flow_uuid:
        download = client.data_uuid([i], start, end, cache=True)[0]
        vav_data.append(download)
    flow_floor_4 = []
    for j in range(min([len(x) for x in vav_data])):
        somme = 0
        for i in vav_data:
            somme = somme + i[j][1]
        flow_floor_4.append([vav_data[0][j][0], somme])

    # perform temperature setpoints download
    T_setpt_data = []
    setpt_download = []
    for i in VAV_setpt_uuid:
        download = client.data_uuid([i], start, end, cache=True)[0]
        setpt_download.append(download)
    for j in range(min([len(x) for x in setpt_download])):
        value = sum([setpt_download[i][j][1]
                     for i in range(len(setpt_download))]) / len(setpt_download)
        T_setpt_data.append([setpt_download[1][j][0], value])

    print 'Download done'
    print
    ###########################################################################
    print 'Calculation start..'

    # convert temperature setpoint unit
    for i in range(len(T_setpt_data)):
        T_setpt_data[i][1] = (T_setpt_data[i][1] - 32) * 5 / 9

    # calculate ventilation power
    vent_power_data = []
    for i in range(min(len(fan_power_total_data), len(air_a_flow_data),
                       len(air_b_flow_data), len(flow_floor_4))):
        power = fan_power_total_data[i][1] * flow_floor_4[i][1] / \
            (air_a_flow_data[i][1] + air_b_flow_data[i][1])
        vent_power_data.append([flow_floor_4[i][0], power])

    # calculate H/C power
    capacity = 0.00056937  # kW/C.cfm
    ratio = []
    for i in range(min(len(air_a_flow_data), len(air_b_flow_data),
                       len(flow_floor_4))):
        division = flow_floor_4[i][1] / (air_a_flow_data[i][1] + air_b_flow_data[i][1])
        ratio.append([air_a_flow_data[i][0], division])
    cool_power_data = []
    for i in range(min(len(air_a_sat_data), len(air_b_sat_data),
                       len(air_a_mat_data), len(air_b_mat_data), len(ratio))):
        result = ((capacity * (air_a_sat_data[i][1] - air_a_mat_data[i][1]) * air_a_flow_data[i][1]) +
                  (capacity * (air_b_sat_data[i][1] - air_b_mat_data[i][1]) * air_b_flow_data[i][1])) * ratio[i][1]
        cool_power_data.append([ratio[i][0], result])
    H_C_power_data = []
    for i in range(min(len(light_data), len(recep_data), len(cool_power_data))):
        H_C_power_data.append([cool_power_data[i][0],
                               cool_power_data[i][1] + light_data[i][1] + recep_data[i][1]])

    ###########################################################################
    # interpolate the data over a fixed time step
    imposed_time = []
    x = max(T_data1[0][0], T_data2[0][0], Hum_data3[0][0], Hum_data4[0][0],
            co2_data5[0][0], co2_data6[0][0], T_outdoor_data7[0][0])
    limit = min(T_data1[-1][0], T_data2[-1][0], Hum_data3[-1][0], Hum_data4[-1][0],
                co2_data5[-1][0], co2_data6[-1][0], T_outdoor_data7[-1][0])
    while x <= limit:
        imposed_time.append(x)
        x += timestep * 60 * 1000

    def interpole(data, time):
        time1 = [item[0] for item in data]
        value1 = [item[1] for item in data]
        data_synchro = interp1d(time1, value1)(time)
        data_synchro = [i for i in data_synchro]
        return data_synchro

    # interpolate temperature setpoint data
    T_setpt_data_synchro = interpole(T_setpt_data, imposed_time)
    # interpolate temperature data
    T_data1_synchro = interpole(T_data1, imposed_time)
    T_data2_synchro = interpole(T_data2, imposed_time)
    # interpolate humidity data
    Hum_data3_synchro = interpole(Hum_data3, imposed_time)
    Hum_data4_synchro = interpole(Hum_data4, imposed_time)
    # interpolate co2 data
    co2_data5_synchro = interpole(co2_data5, imposed_time)
    co2_data6_synchro = interpole(co2_data6, imposed_time)
    # interpolate outdoor temperature data
    T_outdoor_data7_synchro = interpole(T_outdoor_data7, imposed_time)
    # interpolate ventilation data
    vent_power_data_synchro = interpole(vent_power_data, imposed_time)
    # interpolate cooling power data
    H_C_power_data_synchro = interpole(H_C_power_data, imposed_time)

    # average each type of data
    T_data_sychro_average = [(a + b) / 2 for a, b in zip(T_data1_synchro, T_data2_synchro)]
    for i in range(len(T_data_sychro_average)):
        T_data_sychro_average[i] = (T_data_sychro_average[i] - 32) * 5 / 9
    Hum_data_sychro_average = [(a + b) / 2 for a, b in zip(Hum_data3_synchro, Hum_data4_synchro)]
    co2_data_sychro_average = [(a + b) / 2 for a, b in zip(co2_data5_synchro, co2_data6_synchro)]
    for i in range(len(co2_data_sychro_average)):
        if co2_data_sychro_average[i] > 1500:
            co2_data_sychro_average[i] = co2_data_sychro_average[i - 1]

    # calculate the calendar data
    Calendar_data = []
    Season = []
    Human_date = []
    Human_power = []
    # Calendar data = 0 --> weekend
    # Calendar data = 1 --> work night
    # Calendar data = 2 --> work day
    # Season = 1 --> winter
    # Season = 0 --> mid season
    # Season = -1 --> summer
    for i in imposed_time:
        date = datetime.datetime.fromtimestamp(i / 1000).strftime('%Y-%m-%d %H:%M:%S')
        Human_date.append(date)
        year = int(date[0:4])
        month = int(date[5:7])
        day = int(date[8:10])
        hour = int(date[11:13])
        day_number = datetime.date(year, month, day).weekday()

        if month == 1 or month == 2 or month == 12:
            Season.append(1)
        if month == 6 or month == 7 or month == 8 or month == 9:
            Season.append(-1)
        if month == 3 or month == 4 or month == 5 or month == 10 or month == 11:
            Season.append(0)

        if day_number != 5 and day_number != 6:
            if hour >= 7 and hour < 19:
                Cal_day = 2
            else:
                Cal_day = 1
        else:
            Cal_day = 0
        Calendar_data.append(Cal_day)

        if Cal_day == 2 and (hour < 16 and hour >= 10):
            number = 30
        elif Cal_day == 2 and ((hour < 10 and hour >= 7) or (hour < 19 and hour >= 16)):
            number = 15
        else:
            number = 0
        Human_power.append(number * 0.1)

    ###########################################################################
    # output of the data acquisition
    DATA_LIST = {'Timestamp': imposed_time,
                 'Temperature': T_data_sychro_average,
                 'Humidity': Hum_data_sychro_average,
                 'Calendar data': Calendar_data,
                 'Human date': Human_date,
                 'Season': Season,
                 'CO2': co2_data_sychro_average,
                 'Outdoor Temperature': T_outdoor_data7_synchro,
                 'Ventilation': vent_power_data_synchro,
                 'H/C power': H_C_power_data_synchro,
                 'Setpoint Temperature': T_setpt_data_synchro,
                 'Human_power': Human_power}

    print 'Calculation and interpolation done'
    print
    print 'Write start..'

    # create a path and a file to save the data
    workbook = xlsxwriter.Workbook('DATA_LIST.xlsx')
    worksheet = workbook.add_worksheet()
    col = 0
    for keys in DATA_LIST.keys():
        worksheet.write(0, col, keys)
        col = col + 1
    col = 0
    for data in DATA_LIST.values():
        row = 1
        for value in data:
            worksheet.write(row, col, value)
            row += 1
        col = col + 1
    workbook.close()

    # save the metadata of the used sensors
    i = 0
    for metadata in [T_tags1, T_tags2, Hum_tags3, Hum_tags4,
                     co2_tags5, co2_tags6, T_outdoor_tags7]:
        i = i + 1
        with open(os.path.join('.Cache', 'metadata' + str(i) + '.txt'), 'w') as f:
            for key, value in metadata.items():
                f.write(key + ':' + value + '\n')

    print 'Write done'
    return DATA_LIST

@author Stephen Dawson-Haggerty <*****@*****.**>
"""
import sys
import time
import datetime

from twisted.internet import reactor, defer
from twisted.python import log

from smap import loader
from smap.drivers.obvius import bmo
from smap.contrib import dtutil

# day to start import at
startdt = dtutil.strptime_tz("09 01 2011", "%m %d %Y")
enddt = startdt + datetime.timedelta(days=1)

# number of days to request
days = 3


def next_day():
    global startdt
    global enddt
    global inst
    global days

    print "\n\nSTARTING DAY (%i remaining)\n" % days
    tasks = []
    for d in inst.drivers.itervalues():
        if isinstance(d, bmo.BMOLoader):
            tasks.append(d.update(startdt, enddt))

def parse_time(self, ts, val):
    if self.timefmt == None:
        return int(val)
    else:
        return dtutil.dt2ts(dtutil.strptime_tz(val, self.timefmt, self.timezone))

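# Hedged standalone equivalent for illustration -- timefmt and timezone are
# assumed to come from the driver configuration; when timefmt is None the
# value is already an integer Unix timestamp, otherwise it is a formatted
# local-time string that gets parsed and converted back to a timestamp.
from smap.contrib import dtutil


def parse_time_example(val, timefmt="%Y-%m-%d %H:%M:%S", timezone="UTC"):
    if timefmt is None:
        return int(val)
    return dtutil.dt2ts(dtutil.strptime_tz(val, timefmt, timezone))

print parse_time_example("2013-06-12 08:00:00")        # -> 1371024000
print parse_time_example("1371024000", timefmt=None)   # -> 1371024000
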
locating the streams using a metadata query.

@author Stephen Dawson-Haggerty <*****@*****.**>
"""
from smap.archiver.client import SmapClient
from smap.contrib import dtutil

from matplotlib import pyplot
from matplotlib import dates

# make a client
c = SmapClient("http://www.openbms.org/backend")

# start and end values are Unix timestamps
start = dtutil.dt2ts(dtutil.strptime_tz("1-1-2013", "%m-%d-%Y"))
end = dtutil.dt2ts(dtutil.strptime_tz("1-2-2013", "%m-%d-%Y"))

# download the data and metadata
tags = c.tags("Metadata/Extra/Type = 'oat'")
uuids, data = c.data("Metadata/Extra/Type = 'oat'", start, end)

# make a dict mapping uuids to data vectors
data_map = dict(zip(uuids, data))

# plot all the data
for timeseries in tags:
    d = data_map[timeseries['uuid']]
    # since we have the tags, we can add some metadata
    label = "%s (%s)" % (timeseries['Metadata/SourceName'],
                         timeseries['Properties/UnitofMeasure'])
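    # Hedged continuation sketch -- the original example is truncated here.
    # Readings come back as [timestamp_ms, value] rows, so convert the epoch
    # milliseconds to matplotlib date numbers and plot each stream; the
    # display timezone is an assumption.
    pyplot.plot_date(dates.epoch2num(d[:, 0] / 1000), d[:, 1], '-',
                     tz='America/Los_Angeles', label=label)

pyplot.legend(loc="best")
pyplot.show()
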
from smap.archiver.client import SmapClient
from smap.contrib import dtutil

from matplotlib import pyplot
from matplotlib import dates
import os

# make a client
c = SmapClient("http://new.openbms.org/backend")

# start and end values are Unix timestamps
t_start = "6-12-2013 8:00"
t_end = "6-19-2013 8:00"
start = 1000 * dtutil.dt2ts(dtutil.strptime_tz(t_start, "%m-%d-%Y %H:%M"))
end = 1000 * dtutil.dt2ts(dtutil.strptime_tz(t_end, "%m-%d-%Y %H:%M"))

stnc = "select distinct Metadata/Location/RoomNumber where Metadata/SourceName='KETI Motes'"
roomlist = c.query(stnc)  # the result is a list
# roomlist = roomlist[16:]
# roomlist = ['621A', '621B', '621C', '621D', '621E']

for room in roomlist:
    print "==========Fetching streams in Room %s==========" % room
    stnc = "select Path where Metadata/Location/RoomNumber='%s' and not Path ~ '.*pir.*'" % room
    streams = c.query(stnc)
    if len(streams) > 0:
        # print "----%d streams in Room %s----" % (len(streams), room)
        for s in streams:
            # fetch the metadata of the wanted path
            tags = c.tags("Path='%s'" % s['Path'])

import numpy as np
import scipy.io
from smap.archiver.client import SmapClient  # import assumed; not shown in the original snippet
from smap.contrib import dtutil

x = np.linspace(0, 2 * np.pi, 100)
y = np.cos(x)

host = '192.168.1.120'
port = '8079'
startTime = "12-8-2014"
endTime = "12-9-2014"

c = SmapClient("http://" + host + ":" + port)

start = dtutil.dt2ts(dtutil.strptime_tz(startTime, "%m-%d-%Y"))
end = dtutil.dt2ts(dtutil.strptime_tz(endTime, "%m-%d-%Y"))

uuid = ["63f6eb56-9bf4-5356-845a-80a315909d75"]

b = False
while b == False:
    try:
        data = c.data_uuid(uuid, start, end)
        b = True
    # except Error as e:
    except Exception as e:
        print Exception, e
        if str(e) == 'float division by zero':
            b = False

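# Hedged continuation sketch -- not part of the truncated source. scipy.io is
# imported above but never used, so presumably the fetched readings were meant
# to be saved out; data_uuid returns one [timestamp_ms, value] array per UUID.
scipy.io.savemat('readings.mat', {'readings': data[0]})
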
from smap.archiver.client import SmapClient
from smap.contrib import dtutil
import numpy as np
import pandas as pd
import datetime
import subprocess

# archiver to download the data from
c = SmapClient("http://iiitdarchiver.zenatix.com:9105")

# range of dates for which to download the data
start = dtutil.dt2ts(dtutil.strptime_tz("01-10-2017", "%d-%m-%Y"))
end = dtutil.dt2ts(dtutil.strptime_tz("01-10-2017", "%d-%m-%Y"))

# hard-code the UUIDs we want to download
oat = ["eec41258-f057-591e-9759-8cfdeb67b9af"]

# perform the download of the data
data = c.data_uuid(oat, start, end)
t = np.array(data)
df = pd.DataFrame(t)

# creating files after downloading
for i, j in enumerate(t):
    name = str(i) + '.csv'
    with open(name, 'w') as f:
        for time, val in j:
            # one "<human timestamp> , <value>" row per reading
            f.write(str(datetime.datetime.fromtimestamp(time / 1000.0)) +
                    ' , ' + str(val) + '\n')

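# Hedged sketch -- not in the original script. pandas is imported above but
# the DataFrame df is otherwise unused; one stream can also be put into a
# DataFrame indexed by its timestamps.
readings = pd.DataFrame(data[0], columns=['ts', 'value'])
readings['ts'] = pd.to_datetime(readings['ts'], unit='ms')
readings = readings.set_index('ts')
print readings.head()
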
search = keywordSearch(lines, "sdh room temp", 2000)
add = keywordSearch(lines, "sdh rm temp", 1000)

for i in water:
    res.remove(i)
for i in stp:
    res.remove(i)
for r in res:
    search_path.append(r['Path'])
for i in add:
    search.append(i)
for i in search:
    search_res.append(i['Path'])

train_start = "6-12-2013 8:00"
train_end = "6-12-2013 8:10"
start1 = 1000 * dtutil.dt2ts(dtutil.strptime_tz(train_start, "%m-%d-%Y %H:%M"))
end1 = 1000 * dtutil.dt2ts(dtutil.strptime_tz(train_end, "%m-%d-%Y %H:%M"))

test_start = "6-13-2013 10:10"
test_end = "6-13-2013 10:20"
start2 = 1000 * dtutil.dt2ts(dtutil.strptime_tz(test_start, "%m-%d-%Y %H:%M"))
end2 = 1000 * dtutil.dt2ts(dtutil.strptime_tz(test_end, "%m-%d-%Y %H:%M"))

anomaly_path1 = []
anomaly_path2 = []
train_data = []
test_data = []
search_train = []
# anomaly_train = []
search_test = []
# anomaly_test = []

bldg = bldg_dict[int(num)]
# start = raw_input("start time (\"%m-%d-%Y %H:%M\" or \"-d\" for default): ")
# end = raw_input("end time (\"%m-%d-%Y %H:%M\" or \"-d\" for default): ")
start = "10-21-2013 00:00"
end = "10-27-2013 23:59"

# get the outside air temperature during the period specified
# get_temp(start, end)

if start == '-d':
    print "computing the IMFs in %s from 7 days backwards till now..." % bldg
else:
    print "computing the IMFs in %s from %s to %s..." % (bldg, start, end)

if end == '-d':
    end = int(time.time() * 1000)
else:
    end = dtutil.dt2ts(dtutil.strptime_tz("%s" % end, "%m-%d-%Y %H:%M")) * 1000
if start == '-d':
    start = end - 7 * 24 * 60 * 60 * 1000
else:
    start = dtutil.dt2ts(dtutil.strptime_tz("%s" % start, "%m-%d-%Y %H:%M")) * 1000

applySum = "apply nansum(axis=1) < paste < window(first, field='minute', width=15) < units " \
    "to data in (%f, %f) limit -1 streamlimit 10000 " \
    "where (Metadata/Extra/System = 'total' or Metadata/Extra/System = 'electric') " \
    "and (Properties/UnitofMeasure = 'kW' or Properties/UnitofMeasure = 'Watts' or Properties/UnitofMeasure = 'W') " \
    "and Metadata/Location/Building like '%s%%' and not Metadata/Extra/Operator like 'sum%%' " \
    "and not Path like '%%demand' and not Path like '/Cory_Hall/Electric_5A7/ABC/real_power' " \
    "and not Path like '/Cory_Hall/Electric_5B7/ABC/real_power'" \
    % (start, end, bldg)

result = c.query(applySum)  # the result is a list
reading = result[0]['Readings']
# output readings to file
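# Hedged sketch for the "output readings to file" step above -- not in the
# source; each reading is assumed to be a [timestamp_ms, value] pair, and the
# output filename is hypothetical.
import csv
with open('%s_readings.csv' % bldg, 'wb') as f:
    w = csv.writer(f)
    for ts, val in reading:
        w.writerow([ts, val])
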