Example 1
    def open_page(self):
        if not self.startdt:
            self.startdt = self.push_hist
        if not self.enddt:
            self.enddt = dtutil.now()

        start, end = urllib.quote(dtutil.strftime_tz(self.startdt, TIMEFMT)), \
            urllib.quote(dtutil.strftime_tz(self.enddt, TIMEFMT))

        url = self.url % (start, end)
        url += "&mnuStartMonth=%i&mnuStartDay=%i&mnuStartYear=%i" % \
            (self.startdt.month,
             self.startdt.day,
             self.startdt.year)
        url += "&mnuStartTime=%i%%3A%i" % (self.startdt.hour, 
                                           self.startdt.minute)
        url += "&mnuEndMonth=%i&mnuEndDay=%i&mnuEndYear=%i" % \
            (self.enddt.month,
             self.enddt.day,
             self.enddt.year)
        url += "&mnuEndTime=%i%%3A%i" % (self.enddt.hour, self.enddt.minute)
        log.msg("loading " + url)

        # send the auth preemptively to avoid the 403-redirect cycle...
        auth = "Basic " + base64.encodestring(":".join(self.auth))[:-1]
        d = self.agent.request("GET", url,
                               Headers({"Authorization": [auth]}))
        d.addCallback(self.get_response)
        return d
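The [:-1] above strips the trailing newline that the legacy base64.encodestring appends. A minimal standalone sketch of the same preemptive Basic-auth header (names here are illustrative, not from the source):

import base64

def basic_auth_header(user, password):
    # b64encode, unlike encodestring, adds no trailing newline,
    # so no [:-1] slice is needed
    return "Basic " + base64.b64encode("%s:%s" % (user, password))

assert basic_auth_header("user", "pass") == "Basic dXNlcjpwYXNz"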
Example 2
    def open_page(self):
        if not self.startdt:
            self.startdt = self.push_hist
        if not self.enddt:
            self.enddt = dtutil.now()

        start, end = urllib.quote(dtutil.strftime_tz(self.startdt, TIMEFMT)), \
            urllib.quote(dtutil.strftime_tz(self.enddt, TIMEFMT))

        url = self.url % (start, end)
        url += "&mnuStartMonth=%i&mnuStartDay=%i&mnuStartYear=%i" % \
            (self.startdt.month,
             self.startdt.day,
             self.startdt.year)
        url += "&mnuStartTime=%i%%3A%i" % (self.startdt.hour,
                                           self.startdt.minute)
        url += "&mnuEndMonth=%i&mnuEndDay=%i&mnuEndYear=%i" % \
            (self.enddt.month,
             self.enddt.day,
             self.enddt.year)
        url += "&mnuEndTime=%i%%3A%i" % (self.enddt.hour, self.enddt.minute)
        print "loading", url

        self.fp = httputils.load_http(url, as_fp=True, auth=auth.BMOAUTH)
        if not self.fp:
            raise core.SmapException("timeout!")
        self.reader = csv.reader(self.fp, dialect='excel-tab')
        header = self.reader.next()
        if len(header) == 0:
            print "Warning: no data from", self.url
            raise core.SmapException("no data!")
        try:
            self.field_map, self.map = make_field_idxs(self.meter_type,
                                                       header,
                                                       location=self.location)
        except Exception:
            traceback.print_exc()

        if not self.added:
            self.added = True
            for channel in self.map['sensors'] + self.map['meters']:
                try:
                    self.add_timeseries('/%s/%s' % channel[2:4],
                                        channel[4],
                                        data_type='double')
                    self.set_metadata(
                        '/%s/%s' % channel[2:4], {
                            'Extra/ChannelName':
                            re.sub(r'\(.*\)', '', channel[0]).strip(),
                        })
                except Exception:
                    traceback.print_exc()
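The '/%s/%s' % channel[2:4] idiom above works because slicing a tuple yields a tuple, which %-formatting consumes positionally; a toy channel tuple (hypothetical values) shows the mechanics:

# hypothetical channel tuple; only the slice mechanics matter here
channel = ('Room Temp (F)', 'unit', 'building1', 'sensor7', 'F')
assert '/%s/%s' % channel[2:4] == '/building1/sensor7'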
Example 3
    def open_page(self):
        if not self.startdt:
            self.startdt = self.push_hist
        if not self.enddt:
            self.enddt = dtutil.now()

        start, end = urllib.quote(dtutil.strftime_tz(self.startdt, TIMEFMT)), \
            urllib.quote(dtutil.strftime_tz(self.enddt, TIMEFMT))

        url = self.url % (start, end)
        url += "&mnuStartMonth=%i&mnuStartDay=%i&mnuStartYear=%i" % \
            (self.startdt.month,
             self.startdt.day,
             self.startdt.year)
        url += "&mnuStartTime=%i%%3A%i" % (self.startdt.hour, 
                                           self.startdt.minute)
        url += "&mnuEndMonth=%i&mnuEndDay=%i&mnuEndYear=%i" % \
            (self.enddt.month,
             self.enddt.day,
             self.enddt.year)
        url += "&mnuEndTime=%i%%3A%i" % (self.enddt.hour, self.enddt.minute)
        print "loading", url

        self.fp = httputils.load_http(url, as_fp=True, auth=auth.BMOAUTH)
        if not self.fp:
            raise core.SmapException("timeout!")
        self.reader = csv.reader(self.fp, dialect='excel-tab')
        header = self.reader.next()
        if len(header) == 0:
            print "Warning: no data from", self.url
            raise core.SmapException("no data!")
        try:
            self.field_map, self.map = make_field_idxs(self.meter_type, header, 
                                                       location=self.location)
        except Exception:
            traceback.print_exc()

        if not self.added:
            self.added = True
            for channel in self.map['sensors'] + self.map['meters']:
                try:
                    self.add_timeseries('/%s/%s' % channel[2:4], channel[4],
                                        data_type='double')
                    self.set_metadata('/%s/%s' % channel[2:4], {
                        'Extra/ChannelName':
                            re.sub(r'\(.*\)', '', channel[0]).strip(),
                    })
                except Exception:
                    traceback.print_exc()
Example 4
def make_time_formatter(request, stags):
    """Return a function that propertly formats timestamps for a
    particular request.
    """
    if 'timefmt' in request.args:
        try:
            tz = stags['Properties']['Timezone']
        except KeyError:
            tz = 'Utc'
        tz = dtutil.gettz(tz)

        # potentially do timestamp stringification here.
        # this could be a bit slow for large datasets...
        if request.args['timefmt'][0] == 'iso8601':
            fmt = dtutil.iso8601
        elif request.args['timefmt'][0] == 'excel':
            fmt = dtutil.excel
        else:
            fmt = lambda dt, tz: dtutil.strftime_tz(dt, '%s')
            tz = dtutil.gettz('Utc')

        def format(t):
            return fmt(dtutil.ts2dt(t / 1000), tz)

        return format
    else:
        return lambda x: str(int(x))
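A hypothetical usage sketch for make_time_formatter; FakeRequest and its args dict merely stand in for the Twisted request object the function expects:

class FakeRequest(object):
    def __init__(self, args):
        self.args = args

stags = {'Properties': {'Timezone': 'America/Los_Angeles'}}
formatter = make_time_formatter(FakeRequest({'timefmt': ['iso8601']}), stags)
print formatter(1325376000000)  # milliseconds since the epoch -> ISO 8601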
Example 5
    def write_one_stream(self, request, stream, stags, mime_header=False):
        """For a CSV downlod, add some hweaders and write the data to the stream
        """
        writer = csv.writer(request)
        if 'tags' in request.args and 'none' not in request.args['tags']:
            request.write("# uuid: %s\n" % stream['uuid'])
            request.write("# DownloadTime: " + time.ctime() + "\n")
            request.write("# ")
            request.write('\n# '.join(
                (': '.join(x) for x in sorted(stags.iteritems()))))
            request.write('\n')

        if 'timefmt' in request.args:
            # potentially do timestamp stringification here.
            # this could be a bit slow for large datasets...
            if request.args['timefmt'][0] == 'iso8601':
                fmt = dtutil.iso8601
                tz = dtutil.gettz(stags.get('Properties/Timezone', 'Utc'))
            elif request.args['timefmt'][0] == 'excel':
                fmt = dtutil.excel
                tz = dtutil.gettz(stags.get('Properties/Timezone', 'Utc'))
            else:
                fmt = lambda dt, tz: dtutil.strftime_tz(dt, '%s')
                tz = dtutil.gettz('Utc')

            def row_action(row):
                row[0] = fmt(dtutil.ts2dt(row[0] / 1000), tz)
                writer.writerow(row)

            map(row_action, stream['Readings'])
        else:
            map(writer.writerow, stream['Readings'])
Example 6
    def write_one_stream(self, request, stream, stags, mime_header=False):
        """For a CSV downlod, add some hweaders and write the data to the stream
        """
        writer = csv.writer(request)
        if 'tags' in request.args and 'none' not in request.args['tags']:
            request.write("# uuid: %s\n" % stream['uuid'])
            request.write("# DownloadTime: " + time.ctime() + "\n")
            request.write("# ")
            request.write('\n# '.join((': '.join(x) for x in sorted(stags.iteritems()))))
            request.write('\n')

        if 'timefmt' in request.args:
            # potentially do timestamp stringification here.
            # this could be a bit slow for large datasets...
            if request.args['timefmt'][0] == 'iso8601': 
                fmt = dtutil.iso8601
                tz = dtutil.gettz(stags.get('Properties/Timezone', 'Utc'))
            elif request.args['timefmt'][0] == 'excel':
                fmt = dtutil.excel
                tz = dtutil.gettz(stags.get('Properties/Timezone', 'Utc'))
            else:
                fmt = lambda dt, tz: dtutil.strftime_tz(dt, '%s')
                tz = dtutil.gettz('Utc')
            def row_action(row):
                row[0] = fmt(dtutil.ts2dt(row[0] / 1000), tz)
                writer.writerow(row)
            map(row_action, stream['Readings'])
        else:
            map(writer.writerow, stream['Readings'])
Example 7
def make_time_formatter(request, stags):
    """Return a function that propertly formats timestamps for a
    particular request.
    """
    if 'timefmt' in request.args:
        try:
            tz = stags['Properties']['Timezone']
        except KeyError:
            tz = 'Utc'
        tz = dtutil.gettz(tz)

        # potentially do timestamp stringification here.
        # this could be a bit slow for large datasets...
        if request.args['timefmt'][0] == 'iso8601': 
            fmt = dtutil.iso8601
        elif request.args['timefmt'][0] == 'excel':
            fmt = dtutil.excel
        else:
            fmt = lambda dt, tz: dtutil.strftime_tz(dt, '%s')
            tz = dtutil.gettz('Utc')
        def format(t):
            return fmt(dtutil.ts2dt(t / 1000), tz)
        return format
    else:
        return lambda x: str(int(x))
Example 8
    def writeDROMScsv(self, value):
        fcsv = open('meterdata.csv', 'w')
        # rows are (timestamp, meter id, value), so the header has three fields
        fcsv.write(','.join(['DateTime', 'MeterId', 'Value']) + '\n')
        for path, val in value:
            if 'Readings' not in val: continue
            cmps = split_path(path)
            channel = join_path(cmps[1:])
            for d in val['Readings']:
                if d is None: continue
                ts = dtutil.strftime_tz(dtutil.ts2dt(d[0] / 1000),
                                        "%Y-%m-%d %H:%M", tzstr='Local')
                if ts is None: continue
                v = d[1]
                if v is None: continue
                if val['Properties']['UnitofMeasure'] == 'Watts':
                    v /= 1000.
                v /= 4.  # 15-minute intervals: kW / 4 approximates kWh
                fcsv.write(','.join([ts, channel, str(v)]) + '\n')
        fcsv.close()
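A worked check of the unit conversion above, assuming the readings are 15-minute interval data (the assumption behind the v /= 4.):

# 2000 W held for 15 minutes is 2 kW * 0.25 h = 0.5 kWh
watts = 2000.0
kwh = (watts / 1000.0) / 4.0
assert kwh == 0.5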
Example 9
    def read(self):
        all_readings = self.client.latest(self.tempwhere)
        for p in all_readings:
            print '-'*20
            md = self.client.tags('uuid = "'+p['uuid']+'"')[0]
            print 'Room:', md['Metadata/Room']
            print 'Reading:', p['Readings'][0][1]
            ts = dtutil.ts2dt(p['Readings'][0][0]/1000)
            print 'Time:', dtutil.strftime_tz(ts, tzstr='America/Los_Angeles')
        avg_room_temp = (sum([x['Readings'][0][1] for x in all_readings]) /
                         float(len(all_readings)))

        # get difference between avg room temperature and thermostat temperature
        new_diff = self.therm_temp - avg_room_temp

        # periodically update output streams.  Here a bogus adjustment
        self.add('/heatSetpoint', self.heatSP + new_diff)
        self.add('/coolSetpoint', self.coolSP + new_diff)
        print "zone controller publish: ", self.heatSP, self.coolSP
Example 10
"""
test script

"""

import glob

from smap.contrib import dtutil

data_dir = "/Users/hdz_1989/Downloads/SDB/Todai/"
files = glob.glob(data_dir + '/*.dat')
f = open(data_dir + 'sample/' + 'ts_checking.txt', 'w')

for name in files:
    fp = open(name, 'r')
    # format the first three timestamps comma-separated,
    # then the fourth to end the line
    for j in range(3):
        rd = fp.readline()
        ts = float(rd.strip('\n').split()[0])
        stamp = dtutil.strftime_tz(dtutil.ts2dt(ts), "%m-%d-%Y %H:%M:%S")
        f.write("%s, " % stamp)
    rd = fp.readline()
    ts = float(rd.strip('\n').split()[0])
    stamp = dtutil.strftime_tz(dtutil.ts2dt(ts), "%m-%d-%Y %H:%M:%S")
    f.write("%s\n" % stamp)
    fp.close()

f.close()
Example 11
  def get_readings(self, market, start_date, stop_date): 
    readings = {'total_price': [], 'loss': [], 'energy': [], 'congestion': []}
    print "get_readings", market
    if market == 'DAM':
      q = 'PRC_LMP'
      m = 'DAM'
    elif market == 'HASP':
      q = 'PRC_HASP_LMP'
      m = 'HASP'
    elif market == 'RTM':
      q = 'PRC_INTVL_LMP'
      m = 'RTM'
    else:
      raise Exception("Invalid market: " + market)

    url = 'http://oasis.caiso.com/mrtu-oasis/SingleZip?'
    url += 'queryname=' + q
    url += '&startdate=' + dtutil.strftime_tz(start_date, '%Y%m%d', 'US/Pacific')
    url += '&enddate=' + dtutil.strftime_tz(stop_date, '%Y%m%d', 'US/Pacific')
    url += '&market_run_id=' + m
    url += '&node=' + self.location

    logging.info("Get url %s" % url)
    h = None
    for d in [5, 20, 60]:
      try:
        h = urllib2.urlopen(url, timeout=50)
        break
      except urllib2.URLError:
        logging.warn("urlopen failed.")
      time.sleep(d)
    if h is None:
      raise Exception("Failed to open url: %s" % url)

    z = zipfile.ZipFile(StringIO.StringIO(h.read()))
    xml = z.read(z.namelist()[0])
    b = BeautifulSoup.BeautifulSoup(xml)

    sec_per_int = int(b.find('m:sec_per_interval').contents[0])

    rows = b.findAll('m:report_data')
    for d in rows:
      res = d.find('m:resource_name').contents[0]
      item = d.find('m:data_item').contents[0]
      day = d.find('m:opr_date').contents[0]
      inter = int( d.find('m:interval_num').contents[0] )
      val = float( d.find('m:value').contents[0] )

      secs = (inter - 1) * sec_per_int
      dt = dtutil.strptime_tz(day, '%Y-%m-%d', 'US/Pacific') + datetime.timedelta(seconds=secs)
      timestamp = dtutil.dt2ts(dt)

      key = None
      if item == 'LMP_PRC':
        key = 'total_price'
      elif item == 'LMP_LOSS_PRC':
        key = 'loss'
      elif item == 'LMP_ENE_PRC':
        key = 'energy'
      elif item == 'LMP_CONG_PRC':
        key = 'congestion'
      else:
        continue

      readings[key].append((timestamp, val))

    num_readings = len(readings[readings.keys()[0]])
    for k in readings.keys():
      if len(readings[k]) != num_readings:
        raise Exception('Missing readings')

      readings[k] = sorted(readings[k], key=lambda (t, v): t)

    return readings
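The download loop above retries with escalating delays before giving up. The same pattern as a generic sketch, where fetch is a hypothetical callable that raises on failure:

import time

def fetch_with_retries(fetch, delays=(5, 20, 60)):
    for d in delays:
        try:
            return fetch()
        except Exception:
            time.sleep(d)  # back off, then try again
    raise Exception("all retries failed")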
Example 12
    def get_readings(self, market, start_date, stop_date):
        readings = {
            'total_price': [],
            'loss': [],
            'energy': [],
            'congestion': []
        }
        print "get_readings", market
        if market == 'DAM':
            q = 'PRC_LMP'
            m = 'DAM'
        elif market == 'HASP':
            q = 'PRC_HASP_LMP'
            m = 'HASP'
        elif market == 'RTM':
            q = 'PRC_INTVL_LMP'
            m = 'RTM'
        else:
            raise Exception("Invalid market: " + market)

        url = 'http://oasis.caiso.com/mrtu-oasis/SingleZip?'
        url += 'queryname=' + q
        url += '&startdate=' + dtutil.strftime_tz(start_date, '%Y%m%d',
                                                  'US/Pacific')
        url += '&enddate=' + dtutil.strftime_tz(stop_date, '%Y%m%d',
                                                'US/Pacific')
        url += '&market_run_id=' + m
        url += '&node=' + self.location

        logging.info("Get url %s" % url)
        h = None
        for d in [5, 20, 60]:
            try:
                h = urllib2.urlopen(url, timeout=50)
                break
            except urllib2.URLError:
                logging.warn("urlopen failed.")
            time.sleep(d)
        if h is None:
            raise Exception("Failed to open url: %s" % url)

        z = zipfile.ZipFile(StringIO.StringIO(h.read()))
        xml = z.read(z.namelist()[0])
        b = BeautifulSoup.BeautifulSoup(xml)

        sec_per_int = int(b.find('m:sec_per_interval').contents[0])

        rows = b.findAll('m:report_data')
        for d in rows:
            res = d.find('m:resource_name').contents[0]
            item = d.find('m:data_item').contents[0]
            day = d.find('m:opr_date').contents[0]
            inter = int(d.find('m:interval_num').contents[0])
            val = float(d.find('m:value').contents[0])

            secs = (inter - 1) * sec_per_int
            dt = dtutil.strptime_tz(
                day, '%Y-%m-%d',
                'US/Pacific') + datetime.timedelta(seconds=secs)
            timestamp = dtutil.dt2ts(dt)

            key = None
            if item == 'LMP_PRC':
                key = 'total_price'
            elif item == 'LMP_LOSS_PRC':
                key = 'loss'
            elif item == 'LMP_ENE_PRC':
                key = 'energy'
            elif item == 'LMP_CONG_PRC':
                key = 'congestion'
            else:
                continue

            readings[key].append((timestamp, val))

        num_readings = len(readings[readings.keys()[0]])
        for k in readings.keys():
            if len(readings[k]) != num_readings:
                raise Exception('Missing readings')

            readings[k] = sorted(readings[k], key=lambda (t, v): t)

        return readings
Example 13
class CaIsoPrice(SmapDriver):

    MARKETS = [('DAM', 30 * 60, 'Day-ahead market'),
               ('HASP', 10 * 60, 'Hour-ahead scheduling process'),
               ('RTM', 2 * 60, 'Real-time market')]
    FEEDS = [('total_price', '$', 'total price'), ('loss', '$', 'loss price'),
             ('congestion', '$', 'congestion price'),
             ('energy', '$', 'energy price')]

    def setup(self, opts):
        # get our location
        self.last_reading = {}
        for m, t, d in self.MARKETS:
            self.last_reading[m] = 0

        self.location = opts.get('Location', 'OAKLAND_1_N001')
        self.set_metadata(
            '/', {
                'Location/Uri': 'http://oasis.caiso.com/mrtu-oasis/SingleZip',
                'Extra/IsoNode': self.location,
                'Extra/Driver': 'smap.drivers.caiso_price.CaIsoPrice'
            })

        # add the feeds
        for (m, i, md) in self.MARKETS:
            for (f, u, fd) in self.FEEDS:
                path = '/%s/%s' % (m, f)
                self.add_timeseries(path,
                                    u,
                                    data_type='double',
                                    description=md + ' ' + fd)

    def start(self):
        for (market, interval, description) in self.MARKETS:
            periodicSequentialCall(self.poll_stream, market,
                                   False).start(interval)

    def get_readings(self, market, start_date, stop_date):
        readings = {
            'total_price': [],
            'loss': [],
            'energy': [],
            'congestion': []
        }
        print "get_readings", market
        if market == 'DAM':
            q = 'PRC_LMP'
            m = 'DAM'
        elif market == 'HASP':
            q = 'PRC_HASP_LMP'
            m = 'HASP'
        elif market == 'RTM':
            q = 'PRC_INTVL_LMP'
            m = 'RTM'
        else:
            raise Exception("Invalid market: " + market)

        url = 'http://oasis.caiso.com/mrtu-oasis/SingleZip?'
        url += 'queryname=' + q
        url += '&startdate=' + dtutil.strftime_tz(start_date, '%Y%m%d',
                                                  'US/Pacific')
        url += '&enddate=' + dtutil.strftime_tz(stop_date, '%Y%m%d',
                                                'US/Pacific')
        url += '&market_run_id=' + m
        url += '&node=' + self.location

        logging.info("Get url %s" % url)
        h = None
        for d in [5, 20, 60]:
            try:
                h = urllib2.urlopen(url, timeout=50)
                break
            except urllib2.URLError:
                logging.warn("urlopen failed.")
            time.sleep(d)
        if h is None:
            raise Exception("Failed to open url: %s" % url)

        z = zipfile.ZipFile(StringIO.StringIO(h.read()))
        xml = z.read(z.namelist()[0])
        b = BeautifulSoup.BeautifulSoup(xml)

        sec_per_int = int(b.find('m:sec_per_interval').contents[0])

        rows = b.findAll('m:report_data')
        for d in rows:
            res = d.find('m:resource_name').contents[0]
            item = d.find('m:data_item').contents[0]
            day = d.find('m:opr_date').contents[0]
            inter = int(d.find('m:interval_num').contents[0])
            val = float(d.find('m:value').contents[0])

            secs = (inter - 1) * sec_per_int
            dt = dtutil.strptime_tz(
                day, '%Y-%m-%d',
                'US/Pacific') + datetime.timedelta(seconds=secs)
            timestamp = dtutil.dt2ts(dt)

            key = None
            if item == 'LMP_PRC':
                key = 'total_price'
            elif item == 'LMP_LOSS_PRC':
                key = 'loss'
            elif item == 'LMP_ENE_PRC':
                key = 'energy'
            elif item == 'LMP_CONG_PRC':
                key = 'congestion'
            else:
                continue

            readings[key].append((timestamp, val))

        num_readings = len(readings[readings.keys()[0]])
        for k in readings.keys():
            if len(readings[k]) != num_readings:
                raise Exception('Missing readings')

            readings[k] = sorted(readings[k], key=lambda (t, v): t)

        return readings

    def poll_stream(self, market, load_old):
        def _push_data(readings, market):
            # Zip together the values for all keys
            for vals in zip(*readings.values()):
                if vals[0][0] > self.last_reading[market]:
                    # Add all smap points for this time
                    for (k, v) in zip(readings.keys(), vals):
                        logging.debug("add /%s/%s: %s" % (market, k, str(v)))
                        self.add('/%s/%s' % (market, k), *v)
                        self.last_reading[market] = vals[0][0]

        # Load old data
        if load_old:
            for day in range(1, 2):
                stop = dtutil.now() - datetime.timedelta(days=day)
                start = stop - datetime.timedelta(days=2)
                try:
                    readings = self.get_readings(market, start, stop)
                    _push_data(readings, market)
                except Exception:
                    logging.exception('Error getting reading')

        # Continuously get new data
        try:
            stop = dtutil.now()
            start = stop - datetime.timedelta(days=1)

            readings = self.get_readings(market, start, stop)

            rt = readings['total_price'][-1][0]

            if rt > self.last_reading[market]:
                logging.info("NEW %s READING (%s) at time %s" %
                             (market,
                              dtutil.strftime_tz(dtutil.ts2dt(rt),
                                                 '%m/%d %H:%M', 'US/Pacific'),
                              dtutil.strftime_tz(dtutil.now(), '%m/%d %H:%M',
                                                 'US/Pacific')))
                _push_data(readings, market)
                # self.last_reading = rt

        except Exception:
            logging.exception('Error getting reading')
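A toy illustration of the zip(*readings.values()) grouping in _push_data: it pairs the i-th (timestamp, value) reading of every key, and since a dict's keys() and values() enumerate in the same order, the later zip(readings.keys(), vals) lines up correctly:

readings = {'loss': [(1, 10), (2, 11)], 'energy': [(1, 20), (2, 21)]}
for vals in zip(*readings.values()):
    print vals  # e.g. ((1, 10), (1, 20)), then ((2, 11), (2, 21))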