Example #1
def p_abstime(t):
    """abstime : NUMBER
               | QSTRING
               | NOW"""
    if t[1] == 'now':
        t[0] = dtutil.now()
    elif isinstance(t[1], basestring):
        # quoted string: parse a human-readable date
        t[0] = parse_time(t[1])
    else:
        # NUMBER: milliseconds since the Unix epoch
        t[0] = dtutil.ts2dt(t[1] / 1000)
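The NUMBER branch treats the token as milliseconds since the Unix epoch, which is why it divides by 1000 before calling dtutil.ts2dt. A minimal standard-library sketch of that conversion, assuming ts2dt behaves like a plain epoch-seconds-to-UTC constructor (the real helper lives in smap.contrib.dtutil):

from datetime import datetime

def ts2dt_sketch(ts_seconds):
    # interpret an epoch timestamp (in seconds) as a naive UTC datetime
    return datetime.utcfromtimestamp(ts_seconds)

# NUMBER tokens carry milliseconds, hence the / 1000 above
print(ts2dt_sketch(1500000000000 / 1000))  # 2017-07-14 02:40:00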
Example #2
    def process_one(self, data, tz, prev=None, prev_datetimes=None):
        times, values = data[:, 0], data[:, 1]

        if prev is not None:
            times = np.append(prev_datetimes, times)
            values = np.append(prev, values)
            st = self.snapper(dtutil.ts2dt(prev_datetimes[-1] / 1000))
        else:
            st = self.snapper(dtutil.ts2dt(times[0] / 1000))
        st = dtutil.dt2ts(st) * 1000 + self.width
        et = int(times[-1])
        mesh = np.arange(st, et, self.width)

        if self.max_time_delta:
            gaps = self.detect_gaps(times)
            remove = np.zeros(len(mesh), dtype=bool)
            for gap in gaps:
                gt = np.greater(mesh, gap[0])
                lt = np.less(mesh, gap[1])
                this_gap = np.logical_and(gt, lt)
                remove = np.logical_or(remove, this_gap)
            remove_inds = np.nonzero(remove)[0]
            mesh = np.delete(mesh, remove_inds)

        if self.method == 'linear':
            outvals = np.interp(mesh, times, values)
            prev = np.array([values[-1]])
            prev_datetimes = np.array([times[-1]])
        elif self.method == 'spline':
            s = UnivariateSpline(times, values, s=0)
            outvals = s(mesh)
            # carry the last 10 points into the next chunk; 10 is an empirical choice
            prev = np.array(values[-10:])
            prev_datetimes = np.array(times[-10:])
        else:
            raise ValueError("unknown interpolation method: %s" % self.method)
        output = np.vstack((mesh, outvals)).T
        state = {
            'prev': prev,
            'prev_datetimes': prev_datetimes,
        }

        return output, state
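process_one returns both the resampled block and a state dict ('prev', 'prev_datetimes') so the next chunk can be stitched onto the tail of the previous one. A sketch of that calling contract, where interp (an instance of the class above), chunks, and tz are hypothetical:

import numpy as np

state = {'prev': None, 'prev_datetimes': None}
pieces = []
for chunk in chunks:  # each chunk: an (N, 2) array of [ms_timestamp, value]
    out, state = interp.process_one(chunk, tz, **state)
    pieces.append(out)
result = np.vstack(pieces)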
Example #3
    def writeDROMScsv(self, value):
        fcsv = open('meterdata.csv', 'w')
        # header matches the three fields written per row below
        fcsv.write(','.join(['DateTime', 'MeterId', 'Value']) + '\n')
        for path, val in value:
            if 'Readings' not in val:
                continue
            cmps = split_path(path)
            channel = join_path(cmps[1:])
            for d in val['Readings']:
                if d is None:
                    continue
                ts = dtutil.strftime_tz(dtutil.ts2dt(d[0] / 1000),
                                        "%Y-%m-%d %H:%M", tzstr='Local')
                if ts is None:
                    continue
                v = d[1]
                if v is None:
                    continue
                if val['Properties']['UnitofMeasure'] == 'Watts':
                    v /= 1000.
                v /= 4.  # approximate kWh
                fcsv.write(','.join([ts, channel, str(v)]) + '\n')
        fcsv.close()
Example #4
    def read(self):
        all_readings = self.client.latest(self.tempwhere)
        for p in all_readings:
            print '-' * 20
            md = self.client.tags('uuid = "%s"' % p['uuid'])[0]
            print 'Room:', md['Metadata/Room']
            print 'Reading:', p['Readings'][0][1]
            ts = dtutil.ts2dt(p['Readings'][0][0] / 1000)
            print 'Time:', dtutil.strftime_tz(ts, tzstr='America/Los_Angeles')
        avg_room_temp = (sum(x['Readings'][0][1] for x in all_readings) /
                         float(len(all_readings)))

        # difference between the average room temperature and the thermostat temperature
        new_diff = self.therm_temp - avg_room_temp

        # periodically update the output streams; this is a bogus adjustment
        self.add('/heatSetpoint', self.heatSP + new_diff)
        self.add('/coolSetpoint', self.coolSP + new_diff)
        print "zone controller publish: ", self.heatSP, self.coolSP
Example #5
def test_liveness(smap_url, opts):
    data = load_json(smap_url + '/data/+')
    readings = [(k,
                 v['uuid'],
                 v['Readings'][-1] if len(v['Readings']) else [0, None],
                 v['Properties'])
                for k, v in data.iteritems() if 'uuid' in v]
    readings.sort(key=lambda v: v[2][0], reverse=True)
    d = []
    for path, uid, latest, props in readings:
        # render the latest reading time in the stream's own timezone
        tim = dtutil.iso8601(dtutil.ts2dt(latest[0] / 1000.),
                             tzinfo=dtutil.gettz(props['Timezone']))
        val = "%s%s" % (latest[1], props['UnitofMeasure'])
        d.append('%s %s %s' % (tim, path, val))
    return d
Example #6
def get_liveness(smap_url):
    data = load_json(smap_url + '/data/+')
    readings = [(k,
                 v['uuid'],
                 v['Readings'][-1] if len(v['Readings']) else [0, None],
                 v['Properties'])
                for k, v in data.iteritems() if 'uuid' in v]
    readings.sort(key=lambda v: v[2][0], reverse=True)
    d = {}
    for path, uid, latest, props in readings:
        d[uid] = {}
        d[uid]['data'] = dtutil.iso8601(dtutil.ts2dt(latest[0] / 1000.),
                                        tzinfo=dtutil.gettz(props['Timezone']))
        d[uid]['curr'] = "%s%s" % (latest[1], props['UnitofMeasure'])
        d[uid]['path'] = path
        d[uid]['latest'] = latest
        d[uid]['props'] = props

    return d
Example #7
File: api.py Project: yisea123/smap-data
def row_action(row):
    # column 0 holds a millisecond timestamp; format it in the target timezone
    row[0] = fmt(dtutil.ts2dt(row[0] / 1000), tz)
    writer.writerow(row)
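row_action is a per-row callback: it replaces the millisecond timestamp in column 0 with a formatted string and hands the row to a csv writer, with fmt, tz, and writer captured from the enclosing scope. A self-contained sketch of that context, using hypothetical bindings for those three names:

import csv
import sys
from smap.contrib import dtutil

tz = 'America/Los_Angeles'
fmt = lambda dt, tzstr: dtutil.strftime_tz(dt, '%Y-%m-%d %H:%M:%S', tzstr=tzstr)
writer = csv.writer(sys.stdout)

def row_action(row):
    row[0] = fmt(dtutil.ts2dt(row[0] / 1000), tz)
    writer.writerow(row)

row_action([1500000000000, 42.0])  # writes: 2017-07-13 19:40:00,42.0 (Pacific time)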
Example #8
    def process_one(self, data, op, tz,
                    prev=None,
                    prev_datetimes=None,
                    first=False, last=False,
                    region=(None, None)):
        # print "PRCESSING"
        tic = time.time()
        if prev == None:
            prev = np.copy(data)
            prev_datetimes = MaskedDTList(prev[:, 0] / 1000, tz)
        else:
            prev = np.vstack((prev, data))
            prev_datetimes.extend(data[:, 0] / 1000)
        
        assert len(prev_datetimes) == len(prev)
        output = [null] * len(op.outputs)

        if len(prev_datetimes) == 0:
            return output, {
                'prev': prev,
                'prev_datetimes': prev_datetimes,
                }

        # we might want to produce readings before the first data point
        if first and region[0]:
            bin_start = self.snapper(dtutil.ts2dt(region[0] / 1000))
        else:
            bin_start = self.snapper(prev_datetimes[0])
        truncate_to = 0

        while True:

            if last:
                if not region[1] and truncate_to == len(prev_datetimes):
                    break
                if region[1] and region[1] <= dtutil.dt2ts(bin_start) * 1000:
                    break

            bin_end = bin_start + self.bin_slide

            # perform a binary search to find the next window boundary
            bin_start_idx = bisect.bisect_left(prev_datetimes, bin_start) 
            bin_end_idx = bisect.bisect_left(prev_datetimes, bin_end)
            truncate_to = bin_start_idx

            # ignore bins which aren't full
            if bin_end_idx == len(prev_datetimes) and not last:
                break

            # skip empty bins
            if bin_start_idx == bin_end_idx:
                # maybe we were supposed to produce output even if
                # there's no data in the bin
                if not self.skip_empty:
                    t = dtutil.dt2ts(bin_start) * 1000
                    output = extend(output, 
                                    [np.array([[t, np.nan]])])

                bin_start += self.bin_slide
                continue

            if (bin_end_idx < len(prev_datetimes) and 
                self.comparator(bin_start, prev_datetimes[bin_end_idx])):
                take_end = bin_end_idx + 1
            else:
                take_end = bin_end_idx

            opdata = op([prev[bin_start_idx:take_end, :]])
            
            # snap the times to the beginning of the window, if asked to.
            # doing it here avoids passing datetimes around and covers the
            # common case where this is what you want.
            if self.snap_times:
                t = dtutil.dt2ts(bin_start)
                for j in xrange(0, len(opdata)):
                    opdata[j][:, 0] = t * 1000
            output = extend(output, opdata)

            bin_start += self.bin_slide

        toc = time.time()
#         print("dt processing took %0.05f: %i/%i converted" %  \
#                   (toc-tic,
#                    prev_datetimes.conversions,
#                    len(prev_datetimes)))

        prev_datetimes.truncate(truncate_to)
        prev = prev[truncate_to:]

        return output, {
            'prev': prev,
            'prev_datetimes': prev_datetimes,
            }
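The window boundaries above come from bisect_left over the ordered prev_datetimes, which finds the half-open index range [bin_start_idx, bin_end_idx) for one bin in O(log n) instead of scanning. The same idea on plain integer timestamps, as a standalone sketch:

import bisect

times = [0, 10, 20, 25, 40, 55, 60]  # sorted timestamps
bin_start, bin_width = 20, 20

lo = bisect.bisect_left(times, bin_start)              # first index >= bin_start
hi = bisect.bisect_left(times, bin_start + bin_width)  # first index >= bin_end
print(times[lo:hi])  # [20, 25]: the readings falling in this bin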
Example #9
    def process_one(self,
                    data,
                    op,
                    tz,
                    prev=None,
                    prev_datetimes=None,
                    first=False,
                    last=False,
                    region=(None, None)):
        # print "PRCESSING"
        tic = time.time()
        if prev == None:
            prev = np.copy(data)
            prev_datetimes = MaskedDTList(prev[:, 0] / 1000, tz)
        else:
            prev = np.vstack((prev, data))
            prev_datetimes.extend(data[:, 0] / 1000)

        assert len(prev_datetimes) == len(prev)
        output = [null] * len(op.outputs)

        if len(prev_datetimes) == 0:
            return output, {
                'prev': prev,
                'prev_datetimes': prev_datetimes,
            }

        # we might want to produce readings before the first data point
        if first and region[0]:
            bin_start = self.snapper(dtutil.ts2dt(region[0] / 1000))
        else:
            bin_start = self.snapper(prev_datetimes[0])
        truncate_to = 0

        while True:

            if last:
                if not region[1] and truncate_to == len(prev_datetimes):
                    break
                if region[1] and region[1] <= dtutil.dt2ts(bin_start) * 1000:
                    break

            bin_end = bin_start + self.bin_slide

            # perform a binary search to find the next window boundary
            bin_start_idx = bisect.bisect_left(prev_datetimes, bin_start)
            bin_end_idx = bisect.bisect_left(prev_datetimes, bin_end)
            truncate_to = bin_start_idx

            # ignore bins which aren't full
            if (bin_end_idx == len(prev_datetimes) and not last
                    and not (region[1]
                             and dtutil.dt2ts(bin_end) * 1000 <= region[1])):
                break

            # skip empty bins
            if bin_start_idx == bin_end_idx:
                # maybe we were supposed to produce output even if
                # there's no data in the bin
                if not self.skip_empty:
                    t = dtutil.dt2ts(bin_start) * 1000
                    output = extend(output, [np.array([[t, np.nan]])])

                bin_start += self.bin_slide
                continue

            if (bin_end_idx < len(prev_datetimes) and self.comparator(
                    bin_start, prev_datetimes[bin_end_idx])):
                take_end = bin_end_idx + 1
            else:
                take_end = bin_end_idx

            opdata = op([prev[bin_start_idx:take_end, :]])

            # snap the times to the beginning of the window, if asked to.
            # doing it here avoids passing datetimes around and covers the
            # common case where this is what you want.
            if self.snap_times:
                t = dtutil.dt2ts(bin_start)
                for j in xrange(0, len(opdata)):
                    opdata[j][:, 0] = t * 1000
            output = extend(output, opdata)

            bin_start += self.bin_slide

        toc = time.time()
        #         print("dt processing took %0.05f: %i/%i converted" %  \
        #                   (toc-tic,
        #                    prev_datetimes.conversions,
        #                    len(prev_datetimes)))

        prev_datetimes.truncate(truncate_to)
        prev = prev[truncate_to:]

        return output, {
            'prev': prev,
            'prev_datetimes': prev_datetimes,
        }
Example #10
def format(t):
    # t is a millisecond timestamp; render it in the target timezone
    return fmt(dtutil.ts2dt(t / 1000), tz)
Example #11
"""
test script

"""

import glob
import os
from smap.contrib import dtutil

dir = "/Users/hdz_1989/Downloads/SDB/Todai/"
list = glob.glob(dir + '/*.dat')
f = open(dir + 'sample/' + 'ts_checking.txt', 'w')

for i in list:
	fp = open(i, 'r')
	j = 0
	while j<3:
		rd = fp.readline()
		ts = float(rd.strip('\n').split()[0])
		time = dtutil.strftime_tz(dtutil.ts2dt(ts), "%m-%d-%Y %H:%M:%S")
		f.write("%s, " %time)
		j += 1
	rd = fp.readline()
	ts = float(rd.strip('\n').split()[0])
	time = dtutil.strftime_tz(dtutil.ts2dt(ts), "%m-%d-%Y %H:%M:%S")
	f.write("%s\n" %time)
	fp.close()

f.close()
Example #12
class CaIsoPrice(SmapDriver):

    MARKETS = [('DAM', 30 * 60, 'Day-ahead market'),
               ('HASP', 10 * 60, 'Hour-ahead scheduling process'),
               ('RTM', 2 * 60, 'Real-time market')]
    FEEDS = [('total_price', '$', 'total price'), ('loss', '$', 'loss price'),
             ('congestion', '$', 'congestion price'),
             ('energy', '$', 'energy price')]

    def setup(self, opts):
        self.last_reading = {}
        for m, t, d in self.MARKETS:
            self.last_reading[m] = 0

        # get our location
        self.location = opts.get('Location', 'OAKLAND_1_N001')
        self.set_metadata(
            '/', {
                'Location/Uri': 'http://oasis.caiso.com/mrtu-oasis/SingleZip',
                'Extra/IsoNode': self.location,
                'Extra/Driver': 'smap.drivers.caiso_price.CaIsoPrice'
            })

        # add the feeds
        for (m, i, md) in self.MARKETS:
            for (f, u, fd) in self.FEEDS:
                path = '/%s/%s' % (m, f)
                self.add_timeseries(path,
                                    u,
                                    data_type='double',
                                    description=md + ' ' + fd)

    def start(self):
        for (market, interval, description) in self.MARKETS:
            periodicSequentialCall(self.poll_stream, market,
                                   False).start(interval)

    def get_readings(self, market, start_date, stop_date):
        readings = {
            'total_price': [],
            'loss': [],
            'energy': [],
            'congestion': []
        }
        print "get_readings", market
        if market == 'DAM':
            q = 'PRC_LMP'
            m = 'DAM'
        elif market == 'HASP':
            q = 'PRC_HASP_LMP'
            m = 'HASP'
        elif market == 'RTM':
            q = 'PRC_INTVL_LMP'
            m = 'RTM'
        else:
            raise Exception("Invalid market: " + market)

        url = 'http://oasis.caiso.com/mrtu-oasis/SingleZip?'
        url += 'queryname=' + q
        url += '&startdate=' + dtutil.strftime_tz(start_date, '%Y%m%d',
                                                  'US/Pacific')
        url += '&enddate=' + dtutil.strftime_tz(stop_date, '%Y%m%d',
                                                'US/Pacific')
        url += '&market_run_id=' + m
        url += '&node=' + self.location

        logging.info("Get url %s" % url)
        h = None
        for d in [5, 20, 60]:
            try:
                h = urllib2.urlopen(url, timeout=50)
                break
            except urllib2.URLError:
                logging.warn("urlopen failed.")
            time.sleep(d)
        if h is None:
            raise Exception("Failed to open url: %s" % url)

        z = zipfile.ZipFile(StringIO.StringIO(h.read()))
        xml = z.read(z.namelist()[0])
        b = BeautifulSoup.BeautifulSoup(xml)

        sec_per_int = int(b.find('m:sec_per_interval').contents[0])

        rows = b.findAll('m:report_data')
        for d in rows:
            res = d.find('m:resource_name').contents[0]
            item = d.find('m:data_item').contents[0]
            day = d.find('m:opr_date').contents[0]
            inter = int(d.find('m:interval_num').contents[0])
            val = float(d.find('m:value').contents[0])

            secs = (inter - 1) * sec_per_int
            dt = dtutil.strptime_tz(
                day, '%Y-%m-%d',
                'US/Pacific') + datetime.timedelta(seconds=secs)
            timestamp = dtutil.dt2ts(dt)

            key = None
            if item == 'LMP_PRC':
                key = 'total_price'
            elif item == 'LMP_LOSS_PRC':
                key = 'loss'
            elif item == 'LMP_ENE_PRC':
                key = 'energy'
            elif item == 'LMP_CONG_PRC':
                key = 'congestion'
            else:
                continue

            readings[key].append((timestamp, val))

        num_readings = len(readings[readings.keys()[0]])
        for k in readings.keys():
            if len(readings[k]) != num_readings:
                raise Exception('Missing readings')

            readings[k] = sorted(readings[k], key=lambda (t, v): t)

        return readings

    def poll_stream(self, market, load_old):
        def _push_data(readings, market):
            # Zip together the values for all keys
            for vals in zip(*readings.values()):
                if vals[0][0] > self.last_reading[market]:
                    # Add all smap points for this time
                    for (k, v) in zip(readings.keys(), vals):
                        logging.debug("add /%s/%s: %s" % (market, k, str(v)))
                        self.add('/%s/%s' % (market, k), *v)
                        self.last_reading[market] = vals[0][0]

        # Load old data
        if load_old:
            for day in range(1, 2):
                stop = dtutil.now() - datetime.timedelta(days=day)
                start = stop - datetime.timedelta(days=2)
                try:
                    readings = self.get_readings(market, start, stop)
                    _push_data(readings, market)
                except Exception, e:
                    logging.exception('Error getting reading')

        # Continuously get new data
        try:
            stop = dtutil.now()
            start = stop - datetime.timedelta(days=1)

            readings = self.get_readings(market, start, stop)

            rt = readings['total_price'][-1][0]

            if rt > self.last_reading[market]:
                logging.info("NEW %s READING (%s) at time %s" %
                             (market,
                              dtutil.strftime_tz(dtutil.ts2dt(rt),
                                                 '%m/%d %H:%M', 'US/Pacific'),
                              dtutil.strftime_tz(dtutil.now(), '%m/%d %H:%M',
                                                 'US/Pacific')))
                _push_data(readings, market)
                # self.last_reading = rt

        except Exception, e:
            logging.exception('Error getting reading')
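get_readings downloads the OASIS zip with a small retry ladder: up to three attempts with growing sleeps, then give up. That pattern isolated into a helper, as a sketch (Python 2 urllib2, to match the driver above):

import logging
import time
import urllib2

def fetch_with_retries(url, delays=(5, 20, 60), timeout=50):
    for d in delays:
        try:
            return urllib2.urlopen(url, timeout=timeout)
        except urllib2.URLError:
            logging.warn("urlopen failed; sleeping %ds", d)
        time.sleep(d)
    raise Exception("Failed to open url: %s" % url)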
Example #13
File: ts.py Project: tarunsmalviya/smap
    def _base_operator(self, vec):
        # pair each timestamp (in seconds) with its value, localized to self.tz
        return zip(
            map(lambda x: dtutil.ts2dt(x).astimezone(self.tz),
                map(int, vec[:, 0].astype(np.int))), vec[:, 1])
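A short usage note for the operator above: calling astimezone requires dtutil.ts2dt to return an aware (UTC) datetime, and self.tz to be a tzinfo object such as dtutil.gettz returns. A hypothetical sketch of the per-element conversion:

from smap.contrib import dtutil

tz = dtutil.gettz('America/Los_Angeles')
local = dtutil.ts2dt(1500000000).astimezone(tz)  # aware datetime in the target zone
print(local.strftime('%Y-%m-%d %H:%M %Z'))       # 2017-07-13 19:40 PDT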