def find_device(id_str, create_new=False, device_type=None, source=None, devdef=None, dev=None):
    if not devicedb.connected():
        devicedb.connect()

    if id_str in device_cache and time.time() < device_cache[id_str].birth + CACHE_TIMEOUT:
        # cache hit: reuse the record fetched within the last CACHE_TIMEOUT seconds
        dev = device_cache[id_str]
    else:
        if not dev:
            devfind = devicedb.get_devices(where="idstr='%s'"%id_str, limit=1)
            if len(devfind) == 0:
                if CREATE_NEW_DEVICE and create_new:
                    # unknown device: register it, using the defaults unless overridden
                    dev = devicedb.new_device()
                    dev.IDstr = id_str
                    dev.device_type = device_type if device_type else DEFAULT_NEW_DEVICE
                    dev.source_name = source if source else DEFAULT_NEW_SOURCE
                    # generate some random places to dump data
                    dev.source_ids = gen_source_ids(dev, devdef)
                    devicedb.insert_device(dev)
                else:
                    print "Error publishing data : %s is not in devicedb"%id_str
                    return None
            else:
                dev = devfind[0]
        # cache miss: stamp the record and cache it for subsequent lookups
        dev.birth = time.time()
        device_cache[id_str] = dev

    if create_new:
        check_source_ids(dev)
    return dev
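# --- Usage sketch (illustrative; not part of the original module) -----------
# 'example_meter' is a hypothetical device id string.  find_device() returns
# None when the device is unknown and creation is disabled; otherwise it
# returns a device record that is then served from device_cache for
# CACHE_TIMEOUT seconds.
def _example_find_device():
    dev = find_device('example_meter', create_new=True,
                      device_type=DEFAULT_NEW_DEVICE, source=DEFAULT_NEW_SOURCE)
    if dev is None:
        print "example_meter is not in devicedb and could not be created"
    else:
        # a second lookup inside CACHE_TIMEOUT is answered from device_cache
        cached = find_device('example_meter')
        print "cache hit: %s"%(cached is dev)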
def do_flowbuilder(req, environ, start_response):
    import config
    import devicedb
    import postgresops
    devicedb.connect()

    env = Environment(loader=FileSystemLoader(environ['DOCUMENT_ROOT']))
    if req == '/flowbuilder' or req == '/flowbuilder/':
        req = '/flowbuilder/index'

    if '?' in req:
        tempname = req[0:req.find('?')]
    else:
        tempname = req
    pagename = tempname[tempname.rfind("/")+1:]

    try:
        template = env.get_template(tempname+'.html')
    except TemplateNotFound:
        start_response('404 Not Found', [])
        return []

    start_response('200 OK', [('Content-Type', 'text/html')])
    d = cgi.parse_qs(environ['QUERY_STRING'])
    return [str(template.render())]
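# --- Usage sketch (illustrative): hosting do_flowbuilder() under wsgiref. ----
# The handler takes the request path (e.g. '/flowbuilder/index'), the WSGI
# environ (DOCUMENT_ROOT must point at the Jinja2 template root) and the WSGI
# start_response callable.  The server wiring and the '/var/www' template root
# below are assumptions, not taken from the original code.
def _example_serve_flowbuilder():
    from wsgiref.simple_server import make_server

    def app(environ, start_response):
        environ.setdefault('DOCUMENT_ROOT', '/var/www')  # hypothetical template root
        return do_flowbuilder(environ.get('PATH_INFO', '/flowbuilder'), environ, start_response)

    make_server('', 8080, app).serve_forever()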
print "Hidden Markov Model w/ Gaussian Emission Parameters" ids = learningdb.getHMMGaussianEmissionIDs() for id in ids: met = devicedb.get_devicemetas(where="id=%d"%id, limit=1) plname = met[0].value entries = learningdb.getHMMGaussianEmissions(id); gparm = learningdb.computeGlobalHMMGaussianParameters(entries); print "\tPlugload \"%s\" Total Learning Sets: %d"%(plname,len(entries)) for g in gparm: print "\t\tState %-3d: N=%-7d mean=%20.10e variance=%20.10e"%(g.state_id,g.counts,g.mean,g.variance) print "\n"; elif (op == 'insert'): import devicedb,postgresops devicedb.connect(); fromtime = None totime = None dtype = "GHMM" ret,vals,sys.argv = utils.check_arg(sys.argv,'--from',1) if ret: try: fromtime = utils.str_to_date(vals[0]) except ValueError, msg: print "Bad from time: "+str(msg) ret,vals,sys.argv = utils.check_arg(sys.argv,'--to',1) if ret: try: totime = utils.str_to_date(vals[0])
def lookupstuff():
    global lookedstuffup, CODE_LINES
    if lookedstuffup:
        return

    import config
    import utils
    import devicedb
    import postgresops
    devicedb.connect()

    for code, dat in CODES.iteritems():
        if dat[0] == "P":
            dev = devicedb.get_devices(where="idstr='%s'"%dat[1], limit=1)
            if len(dev) != 1:
                CODE_LINES[code] = lambda: "ERROR: Can't find device"
                return
            dev = dev[0]

            # get plug load names
            metas = devicedb.get_devicemetas(where="key like 'PLUGLOAD%d' and %d=any(devices)"%(dat[2], dev.ID), limit=1)
            if len(metas) != 1 or metas[0].parent == 0:
                loadstr = "UNKNOWN"
            else:
                meta = metas[0]
                if meta.parent == 1:
                    loads = [int(s) for s in meta.value.split(',')]
                else:
                    loads = [meta.parent]
                loadstr = ""
                for lid in loads:
                    loadnamemeta = devicedb.get_devicemetas(where="id=%d"%lid, limit=1)[0]
                    loadstr += loadnamemeta.value
                    if lid != loads[-1]:
                        loadstr += " + "

            # get uuid of power and current
            devdef = utils.read_device(dev.device_type)
            poweruuid = None
            currentuuid = None
            for feednameidx in range(len(dev.feed_names)):
                feedname = dev.feed_names[feednameidx]
                if 'power %d'%(dat[2]+1) in feedname.lower():
                    poweruuid = dev.source_ids[feednameidx]
                elif 'current %d'%(dat[2]+1) in feedname.lower():
                    currentuuid = dev.source_ids[feednameidx]

            CODE_LINES[code] = lambda loadstr=loadstr, poweruuid=poweruuid, currentuuid=currentuuid, dat=dat: (
                "http://sensezilla.berkeley.edu:7500/showliveplot?title=%s&source=sensezilla&sourceid=%s"%(
                    urllib.quote("Wattage of Device(s): %s (Instrument:%s Port:%d)"%(loadstr, dat[1], dat[2]+1)), poweruuid)
                + ";Device(s): " + loadstr + "\n"
                + "Wattage: %.2f W"%(get_smap_data(poweruuid)) + "\n"
                + "Amperage: %.2f A"%(get_smap_data(currentuuid)))
        elif dat[0] == "R":
            devs = {}
            for t, v in dat[1].iteritems():
                dev = devicedb.get_devices(where="idstr='%s'"%v, limit=1)
                if len(dev) != 1:
                    CODE_LINES[code] = lambda: "ERROR: Can't find device %s"%v
                    return
                devs[t] = dev[0]

            occuuid = devs['occupancy'].source_ids[2]
            humiduuid = devs['humidity'].source_ids[0]
            tempuuid = devs['temperature'].source_ids[1]
            lightuuid = devs['light'].source_ids[3]

            CODE_LINES[code] = lambda occuuid=occuuid, humiduuid=humiduuid, tempuuid=tempuuid, lightuuid=lightuuid: (
                "http://sensezilla.berkeley.edu:7500/showliveplot?title=%s&source=sensezilla&sourceid=%s"%(
                    urllib.quote("Occupancy of room"), occuuid)
                + ";" + "Occupancy: %.1f %%"%(get_smap_data(occuuid)) + "\n"
                + "Humidity: %.1f %%RH"%(get_smap_data(humiduuid)) + "\n"
                + "Temperature: %.1f C"%(get_smap_data(tempuuid)) + "\n"
                + "Light Level: %.1f ulux"%(1.0e6*get_smap_data(lightuuid)))
        else:
            CODE_LINES[code] = lambda: "NOT IMPLEMENTED"

    lookedstuffup = True
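# --- Usage sketch (illustrative; not part of the original module): after
# lookupstuff() runs, CODE_LINES maps each code in CODES to a zero-argument
# callable that builds the display line (plot URL plus live readings) on
# demand.  The loop below simply renders every line once.
def _example_render_code_lines():
    lookupstuff()
    for code in sorted(CODE_LINES.keys()):
        print "%s -> %s"%(code, CODE_LINES[code]())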
def publish_data(id_str, time, data, feednum=None, devdef=None, device_type=None, source=None, dev=None):
    # Usage 1: time and data are scalars - one data point, feed # = feednum, or 0 if feednum=None
    # Usage 2: time and data are lists of scalars (time may also be a scalar) - one data point per feed,
    #          feed #s = feednum (list) or range(total feeds) if feednum=None
    # Usage 3: time and data are lists of scalars, feednum is a scalar - multiple data points for one feed
    # Usage 4: time and data are lists of lists of scalars (time may also be a list of scalars) - multiple
    #          data points per feed, feed #s = feednum (list) or range(total feeds) if feednum=None
    if not isinstance(data, list):
        # Usage 1
        data = [data]
        if feednum == None:
            feednum = [0]
        else:
            feednum = [feednum]
    else:
        # Usage 2,3,4
        if feednum == None:  # Usage 2,4
            feednum = range(len(data))
        elif not isinstance(feednum, list):  # Usage 3
            feednum = [feednum]
            time = [time]
            data = [data]
    if not isinstance(time, list) or (not isinstance(time[0], list) and isinstance(data[0], list)):
        # Usage 1,2,4
        time = [time]*len(feednum)

    if not devicedb.connected():
        devicedb.connect()

    id_str = id_str.replace('/', '_')
    postgresops.check_evil(id_str)

    dev = find_device(id_str, create_new=True, device_type=device_type, source=source, devdef=devdef, dev=dev)
    if dev == None:
        return

    source_struct = utils.read_source(dev.source_name)
    if devdef == None:
        devdef = utils.read_device(dev.device_type)
    driver = source_struct['driver']

    for i in range(len(feednum)):
        if feednum[i] >= len(dev.feed_names):
            print "ERROR cannot publish data for feed %d because it is not defined in the definition for %s"%(feednum[i], dev.device_type)
        elif feednum[i] >= len(dev.source_ids) or dev.source_ids[feednum[i]] == None or dev.source_ids[feednum[i]] == '':
            print "ERROR cannot publish data for feed %d of device %s because it is not defined"%(feednum[i], dev.IDstr)
        else:
            source_id = dev.source_ids[feednum[i]]
            if driver == 'SMAP':
                publish_smap(dev.source_name, source_struct, dev, devdef, feednum[i], source_id, time[i], data[i])
            elif driver == 'CSV':
                fname = source_id
                if fname[0] != '/':
                    fname = source_struct['path'] + '/' + fname
                try:
                    parentdir = fname[:fname.rfind('/')]
                    try:
                        os.makedirs(parentdir)
                    except:
                        pass
                    csvfile = open(fname, "ab")
                    #print "\t", time[i], data[i]
                    if isinstance(time[i], list):
                        for j in range(len(time[i])):
                            csvfile.write("%.12f,%.12f\n"%(time[i][j], data[i][j]))
                    else:
                        csvfile.write("%.12f,%.12f\n"%(time[i], data[i]))
                    csvfile.close()
                except OSError, e:
                    print "ERROR Cannot publish data to %s because "%(fname), e
            else:
                print "ERROR Cannot publish data for %s b/c no %s driver"%(dev.source_name, driver)
    return []
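# --- Usage sketch (illustrative; not part of the original module): the four
# calling conventions documented in publish_data().  'example_meter' is a
# hypothetical device id; the device is created on the fly because
# find_device() is called with create_new=True.
def _example_publish_data():
    now = time.time()
    publish_data('example_meter', now, 120.5)                                  # Usage 1: one point, feed 0
    publish_data('example_meter', now, [120.5, 1.02])                          # Usage 2: one point each on feeds 0 and 1
    publish_data('example_meter', [now, now + 60], [120.5, 119.8], feednum=0)  # Usage 3: two points, feed 0
    publish_data('example_meter', [[now, now + 60], [now, now + 60]],
                 [[120.5, 119.8], [1.02, 1.01]])                               # Usage 4: two points per feed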
def run(self, time_from, time_to, source_name=None, source_id=None, pretend=False, use_cache=True, local=False, params=[]):
    if source_id == None and local:
        print "Error: Can only run 'local' test on one stream ( source name & id pair ) at a time"
        sys.exit(1)

    if source_name == None:
        names = utils.list_sources()
        for name in names:
            ids = utils.list_ids(name)
            for id in ids:
                self.run(time_from, time_to, name, id, pretend, use_cache, local)
        return
    elif source_id == None:
        ids = utils.list_ids(source_name)
        for id in ids:
            self.run(time_from, time_to, source_name, id, pretend, use_cache, local)
        return

    smap = utils.read_source(source_name)

    if not local and use_cache:
        # find any cached files
        import filedb
        filedb.connect()
        for file in self.files:
            stepchain = file.stepchain
            cfiles = filedb.get_files(where='time_from=%s and time_to=%s and source_name=%s and source_id=%s and steps=%s',
                                      params=(time_from, time_to, source_name, source_id, stepchain))
            for cfile in cfiles:
                if cfile.status == filedb.INVALID or os.path.exists(cfile.file_name):
                    file.cached = True
                    file.file_id = cfile.id
                    file.fname = cfile.file_name
                    file.deptask = cfile.task_id
                    file.stepchain = stepchain
                    print "Found cached file for output of "+file.src.name
                    break

    # prune any tasks that don't need to be run
    for step in self.steps[:]:
        if len(step.outputs) == 0:
            print "Step %s will be run b/c it has no outputs"%step.name
            continue
        canbepruned = True
        for f in step.outputs:
            if not f.cached:
                canbepruned = False
                break
        if canbepruned:
            print "Pruning step %s because the cache can supply all outputs"%step.name
            self.steps.remove(step)
        else:
            for f in step.outputs:
                if f.cached:
                    f.cached = False
                    print "Cached file %s.O%d will be regenerated b/c not all outputs were cached"%(step.name, f.index)

    # create all the files we'll need
    if local:
        dir = ''
    else:
        if self.use_tmp:
            dir = '/tmp/sensezilla/flowdata/'+self.name
        else:
            dir = config.map['global']['data_dir']+'/flowdata/'+self.name
        if not pretend:
            try:
                os.makedirs(dir+'/outputs', 0755)
            except:
                pass

    for file in self.files:
        if not file.cached:
            if local:
                file.fname = dir+'testing_%s_%s.O%d'%(self.name, file.src.name, file.index)
                if not pretend:
                    if file.directory:
                        try:
                            os.mkdir(file.fname)
                        except:
                            pass
                    else:
                        fout = open(file.fname, 'w')
                        fout.close()
            else:
                if 'OUTPUT' not in [v[0] for v in file.dests]:
                    if not pretend:
                        if file.directory:
                            file.fname = tempfile.mkdtemp(dir=dir)
                        else:
                            tfile = tempfile.NamedTemporaryFile('w', dir=dir, delete=False)
                            file.fname = tfile.name
                            tfile.close()
                    else:
                        file.fname = os.tempnam(dir)
                else:
                    file.fname = dir+'/outputs/%s.O%d_%s_%s_%d_to_%d'%(file.src.name, file.index, source_name,
                                 source_id.replace('/', '.'), utils.date_to_unix(time_from), utils.date_to_unix(time_to))
                    if file.directory:
                        if not pretend:
                            os.mkdir(file.fname)
                    else:
                        if not pretend:
                            fout = open(file.fname, 'w')
                            fout.close()
            if file.directory:
                print "Created directory : "+file.fname
            else:
                print "Created file : "+file.fname

    # generate dictionary of substitutions
    subs = {
        'TIME_FROM': int(utils.date_to_unix(time_from)),
        'TIME_TO': int(utils.date_to_unix(time_to)),
        'SOURCE': source_name,
        'ID': source_id,
    }
    subs.update(params)
    try:
        import devicedb
        devicedb.connect()
        plmeta, dev, pl_index = devicedb.find_plugload(source_name, source_id)
        subs['PLUGLOAD'] = plmeta.value
        subs['DEVID'] = dev.ID
        subs['DEVIDSTR'] = dev.IDstr
    except Exception, e:
        print "Cannot contact devicedb "+str(e)
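# --- Usage sketch (illustrative; not part of the original module): run() fans
# out over every source/id pair when source_name and source_id are omitted,
# and over every id of one source when only source_name is given.  'flow', the
# source name and the id below are assumptions about the surrounding code.
def _example_run(flow):
    import datetime
    time_from = datetime.datetime(2012, 6, 1)
    time_to = datetime.datetime(2012, 6, 2)
    # dry run over every known stream, reusing any cached intermediate files
    flow.run(time_from, time_to, pretend=True)
    # real run of a single stream, bypassing the file cache
    flow.run(time_from, time_to, source_name='sensezilla',
             source_id='example_meter/power', use_cache=False)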