def get(self, names, t1=None, t2=None, step=None, scale=None, debug=False,
        types=(float, float), method='DS', verbose=True):
    """Query the CERN measurement database and return QueryData

    names: names of the variables in the database, comma separated or list
    t1,t2: start and end time as string in the %Y-%m-%d %H:%M:%S.SSS
           format or unix time
    step:  for multiple-file requests, '<n> <size>'
    scale: for the scaling algorithm, '<n> <size> <alg>'
    types: types used to convert timestamps and data. If None, no
           concatenation is performed

    where:
      <n>    is an integer number
      <size> is one of SECOND, MINUTE, HOUR, DAY, WEEK, MONTH, YEAR
      <alg>  is one of AVG, MIN, MAX, REPEAT, INTERPOLATE, SUM, COUNT
    """
    if t2 is None:
        t2 = time.time()
    if t1 is None:
        # default to the last second and force the 'LD' method
        t1 = t2 - 1
        print t1, t2
        method = 'LD'
    t1 = dumpdate(parsedate(t1))
    t2 = dumpdate(parsedate(t2))
    names = self._parsenames(names)
    if verbose:
        print "CernLogDB: querying\n %s" % '\n '.join(names)
        print "CernLogDB: '%s' <--> '%s'" % (t1, t2)
        print "CernLogDB: options %s %s" % (step, scale)
    # query the CERN measurement database
    res = dbget(names, t1, t2, step=step, scale=scale,
                exe=self.exe_path, conf=None,
                client_name=self.client_name,
                app_name=self.app_name,
                datasource=self.datasource,
                timezone=self.timezone,
                method=method,
                types=types, debug=debug)
    log = '\n'.join(res['log'])
    if debug:
        print log
    if method == 'LD':
        res = parse_ld(log, types)
    data = {}
    bad = []
    for k in names:
        if k in res:
            data[k] = res[k]
        else:
            bad.append(k)
    if len(bad) > 0:
        print log
        raise IOError, "CernLogDB %s not retrieved" % ','.join(bad)
    # store the result in a more convenient data structure
    dq = DataQuery(self, names, parsedate(t1), parsedate(t2), data,
                   step=step, scale=scale)
    if method == 'LD':
        dq.trim()
    return dq
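
# Usage sketch (not from the original source): assumes `db` is an already
# configured CernLogDB instance and the variable name is purely illustrative.
# The scale string follows the '<n> <size> <alg>' format from the docstring.
dq = db.get('MY.TEST:SIGNAL',
            t1='2012-05-01 10:00:00.000',
            t2='2012-05-01 12:00:00.000',
            scale='10 SECOND AVG')          # average into 10-second bins
t, v = dq.data['MY.TEST:SIGNAL']            # arrays of timestamps and values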
def get(self, names, t1, t2):
    """Query the local database and return QueryData

    names: names of the tables, comma separated or list
    t1,t2: start and end time as string in the %Y-%m-%d %H:%M:%S.SSS
           format or unix time
    """
    t1 = parsedate(t1)
    t2 = parsedate(t2)
    #t1=rdts(mkts(t1),tz=False,millisec=True,tsep='T')
    #t2=rdts(mkts(t2),tz=False,millisec=True,tsep='T')
    names = self._parsenames(names)
    data = {}
    for name in names:
        db = self.tables[name]
        data[name] = db.get(t1, t2)
    dq = DataQuery(self, names, t1, t2, data)
    return dq
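
# Usage sketch (hypothetical names): `ldb` is a local-database instance with
# a table called 'MY.TEST:SIGNAL'.  Either time format from the docstring is
# accepted, since both go through parsedate().
dq = ldb.get('MY.TEST:SIGNAL',
             t1='2012-05-01 10:00:00.000',  # formatted string ...
             t2=time.time())                # ... or unix time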
def extend(self, before=None, after=None, absolute=False, eps=1e-6):
    """Extend the dataset by <before> seconds and <after> seconds"""
    if after is not None:
        if type(after) is str or absolute is True:
            after = parsedate(after) - self.t2
        if after < 0:
            self.t2 += after
            for name in self.names:
                idx, val = self.data[name]
                mask = idx < (self.t2)
                self.data[name] = idx[mask], val[mask]
        else:
            dq = self.source.get(self.names, self.t2, self.t2 + after,
                                 **self.options)
            self.t2 += after
            for name in self.names:
                idx, val = self.data[name]
                nidx, nval = dq.data[name]
                ridx = np.concatenate([idx, nidx], axis=0)
                rval = np.concatenate([val, nval], axis=0)
                self.data[name] = ridx, rval
    if before is not None:
        if type(before) is str or absolute is True:
            before = self.t1 - parsedate(before)
        if before < 0:
            self.t1 -= before
            for name in self.names:
                idx, val = self.data[name]
                mask = idx > (self.t1)
                self.data[name] = idx[mask], val[mask]
        else:
            dq = self.source.get(self.names, self.t1 - before, self.t1 - eps,
                                 **self.options)
            self.t1 -= before
            for name in self.names:
                idx, val = self.data[name]
                nidx, nval = dq.data[name]
                ridx = np.concatenate([nidx, idx], axis=0)
                rval = np.concatenate([nval, val], axis=0)
                self.data[name] = ridx, rval
    self._emptycache()
    return self
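
# Usage sketch (hypothetical): `dq` is a DataQuery returned by one of the
# get() methods above.  Positive relative values fetch extra data from the
# source, negative values trim, and absolute=True (or a date string) treats
# the argument as a timestamp instead of an offset.
dq.extend(before=300, after=300)            # grow by 5 minutes on each side
dq.extend(after=-60)                        # trim the last minute
dq.extend(after='2012-05-01 13:00:00.000')  # extend up to an absolute time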
def parse_ld(s, types=(float, float)):
    data = {}
    ttype, vtype = types
    for line in s.split('\n'):
        if line.startswith('Variable:'):
            name = line.strip().split('Variable: ')[1]
            #t,v=np.zeros(1,dtype=float),np.zeros(1,dtype=float)
            t, v = [], []
            data[name] = [t, v]
        elif 'Timestamp' in line and 'Value' in line:
            no, ts, val = line.split(': ')
            ts = parsedate(ts.split('"')[1])
            val = val.strip()
            if val.startswith('{'):
                val = val[1:-1].split(',')
            t.append(ts)
            v.append(val)
    for name, (t, v) in data.items():
        t = np.array(t, dtype=ttype)
        v = np.array(v, dtype=vtype)
        data[name] = [t, v]
    return data
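
# Example input (synthetic): the text below is shaped only to satisfy the
# assumptions parse_ld makes above (a 'Variable:' header per signal, then
# rows whose split on ': ' gives a row label, a double-quoted timestamp and
# a value); the real command-line output may be formatted differently.
sample = '\n'.join([
    'Variable: MY.TEST:SIGNAL',
    'Row 1: Timestamp "2012-05-01 10:00:00.000" Value: 1.5',
    'Row 2: Timestamp "2012-05-01 10:00:01.000" Value: 2.5',
])
t, v = parse_ld(sample)['MY.TEST:SIGNAL']   # two float arrays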
def set_xlim_date(xa, xb):
    pl.xlim(parsedate(xa), parsedate(xb))
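
# Usage sketch: assumes the curve was plotted against the raw timestamps
# returned by the queries (the same scale parsedate produces).
pl.plot(t, v)
set_xlim_date('2012-05-01 10:30:00.000', '2012-05-01 11:00:00.000')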