def repdiff(ref, data):
    # Compare the *I_REF variables of a reference dataset against another
    # dataset and print the value difference together with both timestamps.
    for vs in ref['datavars']:
        if vs.endswith('I_REF'):
            rt, rv = ref[vs]
            dt, dv = data[vs]
            rv = rv[0]
            dv = dv[0]
            if rv != dv:
                t1 = rdmdate.dumpdate(rt[0])
                t2 = rdmdate.dumpdate(dt[0])
                print '%-30s %10g %10g %10s %10s' % (vs, rv, rv - dv, t1, t2)
    print
def mkcmd(vs, t1, t2, step=None, scale=None, exe='cern-logdb', conf='ldb.conf',
          filename=None, format=None):
    """Produce a string for the CERN logging database command line tool.

    Usage:
      getdata("SPS.BCTDC.31832:INT","2010-06-10 00:00:00","2010-06-10 23:59:59",step='20 SECOND')

    Arguments:
      vs:       name of the variable in the database
      t1,t2:    start and end time as string in the %Y-%m-%d %H:%M:%S.SSS format
                or unix time
      step:     for multiple file requests '<n> <size>'
      scale:    for the scaling algorithm '<n> <size> <alg>'
      format:   output file format: CSV, XLS, TSV, MATHEMATICA
      filename: string for the output name

    where:
      <n>    is an integer number
      <size> is one of SECOND, MINUTE, HOUR, DAY, WEEK, MONTH, YEAR
      <alg>  is one of AVG, MIN, MAX, REPEAT, INTERPOLATE, SUM, COUNT
    """
    if type(t1) in [float, int]:
        if t1 < 0:
            t1 += _t.time()
        t1 = _date.dumpdate(t1)
    if type(t2) in [float, int]:
        if t2 < 0:
            t2 += _t.time()
        t2 = _date.dumpdate(t2)
    cmd = '%s -vs "%s" -t1 "%s" -t2 "%s"' % (exe, vs, t1, t2)
    if conf:
        cmd += ' -C %s' % conf
    if scale:
        n, size, alg = scale.split()
        cmd += ' -sa "%s" -ss "%s" -si "%s"' % (alg, n, size)
    if step:
        ni, it = step.split()
        cmd += ' -IT "%s" -NI "%s"' % (it, ni)
    if filename:
        cmd += ' -N "%s"' % filename
    if format:
        cmd += ' -F "%s"' % format
    return cmd
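# Illustrative sketch (not part of the original module): the command string
# that mkcmd() above builds for the variable quoted in its docstring, asking
# for hourly output files.  The expected string follows directly from the code.
#
#   cmd = mkcmd("SPS.BCTDC.31832:INT",
#               "2010-06-10 00:00:00", "2010-06-10 23:59:59",
#               step='1 HOUR')
#   # cmd == ('cern-logdb -vs "SPS.BCTDC.31832:INT" -t1 "2010-06-10 00:00:00"'
#   #         ' -t2 "2010-06-10 23:59:59" -C ldb.conf -IT "HOUR" -NI "1"')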
def find_idx(self, date):
    # Return the last index entry recorded strictly before `date`.
    index = self.index[:]
    timestamp = index['timestamp']
    idx = where(timestamp >= rdmdate.parsedate(date))[0][0] - 1
    ts = rdmdate.dumpdate(timestamp[idx])
    print "Found dataset %s" % ts
    return index[idx]
def generate_filenames(mask, t1, t2, step):
    """Generate filenames as done by the CERN DB command line tool.

    Arguments:
      mask:  string
      step:  for multiple file requests '<n> <size>'
      t1,t2: start and end time as string in the %Y-%m-%d %H:%M:%S.SSS format
             or unix time

    where:
      <n>    is an integer number
      <size> is one of SECOND, MINUTE, HOUR, DAY, WEEK, MONTH, YEAR
    """
    if type(t1) in [float, int]:
        t1 = _date.dumpdate(t1)
    if type(t2) in [float, int]:
        t2 = _date.dumpdate(t2)
    ni, it = step.split()
    # Length of each file interval in seconds.
    sstep = int(ni) * _interval[it]
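# A minimal sketch of the `_interval` mapping used above, which is referenced
# but not defined in this excerpt (assumption: interval keyword -> length in
# seconds, with approximate month/year lengths).
# _interval = {
#     'SECOND': 1, 'MINUTE': 60, 'HOUR': 3600, 'DAY': 86400,
#     'WEEK': 7 * 86400, 'MONTH': 30 * 86400, 'YEAR': 365 * 86400,
# }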
def dbget_repeat(vs, t1, t2, step, sa=None, sf=None, exe='cern-ldb'):
    # Retrieve data in chunks of `step` seconds and concatenate the results.
    if type(t1) is str:
        t1 = round(_date.parsedate_myl(t1), 0)
    if type(t2) is str:
        t2 = round(_date.parsedate_myl(t2), 0)
    data_final = {}
    data_final['log'] = []
    data_final['datavars'] = vs.split(',')
    for vname in data_final['datavars']:
        data_final[vname] = ([], [])
    for t in range(int(t1), int(t2), step):
        nt1 = _date.dumpdate(t)
        nt2 = _date.dumpdate(t + step - 0.001)
        print 'calling dbget from %s to %s' % (nt1, nt2)
        data = dbget(vs, nt1, nt2, sa=sa, sf=sf, exe=exe)
        for vname in data['datavars']:
            tt, vv = data[vname]
            data_final[vname][0].extend(tt)
            data_final[vname][1].extend(vv)
        data_final['log'].extend(data['log'])
    return data_final
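# Hedged usage sketch for dbget_repeat() (variable name and time window are
# illustrative only): fetch one day of data in hourly chunks and unpack the
# concatenated timestamp/value lists.
#
#   data = dbget_repeat('SPS.BCTDC.31832:INT',
#                       '2010-06-10 00:00:00', '2010-06-11 00:00:00',
#                       step=3600)
#   t, v = data['SPS.BCTDC.31832:INT']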
def mkcmd(vs, t1, t2, step=None, scale=None, exe=exe_path,
          filename=None, format=None, method='DS', conf=None,
          client_name='BEAM_PHYSICS',
          app_name='LHC_MD_ABP_ANALYSIS',
          datasource='LHCLOG_PRO_DEFAULT',
          timezone='LOCAL_TIME',
          file_dir='./',
          unix_time_output='TRUE',
          ):
    """Produce a string for the CERN logging database command line tool.

    Usage:
      getdata("SPS.BCTDC.31832:INT","2010-06-10 00:00:00","2010-06-10 23:59:59",step='20 SECOND')

    Arguments:
      vs:       name of the variable in the database
      t1,t2:    start and end time as string in the %Y-%m-%d %H:%M:%S.SSS format
                or unix time
      step:     for multiple file requests '<n> <size>'
      scale:    for the scaling algorithm '<n> <size> <alg>'
      format:   output file format: CSV, XLS, TSV, MATHEMATICA
      filename: string for the output name
      method:   DS (dataset), LD (last data), FD (fill data, not supported here)
      conf:     configuration file name; if None, the file is created from the
                options below
      client_name: the name of the client (must be defined together with the
                BE/CO/DM section)
      app_name: the name of the application (must be defined together with the
                BE/CO/DM section)
      datasource: where the data should be extracted from: '<dbname>'
      timezone: the time zone used for input and output data
      file_dir: the folder where output files will be written
      unix_time_output: timestamps are written in seconds since the epoch:
                'TRUE' or 'FALSE'

    where:
      <n>      is an integer number
      <size>   is one of SECOND, MINUTE, HOUR, DAY, WEEK, MONTH, YEAR
      <alg>    is one of AVG, MIN, MAX, REPEAT, INTERPOLATE, SUM, COUNT
      <dbname> is one of LHCLOG_PRO_DEFAULT, LHCLOG_TEST_DEFAULT,
               MEASDB_PRO_DEFAULT, MEASDB_DEV_DEFAULT, LHCLOG_PRO_ONLY,
               LHCLOG_TEST_ONLY, MEASDB_PRO_ONLY
      <tz>     is one of UTC_TIME, LOCAL_TIME
    """
    if not conf:
        # No configuration file given: write one from the options above.
        conf = 'ldb.conf'
        fh = open(conf, 'w')
        fh.write(conf_template % (client_name, app_name, datasource,
                                  timezone, file_dir, unix_time_output))
        fh.close()
    if type(t1) in [float, int]:
        if t1 < 0:
            t1 += _t.time()
        t1 = _date.dumpdate(t1)
    if type(t2) in [float, int]:
        if t2 < 0:
            t2 += _t.time()
        t2 = _date.dumpdate(t2)
    if hasattr(vs, '__iter__'):
        vs = ','.join(vs)
    cmd = '%s -vs "%s" -t1 "%s" -C %s' % (exe, vs, t1, conf)
    if t2:
        cmd += ' -t2 "%s"' % t2
    if scale:
        n, size, alg = scale.split()
        if alg in ['AVG', 'MIN', 'MAX', 'REPEAT', 'INTERPOLATE', 'SUM', 'COUNT']:
            cmd += ' -sa "%s" -ss "%s" -si "%s"' % (alg, n, size)
        else:
            raise ValueError('cernlogdb: %s alg not supported' % alg)
    if step:
        ni, it = step.split()
        if it in ['SECOND', 'MINUTE', 'HOUR', 'DAY', 'WEEK', 'MONTH', 'YEAR']:
            cmd += ' -IT "%s" -NI "%s"' % (it, ni)
        else:
            raise ValueError('cernlogdb: %s step not supported' % it)
    if filename:
        cmd += ' -N "%s"' % filename
    if format:
        cmd += ' -F "%s"' % format
    if method:
        cmd += ' -M "%s"' % method
    return cmd
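# Hedged usage sketch for the extended mkcmd() above (variable name, window
# and output name are illustrative only): one averaged value per 10 minutes,
# written as CSV.  The returned string is intended to be run through a shell
# (e.g. os.system() or subprocess); when no conf file is given, calling
# mkcmd() also (re)writes the 'ldb.conf' file passed via -C.
#
#   cmd = mkcmd('LHC.BCTDC.A6R4.B1:BEAM_INTENSITY',
#               '2011-05-01 00:00:00', '2011-05-01 06:00:00',
#               scale='10 MINUTE AVG', format='CSV', filename='intensity')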
def depick(p):
    print 'dateint=%s' % repr([rdmdate.dumpdate(ti, fmt='%Y-%m-%d %H:%M:%S')
                               for ti, vi in p.data])
def mkdate(t):
    return rdmdate.dumpdate(t, fmt='%Y-%m-%d\n%H:%M:%S.SSS')