def check_date(icfg,options,conf,cfg,_value):
    """Validate a ``YYYY/M/DD`` date string.

    Returns ``None`` when *_value* is a well-formed date, otherwise a
    1-tuple holding one ``(error-code, context)`` pair for the check
    framework:

      * ``E:DATE:001`` - month out of range
      * ``E:DATE:002`` - day out of range
      * ``E:DATE:003`` - the string does not match the date pattern
    """
    def _err(code):
        # Standard error context: the section config plus the bad value.
        kw = udict(cfg)
        kw.update({'OFFENDING': _value})
        return (code, kw),
    r = re.match('^([0-9]{4})/([0-9]?[0-9])/([0-9][0-9])$', _value)
    if not r:
        return _err('E:DATE:003')
    year, month, day = r.groups()
    # BUG FIX: the original only rejected month>12 and day>31, so
    # dates like 2012/0/00 slipped through; enforce lower bounds too.
    if not 1 <= int(month) <= 12:
        return _err('E:DATE:001')
    if not 1 <= int(day) <= 31:
        return _err('E:DATE:002')
    return None
def report(self):
    """Return a snapshot of the accounting hash and reset it."""
    snapshot = udict()
    if _accounting:
        snapshot.update(_accounting)
        _accounting.clear()
    return snapshot
def getCommands(**kw):
    """ Get commands from sub-directory `cli/commands`

    :param subdir: subdir in `cli/commands` directory
    :type subdir: path
    :param klass: type of plugin (default: Command)
    :type klass: class
    """
    plugin_dir = 'cli/commands'
    if 'subdir' in kw:
        plugin_dir = join(plugin_dir, kw['subdir'].lower())
    plugin_class = kw['klass'] if 'klass' in kw else Command
    commands = object_set('e4t', plugin_dir, None, 'name')
    results = udict()
    for name, candidate in commands.items():
        # Keep only plugins of the requested class that are not
        # explicitly deactivated.
        if not issubclass(candidate, plugin_class):
            continue
        if hasattr(candidate, 'active') and not candidate.active:
            continue
        results[name] = copy.copy(candidate)
    LOGGER.debug('accepted commands in %s: %s', plugin_dir, ','.join(results.keys()))
    return results
def check_num(icfg,options,conf,cfg,_value):
    """Accept strings made of digits and dots; otherwise ``E:INT:001``.

    NOTE(review): the pattern also accepts forms such as ``1.2.3`` or
    ``...`` — confirm that is intended before tightening it.
    """
    if re.match('^[0-9.]+$', _value):
        return None
    context = udict(cfg)
    context.update({'OFFENDING': _value})
    return (('E:INT:001', context),)
def produce(self,f1):
    """Plot every configured series of this element onto axes *f1*.

    Each series key in ``self._series`` is looked up in ``self``; its
    ``DATA`` (timeseries) and ``SPEC`` (plot options) drive one
    ``plot_date`` call.  Returns the list of keys that could not be
    plotted.
    """
    # SPEC keys copied into the plot call, with the coercion applied
    # to the raw string value.
    _opts = { 'zorder' : str, 'linewidth' : float, 'color': str}
    args = {}
    unplotted = []
    if not self._series:
        return []
    for k in self._series:
        if not self.has_key(k):
            continue
        s = self[k]
        if s is None:
            # Known key but no data behind it.
            unplotted.append(k)
            continue
        v = s['DATA']
        p = udict(s['SPEC'])
        if not p:
            continue
        c = '-'    # matplotlib linespec, default solid line
        l = k      # legend label, defaults to the series key
        e = None   # optional companion series for a filled range
        if p.has_key('linespec'):
            c = p['linespec']
        if p.has_key('label'):
            l = p['label'].strip()
        # if re.match('^pound$',l):
        #     l = u'£'
        # if re.match('^euro$',l):
        #     l = u'€'
        # if re.match('\yen',l):
        #     l = u'\yen'
        if p.has_key('range'):
            e = p['range']
        kw = {}
        for _k,_v in _opts.items():
            if p.has_key(_k):
                kw[_k]=_v(p[_k])
        if isinstance(v,float):
            # A scalar is not plottable as a timeseries.
            unplotted.append(k)
            continue
        dates = [ d.datetime for d in v._data.dates]
        data = v.data
        logger.debug('aggiungo la serie %s (%s)',k,l)
        # f1.plot_date(mpl.dates.date2num(dates),data,c,label=_(l),**kw)
        a = f1.plot_date(mpl.dates.date2num(dates),data,c,label=_(l),**kw)
        if e:
            # Draw the companion series and shade the area between the
            # two where the companion lies below the main series.
            v = self[e]['DATA']
            dates1 = [ d.datetime for d in v._data.dates]
            data1 = v.data
            kw={}
            if p.has_key('zorder'):
                kw['zorder']=p['zorder']
            f1.plot_date(mpl.dates.date2num(dates),data1,c,linewidth=.5,**kw)
            f1.fill_between(mpl.dates.date2num(dates),data,data1,where=data1<data,facecolor=c[0],**kw)
            # f1.plot_date(mpl.dates.date2num(dates),data+e/2,c,linewidth=.1)
    return unplotted
def check_bool(icfg,options,conf,cfg,_value):
    """Return ``None`` when *_value* parses as a boolean, else ``E:BOOL:001``."""
    try:
        boolean(_value)
    except ValueError:
        context = udict(cfg)
        context.update({'OFFENDING': _value})
        return (('E:BOOL:001', context),)
    return None
def request(self,reqs,**kw): """Random Request >>> ds = Random() >>> xs = ds.request([{'Instrument':'NAME=PIPPO~:2012-12-31~2007-01-01~M', 'Source': 'FRED'},]) >>> ts = xs['PIPPO']._data >>> print ts.freqstr M """ logger.debug('request') rx={} kw=udict(kw) if 'SEED' in kw: seed = int(kw['SEED']) rand.seed(seed) for rq in reqs: logger.debug("Request: %s",rq) s = parse_instrument(rq['Instrument']) logger.debug(s) params = ldict(mk_params(s['TICKER'])) if 'name' in params: name = params['name'].upper() del params['name'] else: name = rq['Source'].upper() if 'NAME' in kw and kw['NAME']: name = kw['NAME'].upper() kw['NAME']=name if s['FREQ']!='0': dr = ts.date_array(freq=s['FREQ'],start_date=s['START'],end_date=s['END']) L = len(dr) else: L = s['END']-s['START'] params['size']=L dd = np.zeros(L) logger.debug(params) try: if re.match('^constant$',rq['Source'],re.I): name = 'CONSTANT' v = float(name) params['LOW']=v params['HIGH']=v dd = rand.uniform(**params) elif re.match('^walk$',rq['Source'],re.I): dd = RandomWalk(**params) elif re.match('^uniform|beta|binomial|chisquare|exponential|gamma|geometric|gumbel|hypergeometric|laplace|logistic|lognormal|logseries|multinomial|multivariate_normal|negative_binomial|noncentral_chisquare|noncentral_f|normal|pareto|poisson|power|rayleigh|standard_cauchy|standard_exponential|standard_gamma|standard_normal|standard_t|triangular|uniform|vonmises|wald|weibull|zipf$',rq['Source'],re.I): generator = rq['Source'].lower() if hasattr(rand,generator): f = getattr(rand,generator) dd = f(**params) ## Add other distributions here else: raise TypeError, 'Unknown generator' except TypeError, exc: logging.error('%s generator - %s',rq['Source'],exc.args[0]) except:
def _check_keys(icfg,options,conf,cfg,arg,err,should=True,must=False):
    """Report *err* based on key presence of *arg* inside *cfg*.

    The two modes are mutually exclusive and ``must`` wins: in
    ``should`` mode the left result of ``key_presence`` is offending,
    in ``must`` mode the right one.  Returns a 1-tuple of
    ``(err, context)`` or ``None``.
    """
    if must:
        should = False
    if should:
        must = False
    left, right = key_presence(cfg, arg)
    logger.debug("got %s --> %s | %s", arg, left, right)
    offending = None
    if should and len(left) > 0:
        offending = ','.join(left)
    elif must and len(right) > 0:
        offending = ','.join(right)
    if offending is None:
        return None
    context = udict()
    context.update(cfg)
    context.update({'__LEFT': left, '__RIGHT': right,
                    'OFFENDING': offending, 'ARG': arg})
    return (err, context),
def xmloutjobs(w,name,*args,**kw):
    """Emit a ``<job/>`` element for OUTPUT config *name* on XML writer *w*."""
    (datareqs, processors, outputs) = e4t.cli.get_elem_configs(*args, **kw)
    cfg = e4t.cli._read_cfgfile(kw, 'OUTPUT', name)
    _cfgs = (kw, cfg)
    if not cfg:
        return
    job_cfg = udict(cfg)
    job_cfg.remove('^__.*')   # drop internal bookkeeping keys
    job_cfg = ldict([(key.replace(' ', '_'), val)
                     for key, val in job_cfg.items()])
    w.start("job", name=name, **ldict(job_cfg))
    w.end("job")
def check_key(icfg,options,conf,cfg,_value,_search):
    """Check that *_value* is a word; with ``_search=='+'`` it must also
    be a key of *cfg*.

    NOTE(review): the ``E:LABEL:001`` branch returns the raw value
    instead of the usual context dict — confirm downstream handling.
    """
    if not re.match('^\w+$', _value, re.I):
        return ('E:LABEL:001', _value),
    if _search == '+' and not cfg.has_key(_value):
        # The configuration file lacks the requested section.
        context = udict()
        context.update(cfg)
        context.update({'OFFENDING': _value})
        return ('W:KEY:001', context),
    return None
def check(options,label,cfg,part,filename=None):
    """Run the *part* check suite on section *label* of *cfg*.

    Returns the error structure produced by ``process_check``, or an
    ``E:CHECK:001`` tuple when the section is missing.
    """
    if not cfg.has_key(label):
        context = udict(cfg)
        context.update({'OFFENDING': label})
        if not context.has_key('__FILE'):
            context.update({'__FILE': filename})
        return ('E:CHECK:001', context),
    parsing_conf = cfg[label]
    icfg = get_check(part)
    return process_check(icfg, options, cfg, label, parsing_conf)
def func_repl(matchobj):
    """``re.sub`` callback: expand a macro call ``fn(args)`` using *kw*.

    *kw* (free variable from the enclosing scope) maps a function name
    to a dict with ``'args'`` (formal parameter names) and ``'def'``
    (either a callable or a template string).  On any mismatch the
    matched text is returned unchanged so the substitution is a no-op.
    """
    fn, foo, args = matchobj.groups()
    if kw.has_key(fn):
        argv = args.split(',')
        if len(kw[fn]['args']) == len(argv):
            if isfunction(kw[fn]['def']):
                repl = kw[fn]['def'](*argv)
            else:
                kwk = udict(zip(kw[fn]['args'], argv))
                repl = Template(kw[fn]['def']).safe_substitute(kwk)
            return repl
        else:
            # BUG FIX: was ``matchobj.groups(0)`` which returns a tuple;
            # re.sub requires a string, so return the whole match.
            return matchobj.group(0)
    # BUG FIX: previously fell through returning None, which makes
    # re.sub raise TypeError; leave unknown functions untouched.
    return matchobj.group(0)
def __call__(self,options,*args,**kw):
    """Make .tex file for JOB"""
    label = args[0]
    LOGGER.debug("LOAD %s", label)
    setattr(options, 'job', label)
    kw = udict(kw)
    (datareqs, processors, outputs) = e4t.cli.get_elem_configs(*args, **kw)
    cfg = e4t.cli._read_cfgfile(kw, 'OUTPUT', label)
    _cfgs = (kw, cfg)
    LOGGER.debug('LOAD %s', ','.join(datareqs))
    # Temporarily disable the cache while loading the data requests,
    # then restore the caller's setting.
    saved_cache = options.switch_cache
    setattr(options, 'switch_cache', False)
    _dataset = load(*datareqs, options=options)
    setattr(options, 'switch_cache', saved_cache)
    print(_dataset.report())
def futures(self,*args):
    """Build a futures timeseries from IS members matching a regex.

    Example::

        FUT=FUTURES("LLC(?<yy>:[0-9]{2})(?<mm>[0-9]{2})","M")

    The pattern's named groups (``yy``, ``mm``, ``dd``) give the date
    of each matching series; the last observation of each matching
    series becomes the value at that date.  Returns an
    ``ets.Timeseries`` or ``None`` when nothing matched.
    """
    (spec,[freq]) = args
    freq = str(freq)
    _vars = grep(spec,udict(IS))
    rx = re.compile(spec,re.I)
    if _vars is None:
        logger.error('Non posso creare i futures per %s', spec)
        raise ValueError, "Niente futures per %s" % spec
    dates = []
    values = []
    for _v in _vars:
        m = re.match(spec,_v,re.IGNORECASE)
        if m:
            if freq=='Y':
                idx1 = int(m.group('yy'))
                # two-digit years are taken as relative to 2000
                if idx1<100:
                    idx1 += 2000
                dates.append(ts.Date(freq, year=idx1))
            elif freq=='M':
                idx1 = int(m.group('yy'))
                idx2 = int(m.group('mm'))
                if idx1<100:
                    idx1 += 2000
                dates.append(ts.Date(freq, year=idx1, month=idx2))
            elif freq=='D':
                # NOTE(review): 'dd' group assumed present in the
                # pattern for daily frequency — confirm with callers.
                idx1 = m.group('yy')
                idx2 = m.group('mm')
                idx3 = m.group('dd')
                dates.append(ts.Date(freq, year=2000+int(idx1), month=int(idx2), day=int(idx3)))
            else:
                raise ValueError, "FUTURES"
            # Value is the last known observation of the matching series.
            v = IS[_v]
            values.append(v.data[-1])
    if values and dates:
        _ts = ts.time_series(values, freq=freq, dates=dates)
        return ets.Timeseries(data=_ts)
    return None
def __call__(self,options,*args,**kw):
    """Make .tex file for JOB

    Typesets the element named by ``__LABEL`` into ``OUTFILE`` (default
    ``<label>.pdf``) under ``options.output_path`` unless the expanded
    path is absolute.  Datareq-only elements are skipped.
    """
    kw = udict(kw)
    kind = kw.xget('kind')
    # Datareq elems not output.
    # BUG FIX: was ``kind in ('datareq')`` — a string, so this tested
    # substring containment (e.g. kind='data' matched); use a 1-tuple.
    if not kind or kind in ('datareq',):
        return
    label = kw.xget('__LABEL')
    if not label:
        raise E4TSystemError('ECT002: Cannot typeset without a label')
    label = label.upper()
    outfile = kw.xget('OUTFILE')
    if not outfile:
        outfile = "%s.pdf" % label
    if not exists("%s.tex" % label):
        raise E4tUserError(label,"ECT001: File %s.tex not exists" , label)
    goutput = options.define_set.expand(outfile)
    if goutput[0]!='/':
        # Relative target: anchor it to the configured output path.
        goutput = join(options.output_path,goutput)
    LOGGER.debug('Typeset composition for %s into %s',label,goutput)
    from e4t.dataset.output import save_latex
    save_latex(options,label,goutput,**kw)
def __call__(self,options,*args,**kw):
    """Make .tex file for JOB"""
    LOGGER.debug("compile")
    kw = udict(kw)
    base = join(options.process_path, 'input')
    copy_file(kw, base)
    datareqs, processors, outputs = e4t.cli.get_elem_configs(*args, **kw)
    _dataset = None
    # Seconds spent in each of the three phases: load, process, output.
    timings = [.0, .0, .0]
    setattr(options, 'elapsed', timings)
    LOGGER.debug('## ------------------------------ LOAD ')
    # Request phase
    if datareqs:
        with Timer() as t:
            _dataset = load(*datareqs, options=options)
        LOGGER.debug("TIME tsload %f", t.secs)
        timings[0] = t.secs
        setattr(options, 'elapsed', timings)
    LOGGER.debug('## ---------------------------- PROCESS ')
    # Process phase
    if processors:
        with Timer() as t:
            _dataset = process(_dataset, *processors, options=options)
        LOGGER.debug("TIME process %f", t.secs)
        timings[1] = t.secs
        setattr(options, 'elapsed', timings)
    LOGGER.debug('## ---------------------------- OUTPUT ')
    # Output phase
    with Timer() as t:
        from e4t.dataset.output import output
        output(options, _dataset, *outputs, **kw)
    LOGGER.debug("TIME output %f", t.secs)
    timings[2] = t.secs
    setattr(options, 'elapsed', timings)
def check_call(icfg,options,conf,cfg,arg):
    """Follow cross-references from *cfg* into other sections of *conf*
    and run their checks.

    *arg* maps a cfg key to a pair ``(f, spec)`` where ``f`` optionally
    explodes the key's value into several labels; each label is looked
    up in *conf* and checked recursively with ``check_sub``.  Returns
    the list of collected ``(error, context)`` tuples.
    """
    logger.debug("{CALL} %s %s",arg,cfg)
    results = []
    for _k,_v in arg.items():
        _f,_v1 = _v
        _c = cfg.get(_k.upper(),None)
        if _c:
            logger.debug("{CALL}* %s %s",_c,_v)
            # Explode the referenced value into labels, or wrap the
            # single value into a 1-tuple when no exploder is given.
            if _f:
                _gels = _f(_c)
            else:
                _gels = _c,
            if not _gels:
                logger.debug("{CALL} %s %s",_c,_v)
                continue
            for _j in _gels:
                # Skip empty labels and the '>' separator token.
                if _j in ('','>'):
                    continue
                _g = conf.get(_j.upper(),None)
                if _g:
                    logger.debug('{CALL}>%s>%s>%s',_j,_g['__LABEL'],_v)
                    r = check_sub(icfg,options,conf,
                                  _g,   # l_cfg
                                  _j,   # label
                                  _v)   # e.g. (f,jobspec.job.select)
                    if r:
                        results.extend(r)
                        logger.debug('{CALL} %s-%s %s',_k,_c,r[0])
                    else:
                        logger.debug('{CALL} %s-%s OK',_k,_c)
                else:
                    # Referenced section does not exist in the config.
                    kw = udict()
                    kw.update(cfg)
                    kw.update({'OFFENDING': _j})
                    results.append( ("E:CALL:001", kw), )
        else:
            logger.debug('{CALL} %s-None',_k)
    return results
def check_conffile(icfg,options,conf,cfg,*args):
    """Recursively check configuration files referenced by *cfg*.

    ``args[0]`` maps a cfg key to ``(f, part)``; each referenced file
    (resolved relative to the directory of ``cfg['__FILE']``) is parsed
    and checked with the *part* suite.  ``.req`` files are skipped.
    """
    logger.debug("{CONFFILE} %s", args[0])
    label = cfg.get('LABEL', cfg['__LABEL'])
    base = dirname(cfg['__FILE'])
    results = []
    for entry, (_f, part) in args[0].items():
        key = entry.upper()
        if not cfg.has_key(key):
            continue
        fname = cfg[key]
        conffile = _i(fname, base)
        if not conffile:
            context = udict(cfg)
            context.update({'OFFENDING': fname})
            results.append(('E:FILE:001', context))
            continue
        if re.search("\.req$", conffile, re.I):
            # Data-request files are not configurations: nothing to check.
            logger.debug('{CONFFILE} req %s,%s', fname, conffile)
        else:
            logger.debug('{CONFFILE} conf %s,%s', fname, conffile)
            ncfg = cfg2hash(conffile)
            if entry == 'processor':
                label = 'PROCESSOR'
            results.extend(check(options, label, ncfg, part))
    return results
from e4t.utils import udict

logger = logging.getLogger(__name__)


def url_exists(url):
    """Return True when *url* answers an HTTP request, False otherwise.

    Any failure (network error, malformed URL, HTTP error status) is
    treated as "does not exist".
    """
    try:
        urllib2.urlopen(urllib2.Request(url))
        return True
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit.
        return False


# Named colour schemes: (foreground, background, highlight) LaTeX colour specs.
colorschemes = udict({
    'base'   : ("HTML(094972)",'HTML(FFFFFF)','HTML(FFFACC)'),
    'plus'   : ("HTML(094972)",'HTML(D1E5ED)','HTML(FFFACC)'),
    'minus'  : ("HTML(010101)",'HTML(FFFFFF)','HTML(FFFACC)'),
    'esab'   : ("HTML(D1E5ED)",'HTML(094972)','HTML(FFFACC)'),
    'plain'  : ("HTML(000000)",'HTML(FFFFFF)','HTML(FFFACC)'),
    'reverse': ("HTML(FFFFFF)",'HTML(111111)','HTML(FFFACC)'),
})


def dict2xml(w,d):
    """Transform a dict into an xml chunk.

    Nested dicts become nested elements; scalar values become text
    elements.  Keys are lowercased and spaces become underscores.
    """
    for k,v in d.items():
        tag = k.replace(' ','_').lower()
        if isinstance(v,dict):
            w.start(tag)
            dict2xml(w,v)
            w.end(tag)
        else:
            w.element(tag,unicode(v))
def report(self):
    """Drain the module-level accounting hash and return its snapshot."""
    snapshot = udict(_accounting)
    _accounting.clear()
    return snapshot
import shutil from pprint import pprint from os.path import exists, join, expandvars, basename,expanduser,dirname from e4t.utils import udict,needed,_i,append_to_toc,localize, Timer, dictview from e4t.utils.hash2cfg import get_list from e4t.output import Row,Cell from e4t.output.utils import colorschemes,get_definecolor from e4t.utils.defines import define_set logger = logging.getLogger(__name__) options = None _accounting = udict() class LaTeXJob(udict): """ """ def __init__(self,jobs): udict.__init__(self) self._jobs = jobs # self.addpackages = ( ('sristat',None), ) def layout(self): l = [] for job in self._jobs: if exists("%s.tex"%job): l.append('\\input{%s}'%job)
def produce(self,f1): """Produci una mappa""" ### Non posso produrre una mappa se non è indicata ### un'immagine sottostante e se non ci sono ### le BARMAPS needed(self._spec,'IMAGE','SERIES','ITERATOR') args = {} ### Gestione dello sfondo f1.hold(True) self.insert_background(f1) ### Gestione della LEGENDA # f1.hold(True) # if self._spec.has_key('MAPLEGEND'): # legend = self._spec['MAPLEGEND'] # if self._genspec.has_key(legend): # leg_spec = udict(self._genspec[legend]) # n = len(leg_spec) # a = plt.axes([.15,.77,.30,.015*n]) # a.set_xticks([]) # a.set_yticks([]) # l = 0 # for k,v in leg_spec.items(): # ## Escludi le chiavi che iniziano per _ # if k[0]=='_': continue # a.text(.05,l*.3+.15,"%s=%s"%(k.capitalize(),v),fontsize=6) # l += 1 # f1.add_artist(a) # # xticks=[], # # yticks=[]) unplotted = [] countries = get_list(self._spec['ITERATOR']) series = self._spec['SERIES'] if len(countries)==0: err="L'elenco dei countries è vuoto" logger.error(err) raise ValueError, err _h = {} _s = explode_list(series,countries) _v = [ self[_n] if self.has_key(_n) else None for _n in _s ] _C = dict([ (_c,_m) for _c,_n,_m in zip(countries,_s,_v) if _m is not None ]) # unplotted.extend( [ _n for _n,_m in zip(_s,_v) if _m is None ]) def hist(a,val,displ,color): Y = displ X_m = val*0.0 X_M = val a.hlines(Y,X_m,X_M,lw=4,color=color) def _mk_hist(f1,_k,_v1,_v0,label,m,M): xx = _Values[_k][0][0] yy = _Values[_k][0][1] # M = 13 # max(g,i)*1.3 a = plt.axes([xx, yy, .10, .05]) a.patch.set(color='white') a.patch.set_facecolor('#0000CD') a.patch.set_edgecolor('red') a.patch.set_alpha(0.7) plt.axis('off');plt.plot([0, 0],[0.1, .6],color='#0000CD',lw=.1) _fnt = 7 F=-2.5 a.set_xlim(xmin=F+m,xmax=M*8/7) a.set_ylim(ymin=0,ymax=1) l=F+(-.5 if min(_v0,_v1)<0 else .0)+(.2 if max(_v0,_v1)<10 else -.5) # take in account two figures numbers hist(a,_v0,.15,(.0,.0,.805)) a.text(l,.05,str("%.1f"%_v0),fontsize=_fnt,color="#0000CD") # ,backgroundcolor='w') hist(a,_v1,.52,(.99,.2,.2)) 
a.text(l,.40,str("%.1f"%_v1),fontsize=_fnt,color="#0000CD") #,backgroundcolor='w') a.text(l+1.0,.80,label,fontsize=10,color="#0000CD") #,backgroundcolor='w') a.set_xticks([]) a.set_yticks([]) f1.add_artist(a) M=0.0 m=0.0 for _k,_v in _C.items(): _w = _v['DATA'] _v0 = _w._data[-1] _v1 = _w._data[-2] M=max(M,_v0,_v1) m=min(m,_v0,_v1) for _k,_v in _C.items(): _w = _v['DATA'] _spec = udict(_v['SPEC']) try: label = _spec['label'] except KeyError: label = _k _v0 = _w._data[-1] _v1 = _w._data[-2] _mk_hist(f1,_k,_v1,_v0,label,m,M) return unplotted
def _read_xls(self,fname,ext,kw):
    """Read timeseries (or plain arrays) from the Excel file *fname*.

    The layout is driven by *kw*: SHEET/SHEETNUM select the sheet,
    ORIENT (H/V) the data orientation, DATES or INDEX the index range,
    FREQ the timeseries frequency, SERIES the data range, NAME the
    series-naming pattern and RENAME post-read renames.  Returns a
    dict mapping series name -> Timeseries (or np.array when INDEX is
    used instead of dates).
    """
    logger.debug('Read_XLS from %s',fname)
    TS = {}
    kw = udict(kw)
    book = xlrd.open_workbook(fname)
    def _get_sheet(book,kw):
        # Select the sheet by name (SHEET) or by index (SHEETNUM, default 0).
        get_sheet = book.sheet_by_index
        sheetname = kw.xget('SHEET')
        if sheetname:
            sheet =exls.get_sheet(book,sheetname)
        else:
            sheetname = kw.xget('SHEETNUM',0)
            sheet = book.sheet_by_index(sheetname)
        if sheet is None:
            logger.error('Non posso acquisire il foglio %s',sheetname)
            raise ValueError, "Sheet"
        return sheet
    sheet = _get_sheet(book,kw)
    def _get_orient(kw):
        # Data orientation (H -> horizontal, V -> vertical); bad values
        # fall back to vertical with a warning.
        orient = kw.xget('ORIENT','V')
        if orient not in ( 'H', 'V' ):
            logger.warn('ORIENT deve essere tra H o V, non %s. Uso V!', orient)
            orient = 'V'
        return orient
    orient = _get_orient(kw)
    # SIZE
    nrows = sheet.nrows
    ncols = sheet.ncols
    # Date or index range; INDEX (plain integer index) wins over DATES.
    dates = kw.xget('DATES',"A2:")
    index = kw.xget('INDEX')
    Aindex = index if index else dates
    # NOTE(review): this uses the root ``logging`` module instead of
    # ``logger`` — probably unintended.
    logging.debug("{PARMS}-%s %s %s %s", 'IDX' if index else 'DAT', sheet.name, Aindex, orient)
    (BC,BR,EC,ER) = _get_total_range(sheet,Aindex,orient)
    logger.debug("{RANGE} %s:%s",_Aref(BC,BR),_Aref(EC,ER))
    date_values = _get_values(sheet,BC,BR,EC,ER,kw)
    def _get_index(date_values):
        # Plain integer index, no date semantics.
        v = [ int(d) for d in date_values.flatten() ]
        date_array = np.array(v)
        return date_array
    # Find index as Vector or DateRange
    if index:
        date_array = _get_index(date_values)
    else:
        # TIMESERIES: build a date_array according to FREQ.
        def _get_tseriesD():
            pass
        def _get_tseriesA(freq,date_values,kw):
            # Annual: cell values are the year numbers.
            v = [ int(d) for d in date_values.flatten() if not np.isnan(d) ]
            D = [ ts.Date(freq=str(freq),value=_v) for _v in v]
            date_array = ts.date_array(D)
            return date_array
        def _get_tseriesQ(freq,date_values,kw):
            # Quarterly: quarter number within the year, optionally
            # offset by a YEAR base.
            # print date_values
            by=0
            if kw.has_key('YEAR'):
                by = eval(kw['YEAR'])-1
            v = [ (int(d)-1)%4+1+(int(_i/4)*4)+by*4 for _i,d in enumerate(date_values.flatten()) if not np.isnan(d) ]
            D = [ ts.Date(freq=str(freq),value=_v) for _v in v]
            date_array = ts.date_array(D)
            return date_array
        def _get_tseriesM(freq,date_values,kw):
            # Monthly: either integer year cells (INT) or Excel serial
            # dates (XL_DATE), selected by DFORMAT.
            dformat='INT'
            if kw.has_key('DFORMAT'):
                dformat = kw['DFORMAT']
            if dformat=='INT':
                v = [ (int(d)-1)*12+i%12+1 for i,d in enumerate(date_values.flatten()) if not np.isnan(d) ]
                D = [ ts.Date(freq=str(freq),value=_v) for _v in v]
            elif dformat=='XL_DATE':
                v = [ xlrd.xldate_as_tuple(d,0) for i,d in enumerate(date_values.flatten()) if not np.isnan(d) ]
                D = [ ts.Date(freq=str(freq),year=_v[0],month=_v[1]) for _v in v]
            else:
                logger.error('DATE FORMAT NOT SUPPORTED ON EXCEL READING')
                raise ValueError, dformat
            # print "FREQ=|%s|"%freq,D
            date_array = ts.date_array(D)
            return date_array
        def _get_tseriesD(freq,date_values,kw):
            # Daily: cells are Excel serial dates.  (This definition
            # shadows the empty _get_tseriesD placeholder above.)
            v = [ xlrd.xldate_as_tuple(int(d),0) for i,d in enumerate(date_values.flatten()) if not np.isnan(d) ]
            D = [ ts.Date(freq=str(freq),year=_v[0],month=_v[1],day=_v[2]) for _v in v]
            date_array = ts.date_array(D)
            return date_array
        # Frequency dispatch table.
        op = { 'A': _get_tseriesA,
               'Y': _get_tseriesA,
               'Q': _get_tseriesQ,
               'M': _get_tseriesM,
               'D': _get_tseriesD, }
        freq = 'D'
        if 'FREQ' in kw:
            freq = kw['FREQ'].strip()
            if freq in op:
                fnc = op[freq]
                date_array = fnc(freq,date_values,kw)
            else:
                logger.error('UNKNOWN FREQ %s',freq)
                # NOTE(review): 3-expression raise — ``freq`` lands in
                # the traceback slot of the Py2 raise statement.
                raise ValueError, 'UNKNOWN FREQ %s',freq
        else:
            logger.error('ABSENT FREQ %s',freq)
            raise ValueError, 'ABSENT FREQ %s',freq
    logger.debug("IND: %s", ','.join([ str(x) for x in date_array]))
    # Series data range.
    series = "B2:"
    if kw.has_key('SERIES'):
        series = kw['SERIES']
    (Bc,Br,Ec,Er) = _get_total_range(sheet,series,invert(orient))
    Nseries = (Er - Br) if orient == 'H' else (Ec - Bc)
    B = Bc if orient == 'V' else Br
    E = Ec if orient == 'V' else Er
    logger.debug("(Bc=%d,Br=%d,Ec=%d,Er=%d,B=%d,E=%d)",Bc,Br,Ec,Er,B,E)
    # Series naming: NAME may be a plain prefix, a cell range holding
    # the names, or a space-separated composition of both.
    name = kw.xget('NAME','TS')
    logger.debug("{NAME} %s",name)
    _cmp = name
    if ' ' not in name and ':' in name:
        # Single cell range: read the names from the sheet.
        logger.debug("{NAME} with : %s",name)
        _cmp=[]
        _fmt = "%s"
        _n = name
        _f = str
        m = rx_range.search(_n)
        _name = [name,]
        if m:
            (_bc,_br,_ec,_er) = _get_range4(m.group(0))
            logger.debug("%s",(_bc,_br,_ec,_er))
            F = [ _f(v) if len(unicode(v))>0 else None for v in _get_values_list(sheet,_bc,_br,_ec,_er,kw)]
            _name = [ (_fmt.lower() % _p) if _p is not None else "_${NUM}_!" for _p in F ]
    elif ' ' in name:
        # Composed name: each space-separated part is a literal or a
        # ``=range`` lookup, optionally with ``|coercion|format``.
        _cmp = []
        _ns = name.split(' ')
        M = 0
        for i,_n in enumerate(_ns):
            _f = str
            _fmt = "%s"
            if '|' in _n:
                (_n,_f,_fmt)=_n.split('|')
                _f = eval(_f.lower())
            if '=' in _n:
                m = rx_range_eq.search(_n)
                if m:
                    (_bc,_br,_ec,_er) = _get_range4(m.group(1))
                    logger.debug("%s",(_bc,_br,_ec,_er))
                    F = [ _f(v) if len(unicode(v))>0 else None for v in _get_values_list(sheet,_bc,_br,_ec,_er,kw)]
                    # print "FMt=",F
                    F = [ (_fmt.lower() % _p) if _p is not None else "_${NUM}_!" for _p in F ]
                    _cmp.append( F )
                    M = max(M,len(F))
            else:
                _cmp.append([_f(_n),])
                M = max(M,1)
        # Cycle the shorter components so every part has M entries,
        # then concatenate part-wise.
        from itertools import product,izip,cycle
        for p in range(0,len(_cmp)):
            _P = _cmp[p]
            if len(_P)<M:
                _cmp[p] = []
                for i,_p in enumerate(cycle(_P)):
                    if i < M:
                        _cmp[p].append(_p )
                    else:
                        break
                _P = _cmp[p]
            _cmp[p] = _P
        _name = map(operator.add, *_cmp)
    else:
        # Plain prefix: append the series number placeholder.
        _name = [ "%s${NUM}" % _cmp , ]
    # Pad missing names with a default pattern.
    Nnames = len(_name)
    if Nnames<Nseries:
        for i in range(Nnames,Nseries):
            _name.append("TS${NUM}")
    for i,x in enumerate(range(B,E+1)):
        #print _name
        #print i,x,len(_name)
        _n = _name[i].strip()
        _ts = None
        if _n in TS.keys():
            # Duplicate name: disambiguate with the series number.
            _n += "_${NUM}_?"
        N = Template(_n).safe_substitute({'NUM': i})
        N = unicode( N.upper().strip() ) #.decode('utf-8')
        #print u"reading series for %s" % N.encode('ascii','xmlcharrefreplace')
        #print " on ", x, "Row" if orient=='H' else "Column"
        if orient == 'H':
            _tv = _get_values(sheet,BC,x,EC,x,kw)
        elif orient == 'V':
            _tv = _get_values(sheet,x,BR,x,ER,kw)
            #print "_tv",_tv
            _tv = np.array([ _v[0] for _v in _tv ])
            #print "_tv2",_tv
            # print date_array
        else:
            logger.error('ORIENTTION ERROR')
            raise ValueError, 'ORIENT'
        if index is None:
            # isa timeseries
            #print "Tv=",_tv
            #print date_array
            _ts = ts.time_series(data=_tv[0:len(date_array)],dates=date_array,copy=True)
            TS[N] = Timeseries(data=_ts,name=N)
        else:
            # isa np.array
            TS[N] = _tv
        # print "TS[%s]"%N.encode('ascii','xmlcharrefreplace'),TS[N]
        # _report(_ts)
    logger.debug("Read %d series from excel: %s",Nseries,','.join( sorted(TS.keys())))
    # Rename Phase: RENAME holds 'from>to' pairs.
    if kw.has_key('RENAME'):
        rename_desc = get_list(kw['RENAME'])
        for r in rename_desc:
            (_f,_t) = r.split('>')
            if TS.has_key(_f):
                if not TS.has_key(_t):
                    TS[_t]=TS[_f]
                    del TS[_f]
                else:
                    logger.error('La serie %s già esiste nell\'IS',_t)
            else:
                logger.error('La serie %s non esiste nell\'IS',_f)
    # Drop Phase
    logger.debug("Read XLS : ")
    # for k,t in TS.items():
    #     print k
    #     print tsinfo(t)
    #     look(t._data)
    return TS
def check_type_set(icfg,options,conf,cfg,*args):
    """Ensure ``args[0]`` is one of the comma-separated values in ``args[1]``."""
    allowed = args[1].split(',')
    if args[0] in allowed:
        return None
    context = udict(cfg)
    context.update({'OFFENDING': args[0], 'IN': args[1]})
    return ('E:IS:001', context),
logger.debug("RLOADS= %s",res) return res # Static Function Processor proc = FunctionProcessor() # Static Function list FUNCS = udict( { 'A0001' : proc.a0001, 'A0002' : proc.a0002, 'DECUMULATE' : proc.decumulate, 'FLOAT' : proc.float, 'FUTURES' : proc.futures, 'JOIN' : proc.join, 'SAME_TSRANGE_OF': proc.same_tsrange_of, 'SUM': proc.sum , 'TEST': proc.test , 'TSERIES': proc.tseries , 'WEIGHTED' : proc.weighted, }) def make_name(n): n = n.upper() if IS.has_key(n): return IS[n] logger.error("Information Set does not have object %s" % n) return 0.0 def make_number(n):
def request(self,profile=None):
    """ Request data

    Issues every entry in ``self.requesting`` through the provider,
    recording per-request timing and results into the module-level
    ``_accounting`` hash.

    :rtype TimeSeriesResultSet: resultset of timeseries
    """
    global acct
    if hasattr(self.provider,'open'):
        self.provider.open()
    self._res = DataSet()     # Base Dataset is empty
    # accounting stuff
    acct_l = {}
    _accounting['datareq.profile']=profile
    R = udict()
    for req in self.requesting:
        # Baseline request variables, overridden by the query-string
        # parameters parsed below.
        basevars = udict({ 'name': None,
                           'start': None,
                           'end': None,
                           'proc': None,
                           'check': None })
        hostname=req[1].upper()
        series = [ _replace_funcs(s,self._func) for s in [req[2][1:],]]
        xparams = req[4]
        #logger.debug('dataprovider requests %s from %s | %s (%s%s)',
        #             ','.join(series),hostname,str(xparams),
        #             'R' if self._opt_request_req else "S",
        #             'K' if self._opt_delete_base_kvars else "-" )
        _accounting["datareq.series.%s"%','.join(series)]='%s | %s (%s%s)' % (
            hostname,str(xparams),
            'R' if self._opt_request_req else "S",
            'K' if self._opt_delete_base_kvars else "-" )
        # Replace self._funcs
        xparams = _replace_funcs(xparams,self._func)
        kvars = basevars
        if len(xparams)>0:
            # Merge query-string parameters over the baseline vars.
            kvars2 = udict(parse_qsl(xparams))
            accepted(kvars, 'NAME', 'START', 'END', 'PROC', 'CHECK')
            kvars.update(kvars2)
            if 'NAME' in kvars:
                kvars['NAME']=kvars['NAME'].strip()
            if self._opt_delete_base_kvars:
                for _k in kvars.keys():
                    if _k in kvars2:
                        del kvars2[_k]
        # Either forward the raw request tuple or just the series list,
        # depending on the provider option.
        if self._opt_request_req:
            reqs = req
        else:
            reqs = series
        res = []
        if not self._options.only_options:
            if self._opt_request_req:
                reqs = self.mk_request(req,hostname)
            else:
                reqs = self.mk_request(series,hostname)
            # if self._options.switch_verbose:
            #     for k,v in kvars.items():
            #         logger.info('K:%s=%s',k,v)
            with Timer() as t:
                res = self.mget(reqs,**kvars)
            # Per-request accounting: elapsed time, request, result.
            _accounting['%s.request.%s.time' % (self.name,kvars['NAME'])] = t.msecs
            _accounting['%s.request.%s.req' % (self.name,kvars['NAME'])] = req
            if res:
                _accounting['%s.request.%s.res' % (self.name,kvars['NAME'])] = res
            # acct_l[kvars['NAME']] = (reqs,kvars,res)
            # if 'provider' not in acct:
            #     acct['provider']={}
            #acct['provider'][self.name]=acct_l
        kvars = {}
    if hasattr(self.provider,'close'):
        self.provider.close()
    _accounting['%s.request.missing' % (self.name)] = ','.join(self._missing)
    self._res.add_missing(*self._missing)
    return self._res
def check_kind(icfg,options,conf,cfg,*args):
    """Verify the 'kind' key via ``check_presence``; ``E:KIND:001`` on failure."""
    if check_presence(icfg, options, conf, cfg, 'kind', *args):
        return None
    context = udict(cfg)
    context.update({'OFFENDING': args[0]})
    return ('E:KIND:001', context),