def check_presence(icfg, options, conf, cfg, keyname, *args):
    """Check that every key named by *keyname* exists in *cfg* and that its
    value (compared case-insensitively) is one of the models listed in args[0].

    Returns True when all keys pass, False at the first miss.
    """
    # keyname is either a single key or a comma-separated list of keys
    if ',' in keyname:
        wanted = [item.upper() for item in get_list(keyname)]
    else:
        wanted = (keyname.upper(),)
    allowed = [item.upper() for item in get_list(args[0])]
    for key in wanted:
        current = cfg.get(key, None)
        if not current:
            return False
        if current.upper() not in allowed:
            return False
    return True
def check_BASE(self):
    """
    : If BASE exists then exists BASE JOBS
      if exists section BASE of JF then
        set CBASES as collector(BASE)
        foreach V is list{JF{BASE}.JOBS}
          collect (exists V of JF) in CBASES{V}
    """
    self.setup_base()
    if self._base:
        if not self._base.has_key('JOBLIST'):
            return
        base_jl = get_list(self._base['JOBLIST'])
        # FIX: stray debug `print` statements replaced with logger.debug
        logger.debug("BASE joblists: %s", base_jl)
        for _joblist in base_jl:
            logger.debug("checking joblist %s", _joblist)
            if not self._gen.has_key(_joblist):
                # joblist referenced by BASE is not defined in the JF
                self.error('E#JF#003', (_joblist,))
                continue
            joblist_c = self._gen[_joblist]
            # NOTE(review): this raises E#JF#004 when the section IS of kind
            # 'joblist' — the condition looks inverted; confirm the intended
            # semantics of isofkind before changing it.
            if isofkind(joblist_c, 'joblist'):
                self.error('E#JF#004')
                continue
            if not joblist_c.has_key('JOBS'):
                self.error('E#JF#005')
                continue
            joblist = joblist_c.get('JOBS', None)
            if joblist:
                logger.debug("joblist=%s", joblist)
                # Verify that all called jobs are defined
                jobs = set(get_list(joblist))
                jobspecs = self._gen.select(*jobs, regexp=False)
                jobspecs = set(jobspecs.keys())
                # symmetric difference: names present on one side only
                residuals = jobs ^ jobspecs
                if residuals:
                    logger.debug("residuals=%s jobs=%s", residuals, jobs)
                    for r in residuals:
                        if r in jobspecs:
                            # defined in the JF but never called by the joblist
                            self.warn('W#JF#001', (r,))
                        if r in jobs:
                            # called by the joblist but never defined
                            self.error('E#JF#003', (r,))
    else:
        # No BASE section: acceptable only when a specific target was requested
        if self._options.name is None and self._options.only is None:
            self.error('E#JF#002')
        else:
            self.warn('W#JF#002')
def check_type_list(icfg, options, conf, cfg, _value, _type):
    """Apply check_type to each element of the comma-separated list *_value*
    and return the concatenation of all non-empty result lists."""
    collected = []
    for element in get_list(_value):
        issues = check_type(icfg, options, conf, cfg, element, _type)
        if issues:
            collected.extend(issues)
    return collected
def layout(self):
    """Return a list of LaTeX fragments, one per configured page:
    a page break followed by an \\input of the page file."""
    template = """
\\clearpage\\thispagestyle{empty}
\\input{%s}
"""
    return [template % page for page in get_list(self['PAGES'])]
def setup_elements(self):
    """Copy the configured PAGES (and optional ELEMS) files into the
    current directory, resolving relative paths against $SRISTAT/input.

    The two original copy loops were duplicates differing only in the
    path-expansion function; they are factored into one helper.
    """
    def _copy_all(items, expand):
        # Copy each item into the cwd; relative names are looked up under
        # the expanded $SRISTAT/input base directory via _i().
        for item in items:
            src = expand(item)
            if item[0] != "/":
                base = expand("$SRISTAT/input")
                src = _i(src, base)
            shutil.copyfile(src, basename(src))

    # PAGES: expand both ~ and environment variables (original behavior)
    _copy_all(get_list(self['PAGES']), lambda p: expandvars(expanduser(p)))
    if not self.has_key('ELEMS'):
        return
    # ELEMS: original code expanded only environment variables here
    _copy_all(get_list(self['ELEMS']), expandvars)
def go(self, label, config=None):
    """Set this element up and, when it is a 'pipe', run every phase
    listed in its PHASES key that exists in the global configuration."""
    logger.debug("%s %s", label, config)
    self._setup(label, config)
    # Only pipe-kind elements carry phases to execute
    if not isofkind(self, 'pipe'):
        return
    logger.debug("?!?")
    if not self.has_key('PHASES'):
        return
    logger.debug("PHASES %s", self['PHASES'])
    for phase_name in get_list(self['PHASES']):
        logger.debug("PHASE %s", phase_name)
        # Skip phases that have no section in the global configuration
        if self._global.has_key(phase_name):
            PipePhase(label, phase_name, self._global).go()
def process_check(icfg, options, conf, label, cfg):
    """Run every configured base check against *cfg* and return the
    accumulated list of results they report.

    icfg    -- check configuration; provides one section per check name
    options -- command-line options, passed through to each check
    conf    -- global configuration, passed through to each check
    label   -- label of the element under test (used for logging only)
    cfg     -- the configuration section being checked
    """
    # FIX: was logging.debug — use the module-level `logger` consistently
    logger.debug('{PROCESS} %s', label)
    results = []
    # Iterate over the configured base check programs
    for check in get_list(programs['base']):
        logger.debug('trying check %s', check)
        check_cfg = icfg.get(check, None)
        if not check_cfg:
            # no configuration section for this check: skip it
            continue
        f = checks_model[check]
        r = f(icfg, options, conf, cfg, check_cfg)
        logger.debug('check: %s on %s with %s --> %s', check, label, f, r)
        if r:
            results.extend(r)
    return results
def go(self):
    """Bring the SVN working copy up to date (update or fresh checkout),
    then run each configured STEP that exists in the global configuration."""
    # 1. update code
    import pysvn
    self.client = pysvn.Client()
    self.client.callback_get_login = get_login
    if exists(join(self.svndir, '.svn')):
        # working copy already present: just update it
        logger.debug("update %s", self.svndir)
        self.client.update(self.svndir)
    else:
        logger.debug("checkout %s from %s", self.svndir, self['SVN'])
        self.client.checkout(self['SVN'], self.svndir)
    # 2. run the steps
    if isofkind(self, 'STEP'):
        if self.has_key('STEPS'):
            for _s in get_list(self['STEPS']):
                logger.debug("STEP %s", _s)
                # BUG FIX: the guard tested the undefined name `s`
                # (NameError on first iteration); it must test the current
                # step name `_s` — compare the analogous PHASES loop.
                if self._global.has_key(_s):
                    s = PipePhase(_s, self._global)
                    s.go()
def produce(self,f1): """Produci una mappa""" ### Non posso produrre una mappa se non è indicata ### un'immagine sottostante e se non ci sono ### le BARMAPS needed(self._spec,'IMAGE','SERIES','ITERATOR') args = {} ### Gestione dello sfondo f1.hold(True) self.insert_background(f1) ### Gestione della LEGENDA # f1.hold(True) # if self._spec.has_key('MAPLEGEND'): # legend = self._spec['MAPLEGEND'] # if self._genspec.has_key(legend): # leg_spec = udict(self._genspec[legend]) # n = len(leg_spec) # a = plt.axes([.15,.77,.30,.015*n]) # a.set_xticks([]) # a.set_yticks([]) # l = 0 # for k,v in leg_spec.items(): # ## Escludi le chiavi che iniziano per _ # if k[0]=='_': continue # a.text(.05,l*.3+.15,"%s=%s"%(k.capitalize(),v),fontsize=6) # l += 1 # f1.add_artist(a) # # xticks=[], # # yticks=[]) unplotted = [] countries = get_list(self._spec['ITERATOR']) series = self._spec['SERIES'] if len(countries)==0: err="L'elenco dei countries è vuoto" logger.error(err) raise ValueError, err _h = {} _s = explode_list(series,countries) _v = [ self[_n] if self.has_key(_n) else None for _n in _s ] _C = dict([ (_c,_m) for _c,_n,_m in zip(countries,_s,_v) if _m is not None ]) # unplotted.extend( [ _n for _n,_m in zip(_s,_v) if _m is None ]) def hist(a,val,displ,color): Y = displ X_m = val*0.0 X_M = val a.hlines(Y,X_m,X_M,lw=4,color=color) def _mk_hist(f1,_k,_v1,_v0,label,m,M): xx = _Values[_k][0][0] yy = _Values[_k][0][1] # M = 13 # max(g,i)*1.3 a = plt.axes([xx, yy, .10, .05]) a.patch.set(color='white') a.patch.set_facecolor('#0000CD') a.patch.set_edgecolor('red') a.patch.set_alpha(0.7) plt.axis('off');plt.plot([0, 0],[0.1, .6],color='#0000CD',lw=.1) _fnt = 7 F=-2.5 a.set_xlim(xmin=F+m,xmax=M*8/7) a.set_ylim(ymin=0,ymax=1) l=F+(-.5 if min(_v0,_v1)<0 else .0)+(.2 if max(_v0,_v1)<10 else -.5) # take in account two figures numbers hist(a,_v0,.15,(.0,.0,.805)) a.text(l,.05,str("%.1f"%_v0),fontsize=_fnt,color="#0000CD") # ,backgroundcolor='w') hist(a,_v1,.52,(.99,.2,.2)) 
a.text(l,.40,str("%.1f"%_v1),fontsize=_fnt,color="#0000CD") #,backgroundcolor='w') a.text(l+1.0,.80,label,fontsize=10,color="#0000CD") #,backgroundcolor='w') a.set_xticks([]) a.set_yticks([]) f1.add_artist(a) M=0.0 m=0.0 for _k,_v in _C.items(): _w = _v['DATA'] _v0 = _w._data[-1] _v1 = _w._data[-2] M=max(M,_v0,_v1) m=min(m,_v0,_v1) for _k,_v in _C.items(): _w = _v['DATA'] _spec = udict(_v['SPEC']) try: label = _spec['label'] except KeyError: label = _k _v0 = _w._data[-1] _v1 = _w._data[-2] _mk_hist(f1,_k,_v1,_v0,label,m,M) return unplotted
# NOTE(review): `cfg={}` is a mutable default argument; it is unpacked into a
# 3-tuple below, so the default would raise ValueError anyway — confirm callers
# always pass cfg explicitly.
def _export_results_in_file(results, fname, order=None, kind='XLS', base=None, cfg={}, phase='raw'):
    """Export the series in *results* to a file via the postprocessor
    selected by *kind*, deriving column order/names and panel membership
    from the (jcfg, ocfg, xcfg) configuration triple in *cfg*.

    Returns the list of written output files, or None when there is
    nothing to export.
    """
    from e4t.process import getPostprocessor
    outfiles = []
    # If the dataset is empty, bail out
    if results is None or len(results) == 0:
        logger.debug('Empty dataset: no export')
        return
    logger.debug("Dataset with %d series: %s", len(results), ','.join(results))
    logger.debug("Phase: %s", phase)
    logger.debug("Requested var order: %s", order)
    jcfg, ocfg, xcfg = cfg
    # Extract the series
    tseries = []
    tseries_md = []
    names = []
    elems = sorted(results.items())
    ipanels = {}  # series label -> panel name, for panel discovery below
    if phase == 'raw':
        # raw phase: natural (sorted) order, no configured ordering
        order = None
    elif order is None:
        if ocfg:
            okind = ocfg.xget('KIND')
            # If kind is multiple get sub-elements
            if re.match('^multiple$', okind, re.I):
                elements = ocfg.xget_list('ELEMENTS')
            else:
                elements = [ocfg.xget('__LABEL'), ]
            # for subelements
            if elements:
                order = []
                names = []
                for el in elements:
                    hcfg = xcfg.xget(el)
                    hkind = hcfg.xget('KIND')
                    # If kind=table get order and names from structure
                    if re.match(u'table', hkind, re.I):
                        structure = hcfg.xget('STRUCTURE')
                        if structure:
                            order.append(structure)
                            names.append(structure)
                    # else if kind=figure get order and names from panels/series
                    elif re.match('figure', hkind, re.I):
                        if 'PANELS' in hcfg:
                            panels = hcfg.xget_list('PANELS')
                            for panel in panels:
                                pcfg = xcfg.xget(panel)
                                # Collect series from all three possible keys,
                                # remembering which panel each one belongs to.
                                if 'LEFT SERIES' in pcfg:
                                    snames = pcfg.xget_list('LEFT SERIES')
                                    ser = get_series_label(xcfg, snames)
                                    ipanels.update(dict([(s, panel) for s in ser]))
                                    order.extend(ser)
                                    names.extend(snames)
                                if 'SERIES' in pcfg:
                                    snames = pcfg.xget_list('SERIES')
                                    ser = get_series_label(xcfg, snames)
                                    ipanels.update(dict([(s, panel) for s in ser]))
                                    order.extend(ser)
                                    names.extend(snames)
                                if 'RIGHT SERIES' in pcfg:
                                    snames = pcfg.xget_list('RIGHT SERIES')
                                    ser = get_series_label(xcfg, snames)
                                    ipanels.update(dict([(s, panel) for s in ser]))
                                    order.extend(ser)
                                    names.extend(snames)
                                logger.debug("%s/%s - %d/%d | P:%d %s", snames, ser, len(order), len(names), len(ipanels), ','.join(ipanels.keys()))
                order = ','.join(order)
                names = ','.join(names)
                # A '$$' wildcard in the names makes them unusable as-is
                if '$$' in names:
                    names = None
    if order:
        order = get_list(order)
    else:
        order = sorted(results.keys())
    if names:
        if isinstance(names, (basestring)):
            names = get_list(names)
        # Expand '$$' wildcards against the available result keys
        N = []
        for name in names:
            if '$$' in name:
                res = grep(name.replace('$$', '..'), results.keys())
                N.extend(res)
            else:
                N.append(name)
        names = N
    else:
        names = order
    logger.debug("Var order: %s", ','.join(order))
    # NOTE(review): this log line joins `order` again — probably meant `names`.
    logger.debug("Var names: %s", ','.join(order))
    # Rebuild elems as (result_key, value, config_section, display_name)
    elems = []
    for _i, (nam, n2) in enumerate(zip(order, names)):
        if nam in results:
            N = names[_i] if nam in names else None
            if N is None or '$$' in N:
                N = nam if nam in xcfg else None
            C = xcfg[N] if N and N in xcfg else None
            if not C:
                C = xcfg[n2] if n2 in xcfg else None
            elems.append((nam, results[nam], C, n2))
    if not elems:
        logger.warn('No elements to export')
        return
    logger.debug('IPANELS: %s', ipanels)
    for k, v, c, n in elems:
        logger.debug('%s/%s %s', k, n, c)
        # Bare arrays/lists carry no metadata of their own
        _ts = v if type(v) in (np.ndarray, list) else v._data
        _tm = {'Name': k, 'Type': type(v)} if type(v) in (np.ndarray, list) else v._metadata
        if c:
            if 'LABEL' in c:
                _tm['Description'] = c['LABEL']
            elif 'TITLE' in c:
                _tm['Description'] = c['TITLE']
            else:
                _tm['Description'] = None
            # panel discover
            data = None
            if 'DATA' in c and c['DATA'] in ipanels:
                data = c['DATA']
            if '__LABEL' in c and c['__LABEL'] in ipanels:
                data = c['__LABEL']
            if data and data in ipanels:
                _tm['panel'] = ipanels[data]
            else:
                _tm['panel'] = None
            _tm['metadata'] = c
        else:
            if 'Name' not in _tm:
                _tm['Name'] = k
            _tm['Description'] = k
            _tm['metadata'] = {}
            _tm['panel'] = None
        if len(_tm['metadata']) == 0:
            _tm['metadata'] = {'__LABEL': k}
        tseries.append(_ts)
        tseries_md.append(copy.deepcopy(_tm))
        # NOTE(review): this appends to the same `names` list used as headers
        # below, so headers = original names + one entry per exported elem —
        # confirm this is intended.
        names.append(k)
        logger.debug('_tm: %s', _tm)
        _tm = None
    resultset = tseries
    headers = names
    metadata = tseries_md
    if len(resultset) < 1:
        logger.error('NO RESULTS')
        return
    # Output file name handling
    outfile = "output.%s" % kind.lower()
    if fname is not None:
        outfile = fname
    logger.debug("Dataset output in file %s:", outfile)
    logger.debug("Dataset md %s:", metadata)
    # Select the postprocessor
    postproc = getPostprocessor(kind)
    # Transform the result
    postproc.export(resultset, outfile, header=headers, metadata=metadata, cfg=cfg)
    outfiles.append(outfile)
    return outfiles
def _read_xls(self, fname, ext, kw):
    """Read timeseries (or plain index/value arrays) from the Excel file
    *fname*, driven by the option dictionary *kw* (SHEET, ORIENT, DATES,
    INDEX, FREQ, SERIES, NAME, RENAME, ...).

    Returns a dict mapping upper-cased series names to Timeseries objects
    (or np.arrays when an explicit INDEX is used).
    """
    logger.debug('Read_XLS from %s', fname)
    TS = {}
    kw = udict(kw)
    book = xlrd.open_workbook(fname)

    def _get_sheet(book, kw):
        # get sheet: by name (SHEET) or by position (SHEETNUM, default 0)
        get_sheet = book.sheet_by_index
        sheetname = kw.xget('SHEET')
        if sheetname:
            sheet = exls.get_sheet(book, sheetname)
        else:
            sheetname = kw.xget('SHEETNUM', 0)
            sheet = book.sheet_by_index(sheetname)
        if sheet is None:
            logger.error('Non posso acquisire il foglio %s', sheetname)
            raise ValueError, "Sheet"
        return sheet
    sheet = _get_sheet(book, kw)

    def _get_orient(kw):
        # Data orientation (H -> horizontal, V -> vertical); default V
        orient = kw.xget('ORIENT', 'V')
        if orient not in ('H', 'V'):
            logger.warn('ORIENT deve essere tra H o V, non %s. Uso V!', orient)
            orient = 'V'
        return orient
    orient = _get_orient(kw)
    # SIZE
    nrows = sheet.nrows
    ncols = sheet.ncols
    # Date or index: an explicit INDEX range wins over the DATES range
    dates = kw.xget('DATES', "A2:")
    index = kw.xget('INDEX')
    Aindex = index if index else dates
    # NOTE(review): uses logging.debug while the rest uses `logger`.
    logging.debug("{PARMS}-%s %s %s %s", 'IDX' if index else 'DAT', sheet.name, Aindex, orient)
    (BC, BR, EC, ER) = _get_total_range(sheet, Aindex, orient)
    logger.debug("{RANGE} %s:%s", _Aref(BC, BR), _Aref(EC, ER))
    date_values = _get_values(sheet, BC, BR, EC, ER, kw)

    def _get_index(date_values):
        # Plain integer index (no dates)
        v = [int(d) for d in date_values.flatten()]
        date_array = np.array(v)
        return date_array
    # Find index as Vector or DateRange
    if index:
        date_array = _get_index(date_values)
    else:
        # TIMESERIES: build a ts.date_array according to FREQ
        # NOTE(review): this stub is immediately shadowed by the real
        # _get_tseriesD defined below — dead code.
        def _get_tseriesD():
            pass

        def _get_tseriesA(freq, date_values, kw):
            # Annual frequency: cell values are the year numbers
            v = [int(d) for d in date_values.flatten() if not np.isnan(d)]
            D = [ts.Date(freq=str(freq), value=_v) for _v in v]
            date_array = ts.date_array(D)
            return date_array

        def _get_tseriesQ(freq, date_values, kw):
            # print date_values
            # Quarterly: quarter number per cell, optional YEAR offset
            by = 0
            if kw.has_key('YEAR'):
                # NOTE(review): eval() on a config value — trusted input only
                by = eval(kw['YEAR']) - 1
            v = [(int(d) - 1) % 4 + 1 + (int(_i / 4) * 4) + by * 4 for _i, d in enumerate(date_values.flatten()) if not np.isnan(d)]
            D = [ts.Date(freq=str(freq), value=_v) for _v in v]
            date_array = ts.date_array(D)
            return date_array

        def _get_tseriesM(freq, date_values, kw):
            # Monthly: DFORMAT selects integer-year cells or Excel dates
            dformat = 'INT'
            if kw.has_key('DFORMAT'):
                dformat = kw['DFORMAT']
            if dformat == 'INT':
                v = [(int(d) - 1) * 12 + i % 12 + 1 for i, d in enumerate(date_values.flatten()) if not np.isnan(d)]
                D = [ts.Date(freq=str(freq), value=_v) for _v in v]
            elif dformat == 'XL_DATE':
                v = [xlrd.xldate_as_tuple(d, 0) for i, d in enumerate(date_values.flatten()) if not np.isnan(d)]
                D = [ts.Date(freq=str(freq), year=_v[0], month=_v[1]) for _v in v]
            else:
                logger.error('DATE FORMAT NOT SUPPORTED ON EXCEL READING')
                raise ValueError, dformat
            # print "FREQ=|%s|"%freq,D
            date_array = ts.date_array(D)
            return date_array

        def _get_tseriesD(freq, date_values, kw):
            # Daily: cells are Excel serial dates
            v = [xlrd.xldate_as_tuple(int(d), 0) for i, d in enumerate(date_values.flatten()) if not np.isnan(d)]
            D = [ts.Date(freq=str(freq), year=_v[0], month=_v[1], day=_v[2]) for _v in v]
            date_array = ts.date_array(D)
            return date_array
        # Dispatch table: frequency letter -> date-array builder
        op = {'A': _get_tseriesA, 'Y': _get_tseriesA, 'Q': _get_tseriesQ, 'M': _get_tseriesM, 'D': _get_tseriesD, }
        freq = 'D'
        if 'FREQ' in kw:
            freq = kw['FREQ'].strip()
            if freq in op:
                fnc = op[freq]
                date_array = fnc(freq, date_values, kw)
            else:
                logger.error('UNKNOWN FREQ %s', freq)
                # NOTE(review): three-expression raise — in Python 2 the third
                # part is treated as a traceback; probably meant
                # ValueError('UNKNOWN FREQ %s' % freq).
                raise ValueError, 'UNKNOWN FREQ %s', freq
        else:
            logger.error('ABSENT FREQ %s', freq)
            # NOTE(review): same three-expression raise issue as above.
            raise ValueError, 'ABSENT FREQ %s', freq
        logger.debug("IND: %s", ','.join([str(x) for x in date_array]))
    # Series range (default: everything from B2 on)
    series = "B2:"
    if kw.has_key('SERIES'):
        series = kw['SERIES']
    (Bc, Br, Ec, Er) = _get_total_range(sheet, series, invert(orient))
    Nseries = (Er - Br) if orient == 'H' else (Ec - Bc)
    B = Bc if orient == 'V' else Br
    E = Ec if orient == 'V' else Er
    logger.debug("(Bc=%d,Br=%d,Ec=%d,Er=%d,B=%d,E=%d)", Bc, Br, Ec, Er, B, E)
    # Series-name handling: NAME may be a literal, a cell range, or a
    # space-separated composition of both (with |converter|format suffixes)
    name = kw.xget('NAME', 'TS')
    logger.debug("{NAME} %s", name)
    _cmp = name
    if ' ' not in name and ':' in name:
        # Single cell range: read the names from the sheet
        logger.debug("{NAME} with : %s", name)
        _cmp = []
        _fmt = "%s"
        _n = name
        _f = str
        m = rx_range.search(_n)
        _name = [name, ]
        if m:
            (_bc, _br, _ec, _er) = _get_range4(m.group(0))
            logger.debug("%s", (_bc, _br, _ec, _er))
            F = [_f(v) if len(unicode(v)) > 0 else None for v in _get_values_list(sheet, _bc, _br, _ec, _er, kw)]
            # Empty cells get a placeholder that is later made unique via ${NUM}
            _name = [(_fmt.lower() % _p) if _p is not None else "_${NUM}_!" for _p in F]
    elif ' ' in name:
        # Composite name: concatenate the parts element-wise
        _cmp = []
        _ns = name.split(' ')
        M = 0
        for i, _n in enumerate(_ns):
            _f = str
            _fmt = "%s"
            if '|' in _n:
                # part|converter|format
                (_n, _f, _fmt) = _n.split('|')
                # NOTE(review): eval() of the converter name — trusted input only
                _f = eval(_f.lower())
            if '=' in _n:
                m = rx_range_eq.search(_n)
                if m:
                    (_bc, _br, _ec, _er) = _get_range4(m.group(1))
                    logger.debug("%s", (_bc, _br, _ec, _er))
                    F = [_f(v) if len(unicode(v)) > 0 else None for v in _get_values_list(sheet, _bc, _br, _ec, _er, kw)]
                    # print "FMt=",F
                    F = [(_fmt.lower() % _p) if _p is not None else "_${NUM}_!" for _p in F]
                    _cmp.append(F)
                    M = max(M, len(F))
            else:
                _cmp.append([_f(_n), ])
                M = max(M, 1)
        from itertools import product, izip, cycle
        # Pad every component list to length M by cycling its values
        for p in range(0, len(_cmp)):
            _P = _cmp[p]
            if len(_P) < M:
                _cmp[p] = []
                for i, _p in enumerate(cycle(_P)):
                    if i < M:
                        _cmp[p].append(_p)
                    else:
                        break
                _P = _cmp[p]
            _cmp[p] = _P
        _name = map(operator.add, *_cmp)
    else:
        # Literal name: number it with ${NUM}
        _name = ["%s${NUM}" % _cmp, ]
    # Pad with generic names if fewer names than series
    Nnames = len(_name)
    if Nnames < Nseries:
        for i in range(Nnames, Nseries):
            _name.append("TS${NUM}")
    for i, x in enumerate(range(B, E + 1)):
        #print _name
        #print i,x,len(_name)
        _n = _name[i].strip()
        _ts = None
        # Disambiguate duplicate names with the series ordinal
        if _n in TS.keys():
            _n += "_${NUM}_?"
        N = Template(_n).safe_substitute({'NUM': i})
        N = unicode(N.upper().strip())  # .decode('utf-8')
        if orient == 'H':
            _tv = _get_values(sheet, BC, x, EC, x, kw)
        elif orient == 'V':
            _tv = _get_values(sheet, x, BR, x, ER, kw)
            #print "_tv",_tv
            # Flatten the one-column result to a 1-D array
            _tv = np.array([_v[0] for _v in _tv])
            #print "_tv2",_tv
            # print date_array
        else:
            logger.error('ORIENTTION ERROR')
            raise ValueError, 'ORIENT'
        if index is None:
            # isa timeseries: align data with the date array
            _ts = ts.time_series(data=_tv[0:len(date_array)], dates=date_array, copy=True)
            TS[N] = Timeseries(data=_ts, name=N)
        else:
            # isa np.array
            TS[N] = _tv
        # _report(_ts)
    logger.debug("Read %d series from excel: %s", Nseries, ','.join(sorted(TS.keys())))
    # Rename Phase: RENAME entries are "from>to" pairs
    if kw.has_key('RENAME'):
        rename_desc = get_list(kw['RENAME'])
        for r in rename_desc:
            (_f, _t) = r.split('>')
            if TS.has_key(_f):
                if not TS.has_key(_t):
                    TS[_t] = TS[_f]
                    del TS[_f]
                else:
                    logger.error('La serie %s già esiste nell\'IS', _t)
            else:
                logger.error('La serie %s non esiste nell\'IS', _f)
    # Drop Phase
    logger.debug("Read XLS : ")
    # for k,t in TS.items():
    #     print k
    #     print tsinfo(t)
    #     look(t._data)
    return TS
def key_presence(cfg, *args):
    """Compare cfg's keys (ignoring those starting with '_') against the
    model list in args[0], case-insensitively.

    Returns a pair (extra, missing): keys present in cfg but not in the
    model, and model keys absent from cfg.
    """
    present = set(k.upper() for k in cfg.keys() if k[0] != "_")
    expected = set(m.upper() for m in get_list(args[0]))
    logger.debug("kp %s >>> %s", present, expected)
    return (present - expected, expected - present)
def get_table_list(_value):
    """Split *_value* into items, drop separator ('>') and empty entries,
    and keep only the part before any '|' in each remaining item."""
    items = []
    for item in get_list(_value):
        if item in (u'>', u''):
            continue
        if '|' in item:
            item = item.split('|')[0]
        items.append(item)
    return items