def get(self, link):
    # Read-through cache: fall back to a parameterized GQL query instead of
    # concatenating the URL into the query string (avoids GQL injection).
    page_list = get_cache(link, db.GqlQuery("SELECT * FROM Page WHERE url = :1", link))
    page_list = list(page_list)
    url_ob = None
    version = self.request.get('v')
    if version:
        version = int(version)
    if len(page_list) > 0:
        url_ob = page_list[0]
    if url_ob:
        if not version:
            version = -1  # default to the most recent stored version
        if isinstance(version, int) and version < len(url_ob.version):
            url = 'Main' if link == '/' else link[1:]
            user_base = self.request.cookies.get('user_id')
            if user_base:
                user_base = user_base.split('|')[0]
            else:
                user_base = ''
            banco = get_indices(link)[0]
            tarjeta = get_indices(link)[1]
            self.render('children.html', page=url_ob.page[version], url=url,
                        page_url=all_url()[0], user_base=user_base, link=link,
                        credito=tarjeta, banco=banco, url_full=self.request.url)
        else:
            # Requested version is out of range: open the editor at the latest one.
            self.redirect('/_edit' + link + '?v=' + str(len(url_ob.version) - 1))
    else:
        # No such page yet: send the user to the editor to create it.
        self.redirect('/_edit' + link)
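# get_cache is used throughout these handlers but not defined in this section.
# A minimal read-through sketch of it, assuming an App Engine memcache backend;
# the TTL and the handling of lazy GQL query objects are assumptions.
from google.appengine.api import memcache

def get_cache(key, fresh_value, ttl=3600):
    """Return the cached value for key, storing fresh_value on a cache miss."""
    cached = memcache.get(key)
    if cached is not None:
        return cached
    # Materialize lazy datastore query objects so a plain list is cached.
    if isinstance(fresh_value, db.GqlQuery):
        fresh_value = list(fresh_value)
    memcache.set(key, fresh_value, time=ttl)
    return fresh_value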
def process_templates():
    """Render one delete-indices action file per index listed in indices.csv."""
    root = os.path.dirname(os.path.abspath(__file__))
    templates_dir = os.path.join(root, 'templates')
    env = Environment(loader=FileSystemLoader(templates_dir))
    template = env.get_template('delete_indices_action.yml.j2')
    # Create the output directory next to this script, matching the path the
    # rendered files are written to (the original checked 'yml' relative to
    # the current working directory instead).
    out_dir = os.path.join(root, 'yml')
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    for i in indices.get_indices('indices.csv'):
        filename = os.path.join(out_dir, 'delete_indices_action_' + i['index'] + '.yml')
        with open(filename, 'w') as temp:
            temp.write(template.render(index_prefix=i['index'],
                                       older_than_days=i['age']))
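# indices.get_indices('indices.csv') above is assumed to parse one row per
# index into dicts exposing 'index' and 'age' keys, since those are the only
# fields the template rendering uses. A hypothetical CSV-backed sketch,
# assuming a header row with columns named index and age:
import csv

def get_indices(csv_path):
    """Yield {'index': ..., 'age': ...} dicts, one per CSV row."""
    with open(csv_path) as f:
        for row in csv.DictReader(f):
            yield {'index': row['index'], 'age': row['age']}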
### TEST
start_ts = time.time()
for i in range(training_len, data.shape[0], blocksize):
    # SDO was trained beforehand and only predicts; other detectors are refit per block.
    if alg == 'sdo':
        scores[i:(i + blocksize)] = detector.predict(data[i:(i + blocksize), :])
    else:
        scores[i:(i + blocksize)] = detector.fit_predict(data[i:(i + blocksize), :])
    print(".", end='', flush=True)
    if (i / blocksize) % 30 == 0:
        print("Datapoints: ", i)
end_ts = time.time()
elapsed_ts = end_ts - start_ts

scores_file = '%sSCORES_%s_%d_T%d_%d.pickle.gz' % (outpath, alg, int(dataset_idx), int(timepam), int(idx))
with gzip.open(scores_file, 'wb') as f:
    pickle.dump(scores, f)

if np.any(np.isnan(scores)):
    print('Warning: marking NaN as non-outlier')
    scores[np.isnan(scores)] = 0

perf = indices.get_indices(labels[training_len:], transform_scores(scores[training_len:]))
new_row = ("dataset: %d, idx: %d, P@n: %.2f, aP@n: %.2f, AP: %.2f, aAP: %.2f, "
           "MF1: %.2f, aMF1: %.2f, ROC: %.2f, Ttr: %.3f, Tts: %.3f"
           % (idf, idx, perf['Patn'], perf['adj_Patn'], perf['ap'], perf['adj_ap'],
              perf['maxf1'], perf['adj_maxf1'], perf['auc'], elapsed_tr, elapsed_ts))
res.append(new_row)
print("\n", new_row)

df = pd.DataFrame(data=res)
outfile = outpath + "SUMMARY_" + alg + "_T" + str(timepam) + ".txt"
print("Summary file (txt):", outfile, "\n")
df.to_csv(outfile, sep=',', header=False)
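# The perf dict consumed above comes from indices.get_indices(labels, scores).
# A rough sketch of such a metrics helper, assuming boolean labels, higher
# scores meaning "more outlying", and sklearn for the standard metrics; the
# adjusted variants (aP@n, aAP, aMF1) reported by the real module are omitted.
import numpy as np
from sklearn.metrics import average_precision_score, precision_recall_curve, roc_auc_score

def get_indices_sketch(labels, scores):
    labels = np.asarray(labels, dtype=bool)
    n = labels.sum()
    # P@n: precision within the n top-ranked points, n = number of true outliers.
    top_n = np.argsort(scores)[::-1][:n]
    patn = labels[top_n].mean()
    # Maximum F1 over all score thresholds.
    prec, rec, _ = precision_recall_curve(labels, scores)
    f1 = 2 * prec * rec / np.clip(prec + rec, 1e-12, None)
    return {'Patn': patn,
            'ap': average_precision_score(labels, scores),
            'maxf1': f1.max(),
            'auc': roc_auc_score(labels, scores)}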
def post(self):
    user_base = self.request.cookies.get('user_id')
    if user_base:
        user_base = user_base.split('|')[0]
    else:
        user_base = ''
    banco = self.request.get('banco')
    tarjeta = self.request.get('tarjeta')
    # Cache the scraped card data, page content, and benefits under per-card keys.
    datos = get_cache(banco + tarjeta + '_datos',
                      datos_tarjeta('Promerica', tarjeta))
    contenido = get_cache(banco + tarjeta + '_contenido',
                          obtener_tarjeta_promerica(tarjeta, generar_promerica('Promerica')))
    beneficios = get_cache(banco + tarjeta + '_beneficios',
                           obtener_beneficios_promerica(datos[0], datos[1], datos[2],
                                                        ['h2', 'collapseomatic',
                                                         get_indices(self.request.get('link'))[1]]))
    content = formato_general(self.request.get('title'), contenido, beneficios)
    self.render('generador.html', beneficios=beneficios, cont=contenido,
                contenido=content, user_base=user_base,
                url='Generador Promerica', link=self.request.get('link'))
def post(self):
    user_base = self.request.cookies.get('user_id')
    if user_base:
        user_base = user_base.split('|')[0]
    else:
        user_base = ''
    banco = self.request.get('banco')
    tarjeta = self.request.get('tarjeta')
    datos = get_cache(banco + tarjeta + '_datos',
                      datos_tarjeta('LopezDeHaro', tarjeta))  # TODO: continue here
    contenido = get_cache(banco + tarjeta + '_contenido',
                          obtener_tarjeta_lopezdeharo(tarjeta,
                                                      generar_info_lopezdeharo(generar_lopezdeharo('LopezDeHaro'))))
    beneficios = get_cache(banco + tarjeta + '_beneficios',
                           obtener_beneficios(datos[0], datos[1], datos[2],
                                              ['div', 'subtitulo',
                                               get_indices(self.request.get('link'))[1]]))
    content = formato_general(self.request.get('title'), contenido, beneficios)
    self.render('generador.html', beneficios=beneficios, cont=contenido,
                contenido=content, user_base=user_base,
                url='Generador LopezDeHaro', link=self.request.get('link'))
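# Both post() handlers above follow the same webapp2 RequestHandler shape as
# the wiki get() handler. A hypothetical route table wiring them up; the class
# names are assumptions, since only the method bodies appear in this section.
import webapp2

app = webapp2.WSGIApplication([
    ('/generador/promerica', GeneradorPromerica),      # Promerica post() handler
    ('/generador/lopezdeharo', GeneradorLopezDeHaro),  # LopezDeHaro post() handler
    (r'/_edit(/.*)?', EditPage),                       # editor targeted by the redirects
    (r'(/.*)?', WikiPage),                             # catch-all wiki get() handler
], debug=True)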
)

for k, scenario in enumerate(scenarios):
    for j, algorithm in enumerate(algorithms):
        for i in range(0, number_of_datasets):
            datasetname = (path2data + "/outlierResult_" + algorithm + "_" + scenario
                           + "_data_" + str(i + 1) + ".txt")
            data_i = genfromtxt(datasetname, delimiter=',')
            rank = data_i[train_samples:, 1].astype(float)
            data_i = data_i[train_samples:, :].astype(int)
            # Clamp predictions to {0, 1} before converting to booleans.
            data_i[:, 2] = (data_i[:, 2] > 0).choose(data_i[:, 2], 1)
            pred = np.array(data_i[:, 2], dtype=bool)
            label = np.array(data_i[:, 3], dtype=bool)
            # Flip the boolean encoding of predictions and labels.
            label = np.invert(label)
            pred = np.invert(pred)
            ki = i + k * number_of_datasets
            res = indices.get_indices(label, rank)
            resPatn[j][ki] = res['Patn']
            resap[j][ki] = res['ap']
            resMF1[j][ki] = res['maxf1']
            resAuc[j][ki] = res['auc']
            # Progress line: running mean/std over this scenario's datasets so far.
            print(" %d, %s, %d, %s, %d: %f, %f, %f, %f, %f, %f, %f, %f " % (
                k, scenario, j, algorithm, i,
                round(np.nanmean(resMF1[j][ki - i:ki]), 3), round(np.nanstd(resMF1[j][ki - i:ki]), 3),
                round(np.nanmean(resPatn[j][ki - i:ki]), 3), round(np.nanstd(resPatn[j][ki - i:ki]), 3),
                round(np.nanmean(resap[j][ki - i:ki]), 3), round(np.nanstd(resap[j][ki - i:ki]), 3),
                round(np.nanmean(resAuc[j][ki - i:ki]), 3), round(np.nanstd(resAuc[j][ki - i:ki]), 3)))