def training_LINE(args, G, ppmi, model, sess, epoch_start, epoch_end, saver_ckpt, ckpt_save_path):
    """Train a LINE-based embedding model for epochs (epoch_start, epoch_end].

    Each epoch consumes `iterations` mini-batches from G.edge_batch, runs the
    adversarial perturbation ops first when args.adver is set, then the main
    optimizer.  Evaluation runs every epoch and the embedding with the highest
    summed evaluation score is kept.

    Returns:
        (best_epoch, best_embedding): epoch with the best summed evaluation
        results and the corresponding embedding matrix (None if none improved
        on the initial score of 0).
    """
    #############################
    # recording best results
    best_epoch = 0
    best_embedding = None
    score = 0
    #############################
    batch_generator = G.edge_batch
    # +1 so the remainder of edge_num / batch_size still gets a batch
    iterations = int(G.edge_num / args.batch_size) + 1
    print('Begin training...')
    for epoch in range(epoch_start + 1, epoch_end + 1):
        epoch_begin_time = time()
        for j in range(iterations):
            pos_pairs, neg_pairs = next(batch_generator)
            feed_dict = batch_feed_dict_LINE(model, pos_pairs, neg_pairs, ppmi)
            if args.adver:
                # Adversarial updates: second-order bases perturb both target
                # and context embeddings; LINE_1 perturbs only the target side.
                if args.base == 'deepwalk' or args.base == 'node2vec' or args.base == 'LINE_2':
                    sess.run([model.update_T, model.update_C], feed_dict)
                elif args.base == 'LINE_1':
                    sess.run([model.update_T], feed_dict)
            sess.run(model.optimizer, feed_dict)
        epoch_training_time = time() - epoch_begin_time
        # NOTE(review): "% 1 == 0" is always true, so evaluation runs every
        # epoch; presumably the modulus was meant to be configurable.
        if (epoch - epoch_start - 1) % 1 == 0:
            eval_begin_time = time()
            results = evaluation.evaluate(model, sess, args, epoch)
            print("Epoch [%d] - Training [%.1f s] Evaluation [%.1f s]" % (epoch, epoch_training_time, time() - eval_begin_time))
            ###########################################################
            # recording best results
            if np.sum(results) > score:
                best_epoch = epoch
                score = np.sum(results)
                if args.normalized:
                    best_embedding = sess.run(model.get_normalized_embeddings())
                else:
                    best_embedding = sess.run(model.embedding_T)
            ###########################################################
        # periodic checkpointing every args.ckpt epochs
        if args.ckpt > 0 and (epoch - epoch_start) % args.ckpt == 0:
            saver_ckpt.save(sess, ckpt_save_path + 'weights', global_step=epoch)
        # one extra checkpoint at the end of (non-adversarial) pretraining
        if (not args.adver) and (epoch) == (args.pretraining_nepoch + epoch_start):
            saver_ckpt.save(sess, ckpt_save_path + 'weights', global_step=epoch)
    return best_epoch, best_embedding
def main(input_path, shape_label_path, gender_label_path, checkpoint_path, device, silhouettes_from):
    """Evaluate a single-input shape regressor over a directory of images.

    Args:
        input_path: directory containing the .png/.jpg input images.
        shape_label_path: .npy file with per-image ground-truth shape labels.
        gender_label_path: .npy file with per-image gender labels.
        checkpoint_path: torch checkpoint holding 'best_model_state_dict'.
        device: torch device to run on.
        silhouettes_from: forwarded to evaluate(); selects the silhouette source.

    Side effects:
        Writes the collected per-image metrics to
        evaluation/results/evaluation_measurement_exp_0.pickle.
    """
    regressor = SingleInputRegressor(resnet_in_channels=18, resnet_layers=18, ief_iters=3)
    regressor.to(device)
    checkpoint = torch.load(checkpoint_path, map_location=device)
    regressor.load_state_dict(checkpoint['best_model_state_dict'])
    # BUG FIX: the original printed "Regressor loaded" before the weights
    # were actually loaded; print after load_state_dict succeeds.
    print("Regressor loaded. Weights from:", checkpoint_path)

    shapes = np.load(shape_label_path)
    genders = np.load(gender_label_path)
    # Labels are assumed to align with the sorted image order — TODO confirm
    # against how the label files were generated.
    image_fnames = [f for f in sorted(os.listdir(input_path))
                    if f.endswith(('.png', '.jpg'))]

    file_path = 'evaluation/results/evaluation_measurement_exp_0.pickle'
    evaluation_file = {
        'pve_neutral': [],
        'height': [],
        'weight': [],
        'chest': [],
        'hip': []
    }

    for i, fname in enumerate(image_fnames):
        pve_neutral, weight, height, chest, hip = evaluate(
            os.path.join(input_path, fname),
            shapes[i],
            genders[i],
            regressor,
            device,
            silhouettes_from=silhouettes_from)
        evaluation_file['pve_neutral'].append(pve_neutral[0])
        evaluation_file['weight'].append(weight)
        evaluation_file['height'].append(height)
        evaluation_file['chest'].append(chest)
        evaluation_file['hip'].append(hip)
        print(i)  # progress indicator

    # ROBUSTNESS: make sure the results directory exists before dumping.
    os.makedirs(os.path.dirname(file_path), exist_ok=True)
    with open(file_path, 'wb') as fp:
        pickle.dump(evaluation_file, fp, protocol=pickle.HIGHEST_PROTOCOL)
def __evaluate(args):
    """Run LDA and/or Pachinko evaluation for every query file.

    Args:
        args: parsed CLI namespace; uses input, query, eval, pages, determ,
            lda_k1, pa_k1, pa_k2.

    Results are written (or printed) via evaluation.save_or_print under
    '<input>queries/lda' and '<input>queries/pa' respectively.
    """
    queries, filenames = evaluation.get_queries_and_filenames(args.input, args.query)
    # PERFORMANCE: the model files depend only on args, so load them once up
    # front instead of re-reading them from disk on every query iteration.
    lda_model = None
    if 'lda' in args.eval:
        lda_model = tp_lda.load_model(args.input, 'lda_{}.mdl'.format(args.lda_k1))
    pa_model = None
    if 'pa' in args.eval:
        pa_model = tp_pachinko.load_model(
            args.input, 'pa_{}_{}.mdl'.format(args.pa_k1, args.pa_k2))
    for filename, query in progressbar.progressbar(zip(filenames, queries)):
        if lda_model is not None:
            tmp = evaluation.evaluate(lda_model, query)
            res_lda = tp_lda.interpret_evaluation_results(
                tmp, args.input, args.pages, args.determ, args.lda_k1)
            evaluation.save_or_print(args, '{}queries/{}'.format(args.input, 'lda'),
                                     filename, res_lda)
        if pa_model is not None:
            tmp = evaluation.evaluate(pa_model, query)
            res_pa = tp_pachinko.interpret_evaluation_results(
                tmp, args.input, args.pages, args.determ, args.pa_k1, args.pa_k2)
            evaluation.save_or_print(args, '{}queries/{}'.format(args.input, 'pa'),
                                     filename, res_pa)
def exec_evaluation(context, attr_label, formulas):
    """Try each formula in order until one yields a non-None value.

    Args:
        context: evaluation context passed through to evaluation.evaluate().
        attr_label: human-readable attribute label used in warning messages.
        formulas: iterable of dicts with 'type' and 'param' keys, tried in
            order; the first formula producing a non-None value wins.

    NOTE(review): the computed `value` and accumulated `warnings` are never
    returned by the visible code — confirm whether a trailing `return` was
    lost from this chunk.
    """
    warnings = []
    value = None
    if formulas:
        for formula in formulas:
            if value is not None:
                break  # first successful formula wins
            eval_type = formula['type']
            eval_param = formula['param']
            try:
                value = evaluation.evaluate(eval_type, eval_param, context)
            # BUG FIX: `except Exception, e` is Python-2-only syntax and a
            # SyntaxError on Python 3; the bound exception was unused anyway.
            except Exception:
                # Record a warning and fall through to the next formula.
                msg = u'[E]公式{0}-{1}计算值{2}失败.'.format(eval_type, eval_param, attr_label)
                warnings.append(msg)
# CIFAR-100 fine-label (0-99) to coarse "superclass" (0-19) grouping:
# row i lists the five fine labels that belong to superclass i.
super_label = [
    [72, 4, 95, 30, 55],
    [73, 32, 67, 91, 1],
    [92, 70, 82, 54, 62],
    [16, 61, 9, 10, 28],
    [51, 0, 53, 57, 83],
    [40, 39, 22, 87, 86],
    [20, 25, 94, 84, 5],
    [14, 24, 6, 7, 18],
    [43, 97, 42, 3, 88],
    [37, 17, 76, 12, 68],
    [49, 33, 71, 23, 60],
    [15, 21, 19, 31, 38],
    [75, 63, 66, 64, 34],
    [77, 26, 45, 99, 79],
    [11, 2, 35, 46, 98],
    [29, 93, 27, 78, 44],
    [65, 50, 74, 36, 80],
    [56, 52, 47, 59, 96],
    [8, 58, 90, 13, 48],
    [81, 69, 41, 89, 85],
]
# Remap Y in place from fine labels to superclass ids.  Y_copy snapshots the
# original values so an earlier rewrite (e.g. fine label 72 -> 0) cannot be
# re-matched and cascaded by a later superclass index.
Y_copy = copy.copy(Y)
for i in range(20):
    for j in super_label[i]:
        Y[Y_copy == j] = i
# NOTE(review): presumably NMI/ARI/F/ACC are clustering agreement metrics and
# DB/S/S_DBW are internal validity indices — confirm against the evaluation
# module.
nmi, ari, f, acc, db, s, s_dbw = evaluation.evaluate(
    Y, X, extracted_features, args.dataset)
print(
    'NMI = {:.4f} ARI = {:.4f} F = {:.4f} ACC = {:.4f} DB = {:.4f} S = {:.4f} S_DBW = {:.4f}'
    .format(nmi, ari, f, acc, db, s, s_dbw))
# Encode the audio samples with the DCT-based codec
# (0.02 is presumably the frame length in seconds — TODO confirm).
encoded = codec.encode(audData, fs, 0.02, dct2, 0)
a = current_milli_time()
print('Tempo de encode: ', a - b)
encoded.descartar(160)
# Compressed file
encoded.saveToFile("./result/%s/encoded.dct" % prefixoNome)
b = current_milli_time()
decoded = codec.decodeFromEncoded(encoded, dct2)
a = current_milli_time()
print('Tempo de decode: ', a - b)
# Recovered file (decoded straight from the in-memory encoding)
wv.save_wave("./result/%s/recuperado-memoria.wav" % prefixoNome, fs, decoded, 16)
# Restore the compressed data from the saved file
newEncoded = enc.WaveEncoded.loadFromFile("./result/%s/encoded.dct" % prefixoNome)
# Recover the samples from the compressed data restored from the file
newdecoded = codec.decodeFromEncoded(newEncoded, dct2)
# Recovered file produced from newEncoded
wv.save_wave("./result/%s/recuperado-arquivo.wav" % prefixoNome, fs, newdecoded, 16)
# Compare the original wave against the in-memory recovery and write the
# metrics file.  NOTE: "eval" here is a project module shadowing the builtin.
eval.evaluate("./waves/" + origem, "./result/%s/recuperado-memoria.wav" % prefixoNome, "./result/%s/medidas.txt" % prefixoNome)
def training(model, G, args, epoch_start, epoch_end, time_stamp):
    """Set up checkpoint paths and a TF session, optionally restore weights,
    then dispatch to training_deepwalk or training_LINE based on args.base.

    Saves the best embedding to ./output/ as a .mat file and logs the best
    epoch via evaluation.print_settings.
    """
    # logging and loading
    if args.adver:
        ckpt_save_path = 'Pretrain/{}/{}_adv/embed_{}/{}/'.format(
            args.dataset, args.base, args.embed_size, time_stamp)
        ckpt_restore_path = 'Pretrain/{}/{}/embed_{}/{}/'.format(
            args.dataset, args.base, args.embed_size, time_stamp)
    else:
        ckpt_save_path = 'Pretrain/{}/{}/embed_{}/{}/'.format(
            args.dataset, args.base, args.embed_size, time_stamp)
        # 0 is used as a falsy "no restore path" sentinel
        ckpt_restore_path = 0 if args.restore is None else 'Pretrain/{}/{}/embed_{}/{}/'.format(
            args.dataset, args.base, args.embed_size, args.restore)
    if not os.path.exists(ckpt_save_path):
        os.makedirs(ckpt_save_path)
    if ckpt_restore_path and not os.path.exists(ckpt_restore_path):
        os.makedirs(ckpt_restore_path)

    saver_ckpt = tf.train.Saver({
        'embedding_T': model.embedding_T,
        'embedding_C': model.embedding_C,
        'context_bias': model.context_bias
    })

    # initialization
    init = tf.group(tf.global_variables_initializer(),
                    tf.local_variables_initializer())
    sess = tf.InteractiveSession()
    sess.run(init)

    # restore
    if args.restore is not None or epoch_start:
        ckpt = tf.train.get_checkpoint_state(
            os.path.dirname(ckpt_restore_path + 'checkpoint'))
        # BUG FIX: the original printed ckpt.model_checkpoint_path before the
        # `if ckpt` guard, crashing with AttributeError when no checkpoint
        # state exists; only touch the attribute once ckpt is known non-None.
        if ckpt and ckpt.model_checkpoint_path:
            print(ckpt.model_checkpoint_path)
            saver_ckpt.restore(sess, ckpt.model_checkpoint_path)
            print('================Done loading======================')
        else:
            logging.info('Initialized from scratch')
            print('Initialized from scratch')

    # baseline evaluation before any training
    evaluation.evaluate(model, sess, args, epoch_start)

    ###############################################################
    # adaptive l2 norm based on node similarities from PPMI matrix
    A = sio.loadmat(args.input_net)['network']
    PPMI = utils.PPMI(A, k=2, flag=False)
    ###############################################################

    #######################################################
    # deepwalk, node2vec, or LINE
    if args.base == 'deepwalk' or args.base == 'node2vec':
        best_epoch, best_embedding = training_deepwalk(args, G, PPMI, model, sess, epoch_start,
                                                       epoch_end, saver_ckpt, ckpt_save_path)
    elif args.base == 'LINE_1' or args.base == 'LINE_2':
        best_epoch, best_embedding = training_LINE(args, G, PPMI, model, sess, epoch_start,
                                                   epoch_end, saver_ckpt, ckpt_save_path)
    else:
        # BUG FIX: fail fast on an unknown base instead of hitting a
        # NameError on best_embedding at the savemat call below.
        raise ValueError('Unsupported base model: {}'.format(args.base))
    #######################################################
    print('Finish training.')

    #######################################################
    if args.adver:
        sio.savemat(
            './output/{}-{}-adv-vis.mat'.format(args.dataset, args.base),
            {'rep': best_embedding})
    else:
        sio.savemat('./output/{}-{}-vis.mat'.format(args.dataset, args.base),
                    {'rep': best_embedding})
    print('------------------------------------------')
    print('Best Epoch: {}'.format(best_epoch))
    print('------------------------------------------')
    evaluation.print_settings(args, flag='best_epoch', best_epoch=best_epoch)
def training_deepwalk(args, G, ppmi, model, sess, epoch_start, epoch_end, saver_ckpt, ckpt_save_path):
    """Train a deepwalk/node2vec-style embedding for epochs (epoch_start, epoch_end].

    Each epoch trains on (positive, negative) pairs derived from simulated
    random walks, shuffled into mini-batches.  Evaluation runs every epoch and
    the embedding with the highest summed evaluation score is kept.

    Returns:
        (best_epoch, best_embedding): best epoch by summed evaluation results
        and the corresponding embedding matrix (None if none improved on 0).
    """
    #############################
    # recording best results
    best_epoch = 0
    best_embedding = None
    score = 0
    #############################

    #######################################################
    # walking
    walks = G.simulate_walks(args.num_walks, args.walk_length)
    print('walks:', walks.shape)
    pos_pairs, neg_pairs = utils.walks_to_pairs_with_ns_pool(
        walks, args.window_size, args.negative, G.table)
    print('pos_pairs:', pos_pairs.shape)
    print('neg_pairs:', neg_pairs.shape)
    pair_num = pos_pairs.shape[0]
    iters = int(pair_num / args.batch_size)
    #######################################################

    print('Begin training...')
    np.random.seed()
    for epoch in range(epoch_start + 1, epoch_end + 1):
        epoch_begin_time = time()
        # BUG FIX: the original condition `epoch > epoch_start` is always true
        # inside this loop, so the walks simulated above were always discarded
        # and re-simulated on the first epoch.  Reuse the pre-loop pairs for
        # the first epoch and regenerate only afterwards.
        if epoch > epoch_start + 1:
            walks = G.simulate_walks(args.num_walks, args.walk_length)
            pos_pairs, neg_pairs = utils.walks_to_pairs_with_ns_pool(
                walks, args.window_size, args.negative, G.table)
            # BUG FIX: recompute pair_num/iters — the pair count can change
            # between walk simulations, and the stale values could index out
            # of range or silently drop pairs.
            pair_num = pos_pairs.shape[0]
            iters = int(pair_num / args.batch_size)
        random_seq = np.random.permutation(pair_num)
        for j in range(iters):
            if j != iters - 1:
                index_pos = random_seq[j * args.batch_size:(j + 1) * args.batch_size]
            else:
                # the last batch absorbs the remainder of the permutation
                index_pos = random_seq[j * args.batch_size:]
            feed_dict = batch_feed_dict(model, pos_pairs, neg_pairs, index_pos,
                                        args.negative, ppmi)
            if args.adver:
                # adversarial perturbation of target and context embeddings
                sess.run([model.update_T, model.update_C], feed_dict)
            sess.run(model.optimizer, feed_dict)
        print("Epoch training time [%.1f s]" % (time() - epoch_begin_time))

        eval_begin_time = time()
        results = evaluation.evaluate(model, sess, args, epoch)
        ###########################################################
        # recording best results
        if np.sum(results) > score:
            best_epoch = epoch
            score = np.sum(results)
            if args.normalized:
                best_embedding = sess.run(model.get_normalized_embeddings())
            else:
                best_embedding = sess.run(model.embedding_T)
        ###########################################################
        print("Evaluation [%.1f s]" % (time() - eval_begin_time))
        # periodic checkpointing every args.ckpt epochs
        if args.ckpt > 0 and (epoch - epoch_start) % args.ckpt == 0:
            saver_ckpt.save(sess, ckpt_save_path + 'weights', global_step=epoch)
        # one extra checkpoint at the end of (non-adversarial) pretraining
        if (not args.adver) and (epoch) == (args.pretraining_nepoch + epoch_start):
            saver_ckpt.save(sess, ckpt_save_path + 'weights', global_step=epoch)
    return best_epoch, best_embedding
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler

from dataset_process.data_io import get_train_data
from evaluation.evaluation import evaluate
from global_variables import get_transformed_dir

# Evaluate a 3-nearest-neighbour classifier on the transformed dataset.
method = KNeighborsClassifier(n_neighbors=3)

dataset_location = get_transformed_dir()
data, target = get_train_data(dataset_location)

# Standardize only when the features are not already floats
# (presumably raw integer features need scaling — TODO confirm upstream).
scaled = False
if data.dtype != float:
    data = StandardScaler().fit_transform(data)
    scaled = True
    # BUG FIX: the original used the Python-2-only print statement
    # (`print 'Dataset scaled'`), a SyntaxError on Python 3; the
    # parenthesized form behaves identically on both.
    print('Dataset scaled')

evaluate(method.fit(data, target),
         method_name='K Nearest Neighbour',
         scaled=scaled,
         dataset_location=dataset_location)
import sklearn.naive_bayes as nb
from evaluation.evaluation import evaluate
from dataset_process.data_io import get_train_data
from global_variables import get_transformed_dir

# Fit a Gaussian Naive Bayes classifier on the transformed training data and
# hand the fitted model to the shared evaluation harness (no scaling applied).
classifier = nb.GaussianNB()
transformed_dir = get_transformed_dir()
features, labels = get_train_data(transformed_dir)

evaluate(classifier.fit(features, labels),
         method_name='Naive Bayes',
         scaled=False,
         dataset_location=transformed_dir)
# Run the trained model over the loader to collect features X and labels Y.
X, Y = inference(data_loader, model, device)
if args.dataset == "CIFAR-100":
    # super-class
    # CIFAR-100 fine-label (0-99) to coarse superclass (0-19) grouping:
    # row i lists the five fine labels belonging to superclass i.
    super_label = [
        [72, 4, 95, 30, 55],
        [73, 32, 67, 91, 1],
        [92, 70, 82, 54, 62],
        [16, 61, 9, 10, 28],
        [51, 0, 53, 57, 83],
        [40, 39, 22, 87, 86],
        [20, 25, 94, 84, 5],
        [14, 24, 6, 7, 18],
        [43, 97, 42, 3, 88],
        [37, 17, 76, 12, 68],
        [49, 33, 71, 23, 60],
        [15, 21, 19, 31, 38],
        [75, 63, 66, 64, 34],
        [77, 26, 45, 99, 79],
        [11, 2, 35, 46, 98],
        [29, 93, 27, 78, 44],
        [65, 50, 74, 36, 80],
        [56, 52, 47, 59, 96],
        [8, 58, 90, 13, 48],
        [81, 69, 41, 89, 85],
    ]
    # Remap Y in place from fine labels to superclass ids.  Y_copy snapshots
    # the original values so an earlier rewrite cannot be re-matched and
    # cascaded by a later superclass index.
    Y_copy = copy.copy(Y)
    for i in range(20):
        for j in super_label[i]:
            Y[Y_copy == j] = i
nmi, ari, f, acc = evaluation.evaluate(Y, X)
print('NMI = {:.4f} ARI = {:.4f} F = {:.4f} ACC = {:.4f}'.format(nmi, ari, f, acc))