def ef_test_emf():
    """Benchmark em_fuzzy() runtime for increasing object counts, logging to CSV.

    Relies on module-level globals: pi, s_number, cl_size, cov_list, p_list,
    possible_values, emf_bj_number_list, and the generator()/em_fuzzy() functions.
    Appends one row per object count to outputs/efficiency_emf.csv.
    """
    print('pi: {}'.format(pi))
    print('*****')
    # Start each run with a fresh, header-only CSV.
    pd.DataFrame(columns=['pi', 's', 'obj_numb', 'emf'])\
        .to_csv('outputs/efficiency_emf.csv', index_label=False)
    for obj_number in emf_bj_number_list:
        print('obj number: {}'.format(obj_number))
        ground_truth = [random.randint(0, len(possible_values) - 1)
                        for _ in range(obj_number)]
        data, g_data = generator(cov_list, p_list, ground_truth,
                                 cl_size, pi, possible_values)
        emf_t = time.time()
        em_f, em_f_it, accuracy_em_f, pi_em_f = em_fuzzy(
            data=data, truth_obj_list=ground_truth)
        ex_emf = time.time() - emf_t
        print('em_f: {}, seconds: {}'.format(em_f, ex_emf))
        print('---')
        result_list = [pi, s_number, obj_number, ex_emf]
        data_frame = pd.read_csv('outputs/efficiency_emf.csv')
        # FIX: DataFrame.append was deprecated and removed in pandas 2.x;
        # pd.concat is the supported replacement.
        data_frame = pd.concat(
            [data_frame,
             pd.DataFrame(data=[result_list],
                          columns=['pi', 's', 'obj_numb', 'emf'])],
            ignore_index=True)
        data_frame.to_csv('outputs/efficiency_emf.csv', index_label=False)
def one2class(unicode_input, image_input, opt):
    """Generate companion character images for *unicode_input* from saved .pth weights.

    Scans ./pths/<unicode_input>/ for directories named '<input>_<output>...'
    that hold a .pth checkpoint, runs generator() on the first checkpoint of
    each matching directory, and returns {output_unicode: generated_image}.
    Returns {} when no class directory exists.
    """
    path_class = "./pths/%s/" % (unicode_input)
    if not os.path.isdir(path_class):
        return {}

    images_output = {}
    # change image type to tensor float, batched
    image_input_tensor = pil2tensor(image_input)
    image_input_tensor = image_input_tensor.unsqueeze(0)

    # if pth exist, generate another character using pth
    for dir_name in os.listdir(path_class):  # 'dir' shadowed a builtin; renamed
        if dir_name.split('_')[0] != unicode_input:
            continue
        for filename in os.listdir("%s/%s/" % (path_class, dir_name)):
            if os.path.splitext(filename)[-1] != '.pth':
                continue
            unicode_output = dir_name.split('_')[1].split('.')[-1]
            print(" start :[", unicode_output, "]")
            path_pth = os.path.abspath("%s/%s/%s" % (path_class, dir_name, filename))
            image_gen = generator(image_input_tensor, opt, path_pth)
            # FIX: the completion message was printed BEFORE generator() ran;
            # moved after the call so the log reflects actual progress.
            print(" done! :[", unicode_output, "]")
            images_output[unicode_output] = image_gen
            break  # only the first .pth per directory is used
    return images_output
def s_data_run():
    """Run all truth-discovery algorithms over generated data, dumping accuracies to CSV.

    Sweeps source accuracy (p_val_list) x coverage (cov_val_list), 10 rounds
    per combination, and writes one accuracy row per round to
    outputs/alg_ac_v5_<s_number>_<obj_number>.csv.
    Relies on module-level globals: p_val_list, cov_val_list, s_number,
    obj_number, possible_values, cl_size, pi, plus the algorithm functions.
    """
    # list of algorithms accuracy, one row per round
    alg_ac_list = []
    # run algorithms with different params (cov, accuracy of sources)
    for p in p_val_list:
        p_list = [p] * s_number
        for cov in cov_val_list:
            cov_list = [cov] * s_number
            print('s accuracy: {}'.format(p))
            print('cov: {}'.format(cov))
            # 'round' shadowed the builtin; renamed to round_idx
            for round_idx in range(10):
                print('Round: {}'.format(round_idx))
                ground_truth = {i: random.randint(0, len(possible_values) - 1)
                                for i in range(obj_number)}
                # currently the data generator output is a pandas DataFrame
                data2, g_data = generator(cov_list, p_list, ground_truth,
                                          cl_size, pi, possible_values)
                # transform pandas DataFrame into dict format
                data = get_data(data=data2)
                # PRINT OUT ALGORITHMS ACCURACIES
                mv_ac = m_voting(data=data, gt=ground_truth)
                print('MV_ac: {}'.format(mv_ac))
                sum_ac = sums(data=data, gt=ground_truth, s_number=s_number)
                print('SUM_ac: {}'.format(sum_ac))
                al_ac = average_log(data=data, gt=ground_truth, s_number=s_number)
                print('AL_ac: {}'.format(al_ac))
                inv_ac = investment(data=data, gt=ground_truth, s_number=s_number)
                print('INV_ac: {}'.format(inv_ac))
                # BUG FIX: this call had been commented out while pInv_ac was
                # still printed and appended below — a guaranteed NameError.
                pInv_ac = pooled_investment(data=data, gt=ground_truth,
                                            s_number=s_number)
                print('PINV_ac: {}'.format(pInv_ac))
                em_ac = em(data=data, gt=ground_truth, accuracy_truth=p_list,
                           s_number=s_number)
                print('EM_ac: {}'.format(em_ac))
                g_ac = gibbs(data=data, gt=ground_truth, accuracy_truth=p_list,
                             s_number=s_number)
                print('GB_ac: {}'.format(g_ac))
                alg_ac_list.append([p, mv_ac, sum_ac, al_ac, inv_ac,
                                    pInv_ac, em_ac, g_ac])
                print('---')
    # create pandas DataFrame with algorithms outputs
    df_ac = pd.DataFrame(data=alg_ac_list,
                         columns=['p', 'mv_ac', 'sums_ac', 'al_ac', 'inv_ac',
                                  'pInv_ac', 'em_ac', 'g_ac'])
    # output to csv file
    df_ac.to_csv('outputs/alg_ac_v5_{}_{}.csv'.format(s_number, obj_number),
                 index=False)
def ef_test():
    """Benchmark m_voting/em/gibbs/gibbs_fuzzy runtimes per object count, logging to CSV.

    Relies on module-level globals: pi, s_number, cl_size, cov_list, p_list,
    possible_values, obj_number_list, and the algorithm functions.
    Appends one timing row per object count to outputs/efficiency.csv.
    """
    print('pi: {}'.format(pi))
    print('*****')
    # Start each run with a fresh, header-only CSV.
    pd.DataFrame(columns=['pi', 's', 'obj_numb', 'mv', 'em', 'g', 'gf'])\
        .to_csv('outputs/efficiency.csv', index_label=False)
    for obj_number in obj_number_list:
        print('obj number: {}'.format(obj_number))
        ground_truth = [random.randint(0, len(possible_values) - 1)
                        for _ in range(obj_number)]
        data, g_data = generator(cov_list, p_list, ground_truth,
                                 cl_size, pi, possible_values)
        t_mv = time.time()
        mv = m_voting(data=data, truth_obj_list=ground_truth)
        ex_t_mv = time.time() - t_mv
        print('m_v: {}, seconds: {}'.format(mv, ex_t_mv))
        t_em = time.time()
        em_d, em_it, accuracy_em = em(data=data, truth_obj_list=ground_truth,
                                      values=possible_values)
        ex_t_em = time.time() - t_em
        print('em: {}, seconds: {}'.format(em_d, ex_t_em))
        t_g = time.time()
        g_d, g_it, accuracy_g = gibbs(data=data, truth_obj_list=ground_truth)
        ex_t_g = time.time() - t_g
        print('g: {}, seconds: {}'.format(g_d, ex_t_g))
        t_gf = time.time()
        gf_d, gf_it, accuracy_gf, pi_gf = gibbs_fuzzy(
            data=data, truth_obj_list=ground_truth)
        ex_t_gf = time.time() - t_gf
        print('gf: {}, seconds: {}'.format(gf_d, ex_t_gf))
        print('---')
        result_list = [pi, s_number, obj_number,
                       ex_t_mv, ex_t_em, ex_t_g, ex_t_gf]
        data_frame = pd.read_csv('outputs/efficiency.csv')
        # FIX: DataFrame.append was deprecated and removed in pandas 2.x;
        # pd.concat is the supported replacement.
        data_frame = pd.concat(
            [data_frame,
             pd.DataFrame(data=[result_list],
                          columns=['pi', 's', 'obj_numb', 'mv', 'em', 'g', 'gf'])],
            ignore_index=True)
        data_frame.to_csv('outputs/efficiency.csv', index_label=False)
def analyze1():
    """Benchmark generator() at increasing client-count scales; write averages to analyze.csv.

    NOTE(review): reconstructed from whitespace-mangled source — the placement
    of the main() call (inside the inner loop) and of average() (after both
    loops, using the final num) should be confirmed against the original.
    """
    min_c = 2
    max_c = 3
    times = []
    clients = []
    for num in range(0, 5):
        clients.append([])
        times.append([])
        for i in (1, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000):
            # FIX: time.clock() was removed in Python 3.8;
            # time.perf_counter() is the portable high-resolution replacement.
            start = time.perf_counter()
            client_num = generator(min_c * i, max_c * i)
            stop = time.perf_counter()
            clients[num].append(client_num)
            times[num].append(stop - start)
            print(stop - start)
            main()
    info = average(times, clients, num)
    # FIX: context manager guarantees the file is closed even on error
    with open("analyze.csv", "w") as f:
        for i in range(len(info[0])):
            f.write(str(info[0][i]) + ' ' + str(info[1][i]) + '\n')
def relations(lOr):
    """Plot the generator target alongside the max-normalised data columns named in lOr.

    lOr is a list of column names; each is looked up in the generator's
    listOFdataname, normalised by its own maximum, and plotted against the
    target curve over x in [0, 1].
    """
    gen = generator(64, root_path='..')
    column_ids = [gen.listOFdataname.index(name) for name in lOr]
    print(column_ids)

    batch_data, batch_target = next(gen)
    # sort samples by target so the plotted curves are ordered
    batch_target, batch_data = zip(*sorted(zip(batch_target, batch_data)))

    xs = np.linspace(0, 1, 64)
    plt.plot(xs, batch_target, label='target')
    matrix = np.array(batch_data)
    for pos, col in enumerate(column_ids):
        column = matrix[:, col]
        plt.plot(xs, column / np.amax(column), label=lOr[pos])
    plt.xlabel('x')
    plt.ylabel('y')
    plt.legend()
    plt.show()
def written2all(input_unicode, image_input, opt, is_demo, unicodes):
    """Generate images for every character class derived from *input_unicode*.

    Looks under ./data/pths_demo/ (demo mode) or ./data/pths/ for directories
    named '<input>_<output>...' containing a .pth checkpoint, skips outputs
    already present in *unicodes*, and returns {output_unicode: generated_image}.
    Returns {} when the class directory does not exist.
    """
    if is_demo:
        path_class = "./data/pths_demo/%s/" % (input_unicode)
    else:
        path_class = "./data/pths/%s/" % (input_unicode)
    if not os.path.isdir(path_class):
        return {}

    output_images = {}
    # change image type to tensor float, batched
    image_input_tensor = pil2tensor(image_input)
    image_input_tensor = image_input_tensor.unsqueeze(0)

    # if pth exist, generate another character using pth
    for dir_name in os.listdir(path_class):  # 'dir' shadowed a builtin; renamed
        if dir_name.split('_')[0] != input_unicode:
            continue
        for filename in os.listdir("%s/%s/" % (path_class, dir_name)):
            if os.path.splitext(filename)[-1] != '.pth':
                continue
            unicode_output = dir_name.split('_')[1].split('.')[-1]
            if unicode_output in unicodes:
                # FIX: typo 'aleady' -> 'already' in the log message
                logging.info(
                    "PASS making [%s] : unicode is already in input",
                    unicode_output)
                break
            # lazy %-style logging args throughout (avoids eager string build)
            logging.info(" start making:[ %s ]", unicode_output)
            path_pth = os.path.abspath(
                "%s/%s/%s" % (path_class, dir_name, filename))
            image_gen = generator(image_input_tensor, opt, path_pth)
            # FIX: 'done' was logged BEFORE generation actually ran;
            # moved after the generator() call.
            logging.info(" done making!:[ %s ]", unicode_output)
            output_images[unicode_output] = image_gen
            break  # only the first .pth per directory is used
    return output_images
# Inference script: restores a trained checkpoint and runs the default model
# over batches from the test generator.
# NOTE(review): 'output' is computed but never written to upload.csv beyond
# the header row, and only IDs[0] is printed — this looks unfinished; confirm.
import tensorflow as tf
import numpy as np
from generator.generator import generator
from models.default_model import default
from tools.losses import mse_loss
from tools.tool import initialize_uninitialized
from global_config.global_config import config

if __name__ == '__main__':
    cfg = config()
    # yields (test_data, IDs) batches read from the test CSV
    test_generator = generator(cfg.batch, cfg.test_csv)
    with tf.Session() as sess:
        train_P = tf.placeholder(tf.float32, [None, cfg.input_shape])
        train_model = default(train_P)
        # NOTE(review): net_out is bound to model.inputs, not an output
        # tensor — confirm the attribute name is intended.
        net_out = train_model.inputs
        # restore trained weights from the saved checkpoint
        model_pr = tf.train.Saver()
        model_pr.restore(sess, 'save_model\\100000save_net.ckpt')
        with open('datas//upload.csv', 'w') as f:
            f.write('Id,winPlacePerc\n')
            for epoch in range(cfg.epoch):
                test_data, IDs = next(test_generator)
                output = sess.run(net_out, feed_dict={train_P: test_data})
                print(IDs[0])
"""Generate bank SQL data files scaled by a CLI multiplier.

Usage: python <script>.py <multiplier>
Writes bank_data_<i>.sql and bank_data_<i>_1.sql via the project generator()
and reports the elapsed wall time.
"""
import sys
import time

from generator.generator import generator

if __name__ == '__main__':
    # base client-count bounds, scaled by the CLI multiplier below
    min_c = 2
    max_c = 3
    i = int(sys.argv[1])
    # FIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # portable high-resolution replacement.
    start = time.perf_counter()
    # FIX: context managers guarantee both files are closed even on error
    # (original closed them manually, leaking on exception).
    with open("bank_data_" + str(i) + ".sql", "w") as f, \
            open("bank_data_" + str(i) + "_1.sql", "w") as f1:
        generator(min_c * i, max_c * i, f, f1)
    stop = time.perf_counter()
    print("Time", stop - start)
# Training setup script: builds the default model, an MSE loss, and a Momentum
# optimizer whose learning rate is fed through a placeholder.
# NOTE(review): this chunk ends right after the Saver is created — the actual
# training loop presumably follows outside this view.
import tensorflow as tf
import numpy as np
from generator.generator import generator
from models.default_model import default
from tools.losses import mse_loss
from tools.tool import initialize_uninitialized, Tenordered_decay
from global_config.global_config import config

if __name__ == '__main__':
    cfg = config()
    # training batches read from the train CSV
    train_generator = generator(cfg.batch, cfg.train_csv)
    # test_generator = generator(cfg.batch,
    #                            cfg.test_csv)
    with tf.Session() as sess:
        # features per sample: (input_shape, 1); one scalar target per sample
        train_P = tf.placeholder(tf.float32, [None, cfg.input_shape, 1])
        target_P = tf.placeholder(tf.float32, [None])
        # learning rate fed as a 1-element tensor; element [0] is used below
        learning_rate = tf.placeholder(tf.float32, [None])
        train_model = default(train_P)
        # NOTE(review): net_out is bound to model.inputs, not an output
        # tensor — confirm the attribute name is intended.
        net_out = train_model.inputs
        loss = mse_loss(net_out, target_P)
        train_step = tf.train.MomentumOptimizer(learning_rate[0], cfg.momentum). \
            minimize(loss)
        # initialize only variables the checkpoint restore did not cover
        initialize_uninitialized(sess)
        model_af = tf.train.Saver()