def get_mixs(name1, name2, points=46):
    """Interpolate between two faces in latent space.

    Aligns both named images, encodes each to a latent, and returns the
    `points`-step sequence of decoded interpolation images.
    """
    latents = [encode(align(name)) for name in (name1, name2)]
    imgs, _ = mix_range(latents[0], latents[1], points)
    return imgs
def infer(sess, model, hps, iterator):
    """Round-trip the full test set through encode/decode.

    Example of using the model in inference mode; load the saved model via
    hps.restore_path.  x, y may also come from files instead of a dataset
    iterator.  If the model is unconditional, always pass
    y = np.zeros([bs], dtype=np.int32).

    Saves the concatenated reconstructions and latents under logs/ and
    returns the list of per-batch latent arrays.
    """
    if hps.direct_iterator:
        iterator = iterator.get_next()

    def next_batch():
        # A direct iterator is a TF op and must be evaluated via the session.
        if hps.direct_iterator:
            # replace with x, y, attr if you're getting CelebA attributes,
            # also modify get_data
            return sess.run(iterator)
        return iterator()

    xs, zs = [], []
    for _ in range(hps.full_test_its):
        x, y = next_batch()
        z = model.encode(x, y)
        xs.append(model.decode(y, z))
        zs.append(z)

    np.save('logs/x.npy', np.concatenate(xs, axis=0))
    np.save('logs/z.npy', np.concatenate(zs, axis=0))
    return zs
def cancel_from_requested():
    """Cancel the meeting currently selected in the requested-meetings widget.

    Sends a Cancel message to the server, marks the local booking row
    "Cancelled", refreshes the widget's value list, and redraws the UI.
    Closure: reads `requested_meetings`, `self`, `model`, `util`,
    `messagebox` from the enclosing scope.
    """
    meeting = requested_meetings.get()
    index = requested_meetings.current()
    # NOTE(review): `current()` returns -1 when nothing is selected, which
    # still passes `len(...) >= index` — presumably the empty-string check
    # is what actually guards that case; verify.
    if (meeting != "" and len(self.requested_meetings_list) >= index):
        messagebox.showinfo("cancel", "CANCEL %s" % meeting)
        #onfirmed_meetings.config(values=util.getParticipantList(self.client.conn))  # TODO - update with right list
        try:
            meetingNumber = self.requested_meetings_list[index][2]
        except Exception as e:
            # Selection out of sync with the backing list: silently give up.
            return
        print("debugging")
        print(meetingNumber)
        print(meeting)
        # Build and send the Cancel message before touching local state.
        cancel = model.Cancel(meetingNumber, '', self.client.sessionName)
        message = model.encode(cancel)
        messageType = "Cancelled"
        self.client._sender(message)
        # Lock guards the shared sqlite connection while updating status.
        self.client.lock.acquire()
        self.client.conn.cursor().execute(
            '''
            UPDATE booking SET status=? where meetingNumber=?
            ''', (messageType, meetingNumber))
        requested_meetings.config(
            values=util.getScheduledList(self.client.conn))
        self.client.lock.release()
        self.refreshUI()
    else:
        messagebox.showerror("cancel", "NO MEETING SELECTED")
def create_meeting():
    """Validate the meeting form and send a meeting Request to the server.

    Validates date/time/participant fields, rejects a request that clashes
    with an existing Scheduled/Confirmed booking at the same date and time,
    then records the outgoing request in the `booking` and `requestNum`
    tables and transmits it.  Closure: reads the form widgets, `self`,
    `model`, `messagebox` from the enclosing scope.
    """
    day = meeting_day.get()
    month = meeting_month.get()
    hour = meeting_hour.get()
    min_num_participants = min_participants.get()
    topic = meeting_topic.get()
    # Each selected row contributes its [ip, name] pair.
    selected_participants = [[
        participant_list.get(i)[1], participant_list.get(i)[2]
    ] for i in participant_list.curselection()]
    if (day == "" or month == "" or hour == "" or min_num_participants == ""
            or topic == "" or len(selected_participants) == 0):
        messagebox.showerror("create meeting", "Form cannot be empty")
    elif (not day.isdigit() or int(day) < 1 or int(day) > 31):
        messagebox.showerror("create meeting", "Invalid day entered")
    elif (not month.isdigit() or int(month) < 1 or int(month) > 12):
        messagebox.showerror("create meeting", "Invalid month entered")
    elif (not hour.isdigit() or int(hour) < 1 or int(hour) > 24):
        messagebox.showerror("create meeting", "Invalid hour entered")
    elif (not min_num_participants.isdigit() or int(min_num_participants) < 1):
        messagebox.showerror("create meeting",
                             "Invalid participant number entered")
    else:
        # Clash check: any live booking in the same date/time slot?
        self.client.lock.acquire()
        seek = self.client.conn.cursor().execute(
            '''
            SELECT * from booking where date like ? and time like ? and (status="Scheduled" or status="Confirmed")
            ''', (month + "-" + day, hour)).fetchall()
        self.client.lock.release()
        if (len(seek) > 0):
            messagebox.showerror(
                "create meeting",
                "You have another meeting at the same time. Please cancel or withdrawal your other meeting first"
            )
        else:
            # The requester is always included as a participant.
            selected_participants.append(
                [self.client.ip, self.client.sessionName])
            request = model.Request(month + "-" + day, hour,
                                    min_num_participants,
                                    selected_participants, topic,
                                    self.client.sessionName)
            message = model.encode(request)
            try:
                # Record the outgoing request locally before sending;
                # meetingNumber is assigned later by the server.
                self.client.conn.cursor().execute(
                    '''
                    INSERT INTO booking(date, time, meetingNumber, sourceIP, sourceClient, status, room, confirmedParticipant, topic, reason, min) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                    ''', (month + "-" + day, hour, "unassigned",
                          self.client.ip, self.client.sessionName, "sent",
                          "no room yet", "", topic, "", ""))
                self.client.conn.cursor().execute(
                    '''
                    INSERT INTO requestNum(lastRequestNum) VALUES(?)
                    ''', (request.requestNumber, ))
            except Exception as e:
                # Best-effort bookkeeping: log and continue so the request
                # is still sent even if local persistence fails.
                print(e)
                pass
            self.client._sender(message)
def get_z(x):
    """Encode a stack of 256x256 RGB images to latents, 10 images at a time.

    Reshapes `x` into batches of 10 and concatenates the per-batch encodings
    along the first axis.
    """
    batch_size = 10
    batches = x.reshape((-1, batch_size, 256, 256, 3))
    codes = [model.encode(batch) for batch in tqdm(batches)]
    return np.concatenate(codes, axis=0)
def encode(input_path):
    """Align the face in the image file at `input_path` and encode it.

    Returns the model's latent code for the aligned 1x256x256x3 image and
    prints the encoding latency.
    """
    # Fix: the previous `img = Image.open(input_path)` was dead code — its
    # result was immediately overwritten and the file handle was never
    # closed.  align_face.align reads the file itself.
    img = align_face.align(input_path)
    img = np.reshape(img, [1, 256, 256, 3])
    t = time.time()
    eps = model.encode(img)
    print("Encoding latency {} sec/img".format(time.time() - t))
    return eps
def read(self):
    """Load the train/test CSV files named in the UI into the model.

    Reads semicolon-delimited CSVs from the "trainpath"/"testpath" entry
    widgets, converts all feature columns to float (last column is the
    class label), assigns class colours via `encode`, and updates the UI:
    green entries + enabled train button on success, red entries on a
    missing file.
    """
    try:
        path = self.view.entries["trainpath"].get()
        with open(path) as f:
            reader = csv.reader(f, delimiter=';')
            self.model.set_trainset(list(reader))
        # Conversion of number type columns to float
        for train in self.model.trainset:
            self.model.add_class(train[-1])
            for i in range(len(train) - 1):
                train[i] = float(train[i])
        # Set colors based on class name
        self.model.set_train_colors(
            [encode(row[-1]) for row in self.model.trainset])
        path = self.view.entries["testpath"].get()
        with open(path) as f:
            reader = csv.reader(f, delimiter=';')
            self.model.set_testset(list(reader))
        # Conversion of number type columns to float
        for test in self.model.testset:
            self.model.get_category_by_name(test[-1]).increase_size()
            for i in range(len(test) - 1):
                test[i] = float(test[i])
        # Set colors based on class name
        self.model.set_test_colors(
            [encode(row[-1]) for row in self.model.testset])
        # Set dimensions of data in model
        self.model.set_dimensions()
        # UI feedback
        self.view.get_entry("trainpath").config(
            {"background": "pale green"})
        self.view.get_entry("testpath").config(
            {"background": "pale green"})
        self.view.set_button_normal("train")
    except FileNotFoundError:
        # UI feedback: flag both path entries, whichever open() failed.
        self.view.get_entry("trainpath").config({"background": "tomato"})
        self.view.get_entry("testpath").config({"background": "tomato"})
def greedy_decode(model, src, src_mask, max_len, start_symbol):
    """Decode autoregressively, always taking the arg-max token.

    Encodes `src` once, then grows the output one greedily chosen token per
    step until it is `max_len` tokens long.  Returns the (1, max_len)
    tensor of generated token ids, starting with `start_symbol`.
    """
    memory = model.encode(src, src_mask)
    generated = torch.ones(1, 1).fill_(start_symbol).type_as(src.data)
    for _ in range(max_len - 1):
        step_mask = Variable(
            subsequent_mask(generated.size(1)).type_as(src.data))
        decoded = model.decode(memory, src_mask, Variable(generated),
                               step_mask)
        # Predict the next token from the last decoder position only.
        logits = model.generator(decoded[:, -1])
        _, best = torch.max(logits, dim=1)
        token = best.data[0]
        new_col = torch.ones(1, 1).type_as(src.data).fill_(token)
        generated = torch.cat([generated, new_col], dim=1)
    return generated
def predict(self, state):
    """Evaluate the policy/value network on one state, gradient-free.

    Returns (policy, value): policy as a 1-D numpy array (the net's output
    is exponentiated, so presumably it emits log-probabilities), value as
    a python float.
    """
    with torch.no_grad():
        encoded = model.encode(state)
        batch = torch.unsqueeze(
            torch.FloatTensor(encoded).to(device), dim=0)
        policy, value = self.net(batch)
        probs = torch.exp(torch.squeeze(policy.cpu(), dim=0)).numpy()
        scalar = torch.squeeze(value.cpu(), dim=0).item()
    return probs, scalar
def infer(sess, model, hps, iterators, its):
    """Cross-domain inference: translate A->B and B->A for `its` batches.

    Encodes each domain's batch with its own sub-model and decodes the
    latent with the *other* domain's sub-model.  Saves latents as .npy and
    translated images as pngs under hps.logdir, and returns
    (x_A, z_A, x_B, z_B) — translated-A images, A latents, translated-B
    images, B latents — each concatenated over all batches.
    """
    from tqdm import tqdm
    # Both restore paths must be set; this routine assumes pre-trained
    # A and B sub-models were loaded.
    assert hps.restore_path_A != ''
    assert hps.restore_path_B != ''
    xs_A, xs_B = [], []
    zs_A, zs_B = [], []
    for it in tqdm(range(its)):
        x_A, y_A = iterators['A']()
        x_B, y_B = iterators['B']()
        # A2B: encode with model_A, decode with model_B.
        z_A = model.encode(x_A, y_A, 'model_A')
        x_B_recon = model.decode(y_B, z_A, 'model_B')
        xs_B.append(x_B_recon)
        zs_A.append(z_A)
        # B2A: encode with model_B, decode with model_A.
        z_B = model.encode(x_B, y_B, 'model_B')
        x_A_recon = model.decode(y_A, z_B, 'model_A')
        xs_A.append(x_A_recon)
        zs_B.append(z_B)
    x_A = np.concatenate(xs_A, axis=0)
    z_A = np.concatenate(zs_A, axis=0)
    x_B = np.concatenate(xs_B, axis=0)
    z_B = np.concatenate(zs_B, axis=0)
    np.save(os.path.join(hps.logdir, 'z_A'), z_A)
    np.save(os.path.join(hps.logdir, 'z_B'), z_B)
    from utils import npy2img
    npy2img(os.path.join(hps.logdir, 'B2A'), x_A)
    npy2img(os.path.join(hps.logdir, 'A2B'), x_B)
    return x_A, z_A, x_B, z_B
def infer(sess, model, hps, iterator):
    """Encode/decode the test set (or sampled latents) and save results.

    Example of using model in inference mode. Load saved model using
    hps.restore_path.  Can provide x, y from files instead of dataset
    iterator.  If model is uncondtional, always pass
    y = np.zeros([bs], dtype=np.int32).

    When hps.use_samples is set, latents are drawn from N(0, 1) instead of
    encoding real data.  Saves decoded images, latents, and per-sample
    likelihoods to hps.logdir, and returns the list of per-batch latents.
    """
    print('in infer')
    if hps.direct_iterator:
        iterator = iterator.get_next()
    # if hps.use_samples:
    #     iterator = tf.data.Dataset.from_tensor_slices(np.random.normal(size=(500, 32 * 32 * 3))).batch(hps.n_batch_test)
    #     iterator = iterator.prefetch(10)
    #     iterator = iterator.make_one_shot_iterator()
    xs = []
    zs = []
    losses = []
    for it in range(hps.full_test_its):
        # for it in range(10):
        if hps.direct_iterator:
            # replace with x, y, attr if you're getting CelebA attributes, also modify get_data
            x, y = sess.run(iterator)
        else:
            x, y = iterator()
        if hps.use_samples:
            # z = iterator.get_next()
            # z = tf.Print(z, [tf.shape(z), tf.reduce_mean(z)])  #, tf.math.reduce_std(z)])
            # y = tf.zeros([hps.n_batch_test], dtype=np.int32)
            # Sample latents instead of encoding x (32x32x3 flattened).
            z = np.random.normal(size=(hps.n_batch_test, 32 * 32 * 3))
            # print(z.mean(), z.std())
        else:
            z = model.encode(x, y)
        print(z.shape)
        # Decode back to image space, then score the reconstruction.
        x = model.decode(y, z)
        loss = model.calculate_likelihood(x, y)
        xs.append(x)
        zs.append(z)
        losses.append(loss)
    x = np.concatenate(xs, axis=0)
    z = np.concatenate(zs, axis=0)
    l = np.concatenate(losses, axis=0)
    np.save(os.path.join(hps.logdir, 'x.npy'), x)
    np.save(os.path.join(hps.logdir, 'z.npy'), z)
    np.save(os.path.join(hps.logdir, 'l.npy'), l)
    # from scipy.stats import norm
    # rv = norm()
    # unifs = rv.cdf(z)
    # np.save(os.path.join(hps.logdir, 'unifs.npy'), unifs)
    return zs
def align_encode():
    """HTTP endpoint: align the face in the posted image and encode it.

    On success responds with the aligned image, its latent code, and the
    latent projections; when no face is found, responds with
    face_found=False.
    """
    payload = get(request, 'img')
    # payload = parse_img(payload) if in jpg etc format
    img, face_found = align_face(deserialise_img(payload))
    if not face_found:
        return jsonify(face_found=False)
    img = np.reshape(img, [1, 256, 256, 3])
    print(img.shape)
    z = model.encode(img)
    proj = model.project(z)  # get projections. Not used
    # jsonify(img=serialise_img(img), z=serialise_nparr(z))
    return send_proj((img, z), proj)
def align_encode():
    """HTTP endpoint: align the face in the posted image and encode it.

    Identical to the sibling endpoint except it also prints the raw
    serialized payload.  Responds with face_found=False when no face is
    detected.
    """
    payload = get(request, 'img')
    print(payload)
    # payload = parse_img(payload) if in jpg etc format
    img, face_found = align_face(deserialise_img(payload))
    if not face_found:
        return jsonify(face_found=False)
    img = np.reshape(img, [1, 256, 256, 3])
    print(img.shape)
    z = model.encode(img)
    proj = model.project(z)  # get projections. Not used
    # jsonify(img=serialise_img(img), z=serialise_nparr(z))
    return send_proj((img, z), proj)
def greedy_decode(model, src, src_mask, src_lengths, max_len=100,
                  sos_index=1, eos_index=None):
    """Greedily decode a sentence.

    Encodes `src` once with the recurrent encoder, then feeds the arg-max
    token back in for up to `max_len` steps.  Returns
    (output, attention_scores): the generated token-id array (truncated at
    the first `eos_index` when provided) and the per-step attention
    weights concatenated along the time axis.
    """
    with torch.no_grad():
        encoder_hidden, encoder_final = model.encode(src, src_mask,
                                                     src_lengths)
        prev_y = torch.ones(1, 1).fill_(sos_index).type_as(src)
        trg_mask = torch.ones_like(prev_y)
    output = []
    attention_scores = []
    hidden = None
    for i in range(max_len):
        with torch.no_grad():
            out, hidden, pre_output = model.decode(encoder_hidden,
                                                   encoder_final, src_mask,
                                                   prev_y, trg_mask, hidden)
            # we predict from the pre-output layer, which is
            # a combination of Decoder state, prev emb, and context
            prob = model.generator(pre_output[:, -1])
        _, next_word = torch.max(prob, dim=1)
        next_word = next_word.data.item()
        output.append(next_word)
        # Feed the chosen token back in as the next decoder input.
        prev_y = torch.ones(1, 1).type_as(src).fill_(next_word)
        attention_scores.append(model.decoder.attention.alphas.cpu().numpy())
    output = np.array(output)
    # cut off everything starting from </s>
    # (only when eos_index provided)
    if eos_index is not None:
        first_eos = np.where(output == eos_index)[0]
        if len(first_eos) > 0:
            output = output[:first_eos[0]]
    return output, np.concatenate(attention_scores, axis=1)
def VAE():
    """Assemble the full VAE from the global encode/decode sub-models.

    Returns (vae_model, z_mean, z_log_var); the latent statistics are
    exposed so callers can construct the KL term of the loss.
    """
    inputs = Input(shape=(120, 35))
    z_mean, z_log_var, z = encode(inputs)
    reconstruction = decode([inputs, z])
    vae = Model(inputs, reconstruction, name="VAE")
    return vae, z_mean, z_log_var
def get_manipulations(name, typ, points=46, scale=1.0):
    """Sweep attribute `typ` over a range for the face image `name`.

    Aligns and encodes the image, then returns the `points`-step sequence
    of manipulated images at the given `scale`.
    """
    latent = encode(align(name))
    imgs, _ = manipulate_range(latent, typ, points, scale)
    return imgs
def main():
    """BEGAN analysis script: restore a trained model and dump samples.

    Builds the generator/encoder/decoder graph in inference mode, restores
    a checkpoint, then runs whichever of the nested analysis helpers are
    uncommented at the bottom (by default only getRandomG).
    """
    #load configuration
    conf, _ = get_config()
    pp.pprint(conf)
    if conf.is_gray :
        n_channel=1
    else:
        n_channel=3
    n_grid_row = int(np.sqrt(conf.n_batch))
    z = tf.random_uniform((conf.n_batch, conf.n_z), minval=-1.0, maxval=1.0)
    # execute generator
    g_net,_ = generate(z, conf.n_img_out_pix, conf.n_conv_hidden, n_channel, is_train=False, reuse=False)
    # execute discriminator (BEGAN discriminator = encoder + decoder AE)
    e_net,_, _ = encode(g_net, conf.n_z, conf.n_img_out_pix, conf.n_conv_hidden, is_train=False, reuse=False)
    d_net,_ = decode(e_net, conf.n_z, conf.n_img_out_pix, conf.n_conv_hidden, n_channel,is_train=False, reuse=False)
    # De-normalize from [-1, 1] back to [0, 255] pixel values.
    g_img=tf.clip_by_value((g_net + 1)*127.5, 0, 255)
    d_img=tf.clip_by_value((d_net + 1)*127.5, 0, 255)
    # start session
    sess = tf.InteractiveSession()
    init = tf.global_variables_initializer()
    sess.run(init)
    # init directories
    checkpoint_dir = os.path.join(conf.log_dir,conf.curr_time)
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    #saver = tf.train.import_meta_graph(npz_path+'began2_model.ckpt.meta')
    #saver.restore(sess, tf.train.latest_checkpoint(npz_path))
    saver = tf.train.Saver()
    saver.restore(sess, os.path.join(conf.load_dir, conf.ckpt_nm))
    #load real image
    data_files = glob(os.path.join(conf.data_dir,conf.dataset, "*"))
    shuffle(data_files)
    x_fix = data_files[0:conf.n_batch]
    x_fix=[get_image(f, conf.n_img_pix, is_crop=conf.is_crop, resize_w=conf.n_img_out_pix, is_grayscale = conf.is_gray) for f in x_fix]
    x_fix = np.array(x_fix).astype(np.float32)
    if(conf.is_gray == 1):
        # Grayscale loads as (n, h, w); add the trailing channel axis.
        s,h,w = x_fix.shape
        x_fix = x_fix.reshape(s,h, w,n_channel )
    n_loop = 1

    def getRealAR():
        # run ae: pass real images through the discriminator auto-encoder.
        x_im =sess.run(d_img,feed_dict={g_net:x_fix})
        save_images(x_im, [n_grid_row,n_grid_row],os.path.join(checkpoint_dir, 'anal_AE_X.png'))

    def getRandomG():
        # Generate 5 batches from random z; save grids and dump raw pixel
        # values to g_img.csv (append mode).
        f_g = open(checkpoint_dir+ '/g_img.csv', 'a')
        # generate images from generator and ae
        for i in range(5):
            z_test =np.random.uniform(low=-1, high=1, size=(conf.n_batch, 64)).astype(np.float32)
            g_im =sess.run(g_img,feed_dict={z:z_test})
            save_images(g_im, [n_grid_row,n_grid_row],os.path.join(checkpoint_dir, str(i)+'_anal_G.png'))
            # g_im = g_im/127.5 - 1.
            # ae_g_im =sess.run(d_img,feed_dict={g_net:g_im})
            # save_images(ae_g_im, [n_grid_row,n_grid_row],os.path.join(checkpoint_dir, str(i)+'_anal_AE_G.png'))
            for j in range(g_im.shape[0]):
                f_g.write(str(g_im[j].tolist()).replace("[", "").replace("]", "")+ '\n')
        f_g.close()

    def getFixedG(f_in):
        # Generate from latent vectors stored in a CSV (one z per line,
        # consumed in chunks of 64).
        l_z = list()
        with open(f_in,'r') as file:
            for line in file:
                l_z.append(np.fromstring(line, dtype=float, sep=','))
            file.close()
        n_loop = int(len(l_z)/64)
        l_z = np.asarray(l_z)
        for i in range(n_loop):
            fr = 64*i
            to = 64*(i+1)
            z_test =l_z[fr:to]
            g_im =sess.run(g_img,feed_dict={z:z_test})
            save_images(g_im, [n_grid_row,n_grid_row],os.path.join(checkpoint_dir, '_anal_fix_G.png'))
            #g_im = g_im/127.5 - 1.
            #ae_g_im =sess.run(d_img,feed_dict={g_net:g_im})
            #save_images(ae_g_im, [n_grid_row,n_grid_row],os.path.join(checkpoint_dir, str(i)+'_anal_AE_G.png'))

    def getRandomAE():
        # generate images from discriminator and ae, feeding random noise
        # images directly into the auto-encoder input.
        for i in range(n_loop):
            z_test =np.random.uniform(low=-1, high=1, size=(conf.n_batch, conf.n_img_out_pix, conf.n_img_out_pix,n_channel)).astype(np.float32)
            d_im =sess.run(d_img,feed_dict={g_net:z_test})
            save_images(d_im, [n_grid_row,n_grid_row],os.path.join(checkpoint_dir, str(i)+'_anal_D.png'))

    def saveFeatures():
        # get latent value from real images (10*n_batch); append to
        # latent.csv one row per image.
        for i in range(n_loop):
            shuffle(data_files)
            f_test = data_files[0:conf.n_batch]
            x_test=[get_image(f, conf.n_img_pix, is_crop=conf.is_crop, resize_w=conf.n_img_out_pix, is_grayscale = conf.is_gray) for f in f_test]
            x_test = np.array(x_test).astype(np.float32)
            if(conf.is_gray == 1):
                s,h,w = x_test.shape
                x_test = x_test.reshape(s,h, w,n_channel )
            latent =sess.run(e_net,feed_dict={g_net:x_test})
            f_latent = open(checkpoint_dir+ '/latent.csv', 'a')
            for k in range(latent.shape[0]):
                f_latent.write(str(latent[k].tolist()).replace("[", "").replace("]", "")+ '\n')
            f_latent.close()

    def getFeatures():
        # Per-dimension mean/std of the saved latent codes.
        f_path=checkpoint_dir+'/latent.csv'#'C:/samples/img_download/wheels/wheeldesign/output/began2_anal/17-11-28-14-52/latent.csv'
        data = pd.read_csv(f_path)
        n_latent = data.shape[1]
        mean = [None]*n_latent
        std = [None]*n_latent
        for i in range(n_latent):
            #i+=1
            latent = np.array(data.iloc[:, i:i+1])
            mean[i] = np.mean(latent)
            std[i] = np.std(latent)
        plt.show()
        return mean, std

    def generateFeature(mean, std):
        # Sample new latent features dimension-wise from N(mean_i, std_i).
        z_size = len(mean)
        feature = [None]*z_size
        for i in range(z_size):
            feature[i] = np.random.normal(loc=mean[i], scale=std[i], size=z_size*n_loop)
        return feature

    def generateImage(feature):
        # Decode sampled features through the discriminator's decoder.
        feature = np.array(feature)
        idx=0
        for i in range(n_loop):
            f_net = feature[:,idx:idx+64]
            f_img =sess.run(d_img,feed_dict={e_net:f_net})
            save_images(f_img, [n_grid_row,n_grid_row],os.path.join(checkpoint_dir, str(i)+'_anal_G_df.png'))
            idx+=64

    def getDiscMeanFeature(mean):
        # Decode a ramp of latents around (mean - 2) to visualize the
        # decoder's behaviour near the latent mean.
        mean = np.array(mean)
        mean = mean-2
        m_net = [None]*64
        for i in range(64):
            m_net[i] = mean +1/63 *i
        d_mnfd =sess.run(d_img,feed_dict={e_net:m_net})
        save_images(d_mnfd, [n_grid_row,n_grid_row],os.path.join(checkpoint_dir, 'anal_D_Mean_df.png'))

    # Only getRandomG is active; uncomment others to run them.
    #getFixedG(conf.log_dir+'anal/g_df/z.csv')
    #getRealAR()
    getRandomG()
    #getRandomAE()
    #saveFeatures()
    #z_mean, z_std = getFeatures()
    #z_feature = generateFeature(z_mean, z_std)
    #shuffle(z_feature)
    #generateImage(z_feature)
    #getDiscMeanFeature(z_mean)
    sess.close()
def main():
    """BEGAN training loop.

    Builds the generator and the auto-encoder discriminator, trains both
    with the BEGAN k_t balancing scheme, logs summaries/costs, and saves
    sample grids and checkpoints along the way.
    """
    #load configuration
    conf, _ = get_config()
    pp.pprint(conf)
    if conf.is_gray :
        n_channel=1
    else:
        n_channel=3
    n_grid_row = int(np.sqrt(conf.n_batch))

    ##========================= DEFINE MODEL ===========================##
    z = tf.random_uniform( (conf.n_batch, conf.n_z), minval=-1.0, maxval=1.0)
    x_net = tf.placeholder(tf.float32, [conf.n_batch, conf.n_img_pix, conf.n_img_pix, n_channel], name='real_images')
    # k_t balances generator vs. discriminator loss (BEGAN control term).
    k_t = tf.Variable(0., trainable=False, name='k_t')
    # define generator
    g_net, g_vars = generate(z, conf.n_img_out_pix, conf.n_conv_hidden, n_channel, is_train=True, reuse=False)
    # define discriminator (auto-encoder applied to fake and real batches)
    e_g_net, enc_vars,_ = encode(g_net, conf.n_z, conf.n_img_out_pix, conf.n_conv_hidden, is_train=True, reuse=False)
    d_g_net, dec_vars = decode(e_g_net, conf.n_z, conf.n_img_out_pix, conf.n_conv_hidden, n_channel, is_train=True, reuse=False)
    e_x_net, _,_ = encode(x_net, conf.n_z, conf.n_img_out_pix, conf.n_conv_hidden, is_train=True, reuse=True)
    d_x_net, _ = decode(e_x_net, conf.n_z, conf.n_img_out_pix, conf.n_conv_hidden, n_channel, is_train=True, reuse=True)
    # image de-normalization: [-1, 1] -> [0, 255]
    g_img=tf.clip_by_value((g_net + 1)*127.5, 0, 255)
    d_g_img=tf.clip_by_value((d_g_net + 1)*127.5, 0, 255)
    d_x_img=tf.clip_by_value((d_x_net + 1)*127.5, 0, 255)
    d_vars = enc_vars + dec_vars
    # define discriminator and generator losses (L1 reconstruction errors)
    d_loss_g = tf.reduce_mean(tf.abs(d_g_net - g_net))
    d_loss_x = tf.reduce_mean(tf.abs(d_x_net - x_net))
    d_loss= d_loss_x - k_t * d_loss_g
    g_loss = tf.reduce_mean(tf.abs(d_g_net - g_net))
    # define optimizer
    d_optim = tf.train.AdamOptimizer(conf.d_lr).minimize(d_loss, var_list=d_vars)
    g_optim = tf.train.AdamOptimizer(conf.g_lr).minimize(g_loss, var_list=g_vars)
    # BEGAN equilibrium terms: `balance` drives k_t, `measure` tracks
    # convergence.
    balance = conf.gamma * d_loss_x - g_loss
    measure = d_loss_x + tf.abs(balance)
    with tf.control_dependencies([d_optim, g_optim]):
        k_update = tf.assign(k_t, tf.clip_by_value(k_t + conf.lambda_k * balance, 0, 1))
    # define summary for tensorboard
    summary_op = tf.summary.merge([
        tf.summary.image("G", g_img),
        tf.summary.image("AE_G", d_g_img),
        tf.summary.image("AE_x", d_x_img),
        tf.summary.scalar("loss/dloss", d_loss),
        tf.summary.scalar("loss/d_loss_real", d_loss_x),
        tf.summary.scalar("loss/d_loss_fake", d_loss_g),
        tf.summary.scalar("loss/gloss", g_loss),
        tf.summary.scalar("misc/m", measure),
        tf.summary.scalar("misc/kt", k_t),
        tf.summary.scalar("misc/balance", balance),
    ])
    # start session
    sess = tf.InteractiveSession()#config=tf.ConfigProto(log_device_placement=True))
    init = tf.global_variables_initializer()
    sess.run(init)
    # init directories
    checkpoint_dir = os.path.join(conf.log_dir,conf.curr_time)
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)
    # init summary writer for tensorboard
    summary_writer = tf.summary.FileWriter(checkpoint_dir,sess.graph)
    saver = tf.train.Saver()
    if(conf.is_reload):
        saver.restore(sess, os.path.join(conf.load_dir, conf.ckpt_nm))
    # load real image info and shuffle them
    data_files = glob(os.path.join(conf.data_dir,conf.dataset, "*"))
    shuffle(data_files)
    # save real fixed image (reference batch reused for sample grids)
    x_fix = data_files[0:conf.n_batch]
    x_fix=[get_image(f, conf.n_img_pix, is_crop=conf.is_crop, resize_w=conf.n_img_out_pix, is_grayscale = conf.is_gray) for f in x_fix]
    x_fix = np.array(x_fix).astype(np.float32)
    x_fix = x_fix.reshape(x_fix.shape[0],x_fix.shape[1], x_fix.shape[2],n_channel )
    save_images(x_fix, [n_grid_row,n_grid_row],'{}/x_fix.png'.format(checkpoint_dir))
    cost_file = open(checkpoint_dir+ "/cost.txt", 'w', conf.n_buffer)
    n_step=0
    for epoch in range(conf.n_epoch):
        ## shuffle data
        shuffle(data_files)
        ## load image data
        n_iters = int(len(data_files)/conf.n_batch)
        for idx in range(0, n_iters):
            # make image batch
            f_batch = data_files[idx*conf.n_batch:(idx+1)*conf.n_batch]
            data_batch = [get_image(f, conf.n_img_pix, is_crop=conf.is_crop, resize_w=conf.n_img_out_pix, is_grayscale = conf.is_gray) for f in f_batch]
            img_batch = np.array(data_batch).astype(np.float32)
            if conf.is_gray :
                # Grayscale loads as (n, h, w); add the channel axis.
                s,h,w = img_batch.shape
                img_batch = img_batch.reshape(s, h, w,
                                              n_channel )
            # k_update carries control dependencies on both optimizers, so
            # fetching it runs one full training step.
            fetch_dict = {
                "kupdate": k_update,
                "m": measure,
            }
            if n_step % conf.n_save_log_step == 0:
                fetch_dict.update({
                    "summary": summary_op,
                    "gloss": g_loss,
                    "dloss": d_loss,
                    "kt": k_t,
                })
            start_time = time.time()
            # run the session!
            result = sess.run(fetch_dict, feed_dict={x_net:img_batch})
            # get the result
            m = result['m']
            if n_step % conf.n_save_log_step == 0:
                summary_writer.add_summary(result['summary'], n_step)
                summary_writer.flush()
                # write cost to a file
                gloss = result['gloss']
                dloss = result['dloss']
                kt = result['kt']
                cost_file.write("Epoch: ["+str(epoch)+"/"+str(conf.n_epoch)+"] ["+str(idx)+"/"+str(n_iters)+"] time: "+str(time.time() - start_time)+", d_loss: "+str(dloss)+", g_loss:"+ str(gloss)+" measure: "+str(m)+", k_t: "+ str(kt)+ "\n")
            # save generated image file
            if n_step % conf.n_save_img_step == 0:
                g_sample, g_ae, x_ae = sess.run([g_img, d_g_img,d_x_img] ,feed_dict={x_net: x_fix})
                save_image(g_sample,os.path.join(checkpoint_dir, '{}_G.png'.format(n_step)))
                save_image(g_ae, os.path.join(checkpoint_dir, '{}_AE_G.png'.format(n_step)))
                save_image(x_ae, os.path.join(checkpoint_dir, '{}_AE_X.png'.format(n_step)))
            n_step+=1
        # save checkpoint
        saver.save(sess, os.path.join(checkpoint_dir,str(epoch)+"_"+str(n_step)+"_began2_model.ckpt") )
    # save final checkpoint
    saver.save(sess, os.path.join(checkpoint_dir,"final_began2_model.ckpt"))
    cost_file.close()
    sess.close()
def main(conf):
    """BEGAN inspection run: dump activations, tensors, and images.

    Loads fixed z/x inputs from CSV, runs two training-style steps, and on
    the first step saves every conv feature map of the generator, encoder,
    and decoder as images while logging loss values and raw tensor
    contents for offline analysis.
    """
    logger = logging.getLogger("desc")
    logger.setLevel(logging.INFO)
    fileHandler = logging.FileHandler(os.path.join(base_dir, 'log.txt'))
    logger.addHandler(fileHandler)
    #streamHandler = logging.StreamHandler()
    #logger.addHandler(streamHandler)
    if conf.is_gray:
        n_channel = 1
    else:
        n_channel = 3
    # init directories
    checkpoint_dir = os.path.join(base_dir, conf.curr_time)
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    ##========================= DEFINE MODEL ===========================##
    #z = tf.random_uniform(conf.n_batch, conf.n_z), minval=-1.0, maxval=1.0)
    # Latents come from a fixed CSV so runs are reproducible.
    z = readz(os.path.join(base_dir, 'z.csv'), conf.n_batch)
    x_net = tf.placeholder(
        tf.float32, [conf.n_batch, conf.n_img_pix, conf.n_img_pix, n_channel],
        name='real_images')
    k_t = tf.Variable(0., trainable=False, name='k_t')
    # execute generator (third return value exposes conv activations)
    g_net, g_vars, g_conv = generate(z, conf.n_img_out_pix,
                                     conf.n_conv_hidden, n_channel,
                                     is_train=True, reuse=False)
    # execute discriminator
    e_g_net, enc_vars, e_g_conv = encode(g_net, conf.n_z, conf.n_img_out_pix,
                                         conf.n_conv_hidden, is_train=True,
                                         reuse=False)
    d_g_net, dec_vars, d_g_conv = decode(e_g_net, conf.n_z,
                                         conf.n_img_out_pix,
                                         conf.n_conv_hidden, n_channel,
                                         is_train=True, reuse=False)
    e_x_net, _, e_x_conv = encode(x_net, conf.n_z, conf.n_img_out_pix,
                                  conf.n_conv_hidden, is_train=True,
                                  reuse=True)
    d_x_net, _, d_x_conv = decode(e_x_net, conf.n_z, conf.n_img_out_pix,
                                  conf.n_conv_hidden, n_channel,
                                  is_train=True, reuse=True)
    # De-normalize from [-1, 1] to [0, 255].
    g_img = tf.clip_by_value((g_net + 1) * 127.5, 0, 255)
    #x_img=tf.clip_by_value((x_net + 1)*127.5, 0, 255)
    d_g_img = tf.clip_by_value((d_g_net + 1) * 127.5, 0, 255)
    d_x_img = tf.clip_by_value((d_x_net + 1) * 127.5, 0, 255)
    d_vars = enc_vars + dec_vars
    # BEGAN losses (L1 auto-encoder reconstruction errors).
    d_loss_g = tf.reduce_mean(tf.abs(d_g_net - g_net))
    d_loss_x = tf.reduce_mean(tf.abs(d_x_net - x_net))
    d_loss = d_loss_x - k_t * d_loss_g
    g_loss = tf.reduce_mean(tf.abs(d_g_net - g_net))
    # NOTE(review): the *_prev aliases below are never read afterwards —
    # presumably leftovers from an earlier comparison; confirm before
    # removing.
    d_loss_prev = d_loss
    g_loss_prev = g_loss
    k_t_prev = k_t
    g_optim = tf.train.AdamOptimizer(conf.g_lr).minimize(g_loss,
                                                         var_list=g_vars)
    d_optim = tf.train.AdamOptimizer(conf.d_lr).minimize(d_loss,
                                                         var_list=d_vars)
    balance = conf.gamma * d_loss_x - g_loss
    measure = d_loss_x + tf.abs(balance)
    with tf.control_dependencies([d_optim, g_optim]):
        k_update = tf.assign(
            k_t, tf.clip_by_value(k_t + conf.lambda_k * balance, 0, 1))
    # start session
    sess = tf.InteractiveSession()
    init = tf.global_variables_initializer()
    sess.run(init)
    loadWeight(sess, conf)
    # Fixed real batch, also from CSV.
    x_fix = readx(os.path.join(base_dir, 'x.csv'), conf.n_batch)
    x_fix = x_fix.reshape(conf.n_batch, conf.n_conv_hidden,
                          conf.n_img_out_pix, n_channel)
    n_loop = 2
    for itr in range(n_loop):
        # Fetch everything in one step: the update op plus all tensors and
        # activations we want to inspect.
        fetch_dict = {
            "kupdate": k_update,
            "m": measure,
            "b": balance,
            'gnet': g_net,
            'dgnet': d_g_net,
            'dxnet': d_x_net,
            'xnet': x_net,
            'gconv': g_conv,
            'egconv': e_g_conv,
            'dgconv': d_g_conv,
            'exconv': e_x_conv,
            'dxconv': d_x_conv,
            "dlossx": d_loss_x,
            "gloss": g_loss,
            "dloss": d_loss,
            "kt": k_t,
            'gimg': g_img,
            'dgimg': d_g_img,
            'dximg': d_x_img,
        }
        result = sess.run(fetch_dict, feed_dict={x_net: x_fix})
        logger.info('measure: ' + str(result['m']))
        logger.info('balance: ' + str(result['b']))
        logger.info('gloss: ' + str(result['gloss']))
        logger.info('dloss: ' + str(result['dloss']))
        logger.info('dlossx: ' + str(result['dlossx']))
        logger.info('k_t: ' + str(result['kt']))
        if itr == 0:
            # Save every channel of every conv layer as an image, one set
            # per sub-network (generator, decoder-of-G, decoder-of-x,
            # encoder-of-x, encoder-of-G).
            gconv = result['gconv']
            for i in range(len(gconv)):
                conv = np.clip((gconv[i] + 1) * 127.5, 0, 255)
                s, h, w, c = conv.shape
                for j in range(c):
                    c_img = conv[:, :, :, j:j + 1]
                    save_image(
                        c_img,
                        os.path.join(
                            checkpoint_dir, 'gen_' + str(i) + '_' + str(j) +
                            '_' + str(h) + '_conv.png'))
            dgconv = result['dgconv']
            for i in range(len(dgconv)):
                conv = np.clip((dgconv[i] + 1) * 127.5, 0, 255)
                s, h, w, c = conv.shape
                for j in range(c):
                    c_img = conv[:, :, :, j:j + 1]
                    save_image(
                        c_img,
                        os.path.join(
                            checkpoint_dir, 'dec_g_' + str(i) + '_' + str(j)
                            + '_' + str(h) + '_conv.png'))
            dxconv = result['dxconv']
            for i in range(len(dxconv)):
                conv = np.clip((dxconv[i] + 1) * 127.5, 0, 255)
                s, h, w, c = conv.shape
                for j in range(c):
                    c_img = conv[:, :, :, j:j + 1]
                    save_image(
                        c_img,
                        os.path.join(
                            checkpoint_dir, 'dec_x_' + str(i) + '_' + str(j)
                            + '_' + str(h) + '_conv.png'))
            exconv = result['exconv']
            for i in range(len(exconv)):
                conv = np.clip((exconv[i] + 1) * 127.5, 0, 255)
                s, h, w, c = conv.shape
                for j in range(c):
                    c_img = conv[:, :, :, j:j + 1]
                    save_image(
                        c_img,
                        os.path.join(
                            checkpoint_dir, 'enc_x_' + str(i) + '_' + str(j)
                            + '_' + str(h) + '_conv.png'))
            egconv = result['egconv']
            for i in range(len(egconv)):
                conv = np.clip((egconv[i] + 1) * 127.5, 0, 255)
                s, h, w, c = conv.shape
                for j in range(c):
                    c_img = conv[:, :, :, j:j + 1]
                    save_image(
                        c_img,
                        os.path.join(
                            checkpoint_dir, 'enc_g_' + str(i) + '_' + str(j)
                            + '_' + str(h) + '_conv.png'))
            # Dump the raw tensors row-by-row for offline inspection.
            gnet = result['gnet']
            dgnet = result['dgnet']
            dxnet = result['dxnet']
            xnet = result['xnet']
            for i in range(conf.n_batch):
                logger.info(
                    'g_net: ' +
                    str(gnet[i].tolist()).replace("[", "").replace("]", ""))
                logger.info(
                    'd_g_net: ' +
                    str(dgnet[i].tolist()).replace("[", "").replace("]", ""))
                logger.info(
                    'x_net: ' +
                    str(xnet[i].tolist()).replace("[", "").replace("]", ""))
                logger.info(
                    'd_x_net: ' +
                    str(dxnet[i].tolist()).replace("[", "").replace("]", ""))
        gimg = result['gimg']
        dgimg = result['dgimg']
        dximg = result['dximg']
        save_image(gimg,
                   os.path.join(checkpoint_dir,
                                str(itr) + '_final_g_img.png'))
        save_image(
            dgimg,
            os.path.join(checkpoint_dir, str(itr) + '_final_d_g_img.png'))
        save_image(
            dximg,
            os.path.join(checkpoint_dir, str(itr) + '_final_d_x_img.png'))
    sess.close()
import model
import numpy as np
from model import SET_HYPERPARAMETER

# Configure the model hyperparameters before construction.
SET_HYPERPARAMETER("contrast", 300.0)
SET_HYPERPARAMETER("diffLatentSpace", 12)
SET_HYPERPARAMETER("normalize", "individual")

# arr_0/arr_1 hold the "good"/"bad" diff samples; arr_2/arr_3 are passed
# through to the output archive (presumably the matching names — verify).
data = np.load("./npz/diffsWithNames.npz")
goods = data["arr_0"]
bads = data["arr_1"]

# NOTE(review): this rebinds the name `model` from the module to the model
# instance; any later use of the module through this name would break.
model = model.emptyModel("generateEncode",
                         use="diff",
                         log=False,
                         inputsShape=list(goods[0].shape))
model.restore("5feb-ls12-inorm_d")

# Encode both splits with the restored weights and save the codes.
testEncoded = model.encode(bads)
trainEncoded = model.encode(goods)
np.savez("./npz/codesWithNames_inorm.npz", trainEncoded, testEncoded,
         data["arr_2"], data["arr_3"])
import numpy as np
from keras.models import load_model, Model
from keras import optimizers
from tgru_k2_gpu import TerminalGRU
from keras.models import Sequential
from keras.layers import Dense, Conv1D, Dropout, MaxPooling1D, Flatten, BatchNormalization, RepeatVector, GRU, Input, Lambda
from model import encode, decode
from data_loader import load_data_smiles, load_data_propertys
from keras.losses import mse

# Load data: input is a one-hot matrix, output is 3 properties.
# X_train, X_val, X_test = load_data_smiles()
# pr_train, pr_val, pr_test = load_data_propertys()

# Auxiliary-branch model: pretrained encoder + a small dense property head.
encode = encode()
encode.load_weights("encode_weights.h5")

inputs = Input(shape=(120, 35))
z_mean, z_log_var, z = encode(inputs)
x = Dense(196, activation='relu')(z)
x = Dense(196, activation='relu')(x)
x = Dense(196, activation='relu')(x)
x = Dense(3, activation='tanh')(x)
# Fix: `Model` was used but never imported (added to the keras.models
# import above).
model_prt = Model(inputs, x, name='model_property')
model_prt.summary()

# Freeze the encoder so only the property head is trainable.
# Fix: this loop previously iterated `encoder.layers`, but no `encoder`
# name exists — the encoder instance is bound to `encode`, so the loop
# raised NameError at runtime.
for layer in encode.layers:
    layer.trainable = False
from keras import objectives
from keras.losses import mse, binary_crossentropy
from keras.utils import to_categorical
import numpy as np
from keras.models import load_model
from keras import optimizers
from keras.callbacks import ModelCheckpoint
from tgru_k2_gpu import TerminalGRU
from keras.models import Sequential
from keras.layers import Dense, Conv1D, Dropout, MaxPooling1D, Flatten, Dense, BatchNormalization,RepeatVector,GRU,Input,Lambda
from data_loader import load_data
from model import encode, decode

# Load the data
X_train, X_test, X_val = load_data()

# The imported names are factories; rebind them to the built sub-models.
encode = encode()
decode = decode()

def VAE():
    """Wire the encoder and decoder into the full VAE.

    Returns (vae_model, z_mean, z_log_var); the latent statistics are
    needed by the KL term of the loss functions below.
    """
    inputs = Input(shape=(120,35))
    z_mean, z_log_var, z = encode(inputs)
    x = decode([inputs, z])
    VAE = Model(inputs, x, name = "VAE")
    return VAE, z_mean, z_log_var

vae, z_mean, z_log_var = VAE()
vae.summary()

''' Test thử 3 hàm loss'''
def vae_loss_binary(x, x_reconstruction):
    # Reconstruction term (binary cross-entropy) + KL divergence of the
    # approximate posterior from the unit Gaussian prior.
    # NOTE(review): as visible here the function has no return statement —
    # presumably truncated in this excerpt; verify against the full file.
    xent_loss = K.sum(binary_crossentropy(x, x_reconstruction) , axis = -1)
    kl_loss = - 0.5 * K.sum(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
def withdraw_from_confirmed():
    """Withdraw from the meeting selected in the confirmed-meetings widget.

    Sends a Withdraw message, marks the local booking "Withdrawn", and —
    since the time slot is now free — looks for a previously Refused
    meeting in the same slot and accepts it automatically.  Closure: reads
    `confirmed_meetings`, `self`, `model`, `util`, `messagebox` from the
    enclosing scope.
    """
    meeting = confirmed_meetings.get()
    index = confirmed_meetings.current()
    if (meeting != "" and len(self.confirmed_meetings_list) >= index):
        messagebox.showinfo("withdraw", "WITHDRAW FROM %s" % meeting)
        try:
            meetingNumber = self.confirmed_meetings_list[index][2]
        except Exception as e:
            # Selection out of sync with the backing list: silently give up.
            return
        withdraw = model.Withdraw(meetingNumber, self.client.sessionName)
        message = model.encode(withdraw)
        messageType = "Withdrawn"
        self.client._sender(message)
        # Lock is held across the whole DB sequence below.
        self.client.lock.acquire()
        self.client.conn.cursor().execute(
            '''
            UPDATE booking SET status=? where meetingNumber=?
            ''', (messageType, meetingNumber))
        confirmed_meetings.config(values=util.getConfirmedList(
            self.client.conn))  # TODO - update with right list
        # Can now accept another meeting at the same time slot. Attempt to find a previously refused meeting
        seek = self.client.conn.cursor().execute(
            '''
            SELECT* from booking where meetingNumber=?
            ''', (meetingNumber, )).fetchall()
        date = seek[0][0]
        time = seek[0][1]
        # Find another previous refused meeting at the same date and time and accept it if one is found
        seek = self.client.conn.cursor().execute(
            '''
            SELECT* from booking where date=? and time=? and status="Refused"
            ''', (date, time)).fetchall()
        if (len(seek) > 0):
            newMeetingNumber = seek[0][2]
            add = model.Add(newMeetingNumber, self.client.sessionName)
            message = model.encode(add)
            self.client.conn.cursor().execute(
                '''
                UPDATE booking SET status =? where meetingNumber=?
                ''', ("Accepted", newMeetingNumber))
            try:
                # Record the outgoing accept; best-effort — log and continue.
                self.client.conn.cursor().execute(
                    '''
                    INSERT INTO accept(meetingNumber, date, time, status, message) VALUES (?, ?, ?, ?, ?)
                    ''', (newMeetingNumber, date, time, "sent", message))
            except Exception as e:
                print(e)
                pass
            self.client._sender(message)
        self.client.lock.release()
        self.refreshUI()
    else:
        messagebox.showerror("withdraw", "NO MEETING SELECTED")
def _menu(self, state, prevState, storedData=None):
    """Interactive server console menu (recursive state machine).

    Args:
        state: current menu state. 0 = main menu, 1 = room-number entry.
        prevState: state to fall back to on invalid input.
        storedData: dict carrying the selection between states. Defaults to a
            fresh dict per call chain.

    NOTE(review): the original default was a mutable ``storedData={}`` — the
    shared default dict was mutated by ``storedData["meeting"] = ...`` and
    leaked stale selections across top-level calls. Replaced with the
    ``None`` sentinel idiom; callers are unaffected.
    """
    if storedData is None:
        storedData = {}
    if (state == 0):
        print("\t****Server Main Menu*****")
        print("\t '1' for room change")
        print("\t'Exit' to turn off server")
        msg = input("\tEnter input: ")
        if (msg == "Exit"):
            self.running = False
            return
        if (msg.isdigit() and int(msg) == 1):
            seek = self.conn.cursor().execute(
                ''' SELECT* from booked where status !="Cancelled" '''
            ).fetchall()
            print("****Room change menu")
            count = 0
            for row in seek:
                print("[" + str(count) + "] " + str(row))
                count += 1
            msg = input("Select a meeting: ")
            if (msg == "Q"):
                return self._menu(0, 0)
            elif (msg.isdigit() and int(msg) >= 0 and int(msg) < len(seek)):
                storedData["meeting"] = seek[int(msg)]
                return self._menu(1, state, storedData)
            else:
                print("\t****Invalid Command. Please try again")
                return self._menu(state, prevState)
        else:
            print("\t****Invalid Command. Please try again")
            return self._menu(state, prevState)
    if (state == 1):
        msg = input("Select the new room number: ")
        if (msg == "Q"):
            return self._menu(0, 0)
        elif (msg.isdigit() and int(msg) >= 0):
            selectedMeeting = storedData["meeting"]
            #print(storedData["meeting"])
            #print(selectedMeeting)
            meetingNumber = selectedMeeting[2]
            newRoomNumber = msg
            roomChange = model.Room_Change(meetingNumber, msg)
            message = model.encode(roomChange)
            # Notify every invited client of the room change.
            inviteList = self.conn.cursor().execute(
                ''' SELECT * from inviteList where meetingNumber=? ''',
                (int(meetingNumber),)
            ).fetchall()
            for invite in inviteList:
                ip = invite[1]
                port = invite[2]
                self._sender(message, ip, int(port))
            self.conn.cursor().execute(
                ''' UPDATE booked SET room=? where meetingId=? ''',
                (newRoomNumber, meetingNumber)
            )
            return self._menu(0, 0)
        else:
            print("\t****Invalid Command. Please try again")
            return self._menu(state, prevState, storedData)
def _worker(self, data, addr):
    """Dispatch one incoming UDP datagram by its JSON ``type`` field.

    Handles four message types from clients: Request (book a meeting and
    fan out invites), Accept/Reject/Add (tally responses and confirm,
    schedule, or cancel), Withdraw (drop a participant, possibly cancelling
    the meeting), and Cancel (organizer-initiated cancellation).

    Args:
        data: raw datagram payload (bytes; decoded as UTF-8 JSON below).
        addr: sender address; addr[0] is used as the peer IP.
            # presumably a (host, port) socket tuple — TODO confirm

    NOTE(review): each handler acquires self.lock for its whole DB/network
    section; the release points differ per branch, so statement order here
    is load-bearing.
    """
    data = str(data, 'utf-8')
    print("\n")
    dataDict = json.loads(data)
    print(addr)
    print(data)
    ###################################
    # Request Handler
    ###################################
    if (dataDict["type"] == "Request"):
        #print("R-1")
        request = model.decodeJson(data)
        # Check the cache to see if already responded to the same request before
        self.lock.acquire()
        try:
            seek = self.conn.cursor().execute(
                ''' SELECT * from request where requestNumber like ? and IP like ? and client like ? ''',
                (request.requestNumber, str(addr[0]), request.requestClientName)
            ).fetchall()
        except Exception as e:
            print(e)
            pass
        if(len(seek) > 0):
            # Duplicate request: replay the cached response instead of re-processing.
            if (seek[0][0] == data):
                #print("Message deja vu")
                response = seek[0][1]
                self._sender(response, addr[0], int(request.requestClientName))
                #self.s.sendto(response.encode(), (addr[0], int(request.requestClientName)))
            else:
                # Same request number but different payload: reject as a reused number.
                #print("Invalid message")
                response = model.Response(request.requestNumber, request.requestClientName, "The request number has been previously used for another message")
                response = model.encode(response)
                self._sender(response, addr[0], int(request.requestClientName))
                #self.s.sendto(response.encode(), (addr[0], int(request.requestClientName)))
            self.lock.release()
            return
        #print("R-2")
        # Check if free slot is available, respond accordingly and update cache
        freeSlot = True
        try:
            seek = self.conn.cursor().execute(
                ''' SELECT * from booked where date=? and time=?
                and status!='Cancelled' ''', (request.date, request.time)
            ).fetchall()
        except Exception as e:
            print("Something went wrong")
            print(e)
            pass
        meetingNumber = -1
        message = ""
        #print("R-3")
        if (len(seek) > self.meetingRoomNum):
            # All rooms taken for this slot: refuse the request.
            #print("R-4")
            response = model.Response(request.requestNumber, request.requestClientName, "No room available")
            message = model.encode(response)
            try:
                self._sender(message, addr[0], int(request.requestClientName))
                #self.s.sendto(message.encode(), (addr[0], int(request.requestClientName)))
            except Exception as e:
                print(e)
                pass
        else:
            # Slot free: create the invite, persist it, and fan it out to
            # every requested participant.
            #print("R-5")
            invite = model.Invite(request.date, request.time, request.topic, addr[0], request.requestClientName, request.requestClientName)
            meetingNumber = invite.meetingNumber
            try:
                self.conn.cursor().execute(
                    ''' INSERT INTO meetingNum(lastMeetingNum) VALUES (?) ''',
                    (invite.meetingNumber,)
                )
                self.conn.cursor().execute(
                    ''' INSERT INTO invite(meetingNumber, invite, min) VALUES (?, ?, ?) ''',
                    (invite.meetingNumber, model.encode(invite), request.minimum)
                )
            except Exception as e:
                print(e)
                pass
            print("WTF")
            print(request.participant)
            for participant in request.participant:
                print("R-5-1")
                # participant is [ip, name]; targetName doubles as the port — TODO confirm
                invite.targetName = participant[1]
                message = model.encode(invite)
                try:
                    #print(participant[0])
                    #print(int(invite.targetName))
                    self._sender(message, participant[0], int(invite.targetName))
                    #self.s.sendto(message.encode(), (participant[0], int(invite.targetName)))
                except Exception as e:
                    print(e)
                    pass
                try:
                    self.conn.cursor().execute(
                        ''' INSERT INTO inviteList(meetingNumber, ip, client, status, message) VALUES (?, ?, ?, ?, ?) ''',
                        (invite.meetingNumber, participant[0], participant[1], "Sent", message)
                    )
                except Exception as e:
                    print(e)
                    pass
        #print("R-6")
        # Cache the request plus the response we produced, for dedup above.
        self.conn.cursor().execute(
            ''' INSERT INTO request(request, prevResponse, IP, client, requestNumber, meetingNumber) VALUES (?, ?, ?, ?, ?, ?)
            ''', (data, message, addr[0], request.requestClientName, request.requestNumber, meetingNumber)
        )
        util.commit(self.conn)
        self.lock.release()
    ###################################
    # Accept or Reject or Add handler
    ###################################
    if (dataDict["type"] == "Accept" or dataDict["type"] == "Reject" or dataDict["type"] == "Add"):
        # Note that a client will continuously ping the server (send its accept meeting or reject meeting message every 5 seconds) until it receves either a cancel, confirm, scheduled or not_scheduled message from the server
        acceptOrReject = model.decodeJson(data)
        # Does the referenced meeting exists?
        self.lock.acquire()
        invite = self.conn.cursor().execute(
            ''' SELECT * from invite where meetingNumber=? ''',
            (acceptOrReject.meetingNumber, )
        ).fetchall()
        # If the client sent us a meeting number that does not exist in the system, we'll send a cancel message to avoid getting continuously pinged by the client instead of ignoring it
        if (len(invite) == 0):
            message = model.Cancel(acceptOrReject.meetingNumber, "Meeting does not exits")
            response = model.encode(message)
            self._sender(response, addr[0], int(acceptOrReject.clientName))
            #self.s.sendto(response.encode(), (addr[0], int(acceptOrReject.clientName)))
            self.lock.release()
            return
        if (len(invite) > 0):
            originalInvite = model.decodeJson(invite[0][1])
            requesterIP = originalInvite.requesterIP
            requesterPort = originalInvite.requesterName
            # Was this message sent by someone invited to the referenced meeting?
            inviteList = self.conn.cursor().execute(
                ''' SELECT * from inviteList where meetingNumber=? and ip=? and client=? ''',
                (acceptOrReject.meetingNumber, addr[0], acceptOrReject.clientName)
            ).fetchall()
            # If the client is not invited to the meeting, was this an add request?
            if (len(inviteList) == 0):
                # If yes, we'll add it to the IP inviteList if the add request came from a new participant
                if (dataDict["type"] == "Add"):
                    self.conn.cursor().execute(
                        ''' INSERT INTO inviteList(meetingNumber, ip, client, status) VALUES (?, ?, ?, ?) ''',
                        (acceptOrReject.meetingNumber, addr[0], acceptOrReject.clientName, "Added")
                    )
                    # Refresh the invite list
                    inviteList = self.conn.cursor().execute(
                        ''' SELECT * from inviteList where meetingNumber=? and ip=? and client=? ''',
                        (acceptOrReject.meetingNumber, addr[0], acceptOrReject.clientName)
                    ).fetchall()
                else:
                    # If no we'll send a cancel message to avoid getting continuously pinged by the client instead of ignoring it
                    print("Participant was not invited")
                    message = model.Cancel(acceptOrReject.meetingNumber, "You are not invited to this meeting")
                    response = model.encode(message)
                    self._sender(response, addr[0], int(acceptOrReject.clientName))
                    #self.s.sendto(response.encode(), (addr[0], int(acceptOrReject.clientName)))
                    self.lock.release()
                    return
            if (dataDict["type"] == "Add"):
                # If add request, we'll notify the original requester
                added = model.Added(acceptOrReject.meetingNumber, addr[0], acceptOrReject.clientName)
                message = model.encode(added)
                self._sender(message, requesterIP, int(requesterPort))
                #self.s.sendto(message.encode(), (requesterIP, int(requesterPort)))
                # From now on, the "add" request will be treated as a "accept request" with the same logic
                acceptOrReject = model.Accept(acceptOrReject.meetingNumber, acceptOrReject.clientName)
            # Update the tally of accepted and refused participants
            # if (inviteList[0][3] == "Sent"):
            if (acceptOrReject.type == "Accept"):
                self.conn.cursor().execute(
                    ''' UPDATE inviteList SET status=? where meetingNumber=? and ip=? and client=? ''',
                    ("Accepted", acceptOrReject.meetingNumber, addr[0], acceptOrReject.clientName)
                )
            else:
                self.conn.cursor().execute(
                    ''' UPDATE inviteList SET status=? where meetingNumber=? and ip=? and client=?
                    ''', ("Refused", acceptOrReject.meetingNumber, addr[0], acceptOrReject.clientName)
                )
            # What's the total number of invitees for this meeting?
            totalInvites = self.conn.cursor().execute(
                ''' SELECT COUNT(*) from inviteList where meetingNumber=? ''',
                (acceptOrReject.meetingNumber,)
            ).fetchone()[0]
            # How many accepted so far?
            totalAcceptedSoFar = self.conn.cursor().execute(
                ''' SELECT COUNT(*) from inviteList where meetingNumber=? and status='Accepted' ''',
                (acceptOrReject.meetingNumber,)
            ).fetchone()[0]
            # How many refused or withdrawn so far?
            totalRefusedSoFar = self.conn.cursor().execute(
                ''' SELECT COUNT(*) from inviteList where meetingNumber=? and (status='Refused' or status='Withdrawn') ''',
                (acceptOrReject.meetingNumber,)
            ).fetchone()[0]
            # Based on the current accepted or refused tally, can the meeting still happen?
            minThreshold = invite[0][2]
            howManyCanStillAccept = totalInvites - totalRefusedSoFar
            # If insufficient responses to come to a conclusion, we stop here and wait for more responses and do nothing for now and none of the code below will execute
            '''
            if (totalAcceptedSoFar < minThreshold and howManyCanStillAccept >= minThreshold):
                self.lock.release()
                return
            '''
            # Note that since the scheduler works on a first come first served basis, while it guarantees a room was free at the time the request was made, by the time the meeting gets confirmed, another request might have taken the room
            # If that happens, we will send a cancel message instead
            freeSlot = True
            addMeeting = True
            ##################
            try:
                seek = self.conn.cursor().execute(
                    ''' SELECT * from booked where date=? and time=? and status !="Cancelled" ''',
                    (originalInvite.date, originalInvite.time)
                ).fetchall()
                # NOTE(review): disabled alternative query kept verbatim below;
                # in the original it was fenced with unbalanced nested triple
                # quotes, so it is preserved here as comments instead.
                # seek = self.conn.cursor().execute(
                #     ''' SELECT * from booked where date=? and time=? and meetingId!=?
                #     and status!='Cancelled' ''',
                #     (originalInvite.date, originalInvite.time, originalInvite.meetingNumber)
                # ).fetchall()
                #print(originalInvite.meetingNumber)
                #print(len(seek))
            except Exception as e:
                print("Something went wrong")
                print(e)
                pass
            # If the meeting has already been scheduled, do not add a new entry.
            if (len(seek) >= self.meetingRoomNum):
                meetingNumber = seek[0][2]
                #print("*!*!*!*!**!*!*!*!!*")
                seek = self.conn.cursor().execute(
                    ''' SELECT * from booked where meetingId=? ''',
                    (originalInvite.meetingNumber,)
                ).fetchall()
                #print(len(seek))
                if (len(seek) == 0):
                    # Rooms are full and this meeting is not among them.
                    freeSlot = False
                else:
                    # Already booked earlier: keep the existing entry.
                    addMeeting = False
            ###############
            # If we have reached the min participant threshold for the first time, we will batch send messages to all those who have previously accepted the invite. Otherwise, a message will be sent only to the current correspondant
            # The original meeting creator will get a slightly different conformation than the rest of the participants; we will peek at the original invite cached by the server to find out the identity of the original meeting creator
            # The original meeting creator will get a new scheduled message with an updated list of participant each time a new participant accepts the invite after the original scheduled message was sent.
            acceptedParticipants = self.conn.cursor().execute(
                ''' SELECT * FROM inviteList where meetingNumber=? and status=? ''',
                (acceptOrReject.meetingNumber, "Accepted")
            ).fetchall()
            confirm = model.Confirm(originalInvite.meetingNumber, len(seek)+1)
            if (freeSlot==True and totalAcceptedSoFar == minThreshold):
                #print("DB - 1")
                if addMeeting:
                    try:
                        self.conn.cursor().execute(
                            ''' INSERT INTO booked(date, time, meetingId, status, room) VALUES (?, ?, ?, ?, ?) ''',
                            (originalInvite.date, originalInvite.time, originalInvite.meetingNumber, "booked", len(seek)+1)
                        )
                    except Exception as e:
                        print("Error Ocurred*****")
                        print(e)
                        pass
                    try:
                        self.conn.cursor().execute(
                            ''' INSERT INTO meetingToRoom(meetingNumber, room) VALUES (?, ?)
                            ''',(originalInvite.meetingNumber, len(seek)+1)
                        )
                    except Exception as e:
                        print("Error Ocurred*****")
                        print(e)
                        pass
                message = model.encode(confirm)
            #print("loc-0")
            seekMeetingRoom = self.conn.cursor().execute(
                ''' SELECT * from meetingToRoom where meetingNumber=? ''',
                (originalInvite.meetingNumber, )
            ).fetchall()
            meetingRoom = -1
            if (len(seekMeetingRoom)> 0):
                meetingRoom = seekMeetingRoom[0][1]
            listConfirmedParticipant = []
            for participant in acceptedParticipants:
                pIp = participant[1]
                pName = participant[2]
                listConfirmedParticipant.append([pIp,pName])
                # The requester gets its own (Scheduled) message later, so skip it here.
                if (pIp != requesterIP or pName != requesterPort):
                    if (freeSlot==True and totalAcceptedSoFar == minThreshold):
                        try:
                            print("Confirm-0")
                            self._sender(message, pIp, int(pName))
                            #self.s.sendto(message.encode(), (pIp, int(pName)))
                        except Exception as e:
                            print(e)
                            pass
            # If we find out the number of accepted participants can no longer meet the min participant threshold or no room is available, we'll send a cancel request to every participant regardless of accept or refused status
            if (howManyCanStillAccept == (minThreshold -1) or freeSlot == False):
                allParticipants = self.conn.cursor().execute(
                    ''' SELECT * FROM inviteList where meetingNumber=? and status!='withdrawn' ''',
                    (acceptOrReject.meetingNumber,)
                ).fetchall()
                cancel = model.Cancel(originalInvite.meetingNumber, "Below Minimum Participant")
                if (freeSlot == False):
                    cancel = model.Cancel(originalInvite.meetingNumber, "The room is no longer available due to another request confirming the room before you")
                message = model.encode(cancel)
                for participant in allParticipants:
                    pIp = participant[1]
                    pName = participant[2]
                    if (pIp != requesterIP or pName != requesterPort):
                        try:
                            #print("Cancel-0")
                            self._sender(message, pIp, int(pName))
                            #self.s.sendto(message.encode(), (pIp, int(pName)))
                        except Exception as e:
                            print(e)
                            pass
            # Creating and sending message to requester.
            #print("loc-1")
            oldRequest = self.conn.cursor().execute(
                ''' SELECT * FROM request where meetingNumber=?
                ''', (originalInvite.meetingNumber,)
            ).fetchall()
            requestNumber = oldRequest[0][5]
            message = ''
            if (totalAcceptedSoFar >= minThreshold and freeSlot == True):
                scheduled = model.Scheduled(requestNumber, originalInvite.meetingNumber, meetingRoom, listConfirmedParticipant)
                # Testing
                message = model.encode(scheduled)
                try:
                    #print("Schedule-2")
                    self._sender(message, requesterIP, int(requesterPort))
                    #self.s.sendto(message.encode(), (requesterIP, int(requesterPort)))
                except Exception as e:
                    print(e)
                    pass
            if (howManyCanStillAccept < minThreshold or freeSlot == False):
                non_schedule = model.Non_Scheduled(requestNumber, originalInvite.meetingNumber, originalInvite.date, originalInvite.time, minThreshold, listConfirmedParticipant, originalInvite.topic)
                message = model.encode(non_schedule)
                try:
                    #print("Non_Schedule-1")
                    self._sender(message, requesterIP, int(requesterPort))
                    #self.s.sendto(message.encode(), (requesterIP, int(requesterPort)))
                except Exception as e:
                    print(e)
                    pass
            #print("loc-2")
            # If we already reached the min participant threshold before and a new participant accept the meeting, we will only send a confirmation in response to the current sender instead of a batch message to all participants
            # The requester should had received a new participant list including this new participant with the above code
            # Edge case handler-> sender did not receive the first confirm response. We'll resend the message here.
            if (freeSlot==True and totalAcceptedSoFar > minThreshold):
                if (requesterIP != addr[0] or requesterPort !=acceptOrReject.clientName):
                    try:
                        #print("confirm-1")
                        confirm = model.Confirm(originalInvite.meetingNumber, meetingRoom)
                        message = model.encode(confirm)
                        self._sender(message, addr[0], int(acceptOrReject.clientName))
                        #self.s.sendto(message.encode(), (addr[0], int(acceptOrReject.clientName)))
                    except Exception as e:
                        print(e)
                        pass
            # Edge case handler-> meeting cancelled, but sender did not receive the first cancelled response. We'll resend the message
            if (requesterIP != addr[0] or requesterPort !=acceptOrReject.clientName):
                if (howManyCanStillAccept < minThreshold -1):
                    cancel = model.Cancel(originalInvite.meetingNumber, "Below Minimum Participant")
                    message = model.encode(cancel)
                    try:
                        #print("cancel-1")
                        self._sender(message, addr[0], int(acceptOrReject.clientName))
                        #self.s.sendto(message.encode(), (addr[0], int(acceptOrReject.clientName)))
                    except Exception as e:
                        print(e)
                        pass
        self.lock.release()
    if (dataDict["type"] == "Withdraw"):
        withdraw = model.decodeJson(data)
        # Does the referenced meeting exists?
        self.lock.acquire()
        seek = self.conn.cursor().execute(
            ''' SELECT * from booked where meetingId=? ''',
            (withdraw.meetingNumber, )
        ).fetchall()
        if (len(seek) > 0):
            # Was the person in the invite list?
            seek = self.conn.cursor().execute(
                ''' SELECT * from inviteList where meetingNumber=? and ip=? and client=? ''',
                (withdraw.meetingNumber, addr[0], int(withdraw.clientName))
            ).fetchall()
            if (len(seek) > 0):
                # If yes, fetch the saved copy of the original invite to find out the ip and sessionName of the requester, the min threshold, etc.
                seek = self.conn.cursor().execute(
                    ''' SELECT * from invite where meetingNumber=? ''',
                    (withdraw.meetingNumber, )
                ).fetchall()
                # And update the status of the invitee to withdrawn
                self.conn.cursor().execute(
                    ''' UPDATE inviteList SET status=? where meetingNumber=? and ip=? and client=? ''',
                    ("Withdrawn", withdraw.meetingNumber, addr[0], withdraw.clientName)
                )
                if (len(seek)>0):
                    inviteStr = seek[0][1]
                    invite = model.decodeJson(inviteStr)
                    minParticipant = int(seek[0][2])
                    # check to see if below min now
                    # How many accepted now?
                    totalAcceptedSoFar = self.conn.cursor().execute(
                        ''' SELECT COUNT(*) from inviteList where meetingNumber=? and status='Accepted' ''',
                        (withdraw.meetingNumber,)
                    ).fetchone()[0]
                    # Based on the current accepted rate, can the meeting still happen after the withdrawal?
                    if (totalAcceptedSoFar < minParticipant):
                        # cancel the entire meeting; send notification to all participants
                        allParticipants = self.conn.cursor().execute(
                            ''' SELECT * FROM inviteList where meetingNumber=? and status!='Withdrawn' ''',
                            (withdraw.meetingNumber,)
                        ).fetchall()
                        self.conn.cursor().execute(
                            ''' UPDATE booked SET status="Cancelled" where meetingId=? ''',
                            (withdraw.meetingNumber,)
                        )
                        cancel = model.Cancel(withdraw.meetingNumber, "Below Minimum Participant due to withdrawal")
                        message = model.encode(cancel)
                        for participant in allParticipants:
                            pIp = participant[1]
                            pName = participant[2]
                            try:
                                self._sender(message, pIp, int(pName))
                                #self.s.sendto(message.encode(), (pIp, int(pName)))
                            except Exception as e:
                                print(e)
                                pass
                    else:
                        # Meeting survives: just relay the withdrawal to the requester.
                        withdraw = model.Withdraw(withdraw.meetingNumber, withdraw.clientName, addr[0])
                        message = model.encode(withdraw)
                        requesterIP = invite.requesterIP
                        requesterPort = invite.targetName
                        try:
                            self._sender(message, requesterIP, int(requesterPort))
                            #self.s.sendto(message.encode(), (requesterIP, int(requesterPort)))
                        except Exception as e:
                            print(e)
                            pass
        self.lock.release()
    if (dataDict["type"] == "Cancel"):
        cancel = model.decodeJson(data)
        #print("Cancel-1")
        self.lock.acquire()
        # Does the referenced meeting exists?
        seek = self.conn.cursor().execute(
            ''' SELECT * from booked where meetingId=? ''',
            (cancel.meetingNumber, )
        ).fetchall()
        if (len(seek) > 0):
            #print("Cancel-2")
            # If yes, fetch the saved copy of the original invite to find out the ip and sessionName of the requester, the min threshold, etc.
            seek = self.conn.cursor().execute(
                ''' SELECT * from invite where meetingNumber=?
                ''', (cancel.meetingNumber, )
            ).fetchall()
            if (len(seek)>0):
                #print("Cancel-3")
                inviteStr = seek[0][1]
                invite = model.decodeJson(inviteStr)
                requesterIP = invite.requesterIP
                requesterName = invite.requesterName
                # If the current message indeed was sent by the original meeting requester, we cancel the meeting and send a message to all participants
                if (requesterIP == addr[0] and requesterName == cancel.clientName):
                    #print("Cancel-4")
                    self.conn.cursor().execute(
                        ''' UPDATE booked SET status="Cancelled" where meetingId=? ''',
                        (cancel.meetingNumber,)
                    )
                    # cancel the entire meeting; send notification to all participants
                    allParticipants = self.conn.cursor().execute(
                        ''' SELECT * FROM inviteList where meetingNumber=? and status!='Withdrawn' ''',
                        (cancel.meetingNumber,)
                    ).fetchall()
                    cancel = model.Cancel(cancel.meetingNumber, "Cancelled by the organizer")
                    message = model.encode(cancel)
                    #print("Cancel-5")
                    for participant in allParticipants:
                        pIp = participant[1]
                        pName = participant[2]
                        try:
                            self._sender(message, pIp, int(pName))
                            #self.s.sendto(message.encode(), (pIp, int(pName)))
                        except Exception as e:
                            print(e)
                            pass
        self.lock.release()
"""Qualitative check of VAE text reconstructions across training checkpoints."""
from model import vAe, encode, decode
from util_io import pform
from util_np import np
from util_tf import tf
import util_sp as sp

# Locations of the sentencepiece vocab model and the saved checkpoints.
path_vocab = "../trial/data/vocab.model"
path_ckpt = "../trial/ckpt"

vocab = sp.load_spm(path_vocab)

# Three fixed probe sentences, encoded once up front.
s0 = "This is a test."
s1 = "Dragons have been portrayed in film and television in many different forms."
s2 = "His development of infinitesimal calculus opened up new applications of the methods of mathematics to science."
tgt = sp.encode(vocab, (s0, s1, s2))

vae = vAe('infer')
sess = tf.InteractiveSession()


def auto(z, steps=256):
    """Decode the latent batch `z` for up to `steps` steps and print each sentence."""
    decoded = decode(sess, vae, z, steps)
    for sentence in sp.decode(vocab, decoded):
        print(sentence)


# Restore each checkpoint in turn and print what the model produces.
for i in range(1, 7):
    print()
    ckpt = "master" + str(i)
    tf.train.Saver().restore(sess, pform(path_ckpt, ckpt))
    # Reconstructions of the probe sentences ...
    auto(encode(sess, vae, tgt))
    # ... and the decode of the all-zero (prior-mean) latent vector.
    auto(np.zeros((1, int(vae.mu.shape[1]))))
# --- Training-data ingest for the value/policy network (PyTorch) -------------
# NOTE(review): the original used `sys.stdin.buffer` and `np.*` without
# importing `sys` or `numpy`; both imports are added here.
from pathlib import Path
import collections
import gzip
import sys

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim

import model

BATCH_SIZE = 100

# read all games: one tab-separated (state, value, policy) record per line,
# gzip-compressed on stdin.
games = collections.deque()
with gzip.open(sys.stdin.buffer, mode='rt') as lines:
    for line in lines:
        state, value, policy = line.rstrip('\n').split('\t')
        state = model.encode(state)
        value = np.float32(value)
        policy = np.array([np.float32(x) for x in policy.split(' ')])
        games.append((state, value, policy))


def sample(size):
    """Draw `size` records with replacement from `games`.

    Returns:
        (states, values, policies) as stacked numpy arrays.
    """
    idx = np.random.choice(len(games), size, replace=True)
    states, values, policies = zip(*[games[i] for i in idx])
    return np.array(states), np.array(values), np.array(policies)


def calc_loss(batch, net):
    # NOTE(review): this definition continues past the visible chunk; the
    # lines below are the visible prefix only.
    states, values, policies = batch
    states = torch.FloatTensor(states)
    values_target = torch.FloatTensor(values)