def estimator(A_val, y_batch_val, hparams):
    """Function that returns the estimated image"""
    best_keeper = utils.BestKeeper(hparams)
    feed_dict = {A: A_val, y_batch: y_batch_val}
    for i in range(hparams.num_random_restarts):
        # Re-initialize z for this random restart
        sess.run([z_batch.initializer])
        for j in range(hparams.max_update_iter):
            # One gradient step on z, fetching the losses for logging
            _, lr_val, total_loss_val, \
            m_loss1_val, \
            m_loss2_val, \
            zp_loss_val = sess.run([update_op, learning_rate, total_loss,
                                    m_loss1, m_loss2, zp_loss],
                                   feed_dict=feed_dict)
            logging_format = 'rr {} iter {} lr {} total_loss {} m_loss1 {} m_loss2 {} zp_loss {}'
            print(logging_format.format(i, j, lr_val, total_loss_val,
                                        m_loss1_val, m_loss2_val, zp_loss_val))
            # Periodically dump frames for a GIF of the optimization trajectory
            if hparams.gif and ((j % hparams.gif_iter) == 0):
                images = sess.run(x_hat_batch, feed_dict=feed_dict)
                for im_num, image in enumerate(images):
                    save_dir = '{0}/{1}/'.format(hparams.gif_dir, im_num)
                    utils.set_up_dir(save_dir)
                    save_path = save_dir + '{0}.png'.format(j)
                    image = image.reshape(hparams.image_shape)
                    save_image(image, save_path)
        # Report this restart's final estimate to the best keeper
        x_hat_batch_val, total_loss_batch_val = sess.run(
            [x_hat_batch, total_loss_batch], feed_dict=feed_dict)
        best_keeper.report(x_hat_batch_val, total_loss_batch_val)
    return best_keeper.get_best()
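# All estimator variants in this file rely on utils.BestKeeper to keep, per
# image in the batch, the reconstruction with the lowest loss across random
# restarts. Its implementation is not shown here; the following is a minimal
# sketch of the assumed interface (report / get_best), not the actual class.
# The name BestKeeperSketch and its internals are illustrative assumptions.
import numpy as np

class BestKeeperSketch(object):
    """Keeps, per image in the batch, the lowest-loss estimate seen so far."""

    def __init__(self, hparams):
        self.batch_size = hparams.batch_size
        self.losses_val_best = np.full(hparams.batch_size, np.inf)
        self.x_hat_batch_val_best = np.zeros((hparams.batch_size, hparams.n_input))

    def report(self, x_hat_batch_val, losses_val, z_val=None):
        # Keep an estimate only if it beats the best loss recorded for that image
        # (the stage_i variant below additionally logs z via the z_val kwarg).
        for idx in range(self.batch_size):
            if losses_val[idx] < self.losses_val_best[idx]:
                self.losses_val_best[idx] = losses_val[idx]
                self.x_hat_batch_val_best[idx] = x_hat_batch_val[idx]

    def get_best(self):
        return self.x_hat_batch_val_best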
def estimator(A_val, y_batch_val, hparams):
    """Function that returns the estimated image"""
    best_keeper = utils.BestKeeper(hparams)
    if hparams.measurement_type == 'project':
        # if y_batch_val.shape[0] != hparams.batch_size:
        #     y_batch_val_tmp = np.zeros((hparams.batch_size, hparams.num_measurements))
        #     y_batch_val_tmp[:y_batch_val.shape[0], :] = y_batch_val
        #     y_batch_val = y_batch_val_tmp
        #     print('Smaller INPUT NUMBER')  # or change hparams on the fly
        feed_dict = {y_batch: y_batch_val}
    else:
        feed_dict = {A: A_val, y_batch: y_batch_val}
    for i in range(hparams.num_random_restarts):
        sess.run(opt_reinit_op)
        for j in range(hparams.max_update_iter):
            _, lr_val, total_loss_val, \
            m_loss1_val, \
            m_loss2_val, \
            zp_loss_val = sess.run([update_op, learning_rate, total_loss,
                                    m_loss1, m_loss2, zp_loss],
                                   feed_dict=feed_dict)
            logging_format = 'rr {} iter {} lr {} total_loss {} m_loss1 {} m_loss2 {} zp_loss {}'
            print(logging_format.format(i, j, lr_val, total_loss_val,
                                        m_loss1_val, m_loss2_val, zp_loss_val))
            # print('n_z is {}'.format(hparams.n_z))
            # Sanity check: with a positive z-prior weight, a positive zp_loss
            # must show up in the total loss.
            if total_loss_val == m_loss2_val and zp_loss_val > 0 and hparams.zprior_weight > 0:
                raise ValueError('z-prior loss is positive but missing from total_loss')
            if hparams.gif and ((j % hparams.gif_iter) == 0):
                images = sess.run(x_hat_batch, feed_dict=feed_dict)
                for im_num, image in enumerate(images):
                    save_dir = '{0}/{1}/'.format(hparams.gif_dir, im_num)
                    utils.set_up_dir(save_dir)
                    save_path = save_dir + '{0}.png'.format(j)
                    image = image.reshape(hparams.image_shape)
                    save_image(image, save_path)
        x_hat_batch_val, total_loss_batch_val = sess.run(
            [x_hat_batch, total_loss_batch], feed_dict=feed_dict)
        best_keeper.report(x_hat_batch_val, total_loss_batch_val)
    return best_keeper.get_best()
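# The commented-out block above hints at zero-padding a final batch that is
# smaller than hparams.batch_size instead of changing hparams on the fly. A
# hypothetical helper for that (pad_batch is not part of the original code)
# could look like this:
import numpy as np

def pad_batch(y_batch_val, batch_size, num_measurements):
    """Zero-pad a partial batch of measurements up to the full batch size."""
    if y_batch_val.shape[0] == batch_size:
        return y_batch_val
    padded = np.zeros((batch_size, num_measurements), dtype=y_batch_val.dtype)
    padded[:y_batch_val.shape[0], :] = y_batch_val
    return padded
# The caller would then slice the first y_batch_val.shape[0] rows back out of
# the recovered batch after estimation.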
def estimator(Tx_val, Rx_val, Pilot_val, hparams):
    """Function that returns the estimated channel"""
    best_keeper = utils.BestKeeper(hparams)
    if hparams.measurement_type == 'project':
        # NOTE: inherited from the image estimator above; this variant has no
        # y_batch_val, so the original `feed_dict = {y_batch: y_batch_val}`
        # would raise a NameError. Fail explicitly instead.
        raise NotImplementedError("measurement_type 'project' is not supported in the channel-estimation setting")
    else:
        feed_dict = {Tx: Tx_val, Rx: Rx_val, Pilot: Pilot_val}
    for i in range(hparams.num_random_restarts):
        sess.run(opt_reinit_op)
        for j in range(hparams.max_update_iter):
            if hparams.gif and ((j % hparams.gif_iter) == 0):
                images = sess.run(x_hat_batch, feed_dict=feed_dict)
                for im_num, image in enumerate(images):
                    save_dir = '{0}/{1}/'.format(hparams.gif_dir, im_num)
                    utils.set_up_dir(save_dir)
                    save_path = save_dir + '{0}.png'.format(j)
                    image = image.reshape(hparams.image_shape)
                    save_image(image, save_path)
            _, lr_val, total_loss_val, \
            m_loss1_val, \
            m_loss2_val, \
            zp_loss_val = sess.run([update_op, learning_rate, total_loss,
                                    m_loss1, m_loss2, zp_loss],
                                   feed_dict=feed_dict)
            logging_format = 'rr {} iter {} lr {} total_loss {} m_loss1 {} m_loss2 {} zp_loss {}'
            print(logging_format.format(i, j, lr_val, total_loss_val,
                                        m_loss1_val, m_loss2_val, zp_loss_val))
        H_hat_val, total_loss_val = sess.run([H_hat, total_loss], feed_dict=feed_dict)
        best_keeper.report(H_hat_val, total_loss_val)
    return best_keeper.get_best()
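# A hypothetical invocation of the channel-estimation variant. All shapes and
# the helper name run_channel_estimation are assumptions for illustration;
# the real shapes depend on how the Tx/Rx/Pilot placeholders are declared
# elsewhere in the module.
import numpy as np

def run_channel_estimation(hparams, n_tx=64, n_rx=64, n_pilot=8):
    # Random stand-ins for transmitted pilots, received signal, and pilot pattern
    Tx_val = np.random.randn(n_pilot, n_tx).astype(np.float32)
    Rx_val = np.random.randn(n_pilot, n_rx).astype(np.float32)
    Pilot_val = np.random.randn(n_pilot, n_pilot).astype(np.float32)
    # Returns the best channel estimate H_hat across random restarts
    return estimator(Tx_val, Rx_val, Pilot_val, hparams)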
def stage_i(A_val, y_batch_val, hparams, hid_i, init_obj, early_stop, bs, optim, recovered=False):
    model_def = globals()['model_def']
    m_loss1_batch_dict = {}
    m_loss2_batch_dict = {}
    zp_loss_batch_dict = {}
    total_loss_dict = {}
    x_hat_batch_dict = {}
    model_selection = ModelSelect(hparams)
    hid_i = int(hid_i)
    # print('Matrix norm is {}'.format(np.linalg.norm(A_val)))
    # hparams.eps = hparams.eps * np.linalg.norm(A_val)

    # Get a session
    sess = tf.Session()

    # Set up placeholders
    A = tf.placeholder(tf.float32, shape=(hparams.n_input, hparams.num_measurements), name='A')
    y_batch = tf.placeholder(tf.float32, shape=(hparams.batch_size, hparams.num_measurements), name='y_batch')

    # Create the generator
    model_hparams = model_def.Hparams()
    model_hparams.n_z = hparams.n_z
    model_hparams.stdv = hparams.stdv
    model_hparams.mean = hparams.mean
    model_hparams.grid = copy.deepcopy(hparams.grid)
    model_selection.setup_dim(hid_i, model_hparams)
    if hparams.model_types[0] != 'vae-flex-alt' and 'alt' in hparams.model_types[0]:
        model_def.ignore_grid = next((j for j in model_selection.dim_list if j >= hid_i), None)

    # Set up the initialization
    print('The initialization is: {}'.format(init_obj.mode))
    if init_obj.mode == 'random':
        z_batch = model_def.get_z_var(model_hparams, hparams.batch_size, hid_i)
    elif init_obj.mode in ['previous-and-random', 'only-previous']:
        z_batch = model_def.get_z_var(model_hparams, hparams.batch_size, hid_i)
        init_op_par = tf.assign(z_batch, truncate_val(model_hparams, hparams, hid_i, init_obj, stdv=0))
    else:
        z_batch = truncate_val(model_hparams, hparams, hid_i, init_obj, stdv=0.1)
    _, x_hat_batch, _ = model_def.generator_i(model_hparams, z_batch, 'gen', hparams.bol, hid_i, relative=False)
    x_hat_batch_dict[hid_i] = x_hat_batch

    # Measure the estimate
    if hparams.measurement_type == 'project':
        y_hat_batch = tf.identity(x_hat_batch, name='y_hat_batch')
    else:
        y_hat_batch = tf.matmul(x_hat_batch, A, name='y_hat_batch')

    # Define all losses
    m_loss1_batch = tf.reduce_mean(tf.abs(y_batch - y_hat_batch), 1)
    m_loss2_batch = tf.reduce_mean((y_batch - y_hat_batch) ** 2, 1)
    if hparams.stdv > 0:
        norm_val = 1 / (hparams.stdv ** 2)
    else:
        norm_val = 1e+20
    zp_loss_batch = tf.reduce_sum((z_batch - tf.ones(tf.shape(z_batch)) * hparams.mean) ** 2 * norm_val, 1)  # normalized by the prior variance

    # Define total loss
    total_loss_batch = hparams.mloss1_weight * m_loss1_batch \
                       + hparams.mloss2_weight * m_loss2_batch \
                       + hparams.zprior_weight * zp_loss_batch
    total_loss = tf.reduce_mean(total_loss_batch)
    total_loss_dict[hid_i] = total_loss

    # Compute means for logging
    m_loss1 = tf.reduce_mean(m_loss1_batch)
    m_loss2 = tf.reduce_mean(m_loss2_batch)
    zp_loss = tf.reduce_mean(zp_loss_batch)
    m_loss1_batch_dict[hid_i] = m_loss1
    m_loss2_batch_dict[hid_i] = m_loss2
    zp_loss_batch_dict[hid_i] = zp_loss

    # Set up gradient descent
    var_list = [z_batch]
    if recovered:
        global_step = tf.Variable(hparams.optim.global_step, trainable=False, name='global_step')
    else:
        global_step = tf.Variable(0, trainable=False, name='global_step')
    learning_rate = utils.get_learning_rate(global_step, hparams)
    opt = utils.get_optimizer(learning_rate, hparams)
    update_op = opt.minimize(total_loss, var_list=var_list, global_step=global_step, name='update_op')
    opt_reinit_op = utils.get_opt_reinit_op(opt, var_list, global_step)

    # Initialize and restore model parameters
    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    # Restore the setting
    if 'alt' in hparams.model_types[0]:
        factor = 1
    else:
        factor = len(hparams.grid)
    # Changes the object in place (call by reference); necessary since a call
    # of generator_i might change the batch size.
    model_def.batch_size = hparams.batch_size * factor
    model_selection.restore(sess, hid_i)

    if recovered:
        best_keeper = hparams.optim.best_keeper
    else:
        best_keeper = utils.BestKeeper(hparams, logg_z=True)
    if hparams.measurement_type == 'project':
        feed_dict = {y_batch: y_batch_val}
    else:
        feed_dict = {A: A_val, y_batch: y_batch_val}

    flag = False
    for i in range(init_obj.num_random_restarts):
        if recovered and i <= hparams.optim.i:
            # Losing the optimizer's state here; a keras implementation might handle this better
            if i < hparams.optim.i:
                continue
            else:
                sess.run(utils.get_opt_reinit_op(opt, [], global_step))
                sess.run(tf.assign(z_batch, hparams.optim.z_batch))
        else:
            sess.run(opt_reinit_op)
            if i < 1 and init_obj.mode in ['previous-and-random', 'only-previous']:
                print('Using previous outcome as starting point')
                sess.run(init_op_par)
        for j in range(hparams.max_update_iter):
            # When resuming from a checkpoint, skip the iterations already done
            if recovered and j < hparams.optim.j:
                continue
            _, lr_val, total_loss_val, \
            m_loss1_val, \
            m_loss2_val, \
            zp_loss_val = sess.run([update_op, learning_rate, total_loss,
                                    m_loss1, m_loss2, zp_loss],
                                   feed_dict=feed_dict)
            if hparams.gif and ((j % hparams.gif_iter) == 0):
                images = sess.run(x_hat_batch, feed_dict=feed_dict)
                for im_num, image in enumerate(images):
                    save_dir = '{0}/{1}/{2}/'.format(hparams.gif_dir, hid_i, im_num)
                    utils.set_up_dir(save_dir)
                    save_path = save_dir + '{0}.png'.format(j)
                    image = image.reshape(hparams.image_shape)
                    save_image(image, save_path)
            if j % 100 == 0 and early_stop:
                x_hat_batch_val = sess.run(x_hat_batch, feed_dict=feed_dict)
                if check_tolerance(hparams, A_val, x_hat_batch_val, y_batch_val)[1]:
                    flag = True
                    print('Early stopping')
                    break
            if j % 25 == 0:  # log only every 25th iteration
                logging_format = 'hid {} rr {} iter {} lr {} total_loss {} m_loss1 {} m_loss2 {} zp_loss {}'
                print(logging_format.format(hid_i, i, j, lr_val, total_loss_val,
                                            m_loss1_val, m_loss2_val, zp_loss_val))
            if j % 100 == 0:
                # Periodically report to the best keeper and checkpoint the
                # optimization state so an interrupted run can be resumed
                x_hat_batch_val, total_loss_batch_val, z_batch_val = sess.run(
                    [x_hat_batch, total_loss_batch, z_batch], feed_dict=feed_dict)
                best_keeper.report(x_hat_batch_val, total_loss_batch_val, z_val=z_batch_val)
                optim.global_step = sess.run(global_step)
                optim.A = A_val
                optim.y_batch = y_batch_val
                optim.i = i
                optim.j = j
                optim.z_batch = z_batch_val
                optim.best_keeper = best_keeper
                optim.bs = bs
                optim.init_obj = init_obj
                utils.save_to_pickle(optim, utils.get_checkpoint_dir(hparams, hparams.model_types[0]) + 'tmp/optim.pkl')
                print('Checkpoint of optimization created')
        # Reset the resumed iteration counter for subsequent restarts
        hparams.optim.j = 0
        x_hat_batch_val, total_loss_batch_val, z_batch_val = sess.run(
            [x_hat_batch, total_loss_batch, z_batch], feed_dict=feed_dict)
        best_keeper.report(x_hat_batch_val, total_loss_batch_val, z_val=z_batch_val)
        if flag:
            break
    tf.reset_default_graph()
    return best_keeper.get_best()
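# stage_i early-stops when check_tolerance signals that the measurement error
# is already below hparams.eps (the commented-out rescaling of hparams.eps by
# the matrix norm suggests eps is a measurement-error tolerance). The real
# implementation lives elsewhere in the repo; a minimal sketch under that
# assumption, returning the per-image errors and the boolean flag that stage_i
# reads via [1], might be:
import numpy as np

def check_tolerance_sketch(hparams, A_val, x_hat_batch_val, y_batch_val):
    """Return per-image measurement errors and whether all are below eps."""
    # Same measurement model as the graph: y_hat = x_hat @ A
    y_hat_batch_val = np.dot(x_hat_batch_val, A_val)
    errors = np.mean((y_batch_val - y_hat_batch_val) ** 2, axis=1)
    return errors, bool(np.all(errors < hparams.eps))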