import os

import numpy as np

import utils  # repository utilities
# `model_input`, `save_image`, and `view_image` are assumed to be provided
# by the surrounding repository.


def main(hparams):
    # --- Variant 1: linear measurements with a DCGAN prior ---
    # Set up some stuff according to hparams
    hparams.n_input = np.prod(hparams.image_shape)
    maxiter = hparams.max_outer_iter
    utils.print_hparams(hparams)

    # Get inputs
    xs_dict = model_input(hparams)

    estimators = utils.get_estimators(hparams)
    utils.setup_checkpointing(hparams)
    measurement_losses, l2_losses = utils.load_checkpoints(hparams)

    x_hats_dict = {'dcgan': {}}
    x_batch_dict = {}
    for key, x in xs_dict.iteritems():
        if hparams.lazy:
            # If lazy, first check if the image has already been
            # saved before by *all* estimators. If yes, then skip this image.
            save_paths = utils.get_save_paths(hparams, key)
            is_saved = all([os.path.isfile(save_path)
                            for save_path in save_paths.values()])
            if is_saved:
                continue

        x_batch_dict[key] = x
        if len(x_batch_dict) < hparams.batch_size:
            continue

        # Reshape the batch into a (batch_size, n_input) matrix
        x_batch_list = [x.reshape(1, hparams.n_input)
                        for _, x in x_batch_dict.iteritems()]
        x_batch = np.concatenate(x_batch_list)

        # Construct measurements y = x A
        A_outer = utils.get_outer_A(hparams)
        y_batch_outer = np.matmul(x_batch, A_outer)

        x_main_batch = 0.0 * x_batch
        z_opt_batch = np.random.randn(hparams.batch_size, 100)

        for k in range(maxiter):
            # Gradient step on the measurement loss ||y - x A||^2 ...
            x_est_batch = x_main_batch + hparams.outer_learning_rate * (
                np.matmul(y_batch_outer - np.matmul(x_main_batch, A_outer),
                          A_outer.T))
            # ... followed by projection onto the range of the generator
            estimator = estimators['dcgan']
            x_hat_batch, z_opt_batch = estimator(x_est_batch, z_opt_batch,
                                                 hparams)
            x_main_batch = x_hat_batch

        # Dict iteration order here matches the order used to build x_batch
        for i, key in enumerate(x_batch_dict.keys()):
            x = xs_dict[key]
            y = y_batch_outer[i]
            x_hat = x_hat_batch[i]

            # Save the estimate
            x_hats_dict['dcgan'][key] = x_hat

            # Compute and store measurement and l2 loss
            measurement_losses['dcgan'][key] = utils.get_measurement_loss(
                x_hat, A_outer, y)
            l2_losses['dcgan'][key] = utils.get_l2_loss(x_hat, x)

        print 'Processed up to image {0} / {1}'.format(key + 1, len(xs_dict))

        # Checkpointing
        if hparams.save_images and ((key + 1) % hparams.checkpoint_iter == 0):
            utils.checkpoint(x_hats_dict, measurement_losses, l2_losses,
                             save_image, hparams)
            print '\nProcessed and saved first {0} images\n'.format(key + 1)

        x_batch_dict = {}

    # Final checkpoint
    if hparams.save_images:
        utils.checkpoint(x_hats_dict, measurement_losses, l2_losses,
                         save_image, hparams)
        print '\nProcessed and saved all {0} image(s)\n'.format(len(xs_dict))

    if hparams.print_stats:
        for model_type in hparams.model_types:
            print model_type
            mean_m_loss = np.mean(measurement_losses[model_type].values())
            mean_l2_loss = np.mean(l2_losses[model_type].values())
            print 'mean measurement loss = {0}'.format(mean_m_loss)
            print 'mean l2 loss = {0}'.format(mean_l2_loss)

    if hparams.image_matrix > 0:
        utils.image_matrix(xs_dict, x_hats_dict, view_image, hparams)

    # Warn the user that some images were not processed
    if len(x_batch_dict) > 0:
        print '\nDid NOT process last {} images because they did not fill up the last batch.'.format(len(x_batch_dict))
        print 'Consider rerunning lazily with a smaller batch size.'
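
# ---------------------------------------------------------------------------
# Illustration, not part of the pipeline: the outer loop above is projected
# gradient descent on the measurement loss ||y - x A||^2, where the estimator
# plays the role of the projector onto the generator's range. The sketch
# below is a minimal numpy-only version under assumed toy shapes;
# `toy_projector` is a hypothetical stand-in for estimators['dcgan'] and
# simply clips to [0, 1].
# ---------------------------------------------------------------------------
def _outer_loop_sketch():
    rng = np.random.RandomState(0)
    batch_size, n_input, n_measurements = 4, 64, 32
    lr, maxiter = 0.1, 50
    x_true = rng.rand(batch_size, n_input)            # ground-truth "images"
    A = rng.randn(n_input, n_measurements) / np.sqrt(n_measurements)
    y = np.matmul(x_true, A)                          # linear measurements

    def toy_projector(x_est):
        # Hypothetical projector; the real code projects onto the DCGAN range
        return np.clip(x_est, 0.0, 1.0)

    x_main = np.zeros_like(x_true)
    for _ in range(maxiter):
        # Gradient step on ||y - x A||^2, then project
        x_est = x_main + lr * np.matmul(y - np.matmul(x_main, A), A.T)
        x_main = toy_projector(x_est)
    return np.linalg.norm(x_true - x_main) / n_input  # per-pixel error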

def main(hparams):
    # --- Variant 2: one-bit (sign) measurements with a VAE prior ---
    hparams.n_input = np.prod(hparams.image_shape)
    hparams.model_type = 'vae'
    maxiter = hparams.max_outer_iter
    utils.print_hparams(hparams)

    xs_dict = model_input(hparams)  # returns the images
    estimators = utils.get_estimators(hparams)
    utils.setup_checkpointing(hparams)
    measurement_losses, l2_losses = utils.load_checkpoints(hparams)

    x_hats_dict = {'vae': {}}
    x_batch_dict = {}
    for key, x in xs_dict.iteritems():
        x_batch_dict[key] = x  # collect images until a full batch is formed
        if len(x_batch_dict) < hparams.batch_size:
            continue

        # Stack the images into the rows of the input matrix X
        x_coll = [x.reshape(1, hparams.n_input)
                  for _, x in x_batch_dict.iteritems()]
        x_batch = np.concatenate(x_coll)

        # Create the random measurement matrix A
        A_outer = utils.get_outer_A(hparams)
        # Measurement noise (currently unused by the one-bit model below)
        noise_batch = hparams.noise_std * np.random.randn(
            hparams.batch_size, 100)
        # One-bit measurements: multiply X by A, then quantize to signs
        y_batch_outer = np.sign(np.matmul(x_batch, A_outer))

        x_main_batch = 0.0 * x_batch
        # Latent input to the generator of the VAE
        z_opt_batch = np.random.randn(hparams.batch_size, 20)

        for k in range(maxiter):
            # Gradient descent step in x along the sign-mismatch residual
            x_est_batch = x_main_batch + hparams.outer_learning_rate * (
                np.matmul(
                    y_batch_outer - np.sign(np.matmul(x_main_batch, A_outer)),
                    A_outer.T))
            # Projection onto the range of the generator
            estimator = estimators['vae']
            x_hat_batch, z_opt_batch = estimator(x_est_batch, z_opt_batch,
                                                 hparams)
            x_main_batch = x_hat_batch

            # Per-pixel reconstruction distance for monitoring
            # (n_input = 784 for 28x28 MNIST images)
            dist = np.linalg.norm(x_batch - x_main_batch) / hparams.n_input
            print 'outer iteration {0}: per-pixel distance = {1}'.format(k, dist)

        # Dict iteration order here matches the order used to build x_batch
        for i, key in enumerate(x_batch_dict.keys()):
            x = xs_dict[key]
            y = y_batch_outer[i]
            x_hat = x_hat_batch[i]

            # Save the estimate
            x_hats_dict['vae'][key] = x_hat

            # Compute and store measurement and l2 loss
            measurement_losses['vae'][key] = utils.get_measurement_loss(
                x_hat, A_outer, y)
            l2_losses['vae'][key] = utils.get_l2_loss(x_hat, x)

        print 'Processed up to image {0} / {1}'.format(key + 1, len(xs_dict))

        # Checkpointing
        if hparams.save_images and ((key + 1) % hparams.checkpoint_iter == 0):
            utils.checkpoint(x_hats_dict, measurement_losses, l2_losses,
                             save_image, hparams)
            print '\nProcessed and saved first {0} images\n'.format(key + 1)

        x_batch_dict = {}

    # Final checkpoint
    if hparams.save_images:
        utils.checkpoint(x_hats_dict, measurement_losses, l2_losses,
                         save_image, hparams)
        print '\nProcessed and saved all {0} image(s)\n'.format(len(xs_dict))

    if hparams.print_stats:
        for model_type in hparams.model_types:
            print model_type
            mean_m_loss = np.mean(measurement_losses[model_type].values())
            mean_l2_loss = np.mean(l2_losses[model_type].values())
            print 'mean measurement loss = {0}'.format(mean_m_loss)
            print 'mean l2 loss = {0}'.format(mean_l2_loss)

    if hparams.image_matrix > 0:
        utils.image_matrix(xs_dict, x_hats_dict, view_image, hparams)

    # Warn the user that some images were not processed
    if len(x_batch_dict) > 0:
        print '\nDid NOT process last {} images because they did not fill up the last batch.'.format(len(x_batch_dict))
        print 'Consider rerunning lazily with a smaller batch size.'
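
# ---------------------------------------------------------------------------
# Illustration, not part of the pipeline: with one-bit measurements
# y = sign(x A), the update above steps along the sign-mismatch direction
# (y - sign(x A)) A^T, in the spirit of binary iterative hard thresholding,
# but projects onto the VAE's range rather than a sparsity set. Minimal
# numpy-only sketch under assumed toy shapes; note that sign measurements
# discard the norm of x, so only its direction is identifiable.
# ---------------------------------------------------------------------------
def _one_bit_sketch():
    rng = np.random.RandomState(0)
    batch_size, n_input, n_measurements = 4, 64, 256
    lr, maxiter = 0.01, 200
    x_true = rng.rand(batch_size, n_input)
    A = rng.randn(n_input, n_measurements) / np.sqrt(n_measurements)
    y = np.sign(np.matmul(x_true, A))                 # one-bit measurements

    x_main = np.zeros((batch_size, n_input))
    for _ in range(maxiter):
        # Step along the sign-mismatch direction, then "project" (here: clip)
        x_est = x_main + lr * np.matmul(
            y - np.sign(np.matmul(x_main, A)), A.T)
        x_main = np.clip(x_est, 0.0, 1.0)
    # Compare directions, since scale is lost under sign measurements
    num = np.sum(x_true * x_main, axis=1)
    den = (np.linalg.norm(x_true, axis=1) * np.linalg.norm(x_main, axis=1)
           + 1e-12)
    return num / den                                  # per-image cosine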

def main(hparams):
    # --- Variant 3: one-bit (sign) measurements with a DCGAN prior ---
    hparams.n_input = np.prod(hparams.image_shape)
    maxiter = hparams.max_outer_iter
    utils.print_hparams(hparams)

    xs_dict = model_input(hparams)
    estimators = utils.get_estimators(hparams)
    utils.setup_checkpointing(hparams)
    measurement_losses, l2_losses = utils.load_checkpoints(hparams)

    x_hats_dict = {'dcgan': {}}
    x_batch_dict = {}
    for key, x in xs_dict.iteritems():
        x_batch_dict[key] = x
        if len(x_batch_dict) < hparams.batch_size:
            continue

        # Stack the images into the rows of the input matrix X
        x_coll = [x.reshape(1, hparams.n_input)
                  for _, x in x_batch_dict.iteritems()]
        x_batch = np.concatenate(x_coll)

        A_outer = utils.get_outer_A(hparams)

        # 1bitify: quantize the linear measurements to signs
        y_batch_outer = np.sign(np.matmul(x_batch, A_outer))

        x_main_batch = 0.0 * x_batch
        z_opt_batch = np.random.randn(hparams.batch_size, 100)

        for k in range(maxiter):
            # Gradient step in x along the sign-mismatch residual ...
            x_est_batch = x_main_batch + hparams.outer_learning_rate * (
                np.matmul(
                    y_batch_outer - np.sign(np.matmul(x_main_batch, A_outer)),
                    A_outer.T))
            # ... followed by projection onto the range of the generator
            estimator = estimators['dcgan']
            x_hat_batch, z_opt_batch = estimator(x_est_batch, z_opt_batch,
                                                 hparams)
            x_main_batch = x_hat_batch

        # Dict iteration order here matches the order used to build x_batch
        for i, key in enumerate(x_batch_dict.keys()):
            x = xs_dict[key]
            y = y_batch_outer[i]
            x_hat = x_hat_batch[i]

            x_hats_dict['dcgan'][key] = x_hat
            measurement_losses['dcgan'][key] = utils.get_measurement_loss(
                x_hat, A_outer, y)
            l2_losses['dcgan'][key] = utils.get_l2_loss(x_hat, x)

        print 'Processed up to image {0} / {1}'.format(key + 1, len(xs_dict))

        if hparams.save_images and ((key + 1) % hparams.checkpoint_iter == 0):
            utils.checkpoint(x_hats_dict, measurement_losses, l2_losses,
                             save_image, hparams)
            print '\nProcessed and saved first {0} images\n'.format(key + 1)

        x_batch_dict = {}

    if hparams.save_images:
        utils.checkpoint(x_hats_dict, measurement_losses, l2_losses,
                         save_image, hparams)
        print '\nProcessed and saved all {0} image(s)\n'.format(len(xs_dict))

    if hparams.print_stats:
        for model_type in hparams.model_types:
            print model_type
            mean_m_loss = np.mean(measurement_losses[model_type].values())
            mean_l2_loss = np.mean(l2_losses[model_type].values())
            print 'mean measurement loss = {0}'.format(mean_m_loss)
            print 'mean l2 loss = {0}'.format(mean_l2_loss)

    if hparams.image_matrix > 0:
        utils.image_matrix(xs_dict, x_hats_dict, view_image, hparams)

    # Warn the user that some images were not processed
    if len(x_batch_dict) > 0:
        print '\nDid NOT process last {} images because they did not fill up the last batch.'.format(len(x_batch_dict))
        print 'Consider rerunning lazily with a smaller batch size.'
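
# ---------------------------------------------------------------------------
# Hypothetical invocation sketch: the field names below are taken from the
# usages inside main() above; the repository's real entry point, flag names,
# and default values may differ.
# ---------------------------------------------------------------------------
def _example_invocation():
    from argparse import Namespace
    hparams = Namespace(
        image_shape=(64, 64, 3),      # assumed image size
        max_outer_iter=10,            # outer projected-gradient iterations
        outer_learning_rate=0.1,
        batch_size=64,
        save_images=True,
        checkpoint_iter=64,
        print_stats=True,
        model_types=['dcgan'],
        image_matrix=0,
    )
    main(hparams)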