Example #1
        # generator_inputs = tf.identity(optimised_z)
        print('total_loss_Sparse %s' % (total_loss_Sparse))
        print('generator_loss_Sparse %s' % (generator_loss_Sparse))
        print('recont_loss_Sparse %s\n' % (recont_loss_Sparse))
        reconstructions_Sparse = generatorSparse_net(optimised_z_Sparse)
        # reconstructions_Sparse = generatorSparse_net(optimised_z_Sparse,is_training = False)
        sparseDCS_reconstloss_itr.append(recont_loss_Sparse)

        if step % export_every == 0:
            rescont_sparse_file = 'reconstructions_sparse_%d_%d' % (epoch,
                                                                    step)
            data_file = 'data_%d_%d' % (epoch, step)
            # Post-process the data batch and the sparse reconstructions for export.
            data_np = postprocess(x_batch_train)
            reconstructions_np_Sparse = postprocess(reconstructions_Sparse)
            sample_exporter = file_utils.FileExporter(
                os.path.join(output_dir, 'reconstructions_sparse'))
            reconstructions_np_Sparse = tf.reshape(reconstructions_np_Sparse,
                                                   data_np.shape)
            sample_exporter.save(reconstructions_np_Sparse,
                                 rescont_sparse_file)
            sample_exporter.save(data_np, data_file)

        # Deep Compressive Sensing: measure the batch, then optimise the
        # latents so the generator's output matches that measurement.
        with tf.GradientTape() as tape:
            z_i = tf.identity(generator_inputs)
            x_img_reshape = tf.reshape(
                x_batch_copy,
                [-1, tf.shape(x_batch_copy)[1] * tf.shape(x_batch_copy)[2]])
            meas_img = measure_net(x_img_reshape)
            optimised_z = optimise_and_sample(z_i, meas_img, generator_net,
                                              measure_net)
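
Example #1 calls optimise_and_sample(z_i, meas_img, generator_net, measure_net) without showing its body. As a rough sketch only (the helper name, the number of steps, and the step size below are assumptions, not code from the source), latent optimisation in deep compressive sensing typically takes a few gradient steps on z to reduce the measurement error:

import tensorflow as tf


def optimise_and_sample_sketch(z_init, target_meas, generator_net, measure_net,
                               num_steps=3, step_size=0.01):
    # Illustrative only: gradient-descend the latents so that measuring the
    # generated image reproduces the target measurement.
    z = tf.identity(z_init)
    for _ in range(num_steps):
        with tf.GradientTape() as tape:
            tape.watch(z)
            generated = generator_net(z)
            flat = tf.reshape(generated, [tf.shape(generated)[0], -1])
            loss = tf.reduce_mean(tf.square(measure_net(flat) - target_meas))
        z = z - step_size * tape.gradient(loss, z)
    return z

Example #2 below relies on the same idea through utils.optimise_and_sample and the cs.CS model, but in TF1 graph style.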
Example #2
# Imports assumed by this example (TF1-style API plus the project-local helpers
# referenced below; exact module paths may differ in the original project).
import os

from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf

import cs
import file_utils
import utils

tf.disable_v2_behavior()  # graph-mode APIs (Session, hooks) are used below
FLAGS = flags.FLAGS


def main(argv):
    del argv

    utils.make_output_dir(FLAGS.output_dir)
    data_processor = utils.DataProcessor()
    images = utils.get_train_dataset(data_processor, FLAGS.dataset,
                                     FLAGS.batch_size)

    logging.info('Learning rate: %g', FLAGS.learning_rate)

    # Construct optimizers.
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)

    # Create the networks and models.
    generator = utils.get_generator(FLAGS.dataset)
    metric_net = utils.get_metric_net(FLAGS.dataset, FLAGS.num_measurements)

    model = cs.CS(metric_net, generator, FLAGS.num_z_iters, FLAGS.z_step_size,
                  FLAGS.z_project_method)
    prior = utils.make_prior(FLAGS.num_latents)
    generator_inputs = prior.sample(FLAGS.batch_size)

    model_output = model.connect(images, generator_inputs)
    optimization_components = model_output.optimization_components
    debug_ops = model_output.debug_ops
    reconstructions, _ = utils.optimise_and_sample(generator_inputs,
                                                   model,
                                                   images,
                                                   is_training=False)

    global_step = tf.train.get_or_create_global_step()
    update_op = optimizer.minimize(optimization_components.loss,
                                   var_list=optimization_components.vars,
                                   global_step=global_step)

    sample_exporter = file_utils.FileExporter(
        os.path.join(FLAGS.output_dir, 'reconstructions'))

    # Hooks.
    debug_ops['it'] = global_step
    # Abort training on Nans.
    nan_hook = tf.train.NanTensorHook(optimization_components.loss)
    # Step counter.
    step_counter_hook = tf.train.StepCounterHook()

    checkpoint_saver_hook = tf.train.CheckpointSaverHook(
        checkpoint_dir=utils.get_ckpt_dir(FLAGS.output_dir), save_secs=10 * 60)

    loss_summary_saver_hook = tf.train.SummarySaverHook(
        save_steps=FLAGS.summary_every_step,
        output_dir=os.path.join(FLAGS.output_dir, 'summaries'),
        summary_op=utils.get_summaries(debug_ops))

    hooks = [
        checkpoint_saver_hook, nan_hook, step_counter_hook,
        loss_summary_saver_hook
    ]

    if FLAGS.phase == 'train':
        # Start training.
        with tf.train.MonitoredSession(hooks=hooks) as sess:
            logging.info('starting training')

            for i in range(FLAGS.num_training_iterations):
                sess.run(update_op)

                if i % FLAGS.export_every == 0:
                    reconstructions_np, data_np = sess.run(
                        [reconstructions, images])
                    # Post-process the data and reconstructions before saving.
                    data_np = data_processor.postprocess(data_np)
                    reconstructions_np = data_processor.postprocess(
                        reconstructions_np)
                    sample_exporter.save(reconstructions_np, 'reconstructions')
                    sample_exporter.save(data_np, 'data')
    else:
        saver = tf.train.Saver()
        # Start testing
        with tf.Session() as sess:

            init_op = tf.global_variables_initializer()
            sess.run(init_op)

            print(" [*] Reading checkpoint...")
            checkpoint_dir = utils.get_ckpt_dir(FLAGS.output_dir)

            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
                saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))

            reconstructions_np, data_np = sess.run([reconstructions, images])
            # Post-process the data and reconstructions before saving.
            data_np = data_processor.postprocess(data_np)
            reconstructions_np = data_processor.postprocess(reconstructions_np)
            sample_exporter.save(reconstructions_np, 'reconstructions')
            sample_exporter.save(data_np, 'data')
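
main() in Example #2 reads a number of command-line flags that are defined elsewhere in the module. A minimal sketch of those definitions, assuming absl flags and with default values chosen purely for illustration:

from absl import flags

flags.DEFINE_string('output_dir', '/tmp/cs_output', 'Where to write checkpoints and samples.')
flags.DEFINE_string('dataset', 'mnist', 'Dataset name passed to utils.get_train_dataset.')
flags.DEFINE_integer('batch_size', 64, 'Training batch size.')
flags.DEFINE_float('learning_rate', 1e-4, 'Adam learning rate.')
flags.DEFINE_integer('num_measurements', 25, 'Size of the measurement vector.')
flags.DEFINE_integer('num_latents', 100, 'Dimensionality of the latent prior.')
flags.DEFINE_integer('num_z_iters', 3, 'Latent optimisation steps per example.')
flags.DEFINE_float('z_step_size', 0.01, 'Latent optimisation step size.')
flags.DEFINE_string('z_project_method', 'norm', 'How optimised latents are projected back to the prior support.')
flags.DEFINE_integer('num_training_iterations', 200000, 'Total training steps.')
flags.DEFINE_integer('export_every', 1000, 'Export reconstructions every N steps.')
flags.DEFINE_integer('summary_every_step', 100, 'Write summaries every N steps.')
flags.DEFINE_string('phase', 'train', "'train' to train; any other value runs the test branch.")

FLAGS = flags.FLAGS

With flag definitions like these in place, the example can be launched as, e.g., python main.py --phase=train --dataset=mnist (the script name here is hypothetical).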