Example #1
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size, y_dim=10,
                    dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                    dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.load(FLAGS.checkpoint_dir)

        to_json("./web/js/gen_layers.js", dcgan.h0_w, dcgan.h1_w, dcgan.h2_w, dcgan.h3_w, dcgan.h4_w)

        z_sample = np.random.uniform(-1, 1, size=(FLAGS.batch_size, dcgan.z_dim))

        samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
        save_images(samples, [8, 8], './samples/test_%s.png' % strftime("%Y-%m-%d %H:%M:%S", gmtime()))
Example #2
def copy(ntm, seq_length, sess, max_length=50, print_=True):
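    # The first two input dimensions are reserved for the one-hot start/end markers fed below.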
    start_symbol = np.zeros([ntm.cell.input_dim], dtype=np.float32)
    start_symbol[0] = 1
    end_symbol = np.zeros([ntm.cell.input_dim], dtype=np.float32)
    end_symbol[1] = 1

    seq = generate_copy_sequence(seq_length, ntm.cell.input_dim - 2)

    feed_dict = {input_: vec for vec, input_ in zip(seq, ntm.inputs)}
    feed_dict.update(
        {true_output: vec for vec, true_output in zip(seq, ntm.true_outputs)}
    )
    feed_dict.update({
        ntm.start_symbol: start_symbol,
        ntm.end_symbol: end_symbol
    })

    result = sess.run(ntm.get_outputs(seq_length) + [ntm.get_loss(seq_length)], feed_dict=feed_dict)

    outputs = result[:-1]
    loss = result[-1]

    if print_:
        np.set_printoptions(suppress=True)
        print(" true output : ")
        pp.pprint(seq)
        print(" predicted output :")
        pp.pprint(np.round(outputs))
        print(" Loss : %f" % loss)
        np.set_printoptions(suppress=False)
    else:
        return seq, outputs, loss
Example #3
def main(_):
  pp.pprint(flags.FLAGS.__flags)

  if not os.path.exists(FLAGS.checkpoint_dir):
    print(" [*] Creating checkpoint directory...")
    os.makedirs(FLAGS.checkpoint_dir)

  if FLAGS.game_name == "home":
    game = HomeGame(game_dir=FLAGS.game_dir, seq_length=FLAGS.seq_length)
  else:
    raise Exception(" [!] %s not implemented yet" % FLAGS.game_name)

  with tf.device('/cpu:0'):
    model = LSTMDQN(game, checkpoint_dir=FLAGS.checkpoint_dir,
                    seq_length=FLAGS.seq_length,
                    embed_dim=FLAGS.embed_dim,
                    layer_depth=FLAGS.layer_depth,
                    batch_size=FLAGS.batch_size,
                    start_epsilon=FLAGS.start_epsilon,
                    forward_only=FLAGS.forward_only)

    if not FLAGS.forward_only:
      model.train()
    else:
      test_loss = model.test(2)
      print(" [*] Test loss: %2.6f, perplexity: %2.6f" % (test_loss, np.exp(test_loss)))
Example #4
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    if not os.path.exists(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)


    config = tf.ConfigProto(
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9),
        device_count = {'GPU': 1},
        allow_soft_placement=True
        #log_device_placement=True,
    )
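    # Restrict op placement to the first GPU and the CPU via device filters.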
    config.device_filters.append('/gpu:0')
    config.device_filters.append('/cpu:0')

    with tf.Session(config=config) as sess:
        #with tf.device('/gpu:0'):

        autoencoder = Autoencoder(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                dataset_name=FLAGS.dataset, noise=FLAGS.noise, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)

        if FLAGS.is_train:
            autoencoder.train(FLAGS)
        elif FLAGS.is_run:
            autoencoder.run(FLAGS)
        else:
            autoencoder.load(FLAGS.checkpoint_dir)
Example #5
def main(_):
  pp.pprint(flags.FLAGS.__flags)

  if not os.path.exists(FLAGS.checkpoint_dir):
    print(" [*] Creating checkpoint directory...")
    os.makedirs(FLAGS.checkpoint_dir)

  with tf.Session() as sess:
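    # feature_maps and kernels arrive as string flags; eval() turns them into Python lists.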
    model = model_dict[FLAGS.model](sess, checkpoint_dir=FLAGS.checkpoint_dir,
                                    seq_length=FLAGS.seq_length,
                                    word_embed_dim=FLAGS.word_embed_dim,
                                    char_embed_dim=FLAGS.char_embed_dim,
                                    feature_maps=eval(FLAGS.feature_maps),
                                    kernels=eval(FLAGS.kernels),
                                    batch_size=FLAGS.batch_size,
                                    dropout_prob=FLAGS.dropout_prob,
                                    max_word_length=FLAGS.max_word_length,
                                    forward_only=FLAGS.forward_only,
                                    dataset_name=FLAGS.dataset,
                                    use_char=FLAGS.use_char,
                                    use_word=FLAGS.use_word,
                                    data_dir=FLAGS.data_dir)

    if not FLAGS.forward_only:
      model.run(FLAGS.epoch, FLAGS.learning_rate, FLAGS.decay)
    else:
      test_loss = model.test(2)
      print(" [*] Test loss: %2.6f, perplexity: %2.6f" % (test_loss, np.exp(test_loss)))
Example #6
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size, y_dim=10,
                    dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                    dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.load(FLAGS.checkpoint_dir)

        to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
                                      [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
                                      [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
                                      [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
                                      [dcgan.h4_w, dcgan.h4_b, None])

        # Below is code for visualization
        OPTION = 2
        visualize(sess, dcgan, FLAGS, OPTION)
Example #7
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    with tf.device('/cpu:0'), tf.Session() as sess:
        FLAGS.sess = sess

        dcgan = DCGAN(y_dim=10)
Example #8
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    with tf.device('/cpu:0'), tf.Session() as sess:
        if FLAGS.task == 'copy':
            if FLAGS.is_train:
                cell, ntm = copy_train(FLAGS, sess)
            else:
                cell = NTMCell(input_dim=FLAGS.input_dim,
                               output_dim=FLAGS.output_dim,
                               controller_layer_size=FLAGS.controller_layer_size,
                               write_head_size=FLAGS.write_head_size,
                               read_head_size=FLAGS.read_head_size)
                ntm = NTM(cell, sess, 1, FLAGS.max_length,
                          test_max_length=FLAGS.test_max_length, forward_only=True)

            ntm.load(FLAGS.checkpoint_dir, 'copy')

            copy(ntm, FLAGS.test_max_length * 1 // 3, sess)
            print()
            copy(ntm, FLAGS.test_max_length * 2 // 3, sess)
            print()
            copy(ntm, FLAGS.test_max_length * 3 // 3, sess)
        elif FLAGS.task == 'recall':
            pass
Example #9
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    random.seed(31241)
    np.random.seed(41982)
    tf.set_random_seed(1327634)

    color = True  # Change this and the dataset flag to the correct path to use color data
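    # Debug mode appears to use a much smaller train/validation/test split for quick runs.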
    if FLAGS.is_debug:
       reader = Bouncing_Balls_Data_Reader(FLAGS.dataset, FLAGS.batch_size, color=color, train_size=160*5, validation_size=8*5, test_size=8*5, num_partitions=5)
    else:
       reader = Bouncing_Balls_Data_Reader(FLAGS.dataset, FLAGS.batch_size, color=color)

    data_fn = lambda epoch, batch_index: reader.read_data(batch_index, reader.TRAIN)
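    # read_data appears to return (time, batch, height, width, channels), so shape[2:] is the per-frame shape.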
    frame_shape = reader.read_data(0, reader.TRAIN).shape[2:]
    print("Frame shape: ", frame_shape)
    num_batches = reader.num_batches(reader.TRAIN)
    print("Num batches: %d" % num_batches)
    input_sequence_range = range(5, 16)
    print("Input sequence range min: %d, max: %d" % (min(input_sequence_range), max(input_sequence_range)))

    save_sample_fn = utils.gen_save_sample_fn(FLAGS.sample_dir, image_prefix="train")

    with tf.Session() as sess:
        pgn = PGN(sess, FLAGS.dataset_name, FLAGS.epoch, num_batches, FLAGS.batch_size, input_sequence_range,
                  data_fn, frame_shape=frame_shape, save_sample_fn=save_sample_fn, checkpoint_dir=FLAGS.checkpoint_dir,
                  lambda_adv_loss=FLAGS.lambda_adv_loss)

        if FLAGS.is_train:
            pgn.train()
        else:
            print("Loading from: %s" % (FLAGS.checkpoint_dir,))
            if pgn.load(FLAGS.checkpoint_dir):
                print(" [*] Successfully loaded")
            else:
                print(" [!] Load failed")

        if FLAGS.is_test:
            result = test.test(pgn, reader)
            result_str = pp.pformat(result)
            with open(os.path.join(FLAGS.sample_dir, 'test_out.txt'), mode='w') as fid:
                fid.write(result_str)

        if FLAGS.is_visualize:
            # Save prediction GIFs for a few test sequences and plot MSE convergence.
            for i in range(3):
                vid_seq = reader.read_data(i, data_set_type=reader.TEST, batch_size=1)[:, 0, :, :, :]
                utils.make_prediction_gif(pgn, os.path.join(FLAGS.sample_dir, 'vis_%d.gif' % i), video_sequence=vid_seq)
            utils.plot_convergence(pgn.get_MSE_history(), "MSE Convergence",
                                   path=os.path.join(FLAGS.sample_dir, "vis_MSE_convergence.png"))
Example #10
def main(_):
  pp.pprint(flags.FLAGS.__flags)

  if not os.path.exists(FLAGS.checkpoint_dir):
    print(" [*] Creating checkpoint directory...")
    os.makedirs(FLAGS.checkpoint_dir)

  with tf.device('/cpu:0'), tf.Session() as sess:
    model = model_dict[FLAGS.model](batch_size=FLAGS.batch_size, 
        checkpoint_dir=FLAGS.checkpoint_dir, forward_only=FLAGS.forward_only)

    if not FLAGS.forward_only:
      model.train(sess, FLAGS.vocab_size, FLAGS.epoch,
                  FLAGS.learning_rate, FLAGS.momentum, FLAGS.decay,
                  FLAGS.data_dir, FLAGS.dataset)
    else:
      model.load(FLAGS.checkpoint_dir)
Example #11
def main(_):
  pp.pprint(flags.FLAGS.__flags)

  if not os.path.exists(FLAGS.checkpoint_dir):
    os.makedirs(FLAGS.checkpoint_dir)

  Analogy = model_dict[FLAGS.dataset]

  with tf.Session() as sess:
    analogy = Analogy(sess, image_size=FLAGS.image_size, model_type=FLAGS.model_type,
                      batch_size=FLAGS.batch_size, dataset=FLAGS.dataset)

    if FLAGS.is_train:
      analogy.train(max_iter=FLAGS.max_iter, alpha=FLAGS.alpha,
                    learning_rate=FLAGS.learning_rate, checkpoint_dir=FLAGS.checkpoint_dir)
    else:
      analogy.load(FLAGS.checkpoint_dir)

    analogy.test()
Example #12
def main(_):
  pp.pprint(flags.FLAGS.__flags)

  data_path = "./data/%s" % FLAGS.dataset
  reader = TextReader(data_path)

  with tf.Session() as sess:
    m = MODELS[FLAGS.model]
    model = m(sess, reader, dataset=FLAGS.dataset,
              embed_dim=FLAGS.embed_dim, h_dim=FLAGS.h_dim,
              learning_rate=FLAGS.learning_rate, max_iter=FLAGS.max_iter,
              checkpoint_dir=FLAGS.checkpoint_dir)

    if FLAGS.forward_only:
      model.load(FLAGS.checkpoint_dir)
    else:
      model.train(FLAGS)

    while True:
      text = input(" [*] Enter text to test: ")
      model.sample(5, text)
Example #13
def main(_):
  pp.pprint(flags.FLAGS.__flags)

  if not os.path.exists(FLAGS.checkpoint_dir):
    print(" [*] Creating checkpoint directory...")
    os.makedirs(FLAGS.checkpoint_dir)

  with tf.Session() as sess:
    model = LSTMDQN(checkpoint_dir=FLAGS.checkpoint_dir,
                    seq_length=FLAGS.seq_length,
                    embed_dim=FLAGS.embed_dim,
                    layer_depth=FLAGS.layer_depth,
                    batch_size=FLAGS.batch_size,
                    forward_only=FLAGS.forward_only,
                    game_name=FLAGS.game_name,
                    game_dir=FLAGS.game_dir)

    if not FLAGS.forward_only:
      model.run()
    else:
      test_loss = model.test(2)
      print(" [*] Test loss: %2.6f, perplexity: %2.6f" % (test_loss, np.exp(test_loss)))
Example #14
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        dcgan = DCGAN(sess, 
                      dataset=FLAGS.dataset,
                      batch_size=FLAGS.batch_size,
                      output_size=FLAGS.output_size,
                      c_dim=FLAGS.c_dim)

        if FLAGS.is_train:
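            # Either preload the whole dataset into memory or lazily glob the JPEG paths.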
            if FLAGS.preload_data:
                data = get_data_arr(FLAGS)
            else:
                data = glob(os.path.join('./data', FLAGS.dataset, '*.jpg'))
            train.train_wasserstein(sess, dcgan, data, FLAGS)
        else:
            dcgan.load(FLAGS.checkpoint_dir)
Example #15
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    with tf.device('/cpu:0'), tf.Session() as sess:
        FLAGS.sess = sess
    
        if FLAGS.task == 'copy':
            if FLAGS.is_train:
                copy_train(FLAGS)

            cell = NTMCell(input_dim=FLAGS.input_dim, output_dim=FLAGS.output_dim)
            ntm = NTM(cell, sess, 1, FLAGS.max_length,
                      test_max_length=FLAGS.test_max_length, forward_only=True)

            ntm.load(FLAGS.checkpoint_dir, 'copy')

            copy(ntm, FLAGS.test_max_length * 1 // 3, sess)
            print()
            copy(ntm, FLAGS.test_max_length * 2 // 3, sess)
            print()
            copy(ntm, FLAGS.test_max_length * 3 // 3, sess)
        elif FLAGS.task == 'recall':
            pass
Example #16
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session(config=tf.ConfigProto(
              allow_soft_placement=True, log_device_placement=False)) as sess:
        if FLAGS.dataset == 'mnist':
            assert False
        dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                    sample_size=64,
                    z_dim=8192,
                    d_label_smooth=.25,
                    generator_target_prob=.75 / 2.,
                    out_stddev=.075,
                    out_init_b=-.45,
                    image_shape=[FLAGS.image_width, FLAGS.image_width, 3],
                    dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir,
                    sample_dir=FLAGS.sample_dir,
                    generator=Generator(),
                    train_func=train, discriminator_func=discriminator,
                    build_model_func=build_model, config=FLAGS,
                    devices=["gpu:0", "gpu:1", "gpu:2", "gpu:3"]  # , "gpu:4"
                    )

        if FLAGS.is_train:
            print("TRAINING")
            dcgan.train(FLAGS)
            print("DONE TRAINING")
        else:
            dcgan.load(FLAGS.checkpoint_dir)

        OPTION = 2
        visualize(sess, dcgan, FLAGS, OPTION)
Example #17
def main(_):
  pp.pprint(flags.FLAGS.__flags)

  if FLAGS.input_width is None:
    FLAGS.input_width = FLAGS.input_height
  if FLAGS.output_width is None:
    FLAGS.output_width = FLAGS.output_height

  if not os.path.exists(FLAGS.checkpoint_dir):
    os.makedirs(FLAGS.checkpoint_dir)
  if not os.path.exists(FLAGS.sample_dir):
    os.makedirs(FLAGS.sample_dir)

  #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
  run_config = tf.ConfigProto()
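  # allow_growth allocates GPU memory on demand instead of reserving it all upfront.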
  run_config.gpu_options.allow_growth=True

  with tf.Session(config=run_config) as sess:
    if FLAGS.dataset == 'mnist':
      dcgan = DCGAN(
          sess,
          input_width=FLAGS.input_width,
          input_height=FLAGS.input_height,
          output_width=FLAGS.output_width,
          output_height=FLAGS.output_height,
          batch_size=FLAGS.batch_size,
          sample_num=FLAGS.batch_size,
          y_dim=10,
          c_dim=1,
          dataset_name=FLAGS.dataset,
          input_fname_pattern=FLAGS.input_fname_pattern,
          is_crop=FLAGS.is_crop,
          checkpoint_dir=FLAGS.checkpoint_dir,
          sample_dir=FLAGS.sample_dir)
    else:
      dcgan = DCGAN(
          sess,
          input_width=FLAGS.input_width,
          input_height=FLAGS.input_height,
          output_width=FLAGS.output_width,
          output_height=FLAGS.output_height,
          batch_size=FLAGS.batch_size,
          sample_num=FLAGS.batch_size,
          c_dim=FLAGS.c_dim,
          dataset_name=FLAGS.dataset,
          input_fname_pattern=FLAGS.input_fname_pattern,
          is_crop=FLAGS.is_crop,
          checkpoint_dir=FLAGS.checkpoint_dir,
          sample_dir=FLAGS.sample_dir)

    show_all_variables()
    if FLAGS.is_train:
      dcgan.train(FLAGS)
    else:
      if not dcgan.load(FLAGS.checkpoint_dir):
        raise Exception("[!] Train a model first, then run test mode")
      

    # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
    #                 [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
    #                 [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
    #                 [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
    #                 [dcgan.h4_w, dcgan.h4_b, None])

    # Below is code for visualization
    OPTION = 1
    visualize(sess, dcgan, FLAGS, OPTION)
Example #18
def main(_):
    pp.pprint(flags.FLAGS.__flags)
Example #19
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          y_dim=10,
                          c_dim=1,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir)
        else:
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          c_dim=FLAGS.c_dim,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir)

        show_all_variables()
        if FLAGS.is_train:
            dcgan.train(FLAGS)
            # Below is code for visualization
            OPTION = 1  # generate 100 test images
            visualize(sess, dcgan, FLAGS, OPTION)
        else:
            # if not dcgan.load(FLAGS.checkpoint_dir):
            #   raise Exception("[!] Train a model first, then run test mode")
            # else:
            # iteration = 10000
            # project_x_to_z(dcgan, iteration, sess, FLAGS)

            projector = Sketch2Image(dcgan, FLAGS)
            projector.build_model()
            projector.train(iteration=100)
Example #20
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    ##########AYAD###################################
    data, labels = readMiasDATA()
    #data = readFaces()
    options = {
        "cropping": "seq",
        "crop_step": 8,
        "crop_skip": 0.031,
        "crop_size": (FLAGS.input_width, FLAGS.input_width)
    }
    mias = MIAS(data, None, [1, 0, 0], False, ["NORM"], options)
    GANoptions = {
        'noise size': 100,
        'noise type': 'normal',
        'label smooth': True
    }
    ##########AYAD###################################
    print("entering sesh")
    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          y_dim=10,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir)
        else:
            print("going into else")
            dcgan = DCGAN(sess,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          input_class=mias,
                          GANoptions=GANoptions)  #AYAD

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
        #         [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
        #         [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
        #         [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
        #         [dcgan.h4_w, dcgan.h4_b, None])

        # Below is code for visualization
        OPTION = 1
        visualize(sess, dcgan, FLAGS, OPTION)
Example #21
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    #memory allocation
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True
    #run_config.gpu_options.per_process_gpu_memory_fraction = 0.4

    # tf.Session: a class for running TensorFlow operations.
    # config=run_config applies the configuration built above to this session.
    with tf.Session(config=run_config) as sess:
        #initialization
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          test_batch_size=FLAGS.test_batch_size,
                          sample_num=FLAGS.batch_size,
                          y_dim=10,
                          z_dim=FLAGS.generate_test_images,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          test_dir=FLAGS.test_dir)
        else:
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          test_batch_size=FLAGS.test_batch_size,
                          sample_num=FLAGS.batch_size,
                          z_dim=FLAGS.generate_test_images,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          test_dir=FLAGS.test_dir)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        if FLAGS.anomaly_test:
            dcgan.anomaly_detector()
            assert len(dcgan.test_data_names) > 0
            fp = open('anomaly_score_record.txt', 'a+')
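            # Score every test image with the anomaly detector, one input at a time.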
            #for idx in range(2):
            for idx in range(len(dcgan.test_data_names)):
                test_input = np.expand_dims(dcgan.test_data[idx], axis=0)
                test_name = dcgan.test_data_names[idx]
                dcgan.train_anomaly_detector(FLAGS, test_input, test_name)
            # assert: Python evaluates the expression and raises AssertionError if it is false
            # image = np.expand_dims(image, <your desired dimension>)
        # Below is code for visualization
        OPTION = 1
        visualize(sess, dcgan, FLAGS, OPTION)
Example #22
    def train(self, config, prin):
        """Train netmodel"""
        print("Initializing all variables...")

        init = tf.global_variables_initializer()
        self.sess.run(init)
        self.sess.graph.finalize()

        counter = 1
        start_time = time.time()
        pp.pprint(prin)
        if self.load(self.checkpoint_dir, config.model_dir):
            print(" [*] Load success...")
        else:
            print(" [!] Load failed...")

        # if self.load1('some', config.model_dir):
        #     print("[*] load success...")
        # else:
        #     print("[*] load failed....")

    ############################################## load Train data ############################################
        print(" [*] Loading train data...")
        train_Inputs1, train_Labels1, train_Inputs2, train_Labels2 , train_Inputs3, train_Labels3, train_Inputs4, train_Labels4 = self.prepare_data('train')#train  Set10
        train_size = train_Inputs1.shape[2]
        image_size = train_Inputs1.shape[1]
        batch_index = list(range(train_size))
        print("[INFO] train dataset size: %s, image size: %d x %d" % (str(train_size * config.num_gpus), image_size, image_size))

    ############################################# load validate data ############################################
        print(" [*] Loading validation data...")
        self.val_Inputs1, self.val_Labels1, self.val_Inputs2, self.val_Labels2 , self.val_Inputs3, self.val_Labels3, self.val_Inputs4, self.val_Labels4 = self.prepare_data('validate')#validate  Set10
        validate_size  = self.val_Inputs1.shape[2]
        val_image_size = self.val_Inputs1.shape[1]
        val_batch_size = config.batch_size
        validate_index = list(range(validate_size))
        random.shuffle(validate_index)
        validate_idxs = validate_size // val_batch_size
        print("[INFO] validation dataset size: %s, image size: %d x %d" % (str(validate_size * config.num_gpus), val_image_size, val_image_size))

        trn_counter = 0
        LOSS_trn = 0
        learning_rate = config.learning_rate
        ###############################################################################
        for epoch in range(config.epoch):
            random.shuffle(batch_index)
            batch_idxs = min(train_size, config.train_size) // config.batch_size
            save_step = np.array(batch_idxs// 4).astype(int)

            if epoch % 3 == 0:
                learning_rate = config.learning_rate * 0.5 ** (epoch // 3)

            # if epoch ==3:
            #     learning_rate = config.learning_rate * 0.5
            # elif epoch ==7:
            #     learning_rate = config.learning_rate * 0.5**2
            #
            # elif epoch ==11:
            #     learning_rate = config.learning_rate * 0.5**3
            #
            # elif epoch == 16:
            #     learning_rate = config.learning_rate * 0.5 ** 3

            for idx in range(0, batch_idxs):


  ############################################## Prepare Train data ############################################
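                # Slice this batch's indices, reorder each block to batch-first, and append a trailing channel axis.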
                Train_batch_input1 = np.transpose(train_Inputs1[:, :, batch_index[idx*config.batch_size:(idx+1)*config.batch_size]],[2, 1, 0])
                Train_batch_input1 = np.reshape(Train_batch_input1, list(Train_batch_input1.shape) + [1])
                Train_batch_label1 = np.transpose(train_Labels1[:, :, batch_index[idx*config.batch_size:(idx+1)*config.batch_size]],[2, 1, 0])
                Train_batch_label1 = np.reshape(Train_batch_label1, list(Train_batch_label1.shape) + [1])
                Train_batch_input2 = np.transpose(train_Inputs2[:, :, batch_index[idx*config.batch_size:(idx+1)*config.batch_size]],[2, 1, 0])
                Train_batch_input2 = np.reshape(Train_batch_input2, list(Train_batch_input2.shape) + [1])
                Train_batch_label2 = np.transpose(train_Labels2[:, :, batch_index[idx*config.batch_size:(idx+1)*config.batch_size]],[2, 1, 0])
                Train_batch_label2 = np.reshape(Train_batch_label2, list(Train_batch_label2.shape) + [1])

                Train_batch_input3 = np.transpose(train_Inputs3[:, :, batch_index[idx*config.batch_size:(idx+1)*config.batch_size]],[2, 1, 0])
                Train_batch_input3 = np.reshape(Train_batch_input3, list(Train_batch_input3.shape) + [1])
                Train_batch_label3 = np.transpose(train_Labels3[:, :, batch_index[idx*config.batch_size:(idx+1)*config.batch_size]],[2, 1, 0])
                Train_batch_label3 = np.reshape(Train_batch_label3, list(Train_batch_label3.shape) + [1])
                Train_batch_input4 = np.transpose(train_Inputs4[:, :, batch_index[idx*config.batch_size:(idx+1)*config.batch_size]],[2, 1, 0])
                Train_batch_input4 = np.reshape(Train_batch_input4, list(Train_batch_input4.shape) + [1])
                Train_batch_label4 = np.transpose(train_Labels4[:, :, batch_index[idx*config.batch_size:(idx+1)*config.batch_size]],[2, 1, 0])
                Train_batch_label4 = np.reshape(Train_batch_label4, list(Train_batch_label4.shape) + [1])

                fd_trn = {self.lr: learning_rate,
                          self.input1: Train_batch_input1, self.label1: Train_batch_label1,
                          self.input2: Train_batch_input2, self.label2: Train_batch_label2,
                          self.input3: Train_batch_input3, self.label3: Train_batch_label3,
                          self.input4: Train_batch_input4, self.label4: Train_batch_label4}
                _ = self.sess.run(self.train_op, feed_dict=fd_trn)
                nmse_train = self.sess.run(self.avg_loss, feed_dict=fd_trn)
                rate = self.sess.run(self.learning_rate, feed_dict=fd_trn)
                # loss_train_all = self.sess.run(self.merged, feed_dict=fd_trn)
                # self.train_all_writer.add_summary(loss_train_all, counter)


                LOSS_trn = LOSS_trn + nmse_train
                trn_counter = trn_counter + 1
                counter += 1

            ########################## Validation run ##########################
                # if counter % 5000 == 0:  # 5000
                if counter % save_step == 0:

                    avg_LOSS_validate = 0
                    for val_idx in range(0, validate_idxs):
                        Validate_batch_input1 = np.transpose(self.val_Inputs1[:, :,validate_index[ val_idx*val_batch_size:(val_idx+1) * val_batch_size]],[2, 1, 0])
                        Validate_batch_input1 = np.reshape(Validate_batch_input1,list(Validate_batch_input1.shape) + [1])
                        Validate_batch_label1 = np.transpose(self.val_Labels1[:, :,validate_index[ val_idx*val_batch_size:(val_idx+1) * val_batch_size]],[2, 1, 0])
                        Validate_batch_label1 = np.reshape(Validate_batch_label1,list(Validate_batch_label1.shape) + [1])
                        Validate_batch_input2 = np.transpose(self.val_Inputs2[:, :,validate_index[ val_idx*val_batch_size:(val_idx+1) * val_batch_size]],[2, 1, 0])
                        Validate_batch_input2 = np.reshape(Validate_batch_input2,list(Validate_batch_input2.shape) + [1])
                        Validate_batch_label2 = np.transpose(self.val_Labels2[:, :,validate_index[ val_idx*val_batch_size:(val_idx+1) * val_batch_size]],[2, 1, 0])
                        Validate_batch_label2 = np.reshape(Validate_batch_label2,list(Validate_batch_label2.shape) + [1])

                        Validate_batch_input3 = np.transpose(self.val_Inputs3[:, :,validate_index[ val_idx*val_batch_size:(val_idx+1) * val_batch_size]],[2, 1, 0])
                        Validate_batch_input3 = np.reshape(Validate_batch_input3,list(Validate_batch_input3.shape) + [1])
                        Validate_batch_label3 = np.transpose(self.val_Labels3[:, :,validate_index[ val_idx*val_batch_size:(val_idx+1) * val_batch_size]],[2, 1, 0])
                        Validate_batch_label3 = np.reshape(Validate_batch_label3,list(Validate_batch_label3.shape) + [1])
                        Validate_batch_input4 = np.transpose(self.val_Inputs4[:, :,validate_index[ val_idx*val_batch_size:(val_idx+1) * val_batch_size]],[2, 1, 0])
                        Validate_batch_input4 = np.reshape(Validate_batch_input4,list(Validate_batch_input4.shape) + [1])
                        Validate_batch_label4 = np.transpose(self.val_Labels4[:, :,validate_index[ val_idx*val_batch_size:(val_idx+1) * val_batch_size]],[2, 1, 0])
                        Validate_batch_label4 = np.reshape(Validate_batch_label4,list(Validate_batch_label4.shape) + [1])

                        fd_val = {self.lr: learning_rate,
                                  self.input1: Validate_batch_input1, self.label1: Validate_batch_label1,
                                  self.input2: Validate_batch_input2, self.label2: Validate_batch_label2,
                                  self.input3: Validate_batch_input3, self.label3: Validate_batch_label3,
                                  self.input4: Validate_batch_input4, self.label4: Validate_batch_label4}
                        # _ = self.sess.run(self.train_op, feed_dict=fd_val) # if train, can run the step
                        nmse_validate = self.sess.run(self.avg_loss, feed_dict=fd_val)
                        avg_LOSS_validate = avg_LOSS_validate + nmse_validate


                    # loss_train = self.sess.run(self.merged, feed_dict=fd_trn)
                    # self.train_writer.add_summary(loss_train, counter)
                    # loss_val = self.sess.run(self.merged, feed_dict=fd_val)
                    # self.val_writer.add_summary(loss_val, counter)

                    avg_LOSS_validate = avg_LOSS_validate / validate_idxs
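                    # Scale the normalized MSE back to the 0-255 pixel range and convert to PSNR in dB.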
                    avg_MSE_validate = avg_LOSS_validate * np.square(255.0)
                    avg_PSNR_validate = 20.0 * np.log10(255.0 / np.sqrt(avg_MSE_validate))

                    avg_loss_trn = LOSS_trn / trn_counter
                    avg_MSE_trn = avg_loss_trn * np.square(255.0)
                    avg_PSNR_trn = 20.0 * np.log10(255.0 / np.sqrt(avg_MSE_trn))
                    trn_counter = 0
                    LOSS_trn = 0
                    print("Epoch: [%3d] [%4d/%4d][%7d] time: %10.4f, lr: %1.8f "
                          "PSNR_trn: %2.4f, PSNR_val: %2.4f, loss_trn: %.8f, loss_val: %.8f"
                          % (epoch + 1, idx + 1, batch_idxs, counter, time.time() - start_time,
                             rate, avg_PSNR_trn, avg_PSNR_validate, avg_loss_trn, avg_LOSS_validate))
                    # if counter % 5000 == 0:
                    self.save(config.checkpoint_dir, counter, config.model_dir)
Example #23
def main(_):
    width_size = 905
    height_size = 565
    #width_size = 1104
    #height_size = 764
    #width_size = 1123
    #height_size = 900
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    if not os.path.exists('./logs_multifreq_skip3'):
        os.makedirs('./logs_multifreq_skip3')
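    # FLAGS.gpu is used as the fraction of GPU memory this process may claim.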
    gpu_config = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu)
    #with tf.Session() as sess:
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_config)) as sess:
        if FLAGS.is_train:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                          num_block=FLAGS.num_block, dataset_name=FLAGS.dataset,
                          is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = EVAL(sess, batch_size=1, num_block=FLAGS.num_block, ir_image_shape=[None, None, 1],
                         dataset_name=FLAGS.dataset, is_crop=False, checkpoint_dir=FLAGS.checkpoint_dir)
            print('deep model test\n')

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
            print('1: Estimate normal maps from arbitrary objects\n')
            print('2: Estimate normal maps for object tilt angles only (light direction fixed, e.g. 3)\n')
            print('3: Estimate normal maps for all light directions and object tilt angles\n')
            x = input('Select an evaluation mode: ')
            VAL_OPTION = int(x)

            if VAL_OPTION == 1:  # arbitrary dataset
                print("Computing arbitrary dataset")
                trained_models = glob.glob(
                    os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset,
                                 'DCGAN.model*'))
                trained_models = natsorted(trained_models)
                model = trained_models[6]
                model = model.split('/')
                model = model[-1]
                print('Load trained network: %s\n' % model)
                dcgan.load(FLAGS.checkpoint_dir, model)
                datapath = '/research3/datain/gmchoe_normal/0403/IR_0.25'
                #datapath = '/research3/dataout/ECCV_2018/2130/centerview'
                #datapath = '/research2/proposal_linux/dataset/coin'
                savepath = datapath
                mean_nir = -0.3313
                img_files = glob.glob(os.path.join(datapath, '*.png'))
                img_files = natsorted(img_files)
                #listdir = natsorted(os.listdir(datapath))
                #fulldatapath = natsorted(fulldatapath)
                for idx in range(0, len(img_files)):
                    print('Processing %d/%d\n' % (idx + 1, len(img_files)))
                    #img_file = glob.glob(os.path.join(datapath,'nir.jpg'))
                    #img_file = glob.glob(os.path.join(datapath,listdir[idx]))
                    input_ = scipy.misc.imread(img_files[idx],
                                               'F').astype(float)
                    height_size = input_.shape[0]
                    width_size = input_.shape[1]
                    #input_ = scipy.misc.imresize(input_,[565,905])
                    input_ = np.reshape(
                        input_,
                        (height_size, width_size, 1))  # LF size:383 x 552
                    #input_ = np.power(input_,0.6)
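                    # Split the image into a Gaussian-smoothed base layer and a residual detail layer, each normalized to [-1, 1].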
                    nondetail_input_ = ndimage.gaussian_filter(input_,
                                                               sigma=(1, 1, 0),
                                                               order=0)
                    input_ = input_ / 127.5 - 1.0
                    nondetail_input_ = nondetail_input_ / 127.5 - 1.0  # normalize -1 ~1
                    detail_input_ = input_ - nondetail_input_
                    nondetail_input_ = np.reshape(
                        nondetail_input_,
                        (1, height_size, width_size, 1))  # LF size:383 x 552
                    detail_input_ = np.reshape(detail_input_,
                                               (1, height_size, width_size, 1))
                    #detail_input_  = detail_input_/127.5 -1.0 # normalize -1 ~1
                    start_time = time.time()
                    sample = sess.run(dcgan.G,
                                      feed_dict={
                                          dcgan.nondetail_images:
                                          nondetail_input_,
                                          dcgan.detail_images: detail_input_
                                      })
                    print('time: %.8f' % (time.time() - start_time))
                    sample = np.squeeze(sample).astype(np.float32)

                    # normalization #
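                    # Divide by the per-pixel L2 norm to get unit normals, then map [-1, 1] to [0, 1] for saving.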
                    output = np.sqrt(np.sum(np.power(sample, 2), axis=2))
                    output = np.expand_dims(output, axis=-1)
                    output = sample / output
                    output = (output + 1.) / 2.
                    # if not os.path.exists(os.path.join(savepath, '%s/%s/%s' % (FLAGS.dataset, model, listdir[idx]))):
                    #     os.makedirs(os.path.join(savepath, '%s/%s/%s' % (FLAGS.dataset, model, listdir[idx])))
                    savename = os.path.join(
                        savepath, 'result/%s.bmp' % (img_files[idx][-10:]))
                    #savename = os.path.join(savepath,'single_normal_%02d.bmp' % (idx+1))
                    #savename = os.path.join(savepath,'%s/%s/%s/single_normal.bmp' % (FLAGS.dataset,model,listdir[idx]))
                    scipy.misc.imsave(savename, output)

            elif VAL_OPTION == 2:  # light source fixed
                list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
                #save_files = glob.glob(os.path.join(FLAGS.checkpoint_dir,FLAGS.dataset,'DCGAN.model*'))
                #save_files  = natsorted(save_files)
                load, iteration = dcgan.load(FLAGS.checkpoint_dir)
                savepath = './result/skip3_result/Light3/L2ang/%06d' % iteration
                if load:
                    for idx in range(len(list_val)):
                        if not os.path.exists(
                                os.path.join(savepath,
                                             '%03d' % list_val[idx])):
                            os.makedirs(
                                os.path.join(savepath, '%03d' % list_val[idx]))
                        for idx2 in range(1, 10):
                            print("Selected material %03d/%d" %
                                  (list_val[idx], idx2))
                            img = '/research2/IR_normal_small/save%03d/%d' % (
                                list_val[idx], idx2)
                            input_ = scipy.misc.imread(img +
                                                       '/3.bmp').astype(float)
                            #gt_ = scipy.misc.imread('/research2/IR_normal_small/save016/1/12_Normal.bmp').astype(float)
                            input_ = scipy.misc.imresize(input_, [600, 800])
                            input_ = np.reshape(input_, (600, 800, 1))

                            nondetail_input_ = ndimage.median_filter(input_,
                                                                     size=(3, 3, 3))
                            #nondetail_input_ = ndimage.gaussian_filter(input_,sigma=(1,1,0),order=0)
                            input_ = input_ / 127.5 - 1.0
                            nondetail_input_ = nondetail_input_ / 127.5 - 1.0  # normalize -1 ~1
                            detail_input_ = input_ - nondetail_input_
                            nondetail_input_ = np.reshape(
                                nondetail_input_, (1, 600, 800, 1))
                            detail_input_ = np.reshape(detail_input_,
                                                       (1, 600, 800, 1))
                            start_time = time.time()
                            sample = sess.run(
                                [dcgan.G],
                                feed_dict={
                                    dcgan.nondetail_images: nondetail_input_,
                                    dcgan.detail_images: detail_input_
                                })
                            print('time: %.8f' % (time.time() - start_time))
                            # normalization #
                            sample = np.squeeze(sample).astype(np.float32)
                            output = np.zeros((600, 800, 3)).astype(np.float32)
                            output[:, :, 0] = sample[:, :, 0] / (np.sqrt(
                                np.power(sample[:, :, 0], 2) +
                                np.power(sample[:, :, 1], 2) +
                                np.power(sample[:, :, 2], 2)))
                            output[:, :, 1] = sample[:, :, 1] / (np.sqrt(
                                np.power(sample[:, :, 0], 2) +
                                np.power(sample[:, :, 1], 2) +
                                np.power(sample[:, :, 2], 2)))
                            output[:, :, 2] = sample[:, :, 2] / (np.sqrt(
                                np.power(sample[:, :, 0], 2) +
                                np.power(sample[:, :, 1], 2) +
                                np.power(sample[:, :, 2], 2)))

                            output[output == np.inf] = 0.0
                            sample = (output + 1.) / 2.
                            if not os.path.exists(
                                    os.path.join(
                                        savepath, '%03d/%d' %
                                        (list_val[idx], idx2))):
                                os.makedirs(
                                    os.path.join(
                                        savepath,
                                        '%03d/%d' % (list_val[idx], idx2)))
                            savename = os.path.join(
                                savepath, '%03d/%d/single_normal.bmp' %
                                (list_val[idx], idx2))
                            scipy.misc.imsave(savename, sample)
                else:
                    print("Failed to load network")
            elif VAL_OPTION == 3:  # depends on light sources
                list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
                mean_nir = -0.3313  #-1~1
                load, iteration = dcgan.load(FLAGS.checkpoint_dir)
                savepath = './result/skip3_result/allviews/L2ang/%06d' % iteration
                if not os.path.exists(os.path.join(savepath)):
                    os.makedirs(os.path.join(savepath))
                if load:
                    print(" Load Success")
                    for idx in range(len(list_val)):
                        if not os.path.exists(
                                os.path.join(savepath,
                                             '%03d' % list_val[idx])):
                            os.makedirs(
                                os.path.join(savepath, '%03d' % list_val[idx]))
                        for idx2 in range(1, 10):  #tilt angles 1~9
                            for idx3 in range(1, 13):  # light source
                                print("Selected material %03d/%d" %
                                      (list_val[idx], idx2))
                                img = '/research2/IR_normal_small/save%03d/%d' % (
                                    list_val[idx], idx2)
                                input_ = scipy.misc.imread(
                                    img + '/%d.bmp' % idx3).astype(
                                        np.float32)  #input NIR image
                                input_ = scipy.misc.imresize(
                                    input_, [600, 800], 'nearest')
                                input_ = np.reshape(input_, (600, 800, 1))

                                nondetail_input_ = ndimage.gaussian_filter(
                                    input_, sigma=(1, 1, 0), order=0)
                                input_ = input_ / 127.5 - 1.0
                                nondetail_input_ = nondetail_input_ / 127.5 - 1.0  # normalize -1 ~1
                                detail_input_ = input_ - nondetail_input_
                                nondetail_input_ = np.reshape(
                                    nondetail_input_, (1, 600, 800, 1))
                                detail_input_ = np.reshape(
                                    detail_input_, (1, 600, 800, 1))
                                start_time = time.time()
                                sample = sess.run(
                                    [dcgan.G],
                                    feed_dict={
                                        dcgan.nondetail_images:
                                        nondetail_input_,
                                        dcgan.detail_images: detail_input_
                                    })
                                sample = np.squeeze(sample[-1]).astype(
                                    np.float32)

                                print('time: %.8f' %
                                      (time.time() - start_time))
                                # normalization #
                                output = np.sqrt(
                                    np.sum(np.power(sample, 2), axis=2))
                                output = np.expand_dims(output, axis=-1)
                                output = sample / output
                                output = (output + 1.) / 2.
                                if not os.path.exists(
                                        os.path.join(
                                            savepath, '%s/%03d/%d' %
                                            (FLAGS.dataset, list_val[idx],
                                             idx2))):
                                    os.makedirs(
                                        os.path.join(
                                            savepath, '%s/%03d/%d' %
                                            (FLAGS.dataset, list_val[idx],
                                             idx2)))
                                savename = os.path.join(
                                    savepath,
                                    '%s/%03d/%d/single_normal_%03d.bmp' %
                                    (FLAGS.dataset, list_val[idx], idx2, idx3))
                                scipy.misc.imsave(savename, output)

                else:
                    print("Failed to load network")
Example #24
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(SAMPLE_DIR):
        os.makedirs(SAMPLE_DIR)

    # If the specified checkpoint_dir exists, copy its contents to the internally used checkpoint dir
    if os.path.exists(FLAGS.checkpoint_dir):
        shutil.copytree(FLAGS.checkpoint_dir, INTERNAL_CHECKPOINT_DIR)
    elif not os.path.exists(INTERNAL_CHECKPOINT_DIR):
        os.makedirs(INTERNAL_CHECKPOINT_DIR)

    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:

        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                y_dim=10,
                z_dim=FLAGS.generate_test_images,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=INTERNAL_CHECKPOINT_DIR,
                sample_dir=SAMPLE_DIR,
                data_dir=DATA_DIR,
            )
        else:
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                z_dim=FLAGS.generate_test_images,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=INTERNAL_CHECKPOINT_DIR,
                sample_dir=SAMPLE_DIR,
                data_dir=DATA_DIR,
            )

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(INTERNAL_CHECKPOINT_DIR)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        # Visualisation
        OPTION = 1
        visualize(sess, dcgan, FLAGS, OPTION)

        # Copy checkpoints to floydhub output for potential reuse
        shutil.copytree(INTERNAL_CHECKPOINT_DIR,
                        '{}/checkpoint'.format(SAMPLE_DIR))
Example #25
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    FLAGS.data_dir = expand_path(FLAGS.data_dir)
    FLAGS.out_dir = expand_path(FLAGS.out_dir)
    FLAGS.out_name = expand_path(FLAGS.out_name)
    FLAGS.checkpoint_dir = expand_path(FLAGS.checkpoint_dir)
    FLAGS.sample_dir = expand_path(FLAGS.sample_dir)

    if FLAGS.output_height is None: FLAGS.output_height = FLAGS.input_height
    if FLAGS.input_width is None: FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None: FLAGS.output_width = FLAGS.output_height

    if FLAGS.out_name == "":
        FLAGS.out_name = '{} - {} - {}'.format(timestamp(),
                                               FLAGS.data_dir.split('/')[-1],
                                               FLAGS.dataset)
        if FLAGS.train:
            FLAGS.out_name += ' - x{}.z{}.{}.y{}.b{}'.format(
                FLAGS.input_width, FLAGS.z_dim, FLAGS.z_dist,
                FLAGS.output_width, FLAGS.batch_size)

    #FLAGS.out_dir = os.path.join(FLAGS.out_dir, FLAGS.out_name)
    #FLAGS.checkpoint_dir = os.path.join(FLAGS.out_dir, FLAGS.checkpoint_dir)
    #FLAGS.sample_dir = os.path.join(FLAGS.out_dir, FLAGS.sample_dir)

    if not os.path.exists(FLAGS.out_dir): os.makedirs(FLAGS.out_dir)
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir): os.makedirs(FLAGS.sample_dir)

    with open(os.path.join(FLAGS.out_dir, 'FLAGS.json'), 'w') as f:
        flags_dict = {k: FLAGS[k].value for k in FLAGS}
        json.dump(flags_dict, f, indent=4, sort_keys=True, ensure_ascii=False)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        dcgan = DCGAN(sess,
                      input_width=FLAGS.input_width,
                      input_height=FLAGS.input_height,
                      output_width=FLAGS.output_width,
                      output_height=FLAGS.output_height,
                      batch_size=FLAGS.batch_size,
                      sample_num=FLAGS.batch_size,
                      z_dim=FLAGS.z_dim,
                      dataset_name=FLAGS.dataset,
                      input_fname_pattern=FLAGS.input_fname_pattern,
                      crop=FLAGS.crop,
                      checkpoint_dir=FLAGS.checkpoint_dir,
                      sample_dir=FLAGS.sample_dir,
                      data_dir=FLAGS.data_dir,
                      out_dir=FLAGS.out_dir,
                      max_to_keep=FLAGS.max_to_keep)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            load_success, load_counter = dcgan.load(FLAGS.checkpoint_dir)
            if not load_success:
                raise Exception("checkpoint not found in " +
                                FLAGS.checkpoint_dir)

            if FLAGS.export:
                export_dir = os.path.join(FLAGS.checkpoint_dir,
                                          'export_b' + str(FLAGS.batch_size))
                dcgan.save(export_dir, load_counter, ckpt=True, frozen=False)

            if FLAGS.freeze:
                export_dir = os.path.join(FLAGS.checkpoint_dir,
                                          'frozen_b' + str(FLAGS.batch_size))
                dcgan.save(export_dir, load_counter, ckpt=False, frozen=True)

            if FLAGS.visualize:
                OPTION = 1
                visualize(sess, dcgan, FLAGS, OPTION, FLAGS.sample_dir)

        sess.close()
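expand_path is called on every path flag above but is not shown in this listing. A minimal sketch, assuming it does nothing more than resolve "~", environment variables, and relative segments into an absolute path:

# Hypothetical expand_path matching its usage above: expand "~" and
# $VARIABLES, then normalize to an absolute path.
import os

def expand_path(path):
    return os.path.abspath(os.path.expandvars(os.path.expanduser(path)))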
Exemple #26
0
def main(_):
    """
    The main function for training steps
    """
    pp.pprint(flags.FLAGS.__flags)
    n_per_itr_print_results = 100
    kb_work_on_patch = True

    # ---------------------------------------------------------------------------------------------
    # ---------------------------------------------------------------------------------------------
    # Manual Switches -----------------------------------------------------------------------------
    # ---------------------------------------------------------------------------------------------
    # DATASET PARAMETER : UCSD
    #FLAGS.dataset = 'UCSD'
    #FLAGS.dataset_address = './dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Train'

    nd_input_frame_size = (240, 360)
    nd_slice_size = (45, 45)
    n_stride = 25
    n_fetch_data = 600
    # ---------------------------------------------------------------------------------------------
    # # DATASET PARAMETER : MNIST
    # FLAGS.dataset = 'mnist'
    # FLAGS.dataset_address = './dataset/mnist'
    # nd_input_frame_size = (28, 28)
    # nd_slice_size = (28, 28)

    FLAGS.train = True

    FLAGS.input_width = nd_slice_size[0]
    FLAGS.input_height = nd_slice_size[1]
    FLAGS.output_width = nd_slice_size[0]
    FLAGS.output_height = nd_slice_size[1]

    FLAGS.sample_dir = 'export/' + FLAGS.dataset + '_%d.%d' % (
        nd_slice_size[0], nd_slice_size[1])
    FLAGS.input_fname_pattern = '*'

    check_some_assertions()

    # manual handling of GPU
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        tmp_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            is_training=FLAGS.train,
            log_dir=FLAGS.log_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_slice_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
            n_fetch_data=n_fetch_data)

        #show_all_variables()

        if FLAGS.train:
            print('Program is on Train Mode')
            tmp_model.train(FLAGS)
        else:
            print('Program is on Test Mode')
            if not tmp_model.load(FLAGS.checkpoint_dir)[0]:
                raise Exception(
                    "[!] Train a model first, then run test mode from file test.py"
                )
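The "manual handling of GPU" lines above cap the process at 90% of GPU memory while still letting the allocation grow on demand. The same TF1 configuration in isolation, as a minimal sketch:

# TF1 session config: cap the process at 90% of GPU memory, but
# allocate lazily instead of grabbing the cap up front.
import tensorflow as tf

gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9,
                            allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    pass  # build and run the model here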
Exemple #27
0
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        if 'cond' in FLAGS.dataset:
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          y_dim=128,
                          z_dim=FLAGS.generate_test_images,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir)
        else:
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          z_dim=FLAGS.generate_test_images,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")
            data = np.load('bird_mappings.npy')
            #      coco = COCO('../coco/annotations/captions_train2014.json')
            #      print('Preprocessing COCO data...')
            #      imgIds = [x[1] for x in data]
            #      caps = coco.loadAnns(ids=coco.getAnnIds(imgIds=imgIds))
            #      true_img_ids = []
            #      for cap in caps:
            #          lst = cap['caption'].split(' ')
            #          if 'car' in lst or 'truck' in lst:
            #              true_img_ids.append(cap['image_id'])
            #
            #      true_img_ids = np.array(true_img_ids)
            #      true_img_ids = np.unique(true_img_ids)
            ##      all_imgs = {img_id: get_image(img_id,
            ##                    input_height=self.input_height,
            ##                    input_width=self.input_width,
            ##                    resize_height=self.output_height,
            ##                    resize_width=self.output_width,
            ##                    crop=self.crop,
            ##                    grayscale=self.grayscale,
            ##                    coco=coco) for img_id in true_img_ids}
            #      #print('Saving Stuff, brace yourself..')
            #      #np.save('vehicle_imgs.npy', all_imgs)
            #      #print('Saved the stuff, exit now dude!.....')
            #      #time.sleep(10)
            #      true_img_ids = {x:1 for x in true_img_ids}
            #      data = data[[(data[i][1] in true_img_ids) for i in range(len(data))]]
            #data = np.array([(data[i][0], [data[i][1]]) for i in range(len(data))])
            b_size = 64
            tot = 20000
            print(data.shape)
            ret = np.empty((0, 64, 64, 3), dtype=np.float32)
            #ret = np.array(birds_gt)
            data = np.array([(x[0], x[1][19:-4]) for x in data])
            for i in range(tot // b_size):
                test_idx = np.random.randint(0, len(data))
                print('Loading text embedding - ', data[test_idx][1])
                inp = np.random.uniform(-1.0, 1.0, size=[b_size, 50])
                samples = sess.run(dcgan.sampler,
                                   feed_dict={
                                       dcgan.z:
                                       inp,
                                       dcgan.y:
                                       np.array([data[test_idx][0]] * b_size)
                                   })
                print('Done with ', i, ' ...')
                print(samples.shape)
                save_images(
                    samples, image_manifold_size(samples.shape[0]),
                    './{}/train_{}_{:04d}.png'.format('output_cond_cub',
                                                      data[test_idx][1], 64))
                ret = np.vstack((ret, samples))
            print(ret.shape)
            ret = (ret + 1.) / 2
            np.save('imgs_cond_CUB_bird.npy', ret)
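The generation loop above grows ret with np.vstack on every batch, which re-copies the whole array each iteration. A hedged alternative sketch that collects batches in a list and concatenates once; the random array stands in for the sess.run(dcgan.sampler, ...) call:

# Sketch: accumulate generated batches, concatenate once, rescale to [0, 1].
import numpy as np

b_size, tot = 64, 20000
batches = []
for _ in range(tot // b_size):
    samples = np.random.uniform(-1.0, 1.0,
                                size=(b_size, 64, 64, 3)).astype(np.float32)
    batches.append(samples)
ret = np.concatenate(batches, axis=0)  # one copy instead of one per batch
ret = (ret + 1.0) / 2.0                # map from [-1, 1] to [0, 1]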
Exemple #28
0
def main(_):

    # Print the flag configuration defined above
    pp.pprint(flags.FLAGS.__flags)
    # Default the input/output widths to the corresponding heights
    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height
    # Create the checkpoint and sample output directories if they do not exist
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    # Build the session configuration
    run_config = tf.ConfigProto()
    # Let GPU memory grow on demand
    run_config.gpu_options.allow_growth = True
    # Train inside a session
    with tf.Session(config=run_config) as sess:
        # MNIST has labels, so build the conditional DCGAN (y_dim=10)
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          y_dim=10,
                          z_dim=FLAGS.generate_test_images,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          data_dir=FLAGS.data_dir)
        else:

            dcgan = DCGAN(
                sess,
                # Input width and height
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                # Output width and height
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                # Batch size and number of samples
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                # Dimensionality of the noise vector z
                z_dim=FLAGS.generate_test_images,
                # Dataset name
                dataset_name=FLAGS.dataset,
                # Input filename pattern (e.g. *.jpg)
                input_fname_pattern=FLAGS.input_fname_pattern,
                # Whether to center-crop the inputs
                crop=FLAGS.crop,
                # Checkpoint, sample, and dataset directory paths
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir,
                data_dir=FLAGS.data_dir)

        # Print all model variables
        show_all_variables()

        # Training mode
        if FLAGS.train:
            # Run the training loop
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
        #                 [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
        #                 [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
        #                 [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
        #                 [dcgan.h4_w, dcgan.h4_b, None])

        # Below is code for visualization
        OPTION = 1
        # Run visualization
        visualize(sess, dcgan, FLAGS, OPTION)
def main(_):
    print('Before processing flags')
    pp.pprint(flags.FLAGS.__flags)
    if FLAGS.use_s3:
        import aws
        if FLAGS.s3_bucket is None:
            raise ValueError('`use_s3` flag set, but no bucket set.')
        # check to see if the s3 bucket exists:
        elif not aws.bucket_exists(FLAGS.s3_bucket):
            raise ValueError(
                '`use_s3` flag set, but bucket "%s" does not exist.'
                % FLAGS.s3_bucket)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    # configure the log_dir to match the params
    log_dir = os.path.join(
        FLAGS.log_dir,
        "dataset={},isCan={},lr={},imsize={},hasStyleNet={},batch_size={}".
        format(FLAGS.dataset, FLAGS.can, FLAGS.learning_rate,
               FLAGS.input_height, FLAGS.style_net_checkpoint is not None,
               FLAGS.batch_size))
    if not glob(log_dir + "*"):
        log_dir = os.path.join(log_dir, "000")
    else:
        containing_dir = os.path.join(log_dir, "[0-9][0-9][0-9]")
        nums = [int(x[-3:])
                for x in glob(containing_dir)]  # TODO FIX THESE HACKS
        if nums == []:
            num = 0
        else:
            num = max(nums) + 1
        log_dir = os.path.join(log_dir, "{:03d}".format(num))
    FLAGS.log_dir = log_dir

    if FLAGS.checkpoint_dir is None:
        FLAGS.checkpoint_dir = os.path.join(FLAGS.log_dir, 'checkpoint')
        FLAGS.use_default_checkpoint = True
    elif FLAGS.use_default_checkpoint:
        raise ValueError(
            '`use_default_checkpoint` flag only works if you keep checkpoint_dir as None'
        )

    if FLAGS.sample_dir is None:
        FLAGS.sample_dir = os.path.join(FLAGS.log_dir, 'samples')

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    print('After processing flags')
    pp.pprint(flags.FLAGS.__flags)
    if FLAGS.style_net_checkpoint:
        from slim.nets import nets_factory
        network_fn = nets_factory

    sess = None
    if FLAGS.dataset == 'mnist':
        y_dim = 10
    elif FLAGS.dataset == 'wikiart':
        y_dim = 27
    else:
        y_dim = None
    dcgan = DCGAN(sess,
                  input_width=FLAGS.input_width,
                  input_height=FLAGS.input_height,
                  output_width=FLAGS.output_width,
                  output_height=FLAGS.output_height,
                  batch_size=FLAGS.batch_size,
                  sample_num=FLAGS.sample_size,
                  use_resize=FLAGS.use_resize,
                  replay=FLAGS.replay,
                  y_dim=y_dim,
                  smoothing=FLAGS.smoothing,
                  lamb=FLAGS.lambda_val,
                  dataset_name=FLAGS.dataset,
                  input_fname_pattern=FLAGS.input_fname_pattern,
                  crop=FLAGS.crop,
                  checkpoint_dir=FLAGS.checkpoint_dir,
                  sample_dir=FLAGS.sample_dir,
                  wgan=FLAGS.wgan,
                  learning_rate=FLAGS.learning_rate,
                  style_net_checkpoint=FLAGS.style_net_checkpoint,
                  can=FLAGS.can)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = FLAGS.allow_gpu_growth
    with tf.Session(config=run_config) as sess:
        dcgan.set_sess(sess)
        # show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        OPTION = 0
        visualize(sess, dcgan, FLAGS, OPTION)
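The log_dir numbering above (marked "TODO FIX THESE HACKS") globs for existing three-digit run folders and takes max+1. The same idea as a small hedged helper; next_run_dir is an illustrative name, not part of this codebase:

# Sketch: pick the next zero-padded run directory (000, 001, ...) under
# base_dir, mirroring the log_dir numbering logic above.
import os
import re

def next_run_dir(base_dir):
    runs = []
    if os.path.isdir(base_dir):
        runs = [int(name) for name in os.listdir(base_dir)
                if re.fullmatch(r"\d{3}", name)]
    return os.path.join(base_dir, "{:03d}".format(max(runs) + 1 if runs else 0))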
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth=True

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            wgan = WGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                y_dim=10,
                dataset=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir,
                clip_up=FLAGS.clip_up,
                clip_down=FLAGS.clip_down,
                critic_num=FLAGS.critic_num,
                mode=FLAGS.mode,
                LAMBDA=FLAGS.LAMBDA)
        else:
            wgan = WGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                dataset=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir,
                clip_up=FLAGS.clip_up,
                clip_down=FLAGS.clip_down,
                critic_num=FLAGS.critic_num,
                mode=FLAGS.mode,
                LAMBDA=FLAGS.LAMBDA)

        show_all_variables()

        if FLAGS.train:
            wgan.train(FLAGS)
        else:
            if not wgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        # to_json("./web/js/layers.js", [wgan.h0_w, wgan.h0_b, wgan.g_bn0],
        #                 [wgan.h1_w, wgan.h1_b, wgan.g_bn1],
        #                 [wgan.h2_w, wgan.h2_b, wgan.g_bn2],
        #                 [wgan.h3_w, wgan.h3_b, wgan.g_bn3],
        #                 [wgan.h4_w, wgan.h4_b, None])

        # Below is code for visualization
        OPTION = 1
        visualize(sess, wgan, FLAGS, OPTION)
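The clip_up/clip_down arguments above point at the original WGAN recipe, where every critic weight is projected back into a fixed interval after each critic update. A minimal TF1 sketch of that clipping step; d_vars and the 'discriminator' scope are assumptions about how the critic variables are named here:

# Sketch of WGAN weight clipping (critic variable scope is assumed).
import tensorflow as tf

clip_down, clip_up = -0.01, 0.01
d_vars = tf.trainable_variables(scope='discriminator')  # assumption
clip_critic = tf.group(*[v.assign(tf.clip_by_value(v, clip_down, clip_up))
                         for v in d_vars])
# run sess.run(clip_critic) after each critic update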
Exemple #31
0
def main(_):
    print('Program is started at', time.time())
    pp.pprint(flags.FLAGS.__flags)

    n_per_itr_print_results = 100
    n_fetch_data = 10
    kb_work_on_patch = False
    nd_input_frame_size = (240, 360)
    #nd_patch_size = (45, 45)
    n_stride = 10
    #FLAGS.checkpoint_dir = "./checkpoint/UCSD_128_45_45/"

    #FLAGS.dataset = 'UCSD'
    #FLAGS.dataset_address = './dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Test'
    lst_test_dirs = ['Test004', 'Test005', 'Test006']

    #DATASET PARAMETER : MNIST
    #FLAGS.dataset = 'mnist'
    #FLAGS.dataset_address = './dataset/mnist'
    #nd_input_frame_size = (28, 28)
    #nd_patch_size = (28, 28)
    #FLAGS.checkpoint_dir = "./checkpoint/mnist_128_28_28/"

    #FLAGS.input_width = nd_patch_size[0]
    #FLAGS.input_height = nd_patch_size[1]
    #FLAGS.output_width = nd_patch_size[0]
    #FLAGS.output_height = nd_patch_size[1]

    check_some_assertions()

    nd_patch_size = (FLAGS.input_width, FLAGS.input_height)
    # FLAGS.nStride = n_stride

    #FLAGS.input_fname_pattern = '*'
    FLAGS.train = False
    FLAGS.epoch = 1
    FLAGS.batch_size = 504

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True
    with tf.Session(config=run_config) as sess:
        tmp_ALOCC_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            is_training=FLAGS.train,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_patch_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
            n_fetch_data=n_fetch_data)

        show_all_variables()

        print('--------------------------------------------------')
        print('Load Pretrained Model...')
        tmp_ALOCC_model.f_check_checkpoint()

        if FLAGS.dataset == 'mnist':
            mnist = input_data.read_data_sets(FLAGS.dataset_address)

            specific_idx_anomaly = np.where(mnist.train.labels != 6)[0]
            specific_idx = np.where(mnist.train.labels == 6)[0]
            ten_percent_anomaly = [
                specific_idx_anomaly[x]
                for x in random.sample(range(0, len(specific_idx_anomaly)),
                                       len(specific_idx) // 40)
            ]

            data = mnist.train.images[specific_idx].reshape(-1, 28, 28, 1)
            tmp_data = mnist.train.images[ten_percent_anomaly].reshape(
                -1, 28, 28, 1)
            data = np.append(data, tmp_data).reshape(-1, 28, 28, 1)

            lst_prob = tmp_ALOCC_model.f_test_frozen_model(
                data[0:FLAGS.batch_size])
            print('check is ok')
            exit()
            #generated_data = tmp_ALOCC_model.feed2generator(data[0:FLAGS.batch_size])
        else:
            data = read_data.test_data(1)
            np.random.shuffle(data)
            lst_prob = tmp_ALOCC_model.f_test_frozen_model(
                data[0:FLAGS.batch_size])
            print('check is ok')
            exit()
Exemple #32
0
def main(_):
    date = time.strftime('%d%m')
    pp.pprint(flags.FLAGS.__flags)
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.output):
        os.makedirs(FLAGS.output)
    if not os.path.exists(os.path.join('./logs', date)):
        os.makedirs(os.path.join('./logs', date))
    gpu_config = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_config)) as sess:
        if FLAGS.is_train:
            LFSR = Model(sess, date,
                         image_wid=FLAGS.image_wid,
                         image_hei=FLAGS.image_hei,
                         batch_size=FLAGS.batch_size,
                         dataset_name=FLAGS.dataset,
                         checkpoint_dir=FLAGS.checkpoint_dir)
            LFSR.train(FLAGS)
        else:
            LFSR = Model(sess, date,
                         image_wid=FLAGS.image_wid,
                         image_hei=FLAGS.image_hei,
                         batch_size=FLAGS.batch_size,
                         dataset_name=FLAGS.dataset,
                         checkpoint_dir=FLAGS.checkpoint_dir)
            if LFSR.loadnet(FLAGS.checkpoint_dir):
                print('Loaded pretrained network \n')
            else:
                print('Failed to load network \n')

            ssim_val = 0.0
            psnr_val = 0.0

            #test_batch_idxs = test_input_vertical.shape[-1]/FLAGS.batch_size
            view = 2
            count = 0
            if view == 0:
                for tt in [0, 5, 10, 15]:
                    for ii in range(5):
                        inputdata = sio.loadmat(
                            '/research2/SPL/spaSR/buddha/sr_%04d.mat' %
                            (tt + ii))
                        sr1 = inputdata['Predict']
                        inputdata = sio.loadmat(
                            '/research2/SPL/spaSR/buddha/sr_%04d.mat' %
                            (ii + tt + 5))
                        sr2 = inputdata['Predict']
                        final_output = np.zeros(
                            (sr1.shape[0], sr2.shape[1], 3)).astype(np.float32)
                        for ch in range(3):
                            tmp1 = np.expand_dims(sr1[:, :, ch], axis=-1)
                            tmp2 = np.expand_dims(sr2[:, :, ch], axis=-1)
                            tmp = np.concatenate([tmp1, tmp2], axis=-1)
                            input_ = np.expand_dims(tmp, axis=0)
                            output = LFSR.sess.run(
                                LFSR.output_ver,
                                feed_dict={LFSR.train_input_vertical: input_})
                            output = np.squeeze(output)
                            final_output[:, :, ch] = output
                        sio.savemat(
                            os.path.join('buddha', 'ang_ver_%04d.mat' % count),
                            {'Predict': final_output})
                        count += 1
            if view == 1:
                for tt in [0, 5, 10, 15, 20]:
                    for ii in range(4):
                        inputdata = sio.loadmat(
                            '/research2/SPL/spaSR/buddha/sr_%04d.mat' %
                            (tt + ii))
                        sr1 = inputdata['Predict']
                        inputdata = sio.loadmat(
                            '/research2/SPL/spaSR/buddha/sr_%04d.mat' %
                            (ii + tt + 1))
                        sr2 = inputdata['Predict']
                        final_output = np.zeros(
                            (sr1.shape[0], sr2.shape[1], 3)).astype(np.float32)
                        for ch in range(3):
                            tmp1 = np.expand_dims(sr1[:, :, ch], axis=-1)
                            tmp2 = np.expand_dims(sr2[:, :, ch], axis=-1)
                            tmp = np.concatenate([tmp1, tmp2], axis=-1)
                            input_ = np.expand_dims(tmp, axis=0)
                            output = LFSR.sess.run(
                                LFSR.output_hor,
                                feed_dict={
                                    LFSR.train_input_horizontal: input_
                                })
                            output = np.squeeze(output)
                            final_output[:, :, ch] = output
                        sio.savemat(
                            os.path.join('buddha', 'ang_hor_%04d.mat' % count),
                            {'Predict': final_output})
                        count += 1
            if view == 2:
                for tt in [0, 5, 10, 15]:
                    for ii in range(4):
                        inputdata = sio.loadmat(
                            '/research2/SPL/spaSR/buddha/sr_%04d.mat' %
                            (tt + ii))
                        sr1 = inputdata['Predict']
                        inputdata = sio.loadmat(
                            '/research2/SPL/spaSR/buddha/sr_%04d.mat' %
                            (tt + ii + 1))
                        sr2 = inputdata['Predict']
                        inputdata = sio.loadmat(
                            '/research2/SPL/spaSR/buddha/sr_%04d.mat' %
                            (tt + ii + 5))
                        sr3 = inputdata['Predict']
                        inputdata = sio.loadmat(
                            '/research2/SPL/spaSR/buddha/sr_%04d.mat' %
                            (ii + tt + 6))
                        sr4 = inputdata['Predict']
                        final_output = np.zeros(
                            (sr1.shape[0], sr2.shape[1], 3)).astype(np.float32)
                        for ch in range(3):
                            tmp1 = np.expand_dims(sr1[:, :, ch], axis=-1)
                            tmp2 = np.expand_dims(sr2[:, :, ch], axis=-1)
                            tmp3 = np.expand_dims(sr3[:, :, ch], axis=-1)
                            tmp4 = np.expand_dims(sr4[:, :, ch], axis=-1)
                            tmp = np.concatenate([tmp1, tmp2, tmp3, tmp4],
                                                 axis=-1)
                            input_ = np.expand_dims(tmp, axis=0)
                            output = LFSR.sess.run(
                                LFSR.output_views,
                                feed_dict={LFSR.train_input_4views: input_})
                            output = np.squeeze(output)
                            final_output[:, :, ch] = output
                        sio.savemat(
                            os.path.join('buddha',
                                         'ang_views_%04d.mat' % count),
                            {'Predict': final_output})
                        count += 1
Exemple #33
0
def main(_):
    width_size = 905
    height_size = 565
    #width_size = 1104
    #height_size = 764
    #width_size = 1123
    #height_size = 900
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    gpu_config = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu)
    #with tf.Session() as sess:
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_config)) as sess:
        if FLAGS.is_train:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size,
                          batch_size=FLAGS.batch_size,
                          num_block=FLAGS.num_block,
                          dataset_name=FLAGS.dataset,
                          is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = EVAL(sess, batch_size=1,
                         num_block=FLAGS.num_block,
                         ir_image_shape=[None, None, 1],
                         dataset_name=FLAGS.dataset,
                         is_crop=False,
                         checkpoint_dir=FLAGS.checkpoint_dir)
            print('deep model test \n')

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
            print('1: Estimating Normal maps from arbitrary objects \n')
            print('2: Estimating Normal maps of NIR dataset \n')
            x = input('Select an evaluation mode: ')
            VAL_OPTION = int(x)

            if VAL_OPTION == 1:  # arbitrary dataset
                print("Computing arbitrary dataset")
                trained_models = glob.glob(
                    os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset,
                                 'DCGAN.model*'))
                trained_models = natsorted(trained_models)
                model = trained_models[6]
                model = model.split('/')
                model = model[-1]
                print('Load trained network: %s\n' % model)
                dcgan.load(FLAGS.checkpoint_dir, model)
                datapath = '/research3/datain/gmchoe_normal/0403/IR_0.25'
                savepath = datapath
                mean_nir = -0.3313
                img_files = glob.glob(os.path.join(datapath, '*.png'))
                img_files = natsorted(img_files)
                for idx in range(0, len(img_files)):
                    print('Processing %d/%d \n' % (idx + 1, len(img_files)))
                    input_ = scipy.misc.imread(img_files[idx],
                                               'F').astype(float)
                    height_size = input_.shape[0]
                    width_size = input_.shape[1]
                    input_ = np.reshape(
                        input_,
                        (height_size, width_size, 1))  # LF size:383 x 552
                    nondetail_input_ = ndimage.gaussian_filter(input_,
                                                               sigma=(1, 1, 0),
                                                               order=0)
                    input_ = input_ / 127.5 - 1.0
                    nondetail_input_ = nondetail_input_ / 127.5 - 1.0  # normalize -1 ~1
                    detail_input_ = input_ - nondetail_input_
                    nondetail_input_ = np.reshape(
                        nondetail_input_,
                        (1, height_size, width_size, 1))  # LF size:383 x 552
                    detail_input_ = np.reshape(detail_input_,
                                               (1, height_size, width_size, 1))
                    #detail_input_  = detail_input_/127.5 -1.0 # normalize -1 ~1
                    start_time = time.time()
                    sample = sess.run(dcgan.G,
                                      feed_dict={
                                          dcgan.nondetail_images:
                                          nondetail_input_,
                                          dcgan.detail_images: detail_input_
                                      })
                    print('time: %.8f' % (time.time() - start_time))
                    sample = np.squeeze(sample).astype(np.float32)

                    # normalization #
                    output = np.sqrt(np.sum(np.power(sample, 2), axis=2))
                    output = np.expand_dims(output, axis=-1)
                    output = sample / output
                    output = (output + 1.) / 2.
                    """
		    if not os.path.exists(os.path.join(savepath,'%s/%s/%s' %(FLAGS.dataset,model,listdir[idx]))):
		        os.makedirs(os.path.join(savepath,'%s/%s/%s' %(FLAGS.dataset,model,listdir[idx])))
                    """
                    savename = os.path.join(
                        savepath, 'result/%s.bmp' % (img_files[idx][-10:]))
                    #savename = os.path.join(savepath,'single_normal_%02d.bmp' % (idx+1))
                    #savename = os.path.join(savepath,'%s/%s/%s/single_normal.bmp' % (FLAGS.dataset,model,listdir[idx]))
                    scipy.misc.imsave(savename, output)

            elif VAL_OPTION == 2:  # light source fixed
                list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
                load, iteration = dcgan.load(FLAGS.checkpoint_dir)
                savepath = './singleview_nir/L2ang/%d' % iteration
                obj = 1
                count = 1
                if load:
                    for idx in range(len(list_val)):
                        if not os.path.exists(
                                os.path.join(savepath,
                                             '%03d' % list_val[idx])):
                            os.makedirs(
                                os.path.join(savepath, '%03d' % list_val[idx]))
                        for idx2 in range(1, 10):  #tilt angles
                            print("Selected material %03d/%d" %
                                  (list_val[idx], idx2))
                            img = './dataset/multi-view/testdata_3579/%03d/%03d/patch_%06d.mat' % (
                                obj, idx2, count)
                            input_ = scipy.io.loadmat(img)
                            input_ = input_['input_']
                            input_ = input_.astype(np.float)
                            #input_ = input_[:,:,0:3]
                            input_ = np.reshape(input_, (1, 600, 800, 4))
                            input_ = input_ / 127.5 - 1.0
                            start_time = time.time()
                            sample = sess.run([dcgan.G],
                                              feed_dict={dcgan.images: input_})
                            print('time: %.8f' % (time.time() - start_time))
                            # normalization #
                            sample = np.squeeze(sample).astype(np.float32)
                            output = np.sqrt(
                                np.sum(np.power(sample, 2), axis=2))
                            output = np.expand_dims(output, axis=-1)
                            output = sample / output
                            output = (output + 1.) / 2.
                            if not os.path.exists(
                                    os.path.join(
                                        savepath, '%03d/%03d' %
                                        (list_val[idx], idx2))):
                                os.makedirs(
                                    os.path.join(
                                        savepath,
                                        '%03d/%03d' % (list_val[idx], idx2)))
                            savename = os.path.join(
                                savepath, '%03d/%03d/multiview_normal.bmp' %
                                (list_val[idx], idx2))
                            scipy.misc.imsave(savename, output)
                            count = count + 1
                        obj = obj + 1
                else:
                    print("Failed to load network")
def main(_):
  pp.pprint(flags.FLAGS.__flags)

  FLAGS.train = True
  alpha_max_str = str(FLAGS.alpha_max)
#   if FLAGS.steer:
#     print('Training with steerable G -> loading model_argminGW2_{} ...'.format(FLAGS.transform_type))
#     DCGAN = getattr(importlib.import_module('model_argminGW2_{}'.format(FLAGS.transform_type)), 'DCGAN')
#   else:
#     print('Training vanilla G -> loading model_vanilla_{} ...'.format(FLAGS.transform_type))
#     DCGAN = getattr(importlib.import_module('model_vanilla_{}'.format(FLAGS.transform_type)), 'DCGAN')

  print('Training with {} G for the {} transformation ...'.format(
      'steerable' if FLAGS.steer else 'vanilla', FLAGS.transform_type))
  if FLAGS.transform_type == 'zoom':
    if FLAGS.steer:
      from model_argminGW2_zoom import DCGAN
    else: 
      from model_vanilla_zoom import DCGAN
        
  if FLAGS.transform_type == 'shiftx':
    alpha_max_str = str(np.uint8(FLAGS.alpha_max))
    if FLAGS.steer:
      from model_argminGW2_shiftx import DCGAN
    else: 
      from model_vanilla_shiftx import DCGAN
    
  if FLAGS.transform_type == 'shifty':
    alpha_max_str = str(np.uint8(FLAGS.alpha_max))
    if FLAGS.steer:
      from model_argminGW2_shifty import DCGAN
    else: 
      from model_vanilla_shifty import DCGAN
    
  if FLAGS.transform_type == 'rot2d':
    alpha_max_str = str(np.uint8(FLAGS.alpha_max))
    if FLAGS.steer:
      from model_argminGW2_rot2d import DCGAN
    else: 
      from model_vanilla_rot2d import DCGAN
    
  augment_flag_str = 'NoAug'
  if FLAGS.aug:
    augment_flag_str = 'aug'
  
  steer_flag_str = 'vanilla'
  if FLAGS.steer:
    steer_flag_str = 'argminGW'
  else:
    if FLAGS.aug:
        steer_flag_str = 'argminW'

  if FLAGS.out_name:
    FLAGS.out_name = expand_path(FLAGS.out_name)
  else:
    FLAGS.out_name = FLAGS.transform_type+'_'+augment_flag_str+'_'+steer_flag_str+\
                     '_alphamax'+alpha_max_str+'_lr'+ str(FLAGS.learning_rate)
  print('Results will be saved in {}'.format(FLAGS.out_name))

  # expand user name and environment variables
  FLAGS.data_dir = expand_path(FLAGS.data_dir)
  FLAGS.out_dir = expand_path(FLAGS.out_dir)
#   FLAGS.out_name = expand_path(FLAGS.out_name)
  FLAGS.checkpoint_dir = expand_path(FLAGS.checkpoint_dir)
  FLAGS.sample_dir = expand_path(FLAGS.sample_dir)

  if FLAGS.output_height is None: FLAGS.output_height = FLAGS.input_height
  if FLAGS.input_width is None: FLAGS.input_width = FLAGS.input_height
  if FLAGS.output_width is None: FLAGS.output_width = FLAGS.output_height

  # output folders
  if FLAGS.out_name == "":
      FLAGS.out_name = '{} - {} - {}'.format(timestamp(), FLAGS.data_dir.split('/')[-1], FLAGS.dataset) # penultimate folder of path
      if FLAGS.train:
        FLAGS.out_name += ' - x{}.z{}.{}.y{}.b{}'.format(FLAGS.input_width, FLAGS.z_dim, FLAGS.z_dist, FLAGS.output_width, FLAGS.batch_size)

  FLAGS.out_dir = os.path.join(FLAGS.out_dir, FLAGS.out_name)
  FLAGS.checkpoint_dir = os.path.join(FLAGS.out_dir, FLAGS.checkpoint_dir)
  FLAGS.sample_dir = os.path.join(FLAGS.out_dir, FLAGS.sample_dir)

  if not os.path.exists(FLAGS.checkpoint_dir): os.makedirs(FLAGS.checkpoint_dir)
  if not os.path.exists(FLAGS.sample_dir): os.makedirs(FLAGS.sample_dir)

  with open(os.path.join(FLAGS.out_dir, 'FLAGS.json'), 'w') as f:
    flags_dict = {k:FLAGS[k].value for k in FLAGS}
    json.dump(flags_dict, f, indent=4, sort_keys=True, ensure_ascii=False)
  

  #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
  run_config = tf.ConfigProto()
  run_config.gpu_options.allow_growth=True

  with tf.Session(config=run_config) as sess:
    if FLAGS.dataset == 'mnist':
      dcgan = DCGAN(
          sess,
          input_width=FLAGS.input_width,
          input_height=FLAGS.input_height,
          output_width=FLAGS.output_width,
          output_height=FLAGS.output_height,
          batch_size=FLAGS.batch_size,
          sample_num=FLAGS.batch_size,
          y_dim=10,
          z_dim=FLAGS.z_dim,
          dataset_name=FLAGS.dataset,
          aug=FLAGS.aug,
          alpha_max=FLAGS.alpha_max,
          input_fname_pattern=FLAGS.input_fname_pattern,
          crop=FLAGS.crop,
          checkpoint_dir=FLAGS.checkpoint_dir,
          sample_dir=FLAGS.sample_dir,
          data_dir=FLAGS.data_dir,
          out_dir=FLAGS.out_dir,
          max_to_keep=FLAGS.max_to_keep)
    else:
      dcgan = DCGAN(
          sess,
          input_width=FLAGS.input_width,
          input_height=FLAGS.input_height,
          output_width=FLAGS.output_width,
          output_height=FLAGS.output_height,
          batch_size=FLAGS.batch_size,
          sample_num=FLAGS.batch_size,
          z_dim=FLAGS.z_dim,
          dataset_name=FLAGS.dataset,
          aug=FLAGS.aug,
          input_fname_pattern=FLAGS.input_fname_pattern,
          crop=FLAGS.crop,
          checkpoint_dir=FLAGS.checkpoint_dir,
          sample_dir=FLAGS.sample_dir,
          data_dir=FLAGS.data_dir,
          out_dir=FLAGS.out_dir,
          max_to_keep=FLAGS.max_to_keep)

    show_all_variables()

    if FLAGS.train:
      print('>>>---Training mode is set to {}---<<<'.format(FLAGS.train))
      time.sleep(10)
      dcgan.train(FLAGS)
    else:
      print('<<<---Testing mode--->>>')
      time.sleep(10)  
      load_success, load_counter = dcgan.load(FLAGS.checkpoint_dir)
      if not load_success:
        raise Exception("Checkpoint not found in " + FLAGS.checkpoint_dir)


    # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
    #                 [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
    #                 [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
    #                 [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
    #                 [dcgan.h4_w, dcgan.h4_b, None])

    # Below is code for visualization
      if FLAGS.export:
        export_dir = os.path.join(FLAGS.checkpoint_dir, 'export_b'+str(FLAGS.batch_size))
        dcgan.save(export_dir, load_counter, ckpt=True, frozen=False)

      if FLAGS.freeze:
        export_dir = os.path.join(FLAGS.checkpoint_dir, 'frozen_b'+str(FLAGS.batch_size))
        dcgan.save(export_dir, load_counter, ckpt=False, frozen=True)

      if FLAGS.visualize:
        OPTION = 1
        visualize(sess, dcgan, FLAGS, OPTION, FLAGS.sample_dir)
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    # expand user name and environment variables
    FLAGS.data_dir = expand_path(FLAGS.data_dir)
    FLAGS.out_dir = expand_path(FLAGS.out_dir)
    FLAGS.out_name = expand_path(FLAGS.out_name)
    FLAGS.checkpoint_dir = expand_path(FLAGS.checkpoint_dir)
    FLAGS.sample_dir = expand_path(FLAGS.sample_dir)

    if FLAGS.output_height is None: FLAGS.output_height = FLAGS.input_height
    if FLAGS.input_width is None: FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None: FLAGS.output_width = FLAGS.output_height

    # output folders
    if FLAGS.out_name == "":
        FLAGS.out_name = '{} - {} - {}'.format(timestamp(), FLAGS.data_dir.split('/')[-1],
                                               FLAGS.dataset)  # penultimate folder of path
        if FLAGS.train:
            FLAGS.out_name += ' - x{}.z{}.{}.y{}.b{}'.format(FLAGS.input_width, FLAGS.z_dim, FLAGS.z_dist,
                                                             FLAGS.output_width, FLAGS.batch_size)

    FLAGS.out_dir = os.path.join(FLAGS.out_dir, FLAGS.out_name)
    FLAGS.checkpoint_dir = os.path.join(FLAGS.out_dir, FLAGS.checkpoint_dir)
    FLAGS.sample_dir = os.path.join(FLAGS.out_dir, FLAGS.sample_dir)

    if not os.path.exists(FLAGS.checkpoint_dir): os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir): os.makedirs(FLAGS.sample_dir)

    with open(os.path.join(FLAGS.out_dir, 'FLAGS.json'), 'w') as f:
        flags_dict = {k: FLAGS[k].value for k in FLAGS}
        json.dump(flags_dict, f, indent=4, sort_keys=True, ensure_ascii=False)

    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                y_dim=10,
                z_dim=FLAGS.z_dim,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir,
                data_dir=FLAGS.data_dir,
                out_dir=FLAGS.out_dir,
                max_to_keep=FLAGS.max_to_keep)
        else:
            dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                z_dim=FLAGS.z_dim,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                crop=FLAGS.crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir,
                data_dir=FLAGS.data_dir,
                out_dir=FLAGS.out_dir,
                max_to_keep=FLAGS.max_to_keep)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            load_success, load_counter = dcgan.load(FLAGS.checkpoint_dir)
            if not load_success:
                raise Exception("Checkpoint not found in " + FLAGS.checkpoint_dir)

        if FLAGS.export:
            export_dir = os.path.join(FLAGS.checkpoint_dir, 'export_b' + str(FLAGS.batch_size))
            dcgan.save(export_dir, load_counter, ckpt=True, frozen=False)

        if FLAGS.freeze:
            export_dir = os.path.join(FLAGS.checkpoint_dir, 'frozen_b' + str(FLAGS.batch_size))
            dcgan.save(export_dir, load_counter, ckpt=False, frozen=True)

        if FLAGS.visualize:
            OPTION = 1
            visualize(sess, dcgan, FLAGS, OPTION, FLAGS.sample_dir)
Exemple #36
0
def main(_):
    pp.pprint(flags.FLAGS.__flags)
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1,
                                allow_growth=True)
    run_config = tf.ConfigProto(allow_soft_placement=False,
                                gpu_options=gpu_options,
                                log_device_placement=False)

    tf.reset_default_graph()
    input_shape = [FLAGS.batch_size, 182, 218, 182, 1]
    # input_shape = [FLAGS.batch_size, 145, 174, 145, 1]
    modal = FLAGS.modal
    seqs = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    # seqs=[0,3]
    t = len(seqs)
    start_t = time.time()
    for iter in range(1):
        acc = [0] * t
        acc_val = [0] * t
        for acc_id, dataset_seq in enumerate(seqs):
            tf.reset_default_graph()
            if FLAGS.train:
                with tf.Session(config=run_config) as sess:

                    next_batch, next_batch_v, next_batch_test = load_data_with_val(
                        sess,
                        batch_size=FLAGS.batch_size,
                        zoom_rate=FLAGS.zoom_rate,
                        cross=dataset_seq,
                        modal='flirt')

                    cnn_mri = CNN_MRI(sess,
                                      input_shape=input_shape,
                                      config=FLAGS,
                                      batch_size=FLAGS.batch_size,
                                      zoom_rate=FLAGS.zoom_rate,
                                      y_dim1=2,
                                      y_dim2=4,
                                      stride=[1, 1, 1, 1, 1],
                                      padding='SAME',
                                      checkpoint_dir=FLAGS.checkpoint_dir,
                                      model_name='CNN_{}_{}'.format(
                                          modal, dataset_seq),
                                      isTrain=True)

                    cnn_mri.train(next_batch, next_batch_v, FLAGS, dataset_seq)
                    n1 = 0
                    r1 = 0
                    for i in range(1):
                        data, label = sess.run(next_batch_v)
                        label = np.squeeze(label)
                        g1 = cnn_mri.cnn_correct(data, label, FLAGS)
                        n1 = n1 + FLAGS.batch_size
                        r1 = r1 + g1
                    a_t_g = r1 / n1
                    print('validation set 100 elements accuracy:{} '.format(
                        a_t_g))
            tf.reset_default_graph()
            loaded_graph = tf.Graph()
            with tf.Session(graph=loaded_graph) as sess:
                _, next_batch_v, next_batch_test = load_data_with_val(
                    sess,
                    batch_size=FLAGS.batch_size,
                    zoom_rate=FLAGS.zoom_rate,
                    cross=dataset_seq)
                cnn_mri = CNN_MRI(sess,
                                  config=FLAGS,
                                  input_shape=input_shape,
                                  batch_size=FLAGS.batch_size,
                                  zoom_rate=FLAGS.zoom_rate,
                                  y_dim1=2,
                                  y_dim2=4,
                                  stride=[1, 1, 1, 1, 1],
                                  padding='SAME',
                                  checkpoint_dir=FLAGS.checkpoint_dir,
                                  model_name='CNN_{}_{}'.format(
                                      modal, dataset_seq),
                                  isTrain=False)
                cnn_mri.load(FLAGS.checkpoint_dir)
                n1 = 0
                r1 = 0
                for i in range(100):
                    data, label = sess.run(next_batch_v)
                    label = np.squeeze(label)
                    g1 = cnn_mri.cnn_correct(data, label, FLAGS)
                    n1 = n1 + FLAGS.batch_size
                    r1 = r1 + g1
                a_v_g = r1 / n1
                acc_val[acc_id] = a_v_g
                print('validation set 100 elements accuracy:{} '.format(a_v_g))
                n1 = 0
                r1 = 0
                for i in range(100):
                    data, label = sess.run(next_batch_test)
                    label = np.squeeze(label)
                    g1 = cnn_mri.cnn_correct(data, label, FLAGS)
                    n1 = n1 + FLAGS.batch_size
                    r1 = r1 + g1
                a_v_g = r1 / n1
                acc[acc_id] = a_v_g
                print('test set 100 elements accuracy:{} '.format(a_v_g))

        print('acc_val:{}'.format(acc_val))
        print('acc:{}'.format(acc))
        print('avg_acc:{}'.format(sum(acc) / t))

    print('time cost:{}'.format(time.time() - start_t))
Exemple #37
0
def main(_):
  pp.pprint(flags.FLAGS.__flags)

  if FLAGS.input_width is None:
    FLAGS.input_width = FLAGS.input_height
  if FLAGS.output_width is None:
    FLAGS.output_width = FLAGS.output_height

  if not os.path.exists(FLAGS.checkpoint_dir):
    os.makedirs(FLAGS.checkpoint_dir)
  if not os.path.exists(FLAGS.sample_dir):
    os.makedirs(FLAGS.sample_dir)

  #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
  run_config = tf.ConfigProto()
  run_config.gpu_options.allow_growth=True

  gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1)

  with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    if FLAGS.dataset == 'mnist':
      dcgan = DCGAN(
          sess,
          input_width=FLAGS.input_width,
          input_height=FLAGS.input_height,
          output_width=FLAGS.output_width,
          output_height=FLAGS.output_height,
          batch_size=FLAGS.batch_size,
          sample_num=FLAGS.batch_size,
          y_dim=10,
          c_dim=1,
          dataset_name=FLAGS.dataset,
          input_fname_pattern=FLAGS.input_fname_pattern,
          is_crop=FLAGS.is_crop,
          checkpoint_dir=FLAGS.checkpoint_dir,
          sample_dir=FLAGS.sample_dir)
    else:
      dcgan = DCGAN(
          sess,
          input_width=FLAGS.input_width,
          input_height=FLAGS.input_height,
          output_width=FLAGS.output_width,
          output_height=FLAGS.output_height,
          batch_size=FLAGS.batch_size,
          sample_num=FLAGS.batch_size,
          c_dim=FLAGS.c_dim,
          dataset_name=FLAGS.dataset,
          input_fname_pattern=FLAGS.input_fname_pattern,
          is_crop=FLAGS.is_crop,
          checkpoint_dir=FLAGS.checkpoint_dir,
          sample_dir=FLAGS.sample_dir)

    show_all_variables()
    if FLAGS.is_train:
      dcgan.train(FLAGS)
    else:
      if not dcgan.load(FLAGS.checkpoint_dir):
        raise Exception("[!] Train a model first, then run test mode")
      

    # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
    #                 [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
    #                 [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
    #                 [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
    #                 [dcgan.h4_w, dcgan.h4_b, None])

    # Below is code for visualization
    OPTION = 1
    visualize(sess, dcgan, FLAGS, OPTION)
Exemple #38
def main(_):
    STARTED_DATESTRING = "{0:%Y-%m-%dT%H:%M:%S}".format(
        datetime.now()).replace(":", "-")
    pp.pprint(flags.FLAGS.__flags)

    assert FLAGS.mode.lower() in (
        'train', 'generate'), "mode must be 'train' or 'generate'!"
    FLAGS.mode = FLAGS.mode.lower()
    if FLAGS.mode == 'train':
        if FLAGS.out_dir is None:
            FLAGS.out_dir = 'out/train_' + STARTED_DATESTRING
            print('Using default out_dir {0}'.format(FLAGS.out_dir))
        else:
            if FLAGS.out_dir.endswith('/'): FLAGS.out_dir = FLAGS.out_dir[:-1]
        if FLAGS.checkpoint_dir is None:
            FLAGS.checkpoint_dir = FLAGS.out_dir + '/checkpoint'
    else:
        if FLAGS.checkpoint_dir is None:
            raise Exception(
                'Cannot generate: checkpoint {0} does not exist!'.format(
                    FLAGS.checkpoint_dir))
        else:
            if FLAGS.checkpoint_dir.endswith('/'):
                FLAGS.checkpoint_dir = FLAGS.checkpoint_dir[:-1]
        if FLAGS.out_dir is None:
            FLAGS.out_dir = 'out/gene_' + STARTED_DATESTRING

    if not os.path.exists(FLAGS.out_dir):
        os.makedirs(FLAGS.out_dir)
        #import IPython; IPython.embed()
        if FLAGS.mode == 'train':
            os.makedirs(FLAGS.out_dir + '/samples')
            os.makedirs(FLAGS.out_dir + '/checkpoint')
            os.makedirs(FLAGS.out_dir + '/logs')

    if FLAGS.audio_params is None:
        if FLAGS.mode == 'train':
            FLAGS.audio_params = './audio_params.json'
            copyfile(FLAGS.audio_params,
                     FLAGS.checkpoint_dir + '/audio_params.json')
        else:
            print('Using json file from {0}'.format(FLAGS.checkpoint_dir))
            FLAGS.audio_params = FLAGS.checkpoint_dir + '/audio_params.json'

    with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
        if FLAGS.dataset == 'wav':
            with open(FLAGS.audio_params, 'r') as f:
                audio_params = json.load(f)
            FLAGS.epoch = audio_params['epoch']
            FLAGS.learning_rate = audio_params['learning_rate']
            FLAGS.beta1 = audio_params['beta1']
            FLAGS.sample_length = audio_params['sample_length']
            dcgan = DCGAN(sess,
                          batch_size=FLAGS.batch_size,
                          z_dim=audio_params['z_dim'],
                          sample_length=FLAGS.sample_length,
                          c_dim=1,
                          dataset_name=FLAGS.dataset,
                          audio_params=FLAGS.audio_params,
                          data_dir=FLAGS.data_dir,
                          use_disc=FLAGS.use_disc,
                          use_fourier=FLAGS.use_fourier,
                          run_g=FLAGS.run_g,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          out_dir=FLAGS.out_dir,
                          mode=FLAGS.mode)
        else:
            raise Exception('dataset not understood')

        if FLAGS.mode == 'train':
            dcgan.train(FLAGS)
        else:
            print('Generating {0} batches of size {1} from checkpoint {2}'.
                  format(FLAGS.gen_size, FLAGS.batch_size,
                         FLAGS.checkpoint_dir))
            dcgan.load(FLAGS.checkpoint_dir)
            dcgan.generate(FLAGS)

        if FLAGS.visualize:
            to_json("./web/js/layers.js",
                    [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
                    [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
                    [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
                    [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
                    [dcgan.h4_w, dcgan.h4_b, None])

            # Below is code for visualization
            OPTION = 2
            visualize(sess, dcgan, FLAGS, OPTION)
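# A minimal sketch of the JSON-override pattern used above: persist training
# hyperparameters next to the checkpoint and replay them at generation time.
# (Illustrative only -- the keys read here are the ones this snippet uses;
# adapt them to your own audio_params.json.)
import json

def apply_audio_params(flags_obj, path):
    with open(path, 'r') as f:
        params = json.load(f)
    for key in ('epoch', 'learning_rate', 'beta1', 'sample_length'):
        if key in params:
            setattr(flags_obj, key, params[key])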
Exemple #39
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
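    # per_process_gpu_memory_fraction=0.5 pre-allocates half of the GPU memory
    # for this process; gpu_options.allow_growth = True is the on-demand alternative.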
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        if FLAGS.is_train:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size, input_size=FLAGS.input_size,
                      dataset_name=FLAGS.dataset,
                      is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = EVAL(sess, input_size=600, batch_size=1,
                         ir_image_shape=[600, 800, 1],
                         normal_image_shape=[600, 800, 3],
                         dataset_name=FLAGS.dataset,
                         is_crop=False, checkpoint_dir=FLAGS.checkpoint_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.load(FLAGS.checkpoint_dir)
            OPTION = 2  # for validation
            list_val = [11,16,21,22,33,36,38,53,59,92]
            VAL_OPTION = 2
            """
            if OPTION == 1:
                data = json.load(open("/research2/IR_normal_small/json/traininput_single_224_ori_small.json"))
                data_label = json.load(open("/research2/IR_normal_small/json/traingt_single_224_ori_small.json"))
            
            elif OPTION == 2:
                data = json.load(open("/research2/IR_normal_small/json/testinput_single_224_ori_small.json"))
                data_label = json.load(open("/research2/IR_normal_small/json/testgt_single_224_ori_small.json"))
            """
            if VAL_OPTION ==1:
                list_val = [11,16,21,22,33,36,38,53,59,92]
                for idx in range(len(list_val)):
                    for idx2 in range(1,10): 
                        print("Selected material %03d/%d" % (list_val[idx],idx2))
                        img = '/research2/IR_normal_small/save%03d/%d' % (list_val[idx],idx2)
                        input_ = scipy.misc.imread(img+'/3.bmp').astype(float)
                        gt_ = scipy.misc.imread('/research2/IR_normal_small/save016/1/12_Normal.bmp').astype(float)
                        input_ = scipy.misc.imresize(input_,[600,800])
                        gt_ = scipy.misc.imresize(gt_,[600,800])
                        #input_ = input_[240:840,515:1315]
                        #gt_ = gt_[240:840,515:1315]
                        input_ = np.reshape(input_,(1,600,800,1)) 
                        gt_ = np.reshape(gt_,(1,600,800,3)) 
                        input_ = np.array(input_).astype(np.float32)
                        gt_ = np.array(gt_).astype(np.float32)
                        start_time = time.time() 
                        sample = sess.run(dcgan.sampler, feed_dict={dcgan.ir_images: input_})
                        print('time: %.8f' %(time.time()-start_time))     
                        # normalization #
                        sample = np.squeeze(sample).astype(np.float32)
                        gt_ = np.squeeze(gt_).astype(np.float32)

                        output = np.zeros((600,800,3)).astype(np.float32)
                        output[:,:,0] = sample[:,:,0]/(np.sqrt(np.power(sample[:,:,0],2) + np.power(sample[:,:,1],2) + np.power(sample[:,:,2],2)))
                        output[:,:,1] = sample[:,:,1]/(np.sqrt(np.power(sample[:,:,0],2) + np.power(sample[:,:,1],2) + np.power(sample[:,:,2],2)))
                        output[:,:,2] = sample[:,:,2]/(np.sqrt(np.power(sample[:,:,0],2) + np.power(sample[:,:,1],2) + np.power(sample[:,:,2],2)))
   
                        output[output ==inf] = 0.0
                        sample = (output+1.)/2.
                        savename = '/home/yjyoon/Dropbox/ECCV16_IRNormal/single_result/%03d/%d/single_normal_L2ang.bmp' % (list_val[idx],idx2)

                        scipy.misc.imsave(savename, sample)

            
            elif VAL_OPTION ==2:
                print("Computing all validation set ")
                ErrG = 0.0
                num_img = 13
                for idx in xrange(5, num_img+1):
                    print("[Computing Validation Error %d/%d]" % (idx, num_img))
                    img = '/home/yjyoon/Dropbox/ECCV16_IRNormal/extra/extra_%d.bmp' % (idx)
                    input_ = scipy.misc.imread(img).astype(float)
                    input_ = input_[:,:,0]
                    gt_ = scipy.misc.imread('/research2/IR_normal_small/save016/1/12_Normal.bmp').astype(float)
                    input_ = scipy.misc.imresize(input_,[600,800])
                    gt_ = scipy.misc.imresize(gt_,[600,800])
                    input_ = np.reshape(input_,(1,600,800,1)) 
                    gt_ = np.reshape(gt_,(1,600,800,3)) 
                    input_ = np.array(input_).astype(np.float32)
                    gt_ = np.array(gt_).astype(np.float32)
                    start_time = time.time() 
                    sample = sess.run(dcgan.sampler, feed_dict={dcgan.ir_images: input_})
                    print('time: %.8f' %(time.time()-start_time))     
                    # normalization #
                    sample = np.squeeze(sample).astype(np.float32)
                    gt_ = np.squeeze(gt_).astype(np.float32)

                    output = np.zeros((600,800,3)).astype(np.float32)
                    output[:,:,0] = sample[:,:,0]/(np.sqrt(np.power(sample[:,:,0],2) + np.power(sample[:,:,1],2) + np.power(sample[:,:,2],2)))
                    output[:,:,1] = sample[:,:,1]/(np.sqrt(np.power(sample[:,:,0],2) + np.power(sample[:,:,1],2) + np.power(sample[:,:,2],2)))
                    output[:,:,2] = sample[:,:,2]/(np.sqrt(np.power(sample[:,:,0],2) + np.power(sample[:,:,1],2) + np.power(sample[:,:,2],2)))
   
                    output[output ==inf] = 0.0
                    sample = (output+1.)/2.
                    savename = '/home/yjyoon/Dropbox/ECCV16_IRNormal/extra/extra_result%d.bmp' % (idx)

                    scipy.misc.imsave(savename, sample)
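# The normalization above divides each channel by the same L2 norm, computed
# three times. An equivalent vectorized sketch (assuming `sample` is an
# H x W x 3 float array in [-1, 1]):
import numpy as np

def to_unit_normals(sample):
    norm = np.linalg.norm(sample, axis=2, keepdims=True)  # per-pixel L2 norm
    with np.errstate(divide='ignore', invalid='ignore'):
        unit = sample / norm
    unit[~np.isfinite(unit)] = 0.0   # zero out inf/NaN where the norm is 0
    return (unit + 1.) / 2.          # map [-1, 1] to [0, 1] before saving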
def main(_):
    global INTERMEDIATE_PATH, OUTPUT_PATH
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height
    print(FLAGS.base_dir)
    if not os.path.exists(os.path.join(FLAGS.base_dir, FLAGS.checkpoint_dir)):
        os.makedirs(os.path.join(FLAGS.base_dir, FLAGS.checkpoint_dir))
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True
    # print('Training: ',FLAGS.train)
    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          y_dim=FLAGS.num_classes,
                          z_dim=FLAGS.noise_dim,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          base_dir=FLAGS.base_dir,
                          num_test_images=FLAGS.num_test_images)
        elif FLAGS.dataset == 'images5':
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          y_dim=FLAGS.num_classes,
                          z_dim=FLAGS.noise_dim,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          dataset_dir=FLAGS.dataset_dir,
                          base_dir=FLAGS.base_dir,
                          num_test_images=FLAGS.num_test_images)
        else:
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          z_dim=FLAGS.noise_dim,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          dataset_dir=FLAGS.dataset_dir,
                          base_dir=FLAGS.base_dir,
                          num_test_images=FLAGS.num_test_images)
            INTERMEDIATE_PATH = FLAGS.intermediate_path
            OUTPUT_PATH = FLAGS.output_path

        show_all_variables()
        if FLAGS.train:
            print('going to train')
            dcgan.train(FLAGS)
        else:
            print('Saving Images..')
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
        #                 [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
        #                 [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
        #                 [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
        #                 [dcgan.h4_w, dcgan.h4_b, None])

        # Below is code for visualization
        OPTION = 1
        num_gen_images, images_list = visualize(
            sess, dcgan, FLAGS, OPTION, generate_output_path=OUTPUT_PATH)
        with open(os.path.join(INTERMEDIATE_PATH, 'num_images_gen.txt'),
                  'w+') as ffile:
            ffile.write(str(num_gen_images) + '\n')

        # for i in images_list:
        #     print(i)
        with open(os.path.join(INTERMEDIATE_PATH, 'gen_images_list.txt'),
                  'w+') as ffile:
            for i in images_list:
                ffile.write(str(i) + '\n')
Exemple #41
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size, y_dim=10,
                    dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                    dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.load(FLAGS.checkpoint_dir)

        to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
                                      [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
                                      [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
                                      [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
                                      [dcgan.h4_w, dcgan.h4_b, None])

        # Below is code for visualization
        OPTION = 2
        if OPTION == 0:
          z_sample = np.random.uniform(-0.5, 0.5, size=(FLAGS.batch_size, dcgan.z_dim))
          samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
          save_images(samples, [8, 8], './samples/test_%s.png' % strftime("%Y-%m-%d %H:%M:%S", gmtime()))
        elif OPTION == 1:
          values = np.arange(0, 1, 1./FLAGS.batch_size)
          for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
              z[idx] = values[kdx]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            save_images(samples, [8, 8], './samples/test_arange_%s.png' % (idx))
        elif OPTION == 2:
          values = np.arange(0, 1, 1./FLAGS.batch_size)
          for idx in [random.randint(0, 99) for _ in xrange(100)]:
            print(" [*] %d" % idx)
            z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
            z_sample = np.tile(z, (FLAGS.batch_size, 1))
            #z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
              z[idx] = values[kdx]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            make_gif(samples, './samples/test_gif_%s.gif' % (idx))
        elif OPTION == 3:
          values = np.arange(0, 1, 1./FLAGS.batch_size)
          for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample):
              z[idx] = values[kdx]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            make_gif(samples, './samples/test_gif_%s.gif' % (idx))
        elif OPTION == 4:
          image_set = []
          values = np.arange(0, 1, 1./FLAGS.batch_size)

          for idx in xrange(100):
            print(" [*] %d" % idx)
            z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
            for kdx, z in enumerate(z_sample): z[idx] = values[kdx]

            image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
            make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))

          new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10]) for idx in range(64) + range(63, -1, -1)]
          make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)
        elif OPTION == 5:
          image_set = []
          values = np.arange(0, 1, 1./FLAGS.batch_size)
          z_idx = [[random.randint(0,99) for _ in xrange(5)] for _ in xrange(200)]

          for idx in xrange(200):
            print(" [*] %d" % idx)
            #z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
            z = np.random.uniform(-1e-1, 1e-1, size=(dcgan.z_dim))
            z_sample = np.tile(z, (FLAGS.batch_size, 1))

            for kdx, z in enumerate(z_sample):
              for jdx in xrange(5):
                z_sample[kdx][z_idx[idx][jdx]] = values[kdx]

            image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
            make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))

          new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 20]) for idx in range(64) + range(63, -1, -1)]
          make_gif(new_image_set, './samples/test_gif_random_merged.gif', duration=4)
        elif OPTION == 6:
          image_set = []

          values = np.arange(0, 1, 1.0/FLAGS.batch_size).tolist()
          z_idx = [[random.randint(0,99) for _ in xrange(10)] for _ in xrange(100)]

          for idx in xrange(100):
            print(" [*] %d" % idx)
            z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
            z_sample = np.tile(z, (FLAGS.batch_size, 1))

            for kdx, z in enumerate(z_sample):
              for jdx in xrange(10):
                z_sample[kdx][z_idx[idx][jdx]] = values[kdx]

            image_set.append(sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
            save_images(image_set[-1], [8, 8], './samples/test_random_arange_%s.png' % (idx))

          new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10]) for idx in range(64) + range(63, -1, -1)]
          make_gif(new_image_set, './samples/test_gif_merged_random.gif', duration=4)
        elif OPTION == 7:
          for _ in xrange(50):
            z_idx = [[random.randint(0,99) for _ in xrange(10)] for _ in xrange(8)]

            zs = []
            for idx in xrange(8):
              z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
              zs.append(np.tile(z, (8, 1)))

            z_sample = np.concatenate(zs)
            values = np.arange(0, 1, 1/8.)

            for idx in xrange(FLAGS.batch_size):
              for jdx in xrange(8):
                z_sample[idx][z_idx[idx/8][jdx]] = values[idx%8]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            save_images(samples, [8, 8], './samples/multiple_testt_%s.png' % strftime("%Y-%m-%d %H:%M:%S", gmtime()))
        elif OPTION == 8:
          counter = 0
          for _ in xrange(50):
            import scipy.misc
            z_idx = [[random.randint(0,99) for _ in xrange(10)] for _ in xrange(8)]

            zs = []
            for idx in xrange(8):
              z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
              zs.append(np.tile(z, (8, 1)))

            z_sample = np.concatenate(zs)
            values = np.arange(0, 1, 1/8.)

            for idx in xrange(FLAGS.batch_size):
              for jdx in xrange(8):
                z_sample[idx][z_idx[idx/8][jdx]] = values[idx%8]

            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            for sample in samples:
              scipy.misc.imsave('./samples/turing/%s.png' % counter, sample)
              counter += 1
        else:
          import scipy.misc
          from glob import glob

          samples = []
          fnames = glob("/Users/carpedm20/Downloads/x/1/*.png")
          fnames = sorted(fnames, key = lambda x: int(x.split("_")[1]) * 10000 + int(x.split('_')[2].split(".")[0]))
          for f in fnames:
            samples.append(scipy.misc.imread(f))
          make_gif(samples, './samples/training.gif', duration=8, true_image=True)
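# A minimal sketch of the latent-traversal idea behind the OPTION branches
# above: tile one random z across the batch, then sweep a single dimension.
# `sampler` is a stand-in for sess.run(dcgan.sampler, ...) -- an assumption,
# not the repo's API.
import numpy as np

def traverse_dimension(sampler, z_dim, batch_size, dim, lo=0., hi=1.):
    z = np.tile(np.random.uniform(-0.2, 0.2, size=(z_dim,)), (batch_size, 1))
    z[:, dim] = np.linspace(lo, hi, batch_size)  # only `dim` varies per row
    return sampler(z)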
Exemple #42
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    # if only the height is given, assume square images
    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height
    # if no sample dir, set to subdir with dataset name
    if FLAGS.sample_dir is None:
        FLAGS.sample_dir = os.path.join('samples', FLAGS.dataset)
    # make sure set paths exist
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    # during training it makes no sense to have interpolation active
    if FLAGS.is_train:
        FLAGS.interpolate = False
    if FLAGS.interpolate:
        FLAGS.visualize = False
    if FLAGS.ipython:
        FLAGS.visualize = False

    # session configuration: allocate GPU memory on demand
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    # initialise session with chosen flags
    with tf.Session(config=run_config) as sess:
        dcgan = DCGAN(
                sess,
                input_width=FLAGS.input_width,
                input_height=FLAGS.input_height,
                output_width=FLAGS.output_width,
                output_height=FLAGS.output_height,
                batch_size=FLAGS.batch_size,
                sample_num=FLAGS.batch_size,
                c_dim=FLAGS.c_dim,
                dataset_name=FLAGS.dataset,
                input_fname_pattern=FLAGS.input_fname_pattern,
                is_crop=FLAGS.is_crop,
                checkpoint_dir=FLAGS.checkpoint_dir,
                sample_dir=FLAGS.sample_dir,
                num_g_updates=FLAGS.num_g_updates,
                z_dim=FLAGS.z_dim,
                gf_dim=FLAGS.gf_dim,
                gf_size=FLAGS.gf_size,
                df_dim=FLAGS.df_dim,
                df_size=FLAGS.df_size,
                gfc_dim=FLAGS.gfc_dim,
                dfc_dim=FLAGS.dfc_dim,
                data_dir=FLAGS.data_dir,
                is_train=FLAGS.is_train,
                label_path=FLAGS.labels,
                gauss_sigma=FLAGS.gauss_sigma,
                gauss_trunc=FLAGS.gauss_trunc,
                blur_fade=FLAGS.blur_fade,
                y_dim=FLAGS.y_dim)
        show_all_variables()
        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir):
                raise Exception("[!] Train a model first, then run test mode")
        # visualisation
        if FLAGS.visualize:
            option = 2
            visualize(sess, dcgan, FLAGS, option)
        if FLAGS.interpolate:
            interactive_interp(sess, dcgan, FLAGS, sampling='uniform')
        if FLAGS.ipython:
            from vector import Vector
            from IPython import embed
            vec = Vector(sess, dcgan, FLAGS)
            embed()
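# interactive_interp above walks between latent codes; spherical interpolation
# (slerp) is a common choice for GAN latent spaces. A generic sketch -- not
# the repo's implementation:
import numpy as np

def slerp(z0, z1, t):
    omega = np.arccos(np.clip(
        np.dot(z0 / np.linalg.norm(z0), z1 / np.linalg.norm(z1)), -1., 1.))
    if np.isclose(omega, 0.):
        return (1. - t) * z0 + t * z1  # fall back to lerp for parallel codes
    return (np.sin((1. - t) * omega) * z0 + np.sin(t * omega) * z1) / np.sin(omega)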
Exemple #43
def main(_):
    print('begin')
    pp.pprint(flags.FLAGS.__flags)
    print('flags.FLAGS[epoch]=', flags.FLAGS.__flags['epoch'])

    # expand user name and environment variables
    FLAGS.data_dir = expand_path(FLAGS.data_dir)
    FLAGS.out_dir = expand_path(FLAGS.out_dir)
    FLAGS.out_name = expand_path(FLAGS.out_name)
    #FLAGS.checkpoint_dir = expand_path(FLAGS.checkpoint_dir)
    FLAGS.sample_dir = expand_path(FLAGS.sample_dir)

    if FLAGS.output_height is None: FLAGS.output_height = FLAGS.input_height
    if FLAGS.input_width is None: FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None: FLAGS.output_width = FLAGS.output_height

    # output folders
    if FLAGS.out_name == "":
        FLAGS.out_name = '{} - {} - {}'.format(
            timestamp(),
            FLAGS.data_dir.split('/')[-1],  # last component of data_dir
            FLAGS.dataset)
        if FLAGS.train:
            FLAGS.out_name += ' - x{}.z{}.{}.y{}.b{}'.format(
                FLAGS.input_width, FLAGS.z_dim, FLAGS.z_dist,
                FLAGS.output_width, FLAGS.batch_size)

    FLAGS.out_dir = os.path.join(FLAGS.out_dir, FLAGS.out_name)
    #FLAGS.checkpoint_dir = os.path.join(FLAGS.out_dir, FLAGS.checkpoint_dir)
    FLAGS.sample_dir = os.path.join(FLAGS.out_dir, FLAGS.sample_dir)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir): os.makedirs(FLAGS.sample_dir)

    with open(os.path.join(FLAGS.out_dir, 'FLAGS.json'), 'w') as f:
        flags_dict = {k: flags.FLAGS.__flags[k] for k in flags.FLAGS.__flags}
        #flags_dict = {k:FLAGS[k].value for k in FLAGS.__flags}
        json.dump(flags_dict, f, indent=4, sort_keys=True, ensure_ascii=False)

    #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          y_dim=10,
                          z_dim=FLAGS.z_dim,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          data_dir=FLAGS.data_dir,
                          out_dir=FLAGS.out_dir,
                          max_to_keep=FLAGS.max_to_keep)
        else:
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          z_dim=FLAGS.z_dim,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          data_dir=FLAGS.data_dir,
                          out_dir=FLAGS.out_dir,
                          max_to_keep=FLAGS.max_to_keep)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            load_success, load_counter = dcgan.load(FLAGS.checkpoint_dir)
            if not load_success:
                raise Exception("Checkpoint not found in " +
                                FLAGS.checkpoint_dir)

        # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
        #                 [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
        #                 [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
        #                 [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
        #                 [dcgan.h4_w, dcgan.h4_b, None])

        # Below is code for visualization
            if FLAGS.export:
                export_dir = os.path.join(FLAGS.checkpoint_dir,
                                          'export_b' + str(FLAGS.batch_size))
                dcgan.save(export_dir, load_counter, ckpt=True, frozen=False)

            if FLAGS.freeze:
                export_dir = os.path.join(FLAGS.checkpoint_dir,
                                          'frozen_b' + str(FLAGS.batch_size))
                dcgan.save(export_dir, load_counter, ckpt=False, frozen=True)

            if FLAGS.visualize:
                OPTION = 2
                visualize(sess, dcgan, FLAGS, OPTION, FLAGS.sample_dir)
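# dcgan.save(..., frozen=True) is repo-specific. In plain TF1 a frozen graph
# is commonly written like this sketch; the output node name is an assumption,
# check your own graph before using it:
import tensorflow as tf

def freeze_graph(sess, output_node, path):
    graph_def = tf.graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(), [output_node])  # bake variables in
    with tf.gfile.GFile(path, 'wb') as f:
        f.write(graph_def.SerializeToString())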
Exemple #44
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True
    #run_config.gpu_options.per_process_gpu_memory_fraction = 0.4

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          test_batch_size=FLAGS.test_batch_size,
                          sample_num=FLAGS.batch_size,
                          y_dim=10,
                          z_dim=FLAGS.generate_test_images,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          test_dir=FLAGS.test_dir)
        else:
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          test_batch_size=FLAGS.test_batch_size,
                          sample_num=FLAGS.batch_size,
                          z_dim=FLAGS.generate_test_images,
                          dataset_name=FLAGS.dataset,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir,
                          test_dir=FLAGS.test_dir)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        if FLAGS.anomaly_test:
            dcgan.anomaly_detector()
            assert len(dcgan.test_data_names) > 0
            for idx in range(len(dcgan.test_data_names)):
                test_input = np.expand_dims(dcgan.test_data[idx], axis=0)
                test_name = dcgan.test_data_names[idx]
                dcgan.train_anomaly_detector(FLAGS, test_input, test_name)
Exemple #45
def main(_):
    if FLAGS.auto_anneal:
        FLAGS.anneal_rate = 'auto'
    run_name = '{}_bluff-{}_anneal-{}'.format(
        FLAGS.architecture, FLAGS.br_initial, FLAGS.anneal_rate)
    FLAGS.sample_dir = os.path.join(FLAGS.sample_dir, FLAGS.dataset, run_name)
    FLAGS.checkpoint_dir = os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset, run_name)
    pp.pprint(flags.FLAGS.__flags)

    if FLAGS.input_width is None:
        FLAGS.input_width = FLAGS.input_height
    if FLAGS.output_width is None:
        FLAGS.output_width = FLAGS.output_height

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          dataset_name=FLAGS.dataset,
                          architecture=FLAGS.architecture,
                          br_initial=FLAGS.br_initial,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          anneal_rate=FLAGS.anneal_rate,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir)
        else:
            dcgan = DCGAN(sess,
                          input_width=FLAGS.input_width,
                          input_height=FLAGS.input_height,
                          output_width=FLAGS.output_width,
                          output_height=FLAGS.output_height,
                          batch_size=FLAGS.batch_size,
                          sample_num=FLAGS.batch_size,
                          dataset_name=FLAGS.dataset,
                          architecture=FLAGS.architecture,
                          br_initial=FLAGS.br_initial,
                          input_fname_pattern=FLAGS.input_fname_pattern,
                          crop=FLAGS.crop,
                          anneal_rate=FLAGS.anneal_rate,
                          checkpoint_dir=FLAGS.checkpoint_dir,
                          sample_dir=FLAGS.sample_dir)

        show_all_variables()

        if FLAGS.train:
            dcgan.train(FLAGS)
        else:
            if not dcgan.load(FLAGS.checkpoint_dir)[0]:
                raise Exception("[!] Train a model first, then run test mode")

        # to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
        #                 [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
        #                 [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
        #                 [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
        #                 [dcgan.h4_w, dcgan.h4_b, None])

        # Below is code for visualization
        OPTION = 1
        visualize(sess, dcgan, FLAGS, OPTION)
Exemple #46
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)

    with tf.Session() as sess:
        if FLAGS.dataset == 'mnist':
            dcgan = DCGAN(sess,
                          image_size=FLAGS.image_size,
                          batch_size=FLAGS.batch_size,
                          y_dim=10,
                          dataset_name=FLAGS.dataset,
                          is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = DCGAN(sess,
                          image_size=FLAGS.image_size,
                          batch_size=FLAGS.batch_size,
                          dataset_name=FLAGS.dataset,
                          is_crop=FLAGS.is_crop,
                          checkpoint_dir=FLAGS.checkpoint_dir)

        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            dcgan.load(FLAGS.checkpoint_dir)

        to_json("./web/js/layers.js", [dcgan.h0_w, dcgan.h0_b, dcgan.g_bn0],
                [dcgan.h1_w, dcgan.h1_b, dcgan.g_bn1],
                [dcgan.h2_w, dcgan.h2_b, dcgan.g_bn2],
                [dcgan.h3_w, dcgan.h3_b, dcgan.g_bn3],
                [dcgan.h4_w, dcgan.h4_b, None])

        OPTION = 6
        if OPTION == 0:
            z_sample = np.random.uniform(-0.5,
                                         0.5,
                                         size=(FLAGS.batch_size, dcgan.z_dim))
            samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
            save_images(
                samples, [8, 8], './samples/test_%s.png' %
                strftime("%Y-%m-%d %H:%M:%S", gmtime()))
        elif OPTION == 1:
            values = np.arange(0, 1, 1. / FLAGS.batch_size)
            for idx in xrange(100):
                print(" [*] %d" % idx)
                z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
                for kdx, z in enumerate(z_sample):
                    z[idx] = values[kdx]

                samples = sess.run(dcgan.sampler,
                                   feed_dict={dcgan.z: z_sample})
                save_images(samples, [8, 8],
                            './samples/test_arange_%s.png' % (idx))
        elif OPTION == 2:
            values = np.arange(0, 1, 1. / FLAGS.batch_size)
            for idx in [random.randint(0, 99) for _ in xrange(5)]:
                print(" [*] %d" % idx)
                z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
                for kdx, z in enumerate(z_sample):
                    z[idx] = values[kdx]

                samples = sess.run(dcgan.sampler,
                                   feed_dict={dcgan.z: z_sample})
                make_gif(samples, './samples/test_gif_%s.gif' % (idx))
        elif OPTION == 3:
            values = np.arange(0, 1, 1. / FLAGS.batch_size)
            for idx in xrange(100):
                print(" [*] %d" % idx)
                z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
                for kdx, z in enumerate(z_sample):
                    z[idx] = values[kdx]

                samples = sess.run(dcgan.sampler,
                                   feed_dict={dcgan.z: z_sample})
                make_gif(samples, './samples/test_gif_%s.gif' % (idx))
        elif OPTION == 4:
            image_set = []
            values = np.arange(0, 1, 1. / FLAGS.batch_size)

            for idx in xrange(100):
                print(" [*] %d" % idx)
                z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
                for kdx, z in enumerate(z_sample):
                    z[idx] = values[kdx]

                image_set.append(
                    sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
                make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))

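            # frame order runs forward then backward (range(64) + range(63, -1, -1),
            # a Python 2 list concatenation) so the merged GIF loops back and forth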
            new_image_set = [
                merge(np.array([images[idx]
                                for images in image_set]), [10, 10])
                for idx in range(64) + range(63, -1, -1)
            ]
            make_gif(new_image_set,
                     './samples/test_gif_merged.gif',
                     duration=8)
        elif OPTION == 5:
            image_set = []
            values = np.arange(0, 1, 1. / FLAGS.batch_size)
            z_idx = [[random.randint(0, 99) for _ in xrange(5)]
                     for _ in xrange(200)]

            for idx in xrange(200):
                print(" [*] %d" % idx)
                #z_sample = np.zeros([FLAGS.batch_size, dcgan.z_dim])
                z = np.random.uniform(-1e-1, 1e-1, size=(dcgan.z_dim))
                z_sample = np.tile(z, (FLAGS.batch_size, 1))

                for kdx, z in enumerate(z_sample):
                    for jdx in xrange(5):
                        z_sample[kdx][z_idx[idx][jdx]] = values[kdx]

                image_set.append(
                    sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
                make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))

            new_image_set = [
                merge(np.array([images[idx]
                                for images in image_set]), [10, 20])
                for idx in range(64) + range(63, -1, -1)
            ]
            make_gif(new_image_set,
                     './samples/test_gif_random_merged.gif',
                     duration=4)
        elif OPTION == 6:
            image_set = []

            values = np.arange(0, 1, 1.0 / FLAGS.batch_size).tolist()
            z_idx = [[random.randint(0, 99) for _ in xrange(10)]
                     for _ in xrange(100)]

            for idx in xrange(100):
                print(" [*] %d" % idx)
                z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
                z_sample = np.tile(z, (FLAGS.batch_size, 1))

                for kdx, z in enumerate(z_sample):
                    for jdx in xrange(10):
                        z_sample[kdx][z_idx[idx][jdx]] = values[kdx]

                image_set.append(
                    sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
                save_images(image_set[-1], [8, 8],
                            './samples/test_random_arange_%s.png' % (idx))

            new_image_set = [
                merge(np.array([images[idx]
                                for images in image_set]), [10, 10])
                for idx in range(64) + range(63, -1, -1)
            ]
            make_gif(new_image_set,
                     './samples/test_gif_merged_random.gif',
                     duration=4)
        elif OPTION == 7:
            for _ in xrange(50):
                z_idx = [[random.randint(0, 99) for _ in xrange(10)]
                         for _ in xrange(8)]

                zs = []
                for idx in xrange(8):
                    z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
                    zs.append(np.tile(z, (8, 1)))

                z_sample = np.concatenate(zs)
                values = np.arange(0, 1, 1 / 8.)

                for idx in xrange(FLAGS.batch_size):
                    for jdx in xrange(8):
                        z_sample[idx][z_idx[idx / 8][jdx]] = values[idx % 8]

                samples = sess.run(dcgan.sampler,
                                   feed_dict={dcgan.z: z_sample})
                save_images(
                    samples, [8, 8], './samples/multiple_testt_%s.png' %
                    strftime("%Y-%m-%d %H:%M:%S", gmtime()))
        elif OPTION == 8:
            counter = 0
            for _ in xrange(50):
                import scipy.misc
                z_idx = [[random.randint(0, 99) for _ in xrange(10)]
                         for _ in xrange(8)]

                zs = []
                for idx in xrange(8):
                    z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
                    zs.append(np.tile(z, (8, 1)))

                z_sample = np.concatenate(zs)
                values = np.arange(0, 1, 1 / 8.)

                for idx in xrange(FLAGS.batch_size):
                    for jdx in xrange(8):
                        z_sample[idx][z_idx[idx / 8][jdx]] = values[idx % 8]

                samples = sess.run(dcgan.sampler,
                                   feed_dict={dcgan.z: z_sample})
                for sample in samples:
                    scipy.misc.imsave('./samples/turing/%s.png' % counter,
                                      sample)
                    counter += 1
        else:
            import scipy.misc
            from glob import glob

            samples = []
            fnames = glob("/Users/carpedm20/Downloads/x/1/*.png")
            fnames = sorted(fnames,
                            key=lambda x: int(x.split("_")[1]) * 10000 + int(
                                x.split('_')[2].split(".")[0]))
            for f in fnames:
                samples.append(scipy.misc.imread(f))
            make_gif(samples,
                     './samples/training.gif',
                     duration=8,
                     true_image=True)
Exemple #47
def main(_):
    print('Program is started at', time.clock())
    pp.pprint(flags.FLAGS.__flags)

    n_per_itr_print_results = 100
    n_fetch_data = 10
    kb_work_on_patch = False
    nd_input_frame_size = (240, 360)
    #nd_patch_size = (45, 45)
    n_stride = 10
    #FLAGS.checkpoint_dir = "./checkpoint/UCSD_128_45_45/"

    #FLAGS.dataset = 'UCSD'
    #FLAGS.dataset_address = './dataset/UCSD_Anomaly_Dataset.v1p2/UCSDped2/Test'
    lst_test_dirs = ['Test004', 'Test005', 'Test006']

    #DATASET PARAMETER : MNIST
    #FLAGS.dataset = 'mnist'
    #FLAGS.dataset_address = './dataset/mnist'
    #nd_input_frame_size = (28, 28)
    #nd_patch_size = (28, 28)
    #FLAGS.checkpoint_dir = "./checkpoint/mnist_128_28_28/"

    #FLAGS.input_width = nd_patch_size[0]
    #FLAGS.input_height = nd_patch_size[1]
    #FLAGS.output_width = nd_patch_size[0]
    #FLAGS.output_height = nd_patch_size[1]

    check_some_assertions()

    nd_patch_size = (FLAGS.input_width, FLAGS.input_height)
    nd_patch_step = (n_stride, n_stride)

    FLAGS.nStride = n_stride
    #FLAGS.input_fname_pattern = '*'
    FLAGS.train = False
    FLAGS.epoch = 1
    FLAGS.batch_size = 504

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True
    with tf.Session(config=run_config) as sess:
        tmp_ALOCC_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            is_training=FLAGS.train,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_patch_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
            n_fetch_data=n_fetch_data)

        show_all_variables()

        print('--------------------------------------------------')
        print('Load Pretrained Model...')
        tmp_ALOCC_model.f_check_checkpoint()

        if FLAGS.dataset == 'mnist':
            mnist = input_data.read_data_sets(FLAGS.dataset_address)

            specific_idx_anomaly = np.where(mnist.train.labels != 6)[0]
            specific_idx = np.where(mnist.train.labels == 6)[0]
            ten_percent_anomaly = [
                specific_idx_anomaly[x]
                for x in random.sample(range(0, len(specific_idx_anomaly)),
                                       len(specific_idx) // 40)
            ]

            data = mnist.train.images[specific_idx].reshape(-1, 28, 28, 1)
            tmp_data = mnist.train.images[ten_percent_anomaly].reshape(
                -1, 28, 28, 1)
            data = np.append(data, tmp_data).reshape(-1, 28, 28, 1)

            lst_prob = tmp_ALOCC_model.f_test_frozen_model(
                data[0:FLAGS.batch_size])
            print('check is ok')
            exit()
            #generated_data = tmp_ALOCC_model.feed2generator(data[0:FLAGS.batch_size])

        # else: UCSD pipeline (depends on infrastructure)
        for s_image_dirs in sorted(
                glob(os.path.join(FLAGS.dataset_address,
                                  'Test[0-9][0-9][0-9]'))):
            tmp_lst_image_paths = []
            if os.path.basename(s_image_dirs) not in ['Test004']:
                print('Skip ', os.path.basename(s_image_dirs))
                continue
            for s_image_dir_files in sorted(
                    glob(os.path.join(s_image_dirs + '/*'))):
                if os.path.basename(s_image_dir_files) not in ['068.tif']:
                    print('Skip ', os.path.basename(s_image_dir_files))
                    continue
                tmp_lst_image_paths.append(s_image_dir_files)

            #random
            #lst_image_paths = [tmp_lst_image_paths[x] for x in random.sample(range(0, len(tmp_lst_image_paths)), n_fetch_data)]
            lst_image_paths = tmp_lst_image_paths
            #images =read_lst_images(lst_image_paths,nd_patch_size,nd_patch_step,b_work_on_patch=False)
            images = read_lst_images_w_noise2(lst_image_paths, nd_patch_size,
                                              nd_patch_step)

            lst_prob = process_frame(os.path.basename(s_image_dirs), images,
                                     tmp_ALOCC_model)

            print('pseudocode test is finished')
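# A sketch of the class-contamination setup in the mnist branch above: treat
# one digit as "normal" and mix in a small random fraction of other digits as
# anomalies (helper and names are illustrative, not from the repo):
import numpy as np

def contaminate(images, labels, normal_digit=6, fraction=0.025, seed=0):
    rng = np.random.RandomState(seed)
    normal_idx = np.where(labels == normal_digit)[0]
    anomaly_idx = np.where(labels != normal_digit)[0]
    n_anomaly = max(1, int(len(normal_idx) * fraction))
    picked = rng.choice(anomaly_idx, size=n_anomaly, replace=False)
    data = np.concatenate([images[normal_idx], images[picked]], axis=0)
    return data.reshape(-1, 28, 28, 1)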
Exemple #48
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    
    if not os.path.exists(os.path.join('./logs',time.strftime('%d%m'))):
    	os.makedirs(os.path.join('./logs',time.strftime('%d%m')))

    gpu_config = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_config)) as sess:
        if FLAGS.is_train:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,\
	    dataset_name=FLAGS.dataset,is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
        else:
	    dcgan = EVAL(sess, batch_size=1,num_block=FLAGS.num_block,ir_image_shape=[600,800,1],normal_image_shape=[600,800,3],dataset_name=FLAGS.dataset,\
                      is_crop=False, checkpoint_dir=FLAGS.checkpoint_dir)
	    print('deep model test \n')

        if FLAGS.is_train:
	    pdb.set_trace()
            dcgan.train(FLAGS)
        else:
            list_val = [11,16,21,22,33,36,38,53,59,92]
	    print '1: Estimating Normal maps from arbitary obejcts \n'
	    print '2: Estimating Normal maps according to Light directions and object tilt angles \n'
    x = input('Select an evaluation mode: ')
    VAL_OPTION = int(x)
    if VAL_OPTION == 1:  # arbitrary dataset
        print("Computing arbitrary dataset")
        trained_models = glob.glob(os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset, 'DCGAN.model*'))
        trained_models = natsorted(trained_models)
        datapath = '/research2/Ammonight/*.bmp'
        savepath = '/research2/Ammonight/output'
        mean_nir = -0.3313
        fulldatapath = glob.glob(datapath)  # glob already returns a list of paths
        model = os.path.basename(trained_models[4])
        dcgan.load(FLAGS.checkpoint_dir, model)
        for idx in range(len(fulldatapath)):
            input_ = scipy.misc.imread(fulldatapath[idx]).astype(float)
            input_ = scipy.misc.imresize(input_, [600, 800])
            input_ = (input_ / 127.5) - 1.  # normalize to [-1, 1]
            input_ = np.reshape(input_, (1, input_.shape[0], input_.shape[1], 1))
            input_ = np.array(input_).astype(np.float32)
            mask = (input_ > -1.0).astype(np.float32)
            mean_mask = mask * mean_nir
            #input_ = input_ - mean_mask
            start_time = time.time()
            sample = sess.run(dcgan.sampler, feed_dict={dcgan.ir_images: input_})
            print('time: %.8f' % (time.time() - start_time))
            # normalize predicted normals to unit length per pixel #
            sample = np.squeeze(sample).astype(np.float32)
            output = np.sqrt(np.sum(np.power(sample, 2), axis=2))  # per-pixel L2 norm
            output = np.expand_dims(output, axis=-1)
            output = sample / output
            output[~np.isfinite(output)] = 0.0  # guard zero-norm pixels (inf/nan)
            sample = (output + 1.0) / 2.0  # map [-1, 1] back to [0, 1] for saving

            name = os.path.splitext(os.path.basename(fulldatapath[idx]))[0]
            savename = os.path.join(savepath, 'normal_' + name + '.bmp')
            scipy.misc.imsave(savename, sample)

    elif VAL_OPTION == 2:  # depends on light sources
        list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
        mean_nir = -0.3313  # in the [-1, 1] range
        save_files = glob.glob(os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset, 'DCGAN.model*'))
        save_files = natsorted(save_files)
        savepath = './Deconv_L1_result'
        if not os.path.exists(savepath):
            os.makedirs(savepath)
        selec_model = [-2]
        #[selec_model.append(ii) for ii in range(0, len(save_files), 2)]
        for m in range(len(selec_model)):
            model = os.path.basename(save_files[selec_model[m]])
            dcgan.load(FLAGS.checkpoint_dir, model)
            for idx in range(len(list_val)):
                if not os.path.exists(os.path.join(savepath, '%03d' % list_val[idx])):
                    os.makedirs(os.path.join(savepath, '%03d' % list_val[idx]))
                for idx2 in range(1, 10):  # tilt angles 1~9
                    for idx3 in range(5, 7):  # light sources
                        print("Selected material %03d/%d" % (list_val[idx], idx2))
                        img = '/research2/IR_normal_small/save%03d/%d' % (list_val[idx], idx2)
                        noise = np.random.rand(1, 600, 800, 1)
                        #noise = np.random.uniform(-1, 1, size=(1, 600, 800, 1))
                        input_ = scipy.misc.imread(img + '/%d.bmp' % idx3).astype(float)  # input NIR image
                        input_ = scipy.misc.imresize(input_, [600, 800])
                        input_ = input_ / 127.5 - 1.0  # normalize to [-1, 1]
                        input_ = np.reshape(input_, (1, 600, 800, 1))
                        input_ = np.array(input_).astype(np.float32)
                        gt_ = scipy.misc.imread(img + '/12_Normal.bmp').astype(float)
                        gt_ = np.sum(gt_, axis=2)
                        gt_ = scipy.misc.imresize(gt_, [600, 800])
                        gt_ = np.reshape(gt_, [1, 600, 800, 1])
                        mask = (gt_ > 0.0).astype(np.float32)
                        mean_mask = mean_nir * mask
                        #input_ = input_ - mean_mask
                        start_time = time.time()
                        sample = sess.run(dcgan.G, feed_dict={dcgan.ir_images: input_, dcgan.noise: noise})
                        #sample = sess.run(dcgan.sampler, feed_dict={dcgan.ir_images: input_})
                        print('time: %.8f' % (time.time() - start_time))
                        # normalize predicted normals to unit length per pixel #
                        sample = np.squeeze(sample).astype(np.float32)
                        output = np.sqrt(np.sum(np.power(sample, 2), axis=2))  # per-pixel L2 norm
                        output = np.expand_dims(output, axis=-1)
                        output = sample / output
                        output = (output + 1.) / 2.  # map [-1, 1] back to [0, 1]
                        outdir = os.path.join(savepath, '%03d/%d/%s' % (list_val[idx], idx2, model))
                        if not os.path.exists(outdir):
                            os.makedirs(outdir)
                        savename = os.path.join(outdir, 'single_normal_%03d.bmp' % idx3)
                        scipy.misc.imsave(savename, output)
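A note on the post-processing above: both evaluation branches renormalize the network output so that every pixel's predicted surface normal has unit length, then rescale from [-1, 1] to [0, 1] for saving. A minimal standalone sketch of that step, assuming an (H, W, 3) float array of predicted normals (the helper name normalize_normal_map is hypothetical, not from the code above):

import numpy as np

def normalize_normal_map(sample):
    """Rescale each pixel's normal vector to unit length and map to [0, 1]."""
    norm = np.sqrt(np.sum(sample ** 2, axis=2, keepdims=True))  # per-pixel L2 norm
    out = sample / norm
    out[~np.isfinite(out)] = 0.0  # zero-norm pixels divide to inf/nan
    return (out + 1.0) / 2.0      # map [-1, 1] to [0, 1] for image output

# usage on a dummy prediction
dummy = np.random.uniform(-1, 1, size=(600, 800, 3)).astype(np.float32)
img = normalize_normal_map(dummy)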
def main(_):
    pp.pprint(flags.FLAGS.__flags)

    with tf.device('/cpu:0'), tf.Session(config=tf.ConfigProto(
            intra_op_parallelism_threads=16)) as sess:
        if FLAGS.task == 'copy':
            if FLAGS.is_train:
                cell, ntm = copy_train(FLAGS, sess)
            else:
                cell = NTMCell(input_dim=FLAGS.input_dim,
                               output_dim=FLAGS.output_dim,
                               controller_layer_size=FLAGS.controller_layer_size,
                               write_head_size=FLAGS.write_head_size,
                               read_head_size=FLAGS.read_head_size)
                ntm = NTM(cell, sess, 1, FLAGS.max_length,
                          test_max_length=FLAGS.test_max_length, forward_only=True)

            ntm.load(FLAGS.checkpoint_dir, 'copy')

            copy(ntm, FLAGS.test_max_length // 3, sess)
            print()
            copy(ntm, FLAGS.test_max_length * 2 // 3, sess)
            print()
            copy(ntm, FLAGS.test_max_length, sess)
        elif FLAGS.task == 'recall':
            if FLAGS.is_train:
                cell, ntm = recall_train(FLAGS, sess)
            else:
                cell = NTMCell(input_dim=FLAGS.input_dim,
                               output_dim=FLAGS.output_dim,
                               controller_layer_size=FLAGS.controller_layer_size,
                               write_head_size=FLAGS.write_head_size,
                               read_head_size=FLAGS.read_head_size)
                ntm = NTM(cell, sess, 1, FLAGS.max_length,
                          test_max_length=FLAGS.test_max_length, forward_only=True)

            ntm.load(FLAGS.checkpoint_dir, 'recall')

            recall(ntm, FLAGS.test_max_length // 3, sess)
            print()
            recall(ntm, FLAGS.test_max_length * 2 // 3, sess)
            print()
            recall(ntm, FLAGS.test_max_length, sess)
        elif FLAGS.task == 'predict':
            if FLAGS.is_train:
                cell, ntm = predict_train(FLAGS, sess)
            else:
                cell = NTMCell(input_dim=FLAGS.input_dim,
                               output_dim=FLAGS.output_dim,
                               controller_layer_size=FLAGS.controller_layer_size,
                               write_head_size=FLAGS.write_head_size,
                               read_head_size=FLAGS.read_head_size)
                ntm = NTM(cell, sess, 1, FLAGS.max_length,
                          test_max_length=FLAGS.test_max_length, forward_only=True)

            ntm.load(FLAGS.checkpoint_dir, 'predict')

            predict(ntm, FLAGS.test_max_length // 3, sess)
            print()
            predict(ntm, FLAGS.test_max_length * 2 // 3, sess)
            print()
            predict(ntm, FLAGS.test_max_length, sess)
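The three task branches above differ only in their training function, checkpoint tag, and evaluation function, so they can be collapsed into a table-driven helper. A sketch under that assumption (TASKS and run_task are hypothetical names; NTMCell, NTM, copy_train, recall, etc. come from the code above):

TASKS = {
    'copy':    (copy_train,    copy),
    'recall':  (recall_train,  recall),
    'predict': (predict_train, predict),
}

def run_task(task, FLAGS, sess):
    train_fn, eval_fn = TASKS[task]
    if FLAGS.is_train:
        cell, ntm = train_fn(FLAGS, sess)
    else:
        cell = NTMCell(input_dim=FLAGS.input_dim,
                       output_dim=FLAGS.output_dim,
                       controller_layer_size=FLAGS.controller_layer_size,
                       write_head_size=FLAGS.write_head_size,
                       read_head_size=FLAGS.read_head_size)
        ntm = NTM(cell, sess, 1, FLAGS.max_length,
                  test_max_length=FLAGS.test_max_length, forward_only=True)
    ntm.load(FLAGS.checkpoint_dir, task)
    # evaluate at one third, two thirds, and the full test length;
    # sequence lengths must stay integral, hence floor division
    for frac in (1, 2, 3):
        eval_fn(ntm, FLAGS.test_max_length * frac // 3, sess)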
Exemple #50
0
def main(_):
    """
    The main function for training steps     
    """
    n_per_itr_print_results = 100
    kb_work_on_patch = True

    # ---------------------------------------------------------------------------------------------
    # ---------------------------------------------------------------------------------------------
    # Manual Switches ------------------------------------------------------------------------------
    # ---------------------------------------------------------------------------------------------
    # DATASET PARAMETER : data-alocc
    FLAGS.dataset = "data-alocc"
    FLAGS.dataset_address = "./dataset/data-alocc/train"
    kb_work_on_patch = False
    nd_input_frame_size = (180, 270)
    nd_slice_size = (180, 270)
    n_stride = 1

    FLAGS.train = True

    FLAGS.input_width = nd_slice_size[0]
    FLAGS.input_height = nd_slice_size[1]
    FLAGS.output_width = nd_slice_size[0]
    FLAGS.output_height = nd_slice_size[1]

    FLAGS.sample_dir = ("export/" + FLAGS.dataset + "_%d.%d.%f_" %
                        (nd_slice_size[0], nd_slice_size[1], FLAGS.r_alpha))
    FLAGS.input_fname_pattern = "*"

    pp.pprint(flags.FLAGS.__flags)

    check_some_assertions()

    # manual GPU handling: cap per-process memory at 50% and grow allocations on demand
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    run_config = tf.ConfigProto(gpu_options=gpu_options)
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        tmp_model = ALOCC_Model(
            sess,
            input_width=FLAGS.input_width,
            input_height=FLAGS.input_height,
            output_width=FLAGS.output_width,
            output_height=FLAGS.output_height,
            batch_size=FLAGS.batch_size,
            sample_num=FLAGS.batch_size,
            attention_label=FLAGS.attention_label,
            r_alpha=FLAGS.r_alpha,
            dataset_name=FLAGS.dataset,
            dataset_address=FLAGS.dataset_address,
            input_fname_pattern=FLAGS.input_fname_pattern,
            checkpoint_dir=FLAGS.checkpoint_dir,
            is_training=FLAGS.train,
            log_dir=FLAGS.log_dir,
            sample_dir=FLAGS.sample_dir,
            nd_patch_size=nd_slice_size,
            n_stride=n_stride,
            n_per_itr_print_results=n_per_itr_print_results,
            kb_work_on_patch=kb_work_on_patch,
            nd_input_frame_size=nd_input_frame_size,
        )

        # show_all_variables()

        if FLAGS.train:
            print("Program is on Train Mode")
            tmp_model.train(FLAGS)
        else:
            print("Program is on Test Mode")
            if not tmp_model.load(FLAGS.checkpoint_dir)[0]:
                raise Exception(
                    "[!] Train a model first, then run test mode from file test.py"
                )
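None of these main(_) entry points run by themselves; in TF 1.x-era code like the examples above they are normally launched through tf.app.run(), which parses the declared flags and then calls main in the current module. A minimal sketch (the two flags shown are illustrative, not the full flag set of any example):

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory for model checkpoints")
flags.DEFINE_boolean("train", False, "True for training, False for testing")
FLAGS = flags.FLAGS

if __name__ == '__main__':
    tf.app.run()  # parses flags, then invokes main(_) in this module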