Example #1
def evaluate():
  """Eval CIFAR-10 for a number of steps."""
  with tf.Graph().as_default() as g:
    # Get images and labels for CIFAR-10.
    eval_data = FLAGS.eval_data == 'test'
    data, labels = architecture.inputs(True)

    # Make the placeholders
    d_pl, l_pl = architecture.placeholder_inputs()
  
    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = architecture.inference(d_pl)

    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits, l_pl, 1)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        architecture.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)
    while True:
      eval_once(saver, summary_writer, top_k_op, summary_op, data, labels, d_pl, l_pl)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
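Example #1 calls eval_once, which is not shown here. Below is a minimal sketch of what such a helper could look like, modelled on the CIFAR-10 tutorial's evaluation loop; it assumes the same pre-1.0 TensorFlow API as above, the usual math/numpy/tensorflow imports, and FLAGS.checkpoint_dir, FLAGS.num_examples and FLAGS.batch_size flags (none of which are defined in the snippet).
def eval_once(saver, summary_writer, top_k_op, summary_op, data, labels, d_pl, l_pl):
  """Sketch of a single evaluation pass (CIFAR-10 tutorial style).

  FLAGS.checkpoint_dir, FLAGS.num_examples and FLAGS.batch_size are assumed
  to be defined alongside the other flags used above.
  """
  with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if not (ckpt and ckpt.model_checkpoint_path):
      print('No checkpoint file found')
      return
    saver.restore(sess, ckpt.model_checkpoint_path)
    global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
      num_iter = int(math.ceil(FLAGS.num_examples / float(FLAGS.batch_size)))
      true_count = 0
      for _ in range(num_iter):
        # Pull a batch from the input pipeline, then feed it through the
        # placeholders the inference graph was built on.
        batch_data, batch_labels = sess.run([data, labels])
        predictions = sess.run(top_k_op,
                               feed_dict={d_pl: batch_data, l_pl: batch_labels})
        true_count += np.sum(predictions)

      precision = true_count / float(num_iter * FLAGS.batch_size)
      print('precision @ 1 = %.3f (global step %d)' % (precision, global_step))

      # Write the precision as a hand-built summary; summary_op is kept in
      # the signature only to match the call site above.
      summary = tf.Summary()
      summary.value.add(tag='Precision @ 1', simple_value=precision)
      summary_writer.add_summary(summary, global_step)
    finally:
      coord.request_stop()
      coord.join(threads, stop_grace_period_secs=10)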
Example #2
def eval():
   with tf.Graph().as_default() as graph:

      images = input_.inputs("test", batch_size)

      summary_op = tf.merge_all_summaries()

      summary_writer = tf.train.SummaryWriter(eval_dir, graph)

      logits = architecture.inference(images, "test")

      variables_to_restore = tf.all_variables()
      saver = tf.train.Saver(variables_to_restore)

      with tf.Session() as sess:

         ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
         saver.restore(sess, ckpt.model_checkpoint_path)

         global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
         coord = tf.train.Coordinator()

         try:
            # start_queue_runners() already launches every registered queue
            # runner, so grab the threads it returns once instead of creating
            # new threads on every pass through the loop.
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            s = 0
            while not coord.should_stop():
               imgs, gen_imgs = sess.run([images, logits])
               for im, gen in zip(imgs, gen_imgs):
                  im = np.uint8(im)
                  im = cv2.resize(im, (200, 200))
                  gen = np.uint8(gen)
                  gen = cv2.resize(gen, (200, 200))

                  result = np.hstack((im, gen))
                  #cv2.imshow('result', result)
                  #cv2.waitKey(0)
                  #cv2.destroyAllWindows()
                  print "Step: " + str(s)
                  cv2.imwrite('../evaluations/images/image-'+str(s)+'.png', result)
                  s += 1
                  if s == int(sys.argv[1]):
                     print "Done"
                     exit()

         except Exception as e:
            # Stop the coordinator before re-raising; raising first (as the
            # original did) left the cleanup below unreachable.
            print "Error"
            coord.request_stop(e)
            raise

         coord.request_stop()
         coord.join(threads, stop_grace_period_secs=10)
Example #3
def eval(checkpoint_dir, image):
    with tf.Graph().as_default() as graph:

        input_image = tf.placeholder(tf.float32, shape=(1, 144, 160, 3))
        logit = architecture.inference(input_image, 'test')

        variables = tf.all_variables()
        init = tf.initialize_all_variables()
        sess = tf.Session()
        saver = tf.train.Saver(variables)

        tf.train.start_queue_runners(sess=sess)

        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)

        if ckpt and ckpt.model_checkpoint_path:
            print 'Restoring model...'
            try:
                saver.restore(sess, ckpt.model_checkpoint_path)
            except Exception:
                print 'Could not restore model'
                raise

        graph_def = sess.graph.as_graph_def(add_shapes=True)

        img = cv2.imread(image)

        # cv2.imread returns a 2-D array for grayscale files; replicate the
        # single channel so the image matches the 3-channel placeholder.
        if len(img.shape) != 3:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

        # The network expects 144x160 (height x width) inputs.
        if img.shape[:2] != (144, 160):
            img = cv2.resize(img, (160, 144))

        img = img.astype('float')

        fake = np.zeros((1, 144, 160, 3))
        fake[0, :, :, :] = img

        gen_img = sess.run([logit], feed_dict={input_image: fake})[0]
        #gen_img = gen_img*255
        image_name = image.split('.png')[0] + '_output.png'
        image_name = image_name.split('/')[-1]
        try:
            print 'Writing image ', image_name
            cv2.imwrite(image_name, gen_img[0, :, :, :])
        except:
            raise
Example #4
def test(checkpoint_dir, record_file, image_path):
    with tf.Graph().as_default():

        input_image = tf.placeholder(tf.float32, shape=(10, 144, 160, 3))

        logits = architecture.inference(1, input_image, "train")

        # summary for tensorboard graph
        summary_op = tf.merge_all_summaries()

        variables = tf.all_variables()
        init = tf.initialize_all_variables()
        sess = tf.Session()

        # saver for the model
        saver = tf.train.Saver(tf.all_variables())

        tf.train.start_queue_runners(sess=sess)

        # restore previous model if one
        print checkpoint_dir
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            print "Restoring previous model..." + ckpt.model_checkpoint_path
            try:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print "Model restored"
            except Exception:
                print "Could not restore model"

        # Summary op
        graph_def = sess.graph.as_graph_def(add_shapes=True)

        # run on image
        img = cv2.imread(image_path)
        print(img)
        img = cv2.resize(img, (160, 144)) / 255.0
        #img = np.transpose(img, (1,0,2))
        fake = np.zeros((10, 144, 160, 3))
        fake[0, :, :, :] = img
        high_res = sess.run([logits], feed_dict={input_image: fake})[0]
        high_res = np.uint8(np.maximum(high_res, 0) * 255)
        cv2.imwrite('hd_mario.jpg', high_res[0, :, :, :])
Example #5
def tower_loss(scope):
    """Calculate the total loss on a single tower.

    Args:
      scope: unique prefix string identifying the tower, e.g. 'tower_0'.

    Returns:
      Tensor of shape [] containing the total loss for a batch of data.
    """
    # Get images and labels.
    images, labels = architecture.inputs(phase='train')

    # Build inference Graph.
    logits = architecture.inference(images, train=True)

    # Build the portion of the Graph calculating the losses. Note that we will
    # assemble the total_loss using a custom function below.
    _ = architecture.loss(logits, labels)

    # Assemble all of the losses for the current tower only.
    losses = tf.get_collection('losses', scope)

    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')

    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    loss_averages_op = loss_averages.apply(losses + [total_loss])

    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
        # session. This helps the clarity of presentation on tensorboard.
        loss_name = re.sub('%s_[0-9]*/' % architecture.TOWER_NAME, '',
                           l.op.name)
        # Name each loss as '(raw)' and name the moving average version of the loss
        # as the original loss name.
        tf.scalar_summary(loss_name + ' (raw)', l)
        tf.scalar_summary(loss_name, loss_averages.average(l))

    with tf.control_dependencies([loss_averages_op]):
        total_loss = tf.identity(total_loss)
    return total_loss
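tower_loss only builds the per-tower half of the graph. The sketch below shows the kind of multi-tower loop that typically consumes it, in the style of the CIFAR-10 multi-GPU tutorial; num_gpus, global_step, the optimizer choice and the average_gradients helper are assumptions and are not part of the code above.
# Hypothetical driver for tower_loss(); num_gpus, global_step and
# average_gradients() are assumptions, not defined in the examples above.
opt = tf.train.GradientDescentOptimizer(0.1)
tower_grads = []
for i in range(num_gpus):
    with tf.device('/gpu:%d' % i):
        with tf.name_scope('%s_%d' % (architecture.TOWER_NAME, i)) as scope:
            # Build the loss for this tower; the scope string is what
            # tower_loss() uses to collect only this tower's losses.
            loss = tower_loss(scope)
            # Share variables between towers after the first one is built.
            tf.get_variable_scope().reuse_variables()
            tower_grads.append(opt.compute_gradients(loss))

# Average each variable's gradients across towers, then apply them once.
grads = average_gradients(tower_grads)
train_op = opt.apply_gradients(grads, global_step=global_step)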
Example #6
def evaluate():
  """Eval for a number of steps."""
  with tf.Graph().as_default() as g:
    
    # Get images and labels.
    images, labels = architecture.inputs(phase=FLAGS.phase)

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = architecture.inference(images, train=False)
    
    # adapt logits        
    logits = tf.reshape(logits, (-1, NUM_CLASSES))
    epsilon = tf.constant(value=1e-4)
    logits = logits + epsilon
        
    # predict
    predictions = tf.argmax(logits, dimension=1)        
    labels = tf.cast(tf.reshape(labels, shape=predictions.get_shape()), dtype=tf.int64)
    
    # compute accuracy    
    correct_predictions = tf.equal(predictions, labels)
    accuracy = tf.reduce_mean(tf.cast(correct_predictions, dtype=tf.float32))

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        architecture.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()
    summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, g)
    
    while True:
      eval_once(saver, summary_writer, accuracy, summary_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
Example #7
def eval(checkpoint_dir, data_dir):
   with tf.Graph().as_default() as graph:

      pattern = '*.png'
      image_list = []
      for d, s, fList in os.walk(data_dir):
         for filename in fList:
            if fnmatch.fnmatch(filename, pattern):
               image_list.append(os.path.join(d,filename))

      num_images = len(image_list)

      input_image = tf.placeholder(tf.float32, shape=(1,144,160,3))
      logit = architecture.inference(input_image, 'test')

      # specifies to use the CPU for eval, meaning I can eval while training
      config = tf.ConfigProto(device_count={'GPU':0})

      variables = tf.all_variables()
      init      = tf.initialize_all_variables()
      sess      = tf.Session(config=config)
      saver     = tf.train.Saver(variables)

      tf.train.start_queue_runners(sess=sess)

      ckpt = tf.train.get_checkpoint_state(checkpoint_dir)

      if ckpt and ckpt.model_checkpoint_path:
         print 'Restoring model...'
         try:
            saver.restore(sess, ckpt.model_checkpoint_path)
         except Exception:
            print 'Could not restore model'
            raise

      graph_def = sess.graph.as_graph_def(add_shapes=True)
      
      for image in image_list:
         img = cv2.imread(image)

         # cv2.imread returns a 2-D array for grayscale files; replicate the
         # single channel so the image matches the 3-channel placeholder.
         if len(img.shape) != 3:
            img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

         if img.shape[:2] != (144, 160):
            img = cv2.resize(img, (160, 144))

         img = img.astype('float')
         #img = img/255.0
         
         fake = np.zeros((1,144,160,3))
         fake[0,:,:,:] = img

         gen_img = sess.run([logit], feed_dict={input_image:fake})[0]
         #gen_img = gen_img*255
         image_name = image.split('.png')[0]+'_output.png'
         image_name = image_name.split('/')[-1]
         try:
            print 'Writing image', image_name
            cv2.imwrite('../images/output/'+image_name, gen_img[0,:,:,:])
         except:
            raise
Example #8
def train(checkpoint_dir, image_list, batch_size, normalize):
    with tf.Graph().as_default():

        global_step = tf.Variable(0, name='global_step', trainable=False)

        original_images_placeholder = tf.placeholder(tf.float32,
                                                     shape=(batch_size, 144,
                                                            160, 3))
        gray_images_placeholder = tf.placeholder(tf.float32,
                                                 shape=(batch_size, 144, 160,
                                                        3))

        # image summary for tensorboard
        tf.image_summary('original_images',
                         original_images_placeholder,
                         max_images=100)
        tf.image_summary('gray_images',
                         gray_images_placeholder,
                         max_images=100)

        logits = architecture.inference(gray_images_placeholder, "train")
        loss = architecture.loss(original_images_placeholder, logits)

        tf.scalar_summary('loss', loss)

        train_op = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(
            loss, global_step=global_step)

        # summary for tensorboard graph
        summary_op = tf.merge_all_summaries()

        variables = tf.all_variables()
        init = tf.initialize_all_variables()
        sess = tf.Session()

        try:
            os.mkdir(checkpoint_dir)
        except OSError:
            # Directory already exists.
            pass

        sess.run(init)
        print "\nRunning session\n"

        # saver for the model
        saver = tf.train.Saver(tf.all_variables())

        tf.train.start_queue_runners(sess=sess)

        # restore previous model if one
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir + "training")
        if ckpt and ckpt.model_checkpoint_path:
            print "Restoring previous model..."
            try:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print "Model restored"
            except Exception:
                print "Could not restore model"

        # Summary op
        graph_def = sess.graph.as_graph_def(add_shapes=True)
        summary_writer = tf.train.SummaryWriter(checkpoint_dir + "training",
                                                graph_def=graph_def)

        # Constants
        step = int(sess.run(global_step))
        #epoch_num = step/(train_size/batch_size)

        while True:
            step += 1
            feed_dict = get_feed_dict(batch_size, original_images_placeholder,
                                      gray_images_placeholder, image_list,
                                      normalize)
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
            print " Step: " + str(
                sess.run(global_step)) + " Loss: " + str(loss_value)

            # save tensorboard stuff
            #if step%200 == 0:
            #   summary_str = sess.run(summary_op)
            #   summary_writer.add_summary(summary_str, step)

            if step % 300 == 0:
                print "Saving model"
                print
                saver.save(sess,
                           checkpoint_dir + "training/checkpoint",
                           global_step=global_step)
                print
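get_feed_dict is referenced above but not shown. Below is a hedged sketch of what it might do for this setup, assuming image_list holds paths to colour images that get paired with grayscale copies of themselves; the random sampling and the 144x160 resize target are assumptions.
import random

import cv2
import numpy as np

def get_feed_dict(batch_size, original_pl, gray_pl, image_list, normalize):
    """Sketch: pair each colour image with a 3-channel grayscale copy of itself."""
    originals = np.zeros((batch_size, 144, 160, 3), dtype=np.float32)
    grays = np.zeros((batch_size, 144, 160, 3), dtype=np.float32)

    for i, path in enumerate(random.sample(image_list, batch_size)):
        img = cv2.imread(path)
        img = cv2.resize(img, (160, 144))  # cv2.resize takes (width, height)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)  # back to 3 channels
        if normalize:
            img = img / 255.0
            gray = gray / 255.0
        originals[i] = img
        grays[i] = gray

    return {original_pl: originals, gray_pl: grays}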
Example #9
def train(checkpoint_dir, record_file, batch_size, color):
    with tf.Graph().as_default():

        batch_size = int(batch_size)
        global_step = tf.Variable(0, name='global_step', trainable=False)

        train_size = 148623

        gameboy_images, hd_images = input_.inputs(record_file, batch_size,
                                                  color, "train")

        # image summary for tensorboard
        tf.image_summary('gameboy_images', gameboy_images, max_images=100)
        tf.image_summary('hd_images', hd_images, max_images=100)

        logits = architecture.inference(batch_size, gameboy_images, "train")

        # loss is the l2 norm of my input vector (the image) and the output vector (generated image)
        loss = architecture.loss(hd_images, logits)
        tf.scalar_summary('loss', loss)

        train_op = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(
            loss, global_step=global_step)

        # summary for tensorboard graph
        summary_op = tf.merge_all_summaries()

        variables = tf.all_variables()
        init = tf.initialize_all_variables()
        sess = tf.Session()

        try:
            os.mkdir(checkpoint_dir)
        except OSError:
            # Directory already exists.
            pass

        sess.run(init)
        print "\nRunning session\n"

        # saver for the model
        saver = tf.train.Saver(tf.all_variables())

        tf.train.start_queue_runners(sess=sess)

        # restore previous model if one
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir + "training/")
        if ckpt and ckpt.model_checkpoint_path:
            print "Restoring previous model..."
            try:
                saver.restore(sess, ckpt.model_checkpoint_path)
                print "Model restored"
            except Exception:
                print "Could not restore model"

        # Summary op
        graph_def = sess.graph.as_graph_def(add_shapes=True)
        summary_writer = tf.train.SummaryWriter(checkpoint_dir + "training",
                                                graph_def=graph_def)

        # Constants
        step = int(sess.run(global_step))
        epoch_num = step / (train_size / batch_size)

        while True:
            _, loss_value = sess.run([train_op, loss])
            step += 1
            epoch_num = step / (train_size / batch_size)

            print "Epoch: " + str(epoch_num) + " Step: " + str(
                sess.run(global_step)) + " Loss: " + str(loss_value)
            # save tensorboard stuff
            if step % 200 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)
            # save checkpoint
            if step % 1000 == 0:
                print "Finished epoech " + str(epoch_num) + " ....saving model"
                print
                #saver.save(sess, checkpoint_dir+"epoch-"+str(epoch_num), global_step=global_step)
                saver.save(sess,
                           checkpoint_dir + "training/checkpoint",
                           global_step=global_step)
                print
Example #10
#____________________Setup____________________#

print('Beginning program...')

if tf.gfile.Exists(train_dir):
    tf.gfile.DeleteRecursively(train_dir)
tf.gfile.MakeDirs(train_dir)

# data_dir, data_type, starting file, ending file, batch_size
data = pvpinput.Input(pvp_dir, 0, 100,  batch_size)
data_pl = tf.placeholder(tf.float32, shape=(None, ny, nx, nf))
label_pl = tf.placeholder(tf.int32, shape=(None))

print('Building graph...')

logits = architecture.inference(data_pl, 10, batch_size)
loss = architecture.loss(logits, label_pl)
train_op = architecture.train(loss, global_step)
acc_op = architecture.accuracy(logits, label_pl)
#tf.scalar_summary('Accuracy', acc_op)
#tf.scalar_summary('Loss', loss)
labels_max = tf.reduce_max(label_pl)

print('Starting session...')

saver = tf.train.Saver(tf.all_variables())
summary_op = tf.merge_all_summaries()
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
tf.train.start_queue_runners(sess=sess)
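The snippet ends right after the queue runners start. Below is a hedged sketch of the feed-and-train loop that typically follows a setup like this; data.next_batch(), max_steps and the checkpoint path are assumptions and may not match pvpinput.Input's real interface.
# Hypothetical continuation of the setup above; data.next_batch() and
# max_steps are assumptions, not part of pvpinput's documented API.
summary_writer = tf.train.SummaryWriter(train_dir, sess.graph)
max_steps = 10000

for step in range(max_steps):
    batch_data, batch_labels = data.next_batch(batch_size)
    feed = {data_pl: batch_data, label_pl: batch_labels}
    _, loss_value, acc = sess.run([train_op, loss, acc_op], feed_dict=feed)

    if step % 100 == 0:
        print('Step %d  loss %.4f  accuracy %.4f' % (step, loss_value, acc))
        # merge_all_summaries() returns None when no summaries are registered
        # (the scalar summaries above are commented out), so guard the write.
        if summary_op is not None:
            summary_writer.add_summary(sess.run(summary_op, feed_dict=feed), step)

    if step % 1000 == 0:
        saver.save(sess, train_dir + '/model.ckpt', global_step=step)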