Example #1
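# These snippets are excerpts from a larger TensorFlow 1.x training / evaluation
# project. The imports below are an assumption about what the full scripts pull in;
# data_loader, arch, and the various model / loss helpers are project-specific
# modules that are not shown in these excerpts.
from __future__ import division  # keeps the accuracy ratios float under Python 2

import json
import os
import sys
import time
from datetime import datetime

import numpy as np
import tensorflow as tf
from six.moves import xrange  # the snippets use xrange throughout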
def main(_):
  with tf.Graph().as_default():
    tf.logging.set_verbosity(tf.logging.INFO)
    # Read data from disk
    fiber_output, fiber_input, encoder, label = data_loader.read_inputs('train.txt', True)

    # Generator_1(inputs=[fiber_output,fiber_input], targets=[encoder,label])
    # Generator_2(inputs=[fiber_output,fiber_input], targets=[encoder,label])
    # Discriminator(inputs=[fiber_output,fiber_input], targets=[encoder,label])
    Generator_all(inputs=[fiber_output,fiber_input], targets=[encoder,label])
def train(args):
    """Train different architectures for a number of epochs."""

    with tf.Graph().as_default(), tf.device('/cpu:0'):

        # Read data from disk
        images1, images2, labels = data_loader.read_inputs(True, args)

        epoch_number = tf.get_variable('epoch_number', [],
                                       dtype=tf.int32,
                                       initializer=tf.constant_initializer(0),
                                       trainable=False)

        # Decay the learning rate
        lr = tf.train.piecewise_constant(epoch_number,
                                         args.LR_steps,
                                         args.LR_values,
                                         name='LearningRate')
        # Weight Decay policy
        wd = tf.train.piecewise_constant(epoch_number,
                                         args.WD_steps,
                                         args.WD_values,
                                         name='WeightDecay')

        is_training = args.transfer_mode[0] != 1

        # Create an optimizer that performs gradient descent.
        opt = tf.train.MomentumOptimizer(lr, 0.9)

        # Calculate the gradients for each model tower.
        tower_grads = []
        tower_auxgrads = []
        with tf.variable_scope(tf.get_variable_scope()) as scope_sim:
            for i in xrange(args.num_gpus):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('Tower_%d' % i) as scope:
                        # Calculate the loss for one tower. This function
                        # constructs the entire model but shares the variables across
                        # all towers.
                        o1 = arch.get_model(images1, wd, is_training, args)
                        scope_sim.reuse_variables()
                        o2 = arch.get_model(images2, wd, is_training, args)
                        #              logits = arch.get_model(images, wd, is_training, args)

                        #            # Top-1 accuracy
                        #            top1acc = tf.reduce_mean(
                        #                tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32))
                        #            # Top-5 accuracy
                        #            topnacc = tf.reduce_mean(
                        #                tf.cast(tf.nn.in_top_k(logits, labels, args.top_n), tf.float32))

                        # Build the portion of the Graph calculating the losses. Note that we will
                        # assemble the total_loss using a custom function below.
                        #            cross_entropy_mean = loss(logits, labels)
                        cross_entropy_mean = loss_contrastive(o1, o2, labels)
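                        # loss_contrastive (defined elsewhere in this project) is assumed to be
                        # a siamese / contrastive loss over the paired tower outputs, roughly
                        # L = y * d^2 + (1 - y) * max(margin - d, 0)^2 with d = ||o1 - o2||_2.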

                        # Get all the regularization losses and add them up
                        regularization_losses = tf.get_collection(
                            tf.GraphKeys.REGULARIZATION_LOSSES)

                        reg_loss = tf.add_n(regularization_losses)

                        # Add a TensorBoard summary
                        tf.summary.scalar('Regularization Loss', reg_loss)

                        # Compute the total loss (cross entropy loss + regularization loss)
                        total_loss = tf.add(cross_entropy_mean, reg_loss)

                        # Attach a scalar summary for the total loss and top-1 and top-5 accuracies
                        tf.summary.scalar('Total Loss', total_loss)
                        #            tf.summary.scalar('Top-1 Accuracy', top1acc)
                        #            tf.summary.scalar('Top-n Accuracy', topnacc)

                        # Reuse variables for the next tower.
                        tf.get_variable_scope().reuse_variables()

                        # Retain the summaries from the final tower.
                        summaries = tf.get_collection(tf.GraphKeys.SUMMARIES,
                                                      scope)

                        # Gather batch normalization update operations
                        batchnorm_updates = tf.get_collection(
                            tf.GraphKeys.UPDATE_OPS, scope)
                        # Calculate the gradients for the batch of data on this tower.
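                        # transfer_mode[0] semantics as used in this excerpt (inferred from the
                        # branches below, not documented here):
                        #   1 -> train only the 'output' scope; the feature extractor is frozen.
                        #   3 -> train only 'output' for the first transfer_mode[1] epochs, then
                        #        switch to training all variables (the auxiliary gradients).
                        #   otherwise -> train all variables from the start.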
                        if args.transfer_mode[0] == 3:
                            grads = opt.compute_gradients(
                                total_loss,
                                var_list=tf.get_collection(
                                    tf.GraphKeys.VARIABLES, scope='output'))
                            auxgrads = opt.compute_gradients(total_loss)
                            tower_auxgrads.append(auxgrads)
                        elif args.transfer_mode[0] == 1:
                            grads = opt.compute_gradients(
                                total_loss,
                                var_list=tf.get_collection(
                                    tf.GraphKeys.VARIABLES, scope='output'))
                        else:
                            grads = opt.compute_gradients(total_loss)

                        # Keep track of the gradients across all towers.
                        tower_grads.append(grads)

        # We must calculate the mean of each gradient. Note that this is the
        # synchronization point across all towers.
        grads = average_gradients(tower_grads)
        # Auxiliary gradients exist only in transfer mode 3; guard against an empty list.
        auxgrads = average_gradients(tower_auxgrads) if tower_auxgrads else None

        # Add a summary to track the learning rate and weight decay
        summaries.append(tf.summary.scalar('learning_rate', lr))
        summaries.append(tf.summary.scalar('weight_decay', wd))

        # Group all updates into a single train op.
        #with tf.control_dependencies(bn_update_ops):

        # Setup the train operation
        if args.transfer_mode[0] == 3:
            train_op = tf.cond(
                tf.less(epoch_number, args.transfer_mode[1]), lambda: tf.group(
                    opt.apply_gradients(grads), *batchnorm_updates), lambda: tf
                .group(opt.apply_gradients(auxgrads), *batchnorm_updates))
        elif args.transfer_mode[0] == 1:
            train_op = opt.apply_gradients(grads)
        else:
            train_op = tf.group(opt.apply_gradients(grads),
                                tf.group(*batchnorm_updates))

        # A loader for the pretrained model (it does not load the last layer)
        if args.retrain_from is not None:
            if args.transfer_mode[0] == 0:
                pretrained_loader = tf.train.Saver()
            else:
                pretrained_loader = tf.train.Saver(var_list=exclude())

        # Create a saver.
        saver = tf.train.Saver(tf.global_variables(),
                               max_to_keep=args.num_epochs)

        # Build the summary operation from the last tower summaries.
        summary_op = tf.summary.merge_all()

        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()

        # Logging the runtime information if requested
        if args.log_debug_info:
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
        else:
            run_options = None
            run_metadata = None

        # Creating a session to run the built graph
        sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=args.log_device_placement))

        sess.run(init)

        # Continue training from a saved snapshot, load a pre-trained model
        if args.retrain_from is not None:
            ckpt = tf.train.get_checkpoint_state(args.retrain_from)
            if ckpt and ckpt.model_checkpoint_path:
                # Restores from checkpoint
                pretrained_loader.restore(sess, ckpt.model_checkpoint_path)
            else:
                return

        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)

        # Setup a summary writer
        summary_writer = tf.summary.FileWriter(args.log_dir, sess.graph)

        # Set the start epoch number
        start_epoch = sess.run(epoch_number + 1)

        # The main training loop
        for epoch in xrange(start_epoch, start_epoch + args.num_epochs):
            # update epoch_number
            sess.run(epoch_number.assign(epoch))

            # Training batches
            for step in xrange(args.num_batches):

                start_time = time.time()
                _, loss_value = sess.run([train_op, cross_entropy_mean],
                                         options=run_options,
                                         run_metadata=run_metadata)
                duration = time.time() - start_time

                # Check for errors
                assert not np.isnan(
                    loss_value), 'Model diverged with loss = NaN'

                # Logging and writing tensorboard summaries
                if step % 10 == 0:
                    num_examples_per_step = args.chunked_batch_size * args.num_gpus
                    examples_per_sec = num_examples_per_step / duration
                    sec_per_batch = duration / args.num_gpus

                    format_str = (
                        '%s: epoch %d, step %d, loss = %.2f, (%.1f cats/sec; %.3f '
                        'sec/batch)')
                    print(format_str %
                          (datetime.now(), epoch, step, loss_value,
                           examples_per_sec, sec_per_batch))
                    sys.stdout.flush()
                if step % 100 == 0:
                    summary_str = sess.run(summary_op)
                    summary_writer.add_summary(summary_str,
                                               args.num_batches * epoch + step)
                    if args.log_debug_info:
                        summary_writer.add_run_metadata(
                            run_metadata, 'epoch%d step%d' % (epoch, step))

            # Save the model checkpoint periodically after each training epoch
            checkpoint_path = os.path.join(args.log_dir, args.snapshot_prefix)
            saver.save(sess, checkpoint_path, global_step=epoch)
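# The multi-GPU loop above relies on an average_gradients() helper that is not part
# of this excerpt. A minimal sketch, assuming the standard TensorFlow multi-tower
# pattern (not necessarily this repository's actual implementation):
def average_gradients(tower_grads):
    """Average per-variable gradients across towers."""
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # grad_and_vars holds one (gradient, variable) pair per tower for the same variable.
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars if g is not None]
        if not grads:
            continue  # variable received no gradient on any tower
        grad = tf.reduce_mean(tf.concat(grads, 0), 0)
        # Variables are shared across towers, so the first tower's variable is used.
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads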
Example #3
def evaluate(args):

  # Building the graph
  with tf.Graph().as_default() as g, tf.device('/cpu:0'):
    # Get images and labels.
    if args.save_predictions is None:
      images, labels = data_loader.read_inputs(False, args)
    else:
      images, labels, urls = data_loader.read_inputs(False, args)
    # Performing computations on a GPU
    with tf.device('/gpu:0'):
        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = arch.get_model(images, 0.0, False, args)

        # Calculate predictions accuracies top-1 and top-n
        top_1_op = tf.nn.in_top_k(logits, labels, 1)
        top_n_op = tf.nn.in_top_k(logits, labels, args.top_n)

        if args.save_predictions is not None:
          topn = tf.nn.top_k(tf.nn.softmax(logits), args.top_n)
          topnind = topn.indices
          topnval = topn.values

        saver = tf.train.Saver(tf.global_variables())

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        summary_writer = tf.summary.FileWriter(args.log_dir, g)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())

      ckpt = tf.train.get_checkpoint_state(args.log_dir)
      print(ckpt.model_checkpoint_path)

      # Load the latest model
      if ckpt and ckpt.model_checkpoint_path:
        # Restores from checkpoint
        saver.restore(sess, ckpt.model_checkpoint_path)

      else:
        return
      # Start the queue runners.
      coord = tf.train.Coordinator()

      threads = tf.train.start_queue_runners(sess=sess, coord=coord)
      true_predictions_count = 0  # Counts the number of correct predictions
      true_topn_predictions_count = 0
      all_count = 0
      step = 0
      predictions_format_str = ('%d,%s,%d,%s,%s\n')
      batch_format_str = ('Batch Number: %d, Top-1 Hit: %d, Top-'+str(args.top_n)+' Hit: %d, Top-1 Accuracy: %.3f, Top-'+str(args.top_n)+' Accuracy: %.3f')

      if args.save_predictions is not None:
        out_file = open(args.save_predictions,'w')
      while step < args.num_batches and not coord.should_stop():
        if args.save_predictions is None:
          top1_predictions, topn_predictions = sess.run([top_1_op, top_n_op])
        else:
          top1_predictions, topn_predictions, urls_values, label_values, topnguesses, topnconf = sess.run([top_1_op, top_n_op, urls, labels, topnind, topnval])
          for i in xrange(0,urls_values.shape[0]):
            out_file.write(predictions_format_str%(step*args.batch_size+i+1, urls_values[i], label_values[i],
                '[' + ', '.join('%d' % item for item in topnguesses[i]) + ']',
                '[' + ', '.join('%.4f' % item for item in topnconf[i]) + ']'))
            out_file.flush()
        true_predictions_count += np.sum(top1_predictions)
        true_topn_predictions_count += np.sum(topn_predictions)
        all_count+= top1_predictions.shape[0]
        print(batch_format_str%(step, true_predictions_count, true_topn_predictions_count, true_predictions_count / all_count, true_topn_predictions_count / all_count))
        sys.stdout.flush()
        step += 1

      if args.save_predictions is not None:
        out_file.close()
 
      summary = tf.Summary()
      summary.ParseFromString(sess.run(summary_op))
      coord.request_stop()
      coord.join(threads)
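# Each line written by evaluate() via predictions_format_str has the form
# "index,url,true_label,[top-n classes],[top-n confidences]"; for example
# (values purely illustrative):
#   17,images/0001.jpg,3,[3, 12, 7],[0.8123, 0.1011, 0.0456]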
def predict(args):

    # Building the graph
    with tf.Graph().as_default() as g, tf.device('/cpu:0'):
        # Get images and labels.
        images, urls = data_loader.read_inputs(False, args, False)
        # Performing computations on a GPU
        with tf.device('/gpu:0'):
            # Build a Graph that computes the logits predictions from the
            # inference model.
            logits = arch.get_model(images, 0.0, False, args)

            # Information about the predictions for saving in a file

            # Species Identification
            top5_id = tf.nn.top_k(tf.nn.softmax(logits[0]), 5)
            top5ind_id = top5_id.indices
            top5val_id = top5_id.values
            # Count
            top3_cn = tf.nn.top_k(tf.nn.softmax(logits[1]), 3)
            top3ind_cn = top3_cn.indices
            top3val_cn = top3_cn.values
            # Additional Attributes (e.g. description)
            top1_bh = [None] * 6
            top1ind_bh = [None] * 6
            top1val_bh = [None] * 6

            for i in xrange(0, 6):
                top1_bh[i] = tf.nn.top_k(tf.nn.softmax(logits[i + 2]), 1)
                top1ind_bh[i] = top1_bh[i].indices
                top1val_bh[i] = top1_bh[i].values

            # For reading the snapshot files from file
            saver = tf.train.Saver(tf.global_variables())

            # Build the summary operation based on the TF collection of Summaries.
            summary_op = tf.summary.merge_all()

            summary_writer = tf.summary.FileWriter(args.log_dir, g)

        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True)) as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            ckpt = tf.train.get_checkpoint_state(args.log_dir)

            # Load the latest model
            if ckpt and ckpt.model_checkpoint_path:
                # Restores from checkpoint
                saver.restore(sess, ckpt.model_checkpoint_path)

            else:
                return
            # Start the queue runners.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            step = 0

            # Output file to save predictions and their confidences
            out_file = open(args.save_predictions, 'w')
            out_file.write('{')
            first_row = True
            while step < args.num_batches and not coord.should_stop():

                urls_values, top5guesses_id, top5conf, top3guesses_cn, top3conf, top1guesses_bh, top1conf = sess.run(
                    [
                        urls, top5ind_id, top5val_id, top3ind_cn, top3val_cn,
                        top1ind_bh, top1val_bh
                    ])
                for i in xrange(0, urls_values.shape[0]):
                    step_result = {
                        'path':
                        urls_values[i],
                        'top_n_pred':
                        [int(np.asscalar(item)) for item in top5guesses_id[i]],
                        'top_n_conf': [
                            round(float(np.asscalar(item)), 4)
                            for item in top5conf[i]
                        ],
                        'top_n_pred_count':
                        [int(np.asscalar(item)) for item in top3guesses_cn[i]],
                        'top_n_conf_count': [
                            round(float(np.asscalar(item)), 4)
                            for item in top3conf[i]
                        ],
                        'top_pred_standing':
                        int(np.asscalar(top1guesses_bh[0][i])),
                        'top_pred_resting':
                        int(np.asscalar(top1guesses_bh[1][i])),
                        'top_pred_moving':
                        int(np.asscalar(top1guesses_bh[2][i])),
                        'top_pred_eating':
                        int(np.asscalar(top1guesses_bh[3][i])),
                        'top_pred_interacting':
                        int(np.asscalar(top1guesses_bh[4][i])),
                        'top_pred_young_present':
                        int(np.asscalar(top1guesses_bh[5][i])),
                        'top_conf_standing':
                        round(float(np.asscalar(top1conf[0][i])), 4),
                        'top_conf_resting':
                        round(float(np.asscalar(top1conf[1][i])), 4),
                        'top_conf_moving':
                        round(float(np.asscalar(top1conf[2][i])), 4),
                        'top_conf_eating':
                        round(float(np.asscalar(top1conf[3][i])), 4),
                        'top_conf_interacting':
                        round(float(np.asscalar(top1conf[4][i])), 4),
                        'top_conf_young_present':
                        round(float(np.asscalar(top1conf[5][i])), 4)
                    }
                    if first_row:
                        out_file.write('"' +
                                       str(int(step * args.batch_size + i +
                                               1)) + '":')
                        first_row = False
                    else:
                        out_file.write(',\n"' +
                                       str(int(step * args.batch_size + i +
                                               1)) + '":')
                    json.dump(step_result, out_file)

                    out_file.flush()
                print("Finished predicting batch %s / %s" %
                      (step, args.num_batches))
                sys.stdout.flush()

                step += 1
            out_file.write('}')
            out_file.close()

            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            coord.request_stop()
            coord.join(threads)
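# predict() above writes a single JSON object keyed by 1-based example index. A small
# sketch of reading it back (the file name is an assumption; use whatever was passed
# as args.save_predictions):
import json

with open('predictions.json') as f:
    predictions = json.load(f)
for idx, record in predictions.items():
    # Keys written above: 'path', 'top_n_pred', 'top_n_conf', the *_count fields,
    # and the per-attribute 'top_pred_*' / 'top_conf_*' entries.
    print(idx, record['path'], record['top_n_pred'], record['top_n_conf'])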
def evaluate(args):

    # Building the graph
    with tf.Graph().as_default() as g, tf.device('/cpu:0'):
        # Get images and labels
        images1, images2, labels, urls1, urls2 = data_loader.read_inputs(
            False, args)

        # Performing computations on a GPU
        with tf.device('/gpu:0'):
            # Build a Graph that computes the logits predictions from the
            # inference model.
            logits = arch.get_model(images1, 0.0, False, args)

            saver = tf.train.Saver(tf.global_variables())

            # Build the summary operation based on the TF collection of Summaries.
            summary_op = tf.summary.merge_all()
            summary_writer = tf.summary.FileWriter(args.log_dir, g)

        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True)) as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())

            ckpt = tf.train.get_checkpoint_state(args.log_dir)

            # Load the latest model
            if ckpt and ckpt.model_checkpoint_path:
                # Restores from checkpoint
                saver.restore(sess, ckpt.model_checkpoint_path)

            else:
                return
            # Start the queue runners.
            coord = tf.train.Coordinator()

            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            step = 0
            predictions_format_str = ('%s,%s\n')
            batch_format_str = ('Batch Number: %d')

            out_file = open(args.save_predictions, 'w')
            while step < args.num_batches and not coord.should_stop():
                features, urls_values, label_values = sess.run(
                    [logits, urls1, labels])
                for i in range(0, urls_values.shape[0]):
                    out_file.write(predictions_format_str %
                                   (urls_values[i].decode("utf-8"), ','.join(
                                       map(str, features[i]))))
                    out_file.flush()
                print(batch_format_str % (step, ))
                sys.stdout.flush()
                step += 1

            out_file.close()

            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            coord.request_stop()
            coord.join(threads)
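# This evaluate() variant dumps the raw network outputs as a feature vector per image,
# one line of the form "url,f1,f2,...,fN". A minimal sketch for loading the file back
# (the file name is an assumption):
import csv

import numpy as np

urls, feats = [], []
with open('features.csv') as f:
    for row in csv.reader(f):
        urls.append(row[0])
        feats.append(np.array(row[1:], dtype=np.float32))
features = np.stack(feats)  # shape: [num_images, feature_dim]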
Example #6
def main(_):
	tf.logging.set_verbosity(tf.logging.INFO)
	with tf.Graph().as_default():
		logdir = r'E:\GitHub\MMFI\log\GG12\CNN'
		evaldir = os.path.join(logdir, 'eval')
		if not tf.gfile.Exists(evaldir):
			# tf.gfile.DeleteRecursively(evaldir)
			tf.gfile.MakeDirs(evaldir)

		with tf.name_scope('inputs'):
			fiber_output, fiber_input, encoder, label = data_loader.read_inputs('valid.txt', False)

		with tf.variable_scope('Generator'):
			with tf.variable_scope('G1'):
				generated_input = pix2pix_G(fiber_output, is_training=False) \
				                  * circle(FLAGS.input_size,FLAGS.input_size)
			with tf.variable_scope('G2'):
				generated_data = pix2pix_G(generated_input,is_training=False)\
				                 * circle(FLAGS.input_size,FLAGS.input_size)

		with tf.name_scope('Valid_summary'):
			reshaped_fiber_input = get_summary_image(fiber_input, FLAGS.grid_size)
			reshaped_label = get_summary_image(label, FLAGS.grid_size)
			reshaped_generated_input = get_summary_image(generated_input, FLAGS.grid_size)
			reshaped_generated_data = get_summary_image(generated_data, FLAGS.grid_size)
			tf.summary.image('Input_Fiber', reshaped_fiber_input)
			tf.summary.image('Input_Generator', reshaped_generated_input)
			tf.summary.image('Data_Real', reshaped_label)
			tf.summary.image('Data_Generator', reshaped_generated_data)

		with tf.name_scope('Valid_op'):
			psnr = tf.reduce_mean(tf.image.psnr(generated_data, label, max_val=1.0))
			ssim = tf.reduce_mean(tf.image.ssim(generated_data, label, max_val=1.0))
			corr = correlation(generated_data, label)
			# inception_score = get_inception_score(generated_data)

			tf.summary.scalar('PSNR', psnr)
			tf.summary.scalar('SSIM', ssim)
			tf.summary.scalar('Relation', corr)

			grate = tf.ones([1,FLAGS.grid_size*FLAGS.input_size,10,1],dtype=tf.float32)
			reshaped_images = tf.concat((reshaped_generated_input, grate,
			                             reshaped_fiber_input, grate,
			                             reshaped_label, grate,
			                             reshaped_generated_data, grate), 2)
			uint8_images = tf.cast(reshaped_images*255, tf.uint8)
			image_write_ops = tf.write_file('%s/%s' % (evaldir, 'Generator_is_training_False.png'),
			                                tf.image.encode_png(uint8_images[0]))

			status_message = tf.string_join([' PSNR: ', tf.as_string(psnr), ' ',
			                                 ' SSIM: ', tf.as_string(ssim), ' ',
			                                 ' Correlation: ', tf.as_string(corr)],
			                                name='status_message')


		checkpoint_path = tf.train.latest_checkpoint(logdir)
		tf.logging.info('Evaluating %s' % checkpoint_path)

		tf.contrib.training.evaluate_once(
			checkpoint_path,
			hooks=[tf.contrib.training.SummaryAtEndHook(evaldir),
			       tf.contrib.training.StopAfterNEvalsHook(50),
			       tf.train.LoggingTensorHook([status_message], every_n_iter=5)],
			eval_ops=image_write_ops)
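# The generator outputs above are multiplied by circle(...), a circular mask helper
# that is not included in this excerpt. A minimal sketch, assuming it keeps the
# inscribed circle and zeroes the corners (an assumption, not the project's actual
# implementation):
def circle(height, width):
    yy, xx = np.ogrid[:height, :width]
    cy, cx = (height - 1) / 2.0, (width - 1) / 2.0
    radius = min(height, width) / 2.0
    mask = ((yy - cy) ** 2 + (xx - cx) ** 2) <= radius ** 2
    # Shape [1, height, width, 1] so it broadcasts against NHWC image batches.
    return tf.constant(mask[np.newaxis, :, :, np.newaxis].astype('float32'))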
def evaluate(args):

    # Building the graph
    with tf.Graph().as_default() as g, tf.device('/cpu:0'):
        # Get images and labels.
        images, labels, urls = data_loader.read_inputs(False, args)
        # Performing computations on a GPU
        with tf.device('/gpu:0'):
            # Build a Graph that computes the logits predictions from the
            # inference model.
            logits = arch.get_model(images, 0.0, False, args)

            # Calculate predictions accuracies top-1 and top-5
            top1acc = [None] * len(logits)
            for i in xrange(0, len(logits)):
                top1acc[i] = tf.reduce_mean(
                    tf.cast(tf.nn.in_top_k(logits[i], labels[:, i], 1),
                            tf.float32))
            # Top-5 ID accuracy
            top5acc_id = tf.reduce_mean(
                tf.cast(tf.nn.in_top_k(logits[0], labels[:, 0], 5),
                        tf.float32))
            # Top-3 count accuracy
            top3acc_cn = tf.reduce_mean(
                tf.cast(tf.nn.in_top_k(logits[1], labels[:, 1], 3),
                        tf.float32))
            # The fraction of count predictions within +/- 1 bin
            one_bin_off_loss = tf.reduce_mean(
                tf.cast(
                    tf.less_equal(
                        tf.abs(
                            tf.cast(tf.argmax(logits[1], axis=1), tf.float64) -
                            tf.cast(labels[:, 1], tf.float64)), 1),
                    tf.float32))

            # Information about the predictions for saving in a file

            # Species Identification
            top5_id = tf.nn.top_k(tf.nn.softmax(logits[0]), 5)
            top5ind_id = top5_id.indices
            top5val_id = top5_id.values
            # Count
            top3_cn = tf.nn.top_k(tf.nn.softmax(logits[1]), 3)
            top3ind_cn = top3_cn.indices
            top3val_cn = top3_cn.values
            # Additional Attributes (e.g. description)
            top1_bh = [None] * 6
            top1ind_bh = [None] * 6
            top1val_bh = [None] * 6

            for i in xrange(0, 6):
                top1_bh[i] = tf.nn.top_k(tf.nn.softmax(logits[i + 2]), 1)
                top1ind_bh[i] = top1_bh[i].indices
                top1val_bh[i] = top1_bh[i].values

            # Binarizing the additional attribute predictions
            binary_behavior_logits = tf.cast([
                top1ind_bh[0], top1ind_bh[1], top1ind_bh[2], top1ind_bh[3],
                top1ind_bh[4], top1ind_bh[5]
            ], tf.int32)
            # Cast to Boolean
            binary_behavior_predictions = tf.squeeze(
                tf.cast(binary_behavior_logits, tf.bool))
            # Group labels together
            binary_behavior_labels_logits = [
                labels[:, 2], labels[:, 3], labels[:, 4], labels[:, 5],
                labels[:, 6], labels[:, 7]
            ]
            # Cast labels to Boolean
            binary_behavior_labels = tf.cast(binary_behavior_labels_logits,
                                             tf.bool)

            # Compute the size of label sets (for each image separately)
            y_length = tf.reduce_sum(binary_behavior_labels_logits, axis=0)
            # Compute the size of prediction sets (for each image separately)
            z_length = tf.reduce_sum(binary_behavior_logits, axis=0)
            # Compute the union of the labels set and prediction set
            union_length = tf.reduce_sum(tf.cast(
                tf.logical_or(binary_behavior_labels,
                              binary_behavior_predictions), tf.int32),
                                         axis=0)
            # Compute the intersection of the labels set and prediction set
            intersect_length = tf.reduce_sum(tf.cast(
                tf.logical_and(binary_behavior_labels,
                               binary_behavior_predictions), tf.int32),
                                             axis=0)
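            # With label set Y and prediction set Z per image, the loop further below
            # computes set-based multi-label metrics: accuracy = |Y ∩ Z| / |Y ∪ Z|,
            # precision = |Y ∩ Z| / |Z|, recall = |Y ∩ Z| / |Y|.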

            # For reading the snapshot files from file
            saver = tf.train.Saver(tf.global_variables())

            # Build the summary operation based on the TF collection of Summaries.
            summary_op = tf.summary.merge_all()

            summary_writer = tf.summary.FileWriter(args.log_dir, g)

        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True)) as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())
            ckpt = tf.train.get_checkpoint_state(args.log_dir)

            # Load the latest model
            if ckpt and ckpt.model_checkpoint_path:
                # Restores from checkpoint
                saver.restore(sess, ckpt.model_checkpoint_path)

            else:
                return
            # Start the queue runners.
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            true_predictions_count = [0] * 8  # Counts the number of correct top-1 predictions
            true_top5_predictions_count = 0  # Counts the number of correct top-5 predictions for species identification
            true_top3_predictions_count = 0  # Counts the number of correct top-3 predictions for counting

            accv_all = 0  # Counts accuracy of additional attributes
            prcv_all = 0  # Counts precision of additional attributes
            recv_all = 0  # Counts recall of additional attributes
            total_examples = 0  # Counts number of total examples

            one_bin_off_val = 0
            step = 0
            predictions_format_str = ('%d,%s,%s,%s,%s,%s,%s,%s,%s]\n')
            batch_format_str = (
                'Batch Number: %d, Top-1 Accuracy: %s, Top-5 Accuracy: %.3f, Top-3 Accuracy: %.3f, One bin off Loss: %.3f, Accuracy: %.3f, Precision: %.3f, Recall: %.3f'
            )

            # Output file to save predictions and their confidences
            out_file = open(args.save_predictions, 'w')

            while step < args.num_batches and not coord.should_stop():

                top1_accuracy, top5_accuracy, top3_accuracy, urls_values, label_values, top5guesses_id, top5conf, top3guesses_cn, top3conf, top1guesses_bh, top1conf, obol_val, yval, zval, uval, ival = sess.run(
                    [
                        top1acc, top5acc_id, top3acc_cn, urls, labels,
                        top5ind_id, top5val_id, top3ind_cn, top3val_cn,
                        top1ind_bh, top1val_bh, one_bin_off_loss, y_length,
                        z_length, union_length, intersect_length
                    ])
                for i in xrange(0, urls_values.shape[0]):
                    out_file.write(
                        predictions_format_str %
                        (step * args.batch_size + i + 1, urls_values[i],
                         '[' + ', '.join('%d' % np.asscalar(item)
                                         for item in label_values[i]) + ']',
                         '[' + ', '.join('%d' % np.asscalar(item)
                                         for item in top5guesses_id[i]) + ']',
                         '[' + ', '.join('%.3f' % np.asscalar(item)
                                         for item in top5conf[i]) + ']',
                         '[' + ', '.join('%d' % np.asscalar(item)
                                         for item in top3guesses_cn[i]) + ']',
                         '[' + ', '.join('%.3f' % np.asscalar(item)
                                         for item in top3conf[i]) + ']',
                         '[' + ', '.join('%d' % np.asscalar(item) for item in [
                             top1guesses_bh[0][i], top1guesses_bh[1][i],
                             top1guesses_bh[2][i], top1guesses_bh[3][i],
                             top1guesses_bh[4][i], top1guesses_bh[5][i]
                         ]) + ']', '[' +
                         ', '.join('%.3f' % np.asscalar(item) for item in [
                             top1conf[0][i], top1conf[1][i], top1conf[2][i],
                             top1conf[3][i], top1conf[4][i], top1conf[5][i]
                         ]) + ']'))
                    out_file.flush()
                total_examples += uval.shape[0]

                # Computing Accuracy, Precision, and Recall of additional attributes
                for i in xrange(0, uval.shape[0]):
                    if (uval[i] == 0):
                        # If both the label set and prediction set are empty, it is a correct prediction
                        accv_all += 1
                    else:
                        accv_all += ival[i] / uval[i]
                    if (np.asscalar(yval[i]) == 0):
                        # If the label set is empty, then recall is 100%
                        recv_all += 1
                    else:
                        recv_all += np.asscalar(ival[i]) / yval[i]
                    if (zval[i] == 0):
                        # If the prediction set is empty, then precision is 100%
                        prcv_all += 1
                    else:
                        prcv_all += ival[i] / zval[i]

                for i in xrange(0, len(logits)):
                    true_predictions_count[i] += top1_accuracy[i]

                true_top5_predictions_count += top5_accuracy
                true_top3_predictions_count += top3_accuracy
                one_bin_off_val += obol_val

                print(
                    batch_format_str %
                    (step, '[' + ', '.join('%.3f' % (item / (step + 1.0))
                                           for item in true_predictions_count)
                     + ']', true_top5_predictions_count /
                     (step + 1.0), true_top3_predictions_count /
                     (step + 1.0), one_bin_off_val /
                     (step + 1.0), accv_all / total_examples,
                     prcv_all / total_examples, recv_all / total_examples))
                sys.stdout.flush()
                step += 1

            out_file.close()

            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            coord.request_stop()
            coord.join(threads)
def predict(args):

  # Building the graph
  with tf.Graph().as_default() as g, tf.device('/cpu:0'):
    # Get images
    images, urls = data_loader.read_inputs(False, args, False)
    # Performing computations on a GPU
    with tf.device('/gpu:0'):
        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = arch.get_model(images, 0.0, False, args)

        # Calculate predictions
        topn = tf.nn.top_k(tf.nn.softmax(logits), args.top_n)
        topnind = topn.indices
        topnval = topn.values

        saver = tf.train.Saver(tf.global_variables())

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        summary_writer = tf.summary.FileWriter(args.log_dir, g)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())

      ckpt = tf.train.get_checkpoint_state(args.log_dir)

      # Load the latest model
      if ckpt and ckpt.model_checkpoint_path:
        # Restores from checkpoint
        saver.restore(sess, ckpt.model_checkpoint_path)

      else:
        print('Checkpoint not found: '+args.log_dir)
        return
      # Start the queue runners.
      coord = tf.train.Coordinator()

      threads = tf.train.start_queue_runners(sess=sess, coord=coord)
      step = 0

      out_file = open(args.save_predictions,'w')
      out_file.write('{')
      first_row = True
      while step < args.num_batches and not coord.should_stop():
        urls_values, topnguesses, topnconf = sess.run([urls, topnind, topnval])
        for i in xrange(0,urls_values.shape[0]):
            step_result = {
                'path': urls_values[i],
                'top_n_pred':  [int(item) for item in topnguesses[i]],
                'top_n_conf': [round(float(item), 4) for item in topnconf[i]]
                           }
            if first_row:
              out_file.write('"' + str(int(step*args.batch_size+i+1)) + '":')
              first_row = False
            else:
              out_file.write(',\n"' + str(int(step*args.batch_size+i+1)) + '":')
            json.dump(step_result, out_file)
            out_file.flush()
        print("Finished predicting batch %s / %s" % (step, args.num_batches))
        sys.stdout.flush()
        step += 1
      out_file.write('}')
      out_file.close()

      summary = tf.Summary()
      summary.ParseFromString(sess.run(summary_op))
      coord.request_stop()
      coord.join(threads)
Example #9
def evaluate(args):

    # Building the graph
    with tf.Graph().as_default() as g, tf.device('/cpu:0'):
        # Get images and labels
        images, labels, urls = data_loader.read_inputs(False, args)
        # Performing computations on a GPU
        with tf.device('/gpu:0'):
            # Build a Graph that computes the logits predictions from the
            # inference model.
            logits = arch.get_model(images, 0.0, False, args)

            # Calculate predictions accuracies top-1 and top-5
            top_1_op = tf.nn.in_top_k(logits, labels, 1)
            top_n_op = tf.nn.in_top_k(logits, labels, args.top_n)

            topn = tf.nn.top_k(tf.nn.softmax(logits), args.top_n)
            topnind = topn.indices
            topnval = topn.values

            saver = tf.train.Saver(tf.global_variables())

            # Build the summary operation based on the TF collection of Summaries.
            summary_op = tf.summary.merge_all()

            summary_writer = tf.summary.FileWriter(args.log_dir, g)

        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=True)) as sess:
            sess.run(tf.global_variables_initializer())
            sess.run(tf.local_variables_initializer())

            ckpt = tf.train.get_checkpoint_state(args.log_dir)

            # Load the latest model
            if ckpt and ckpt.model_checkpoint_path:
                # Restores from checkpoint
                saver.restore(sess, ckpt.model_checkpoint_path)

            else:
                print('Checkpoint not found: ' + args.log_dir)
                return
            # Start the queue runners.
            coord = tf.train.Coordinator()

            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            true_predictions_count = 0  # Counts correct predictions
            true_topn_predictions_count = 0  # Counts correct top-n predictions
            all_count = 0  # Counts all images
            step = 0
            #predictions_format_str = ('%d;%s;%d;%s;%s\n')
            batch_format_str = (
                'Batch Number: %d, Top-1 Hit: %d, Top-' + str(args.top_n) +
                ' Hit: %d, Top-1 Accuracy: %.3f, Top-' + str(args.top_n) +
                ' Accuracy: %.3f')

            out_file = open(args.save_predictions, 'w')
            out_file.write('{')
            # out_file.write('id', 'path', 'true', 'top_n_class', 'top_n_conf')
            first_row = True
            while step < args.num_batches and not coord.should_stop():
                top1_predictions, topn_predictions, urls_values, label_values, topnguesses, topnconf = sess.run(
                    [top_1_op, top_n_op, urls, labels, topnind, topnval])
                for i in xrange(0, urls_values.shape[0]):
                    step_result = {
                        'path':
                        urls_values[i],
                        'true':
                        int(label_values[i]),
                        'top_n_pred': [int(item) for item in topnguesses[i]],
                        'top_n_conf':
                        [round(float(item), 3) for item in topnconf[i]]
                    }
                    if first_row:
                        out_file.write('"' +
                                       str(int(step * args.batch_size + i +
                                               1)) + '":')
                        first_row = False
                    else:
                        out_file.write(',\n"' +
                                       str(int(step * args.batch_size + i +
                                               1)) + '":')
                    json.dump(step_result, out_file)
                    out_file.flush()

                true_predictions_count += np.sum(top1_predictions)
                true_topn_predictions_count += np.sum(topn_predictions)
                all_count += top1_predictions.shape[0]
                print(batch_format_str %
                      (step, true_predictions_count,
                       true_topn_predictions_count, true_predictions_count /
                       all_count, true_topn_predictions_count / all_count))
                sys.stdout.flush()
                step += 1

            out_file.write('}')
            out_file.close()

            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            coord.request_stop()
            coord.join(threads)