Example #1
def evaluate(dataset):
  """Evaluate model on Dataset for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels from the dataset.
    images, pitchs, yaws, rolls, names = image_processing.inputs(dataset)
   
    p = tf.expand_dims(pitchs, 1)
    y = tf.expand_dims(yaws, 1)
    r = tf.expand_dims(rolls, 1)
    labels = tf.concat([p, y, r], 1)

    eval_output = model.inference(images, FLAGS.is_training)
    # Calculate the absolute pose errors (per example and summed over the batch).
    error_op = tf.reduce_sum(tf.abs(eval_output - labels), 0)
    acc_op = tf.abs(eval_output - labels)

    saver = tf.train.Saver()

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                            graph_def=graph_def)

    while True:
      _eval_once(saver, summary_writer, error_op, summary_op, acc_op)
      if FLAGS.run_once:
        break
      time.sleep(FLAGS.eval_interval_secs)
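
The `_eval_once` helper is not shown in this example. A minimal sketch of what such a helper typically does (restore the latest checkpoint, run the error op once, write a summary); `FLAGS.checkpoint_dir` and the exact logging are assumptions:

def _eval_once(saver, summary_writer, error_op, summary_op, acc_op):
  """Hedged sketch of a single evaluation pass; not part of the original code."""
  with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    if not (ckpt and ckpt.model_checkpoint_path):
      print('No checkpoint file found')
      return
    saver.restore(sess, ckpt.model_checkpoint_path)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
      # error_op sums the absolute pitch/yaw/roll errors over one batch.
      batch_error, summary_str = sess.run([error_op, summary_op])
      print('Sum of absolute errors (pitch, yaw, roll): %s' % batch_error)
      summary_writer.add_summary(summary_str)
    finally:
      coord.request_stop()
      coord.join(threads)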
Example #2
def tower_loss(scope, images, labels):
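    """Sum every loss registered in this tower's 'losses' collection into one total loss."""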
    # Run inference; model.losses() records its losses in the 'losses' collection.
    train_output = model.inference(images, FLAGS.is_training)
    _ = model.losses(train_output, labels)

    # Collect only the losses created inside this tower's scope and sum them.
    losses = tf.get_collection('losses', scope)

    total_loss = tf.add_n(losses, name='total_loss')

    return total_loss
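
The `scope` argument only makes sense inside a per-GPU tower loop. A hedged sketch of how tower_loss is commonly driven in such a loop (the optimizer, `FLAGS.num_gpus`, and the gradient-averaging step are assumptions borrowed from the standard TF1 multi-tower pattern, not from this code base):

def build_tower_gradients(optimizer, images, labels):
    """Hedged sketch, not part of the original code: one gradient list per GPU tower."""
    tower_grads = []
    with tf.variable_scope(tf.get_variable_scope()):
        for i in range(FLAGS.num_gpus):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('tower_%d' % i) as scope:
                    loss = tower_loss(scope, images, labels)
                    # Share model variables between towers.
                    tf.get_variable_scope().reuse_variables()
                    tower_grads.append(optimizer.compute_gradients(loss))
    # The per-tower gradients would then be averaged and applied with
    # optimizer.apply_gradients(...).
    return tower_grads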
Example #3
def evaluate_one_image(image_array, _index):
    # `label` maps class indices (as strings) to class names; it comes from the `index` module.
    label = index.index()
    with tf.Graph().as_default():
        BATCH_SIZE = 1

        N_CLASSES = len(label)

        # Build the graph from a placeholder so the image can be fed at run time.
        x = tf.placeholder(tf.float32, shape=[112, 112, 3])
        image = tf.image.per_image_standardization(x)
        image = tf.reshape(image, [1, 112, 112, 3])

        logit = model_2.inference(image, BATCH_SIZE, N_CLASSES)
        logit = tf.nn.softmax(logit)

        # You need to change this directory to yours.
        # logs_train_dir = r'D:\MyProjects\understand\save_2'
        logs_train_dir = os.path.join(os.getcwd(), '..', 'understand', 'save_2')
        print(logs_train_dir)
        saver = tf.train.Saver()

        with tf.Session() as sess:

            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            # ckpt = tf.train.get_checkpoint_state(r'D:\MyProjects\understand\save')
            # if ckpt and ckpt.model_checkpoint_path:

            if True:
                global_step = ckpt.model_checkpoint_path.split('\\')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            # else:
            #     print('No checkpoint file found')

            prediction = sess.run(logit, feed_dict={x: image_array})
            max_index = np.argmax(prediction)
            # result = ('The probability that this is {} is: '.format(label[str(max_index)]) + '%.6f' % prediction[:, max_index])
            print(label[str(max_index)] + '-' + str(_index))
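
A hedged usage sketch (not part of the original): load a single RGB image, resize it to the 112x112 input the graph expects, and classify it. Pillow and the file-path handling are assumptions.

import numpy as np
from PIL import Image

def classify_file(path, idx):
    """Hedged helper: read an image file and run evaluate_one_image on it."""
    img = Image.open(path).convert('RGB').resize((112, 112))
    evaluate_one_image(np.array(img), idx)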
Example #4
def train(dataset):
  """Train on dataset for a number of steps."""
  # with tf.Graph().as_default(), tf.device('/cpu:0'):
  with tf.Graph().as_default():

    # ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    # if ckpt and ckpt.model_checkpoint_path:
    #     global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]

    global_step = tf.Variable(0, trainable=False)
    # global_step = tf.contrib.framework.get_or_create_global_step()

    num_preprocess_threads = FLAGS.num_preprocess_threads * FLAGS.num_gpus
    with tf.device('/cpu:0'):
      images, pitchs, yaws, rolls, names = image_processing.distorted_inputs(
        dataset,
        num_preprocess_threads=num_preprocess_threads)
    
    p = tf.expand_dims(pitchs, 1)
    y = tf.expand_dims(yaws, 1)
    r = tf.expand_dims(rolls, 1)
    labels = tf.concat([p, y, r], 1)

    train_output = model.inference(images, FLAGS.is_training)
    train_loss = model.losses(train_output, labels)
    # Advance global_step manually; add_global is run together with train_op below.
    add_global = global_step.assign_add(1)
    train_op = model.trainning(train_loss, FLAGS.learning_rate, global_step)
   
    summary_op = tf.summary.merge_all()
    sess = tf.Session()
    train_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
    saver = tf.train.Saver()
    
    # ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    # if ckpt and ckpt.model_checkpoint_path:
    #   if os.path.isabs(ckpt.model_checkpoint_path):
    #     # Restores from checkpoint with absolute path.
    #     saver.restore(sess, ckpt.model_checkpoint_path)
    #   else:
    #     # Restores from checkpoint with relative path.
    #     saver.restore(sess, os.path.join(FLAGS.checkpoint_dir,
    #                                      ckpt.model_checkpoint_path))
    #   # Assuming model_checkpoint_path looks something like:
    #   #   /my-favorite-path/imagenet_train/model.ckpt-0,
    #   # extract global_step from it.
    #   global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
    #   print('Successfully loaded model from %s at step=%s.' %
    #         (ckpt.model_checkpoint_path, global_step))
    # else:
    #   print('No checkpoint file found')
    #   return

    sess.run(tf.global_variables_initializer())
    
    """
    these codes get the variable in conv1

    print(sess.run(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)))
    w = tf.contrib.framework.get_variables('conv1')
    t = tf.nn.l2_loss(w[0])
    print(sess.run(t))
    """

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    
    try:
        for step in np.arange(FLAGS.max_steps):
            if coord.should_stop():
                break
            _, _, tra_loss = sess.run([add_global, train_op, train_loss])
               
            if step % 50 == 0:
                print('Step %d, train loss = %.2f' % (step, tra_loss))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)
            
            if step % 2000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
                
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
        
    coord.join(threads)
    sess.close()
Example #5
def train():
    """Train the model for a number of steps."""
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False, name='global_step')

        # Get images and labels
        images, labels = drd.distorted_inputs()
        # get validation data
        val_images, val_labels = drd.inputs(False)
        # sanity-check the input shapes
        print(images.get_shape(), val_images.get_shape())

        #logits1= drd.inference(images, FLAGS.n_residual_blocks)
        logits = model_2.inference(images,
                                   n=4,
                                   reuse=tf.AUTO_REUSE,
                                   is_training=True)
        val_logits = model_2.inference(val_images,
                                       n=4,
                                       reuse=tf.AUTO_REUSE,
                                       is_training=False)
        #logits = drd.resnet_v1_50(images, training=True)
        #val_logits = drd.resnet_v1_50(val_images, training = False)

        #softmx logits
        soft_max_logits = tf.nn.softmax(logits)
        soft_max_logits_val = tf.nn.softmax(val_logits)
        # calculate predictions
        predictions = tf.cast(tf.argmax(soft_max_logits, axis=1), tf.int32)
        val_predictions = tf.cast(tf.argmax(soft_max_logits_val, axis=1),
                                  tf.int32)

        # ops for batch accuracy calculation
        correct_prediction = tf.equal(predictions, labels)
        val_correct_prediction = tf.equal(val_predictions, val_labels)

        batch_accuracy = tf.reduce_mean(tf.cast(correct_prediction,
                                                tf.float32))
        val_batch_accuracy = tf.reduce_mean(
            tf.cast(val_correct_prediction, tf.float32))

        # Calculate loss, which includes softmax cross entropy and L2 regularization.
        cross_entropy = tf.losses.sparse_softmax_cross_entropy(logits=logits,
                                                               labels=labels)

        # Create a tensor named cross_entropy for logging purposes.
        tf.identity(cross_entropy, name='cross_entropy')
        tf.summary.scalar('cross_entropy', cross_entropy)

        # Exclude batch_normalization variables from the L2 weight decay
        # (the usual default behaviour).
        def exclude_batch_norm(name):
            return 'batch_normalization' not in name

        loss_filter_fn = exclude_batch_norm

        # Add weight decay to the loss.
        l2_loss = weight_decay * tf.add_n(
            # loss is computed using fp32 for numerical stability.
            [
                tf.nn.l2_loss(tf.cast(v, tf.float32))
                for v in tf.trainable_variables() if loss_filter_fn(v.name)
            ])
        tf.summary.scalar('l2_loss', l2_loss)
        loss = cross_entropy + l2_loss
        global_step = tf.train.get_or_create_global_step()

        #list of lr decay factors
        lr_decay_factors = [1, 0.1, 0.01, 0.001, 0.0001]
        learning_rate = 0.001
        # Feed the learning rate through a placeholder (with a default) so the
        # decay schedule in the training loop actually reaches the optimizer.
        lr = tf.placeholder_with_default(learning_rate, shape=[], name='learning_rate')
        tf.summary.scalar('learning_rate', lr)

        optimizer = tf.train.MomentumOptimizer(learning_rate=lr,
                                               momentum=momentum)

        minimize_op = optimizer.minimize(loss, global_step)

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
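        # Group the minimize op with UPDATE_OPS (typically the batch-norm
        # moving-average updates) so they run on every training step.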
        train_op = tf.group(minimize_op, update_ops)
        # calculate training accuracy
        # Calculate loss.
        #loss = drd.loss(logits, labels)

        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        #train_op = drd.train(loss, global_step)

        # Create a saver.
        saver = tf.train.Saver(tf.global_variables())

        #variables = slim.get_variables_to_restore()
        #variables_to_restore = [v for v in variables if not v.name.split('/')[-1] != 'weights:0']
        # Add ops to save and restore all the variables.
        #saver_pre = tf.train.Saver(variables_to_restore[0:-2])  # exclude logits layer
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # # Build an initialization operation to run below.
        init = tf.global_variables_initializer()
        # Start running operations on the Graph.
        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement))
        # sess.run(init)

        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)

        summary_writer = tf.summary.FileWriter(FLAGS.save_dir, sess.graph)

        step_start = 0
        try:
            #### Try to find the last checkpoint file for the full model, if one exists ####
            print("Trying to restore last checkpoint ...")
            save_dir = FLAGS.save_dir
            # Use TensorFlow to find the latest checkpoint - if any.
            last_chk_path = tf.train.latest_checkpoint(checkpoint_dir=save_dir)
            # Try and load the data in the checkpoint.
            saver.restore(sess, save_path=last_chk_path)

            # If we get to this point, the checkpoint was successfully loaded.
            print("Restored checkpoint from:", last_chk_path)
            # get the step integer from the restored path so training resumes from there
            step_start = int(last_chk_path.split('-')[-1])
            uninitialized_vars = []
            for var in tf.global_variables():
                try:
                    sess.run(var)
                except tf.errors.FailedPreconditionError:
                    print("not init")
                    print(var)
                    uninitialized_vars.append(var)

            # create init op for the still unitilized variables
            init_new_vars_op = tf.variables_initializer(uninitialized_vars)
            sess.run(init_new_vars_op)

        except Exception:
            # If all the above failed for some reason, simply
            # initialize all the variables for the TensorFlow graph.
            print(
                "Failed to restore any checkpoints. Initializing variables instead."
            )
            sess.run(init)

        accuracy_dev = []
        val_accuracy_dev = []
        for step in range(step_start, FLAGS.max_steps):
            start_time = time.time()
            #run train op
            _, loss_value, accuracy, gs = sess.run(
                [train_op, loss, batch_accuracy, global_step],
                feed_dict={lr: learning_rate})

            # learning-rate decay schedule: decay at epochs 30, 60, 90 and 120
            epoch = (gs * FLAGS.batch_size) // NUM_IMAGES
            if epoch >= 120:
                learning_rate = 0.001 * lr_decay_factors[4]
            elif epoch >= 90:
                learning_rate = 0.001 * lr_decay_factors[3]
            elif epoch >= 60:
                learning_rate = 0.001 * lr_decay_factors[2]
            elif epoch >= 30:
                learning_rate = 0.001 * lr_decay_factors[1]

            accuracy_dev.append(accuracy)
            duration = time.time() - start_time
            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            if step % 10 == 0:
                val_acc = sess.run([val_batch_accuracy])
                val_accuracy_dev.append(val_acc)

                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)

                format_str = (
                    '%s: step %d, loss = %.2f, avg_batch_accuracy = %.2f, (%.1f examples/sec; %.3f '
                    'sec/batch), validation accuracy %.2f')
                # take averages of all the accuracies from the previous batches
                print(format_str % (datetime.now(), step, loss_value,
                                    np.mean(accuracy_dev), examples_per_sec,
                                    sec_per_batch, np.mean(val_accuracy_dev)))

            if step % 10 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

            # Save the model checkpoint periodically.
            if step % 100 == 0 or (step + 1) == FLAGS.max_steps:
                #set paths and saving ops for the full and sub_network
                checkpoint_path = os.path.join(FLAGS.save_dir, 'model.ckpt')
                #pre_trained_path = os.path.join(FLAGS.pre_trained_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
Example #6
# train_dir = r'D:\MyProjects\inpute_date_2'  # path the training samples are read from
train_dir = os.path.join(os.getcwd(), 'input_data_2')  # path the training samples are read from
# logs_train_dir = r'D:\MyProjects\understand\save_2'  # where the training logs are stored
logs_train_dir = os.path.join(os.getcwd(), 'understand', 'save_2')  # where the training logs are stored
# logs_test_dir = r'D:\PyCharm\KinZhang_First_ImageDetection\generate_data_2'
logs_test_dir = os.path.join(os.getcwd(), 'understand', 'generate_data_2')
# train, train_label = input_data.get_files(train_dir)
train, train_label, val, val_label = inpute_date_2.get_files(train_dir, 0.2)
# print(val)
# training data and labels
train_batch, train_label_batch = inpute_date_2.get_batch(train, train_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
# test data and labels
val_batch, val_label_batch = inpute_date_2.get_batch(val, val_label, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)

# training op definitions
train_logits = model_2.inference(train_batch, BATCH_SIZE, N_CLASSES)
train_loss = model_2.losses(train_logits, train_label_batch)
train_op = model_2.trainning(train_loss, learning_rate)
train_acc = model_2.evaluation(train_logits, train_label_batch)

# test op definitions
test_logits = model_2.inference(val_batch, BATCH_SIZE, N_CLASSES)
test_loss = model_2.losses(test_logits, val_label_batch)
test_acc = model_2.evaluation(test_logits, val_label_batch)
test_op = model_2.trainning(test_loss, learning_rate)

# merge all summaries for logging
summary_op = tf.summary.merge_all()

# create a session
sess = tf.Session()