def run():
    tfrecord_dataset = tf_record_dataset.TFRecordDataset(
        tfrecord_dir=FLAGS.record_dir,
        dataset_name=FLAGS.dataset_name,
        num_classes=FLAGS.num_classes)
    dataset = tfrecord_dataset.get_split(split_name='validation')

    # Choose a model: vgg, inception v1~v3, alexnet, resnet, etc.
    inception = nets.inception

    X_image = tf.placeholder(tf.float32, shape=[
        None,
        inception.inception_v3.default_image_size,
        inception.inception_v3.default_image_size,
        3
    ])

    images, labels, _ = load_batch(
        dataset,
        height=inception.inception_v3.default_image_size,
        width=inception.inception_v3.default_image_size,
        num_classes=FLAGS.num_classes)

    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits, end_points = inception.inception_v3(
            inputs=X_image, num_classes=FLAGS.num_classes)

    predictions = tf.argmax(logits, 1)

    # Placeholder for the one-hot labels; sized by FLAGS.num_classes
    # instead of a hard-coded 5.
    Y_label = tf.placeholder(tf.float32, shape=[None, FLAGS.num_classes])
    targets = tf.argmax(Y_label, 1)

    # The log directory must already contain a trained checkpoint.
    log_dir = FLAGS.log
    if not tf.gfile.Exists(log_dir):
        raise Exception("trained checkpoint does not exist at %s" % log_dir)
    checkpoint_path = tf.train.latest_checkpoint(log_dir)

    with tf.Session() as sess:
        saver = tf.train.Saver()
        saver.restore(sess, checkpoint_path)
        with slim.queues.QueueRunners(sess):
            for i in range(100):
                # Pull a batch from the input queue, then feed it back in
                # through the placeholders to get predictions.
                np_image, np_label = sess.run([images, labels])
                predicted, target = sess.run(
                    [predictions, targets],
                    feed_dict={X_image: np_image, Y_label: np_label})
                print("Predict : ", predicted)
                print("Answer  : ", target)
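Both the evaluation function above and the training function below read their configuration from FLAGS. The flag definitions are not shown in the original; a minimal sketch, assuming tf.app.flags, with flag names mirroring the attributes used (the default values here are hypothetical):

# Hypothetical flag definitions matching the FLAGS attributes used in run().
# The names are taken from the code above; the defaults are assumptions.
import tensorflow as tf

FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('record_dir', '/tmp/tfrecord', 'Directory containing the TFRecord files.')
tf.app.flags.DEFINE_string('dataset_name', 'dog', 'Name of the dataset.')
tf.app.flags.DEFINE_integer('num_classes', 120, 'Number of target classes.')
tf.app.flags.DEFINE_string('log', '/tmp/train_log', 'Directory for checkpoints and summaries.')
tf.app.flags.DEFINE_float('learning_rate', 0.001, 'Learning rate for the optimizer.')
tf.app.flags.DEFINE_integer('step', 1000, 'Number of training steps.')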
def run():
    tfrecord_dataset = tf_record_dataset.TFRecordDataset(
        tfrecord_dir=FLAGS.record_dir,
        dataset_name=FLAGS.dataset_name,
        num_classes=FLAGS.num_classes)
    dataset = tfrecord_dataset.get_split(split_name='train')

    # Choose a model: vgg, inception v1~v3, alexnet, resnet, etc.
    inception = nets.inception

    images, labels, _ = load_batch(
        dataset,
        height=inception.inception_v3.default_image_size,
        width=inception.inception_v3.default_image_size,
        num_classes=FLAGS.num_classes)

    with slim.arg_scope(inception.inception_v3_arg_scope()):
        logits, end_points = inception.inception_v3(
            inputs=images, num_classes=FLAGS.num_classes)

    # Loss function: softmax_cross_entropy registers the loss in slim's
    # loss collection, which get_total_loss() then sums together with
    # any regularization losses.
    loss = slim.losses.softmax_cross_entropy(logits, labels)
    total_loss = slim.losses.get_total_loss()

    # Optimizer
    optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)

    # Accuracy on the current training batch
    predictions = tf.argmax(logits, 1)
    targets = tf.argmax(labels, 1)
    correct_prediction = tf.equal(predictions, targets)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    tf.summary.scalar('losses/Total', total_loss)
    tf.summary.scalar('accuracy', accuracy)
    summary_op = tf.summary.merge_all()

    log_dir = FLAGS.log
    if not tf.gfile.Exists(log_dir):
        tf.gfile.MakeDirs(log_dir)

    # Define the training operation
    train_op = slim.learning.create_train_op(total_loss, optimizer)

    final_loss = slim.learning.train(
        train_op,
        log_dir,
        number_of_steps=FLAGS.step,
        summary_op=summary_op,
        save_summaries_secs=30,
        save_interval_secs=30)
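Launching training then only needs the usual TF 1.x entry point. This wrapper is not shown in the original and is assumed:

# Hypothetical entry point: tf.app.run() parses the flags and calls main().
def main(_):
    run()

if __name__ == '__main__':
    tf.app.run()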
# Create the 'train' split of the dataset
dataset = mnist_tfrecord_dataset.get_split(split_name='train')

# Create a slim.dataset_data_provider.DatasetDataProvider
provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
[image, label] = provider.get(['image', 'label'])

# Quick visual sanity check of a few samples
import matplotlib.pyplot as plt

with tf.Session() as sess:
    with slim.queues.QueueRunners(sess):
        plt.figure()
        for i in range(4):
            np_image, np_label = sess.run([image, label])
            height, width, _ = np_image.shape
            name = dataset.labels_to_names[np_label]

            plt.subplot(2, 2, i + 1)
            plt.imshow(np_image)
            plt.title('%s, %d x %d' % (name, height, width))
            plt.axis('off')
        plt.show()

# Create batches with tf.train.batch (wrapped by load_batch)
images, labels, _ = load_batch(dataset)
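The load_batch helper used throughout comes from utils.dataset_utils and is not reproduced in this post. A plausible sketch, assuming it wraps a DatasetDataProvider in tf.train.batch and one-hot encodes the labels; the signature is inferred from the call sites above and below:

# A plausible sketch of utils.dataset_utils.load_batch -- the real helper is
# not shown here; the signature is inferred from how it is called.
import tensorflow as tf

slim = tf.contrib.slim

def load_batch(dataset, batch_size=32, height=299, width=299, num_classes=None):
    # Read single examples from the TFRecord-backed dataset.
    provider = slim.dataset_data_provider.DatasetDataProvider(dataset)
    image, label = provider.get(['image', 'label'])

    # Resize to the network's input size and convert to float in [0, 1].
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = tf.image.resize_images(image, [height, width])

    # One-hot encode the label (fall back to the dataset's class count).
    if num_classes is None:
        num_classes = dataset.num_classes
    label = tf.one_hot(label, num_classes)

    # Assemble mini-batches with a queue runner.
    images, labels = tf.train.batch(
        [image, label], batch_size=batch_size, capacity=2 * batch_size)
    return images, labels, dataset.num_samples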
from utils.dataset_utils import load_batch
from datasets import tf_record_dataset

tf.logging.set_verbosity(tf.logging.INFO)

# Load the evaluation data
batch_size = 16
tfrecord_dataset = tf_record_dataset.TFRecordDataset(
    tfrecord_dir='/home/itrocks/Git/Tensorflow/dog-breed-classification.tf/raw_data/dog/tfrecord',
    dataset_name='dog',
    num_classes=120)

# Select the 'validation' split.
dataset = tfrecord_dataset.get_split(split_name='validation')
images, labels, num_samples = load_batch(
    dataset, batch_size=batch_size, height=224, width=224)

# Load the network model: VGG-16 (is_training=False for evaluation)
vgg = tf.contrib.slim.nets.vgg
with slim.arg_scope(vgg.vgg_arg_scope()):
    logits, end_points = vgg.vgg_16(
        inputs=images, num_classes=120, is_training=False)

# Define the metrics: reduce logits and one-hot labels to class indices
predictions = tf.argmax(logits, 1)
targets = tf.argmax(labels, 1)
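The snippet ends once predictions and targets have been reduced to class indices. A hedged sketch of how the evaluation could proceed from there with slim's streaming metrics and evaluate_once; the checkpoint and log directories are assumptions, not paths from the original:

# Hypothetical continuation: stream accuracy over the validation set.
# The checkpoint and log paths below are assumed, not from the original.
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
    'eval/accuracy': slim.metrics.streaming_accuracy(predictions, targets),
})

# One pass over the validation split.
num_batches = int(num_samples / batch_size)

slim.evaluation.evaluate_once(
    master='',
    checkpoint_path=tf.train.latest_checkpoint('/tmp/train_log'),  # assumed path
    logdir='/tmp/eval_log',                                        # assumed path
    num_evals=num_batches,
    eval_op=list(names_to_updates.values()))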