Example #1
def test_pre_trained():
    FLAGS = get_args()
    # Read ImageNet label into a dictionary
    label_dict = loader.load_label_dict()
    # Create a Dataflow object for test images
    image_data = loader.read_image(im_name=FLAGS.im_name,
                                   n_channel=IM_CHANNEL,
                                   data_dir=FLAGS.data_path,
                                   batch_size=1)

    # Create a testing GoogLeNet model
    test_model = GoogLeNet(n_channel=IM_CHANNEL,
                           n_class=1000,
                           pre_trained_path=FLAGS.pretrained_path)
    test_model.create_test_model()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        while image_data.epochs_completed < 1:
            # read batch files
            batch_data = image_data.next_batch_dict()
            # get batch file names
            batch_file_name = image_data.get_batch_file_name()[0]
            # get prediction results
            pred = sess.run(test_model.layers['top_5'],
                            feed_dict={test_model.image: batch_data['image']})
            # display results
            for re_prob, re_label, file_name in zip(pred[0], pred[1],
                                                    batch_file_name):
                print('===============================')
                print('[image]: {}'.format(file_name))
                for i in range(5):
                    print('{}: probability: {:.02f}, label: {}'.format(
                        i + 1, re_prob[i], label_dict[re_label[i]]))
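
Example #1 depends on a get_args() helper that is not shown here. A minimal sketch of what it might look like, assuming argparse flags named after the FLAGS attributes used above (im_name, data_path, pretrained_path); the flag names, types, and defaults are assumptions, not the original implementation:

import argparse

def get_args():
    # Hypothetical argument parser matching the FLAGS attributes used in Example #1.
    parser = argparse.ArgumentParser()
    parser.add_argument('--im_name', type=str, default='.jpg',
                        help='Pattern of test image file names to load')
    parser.add_argument('--data_path', type=str, default='data/',
                        help='Directory containing the test images')
    parser.add_argument('--pretrained_path', type=str, default='pretrained/googlenet.npy',
                        help='Path to the pre-trained weight file')
    return parser.parse_args()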
Example #2
def predict():
    FLAGS = get_args()
    # Read Cifar label into a dictionary
    label_dict = loader.load_label_dict(dataset='cifar')
    # Create a Dataflow object for test images
    image_data = loader.read_image(
        im_name=FLAGS.im_name, n_channel=3,
        data_dir=IM_PATH, batch_size=1, rescale=False)

    # Create a testing GoogLeNet model
    test_model = GoogLeNet_cifar(
        n_channel=3, n_class=10, bn=True, sub_imagenet_mean=False)
    test_model.create_test_model()

    with tf.Session() as sess:
        saver = tf.train.Saver()
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, '{}inception-cifar-epoch-{}'.format(SAVE_PATH, FLAGS.load))
        while image_data.epochs_completed < 1:
            # read batch files
            batch_data = image_data.next_batch_dict()
            # get batch file names
            batch_file_name = image_data.get_batch_file_name()[0]
            # get prediction results
            pred = sess.run(test_model.layers['top_5'],
                            feed_dict={test_model.image: batch_data['image']})
            # display results
            for re_prob, re_label, file_name in zip(pred[0], pred[1], batch_file_name):
                print('===============================')
                print('[image]: {}'.format(file_name))
                for i in range(5):
                    print('{}: probability: {:.02f}, label: {}'
                          .format(i+1, re_prob[i], label_dict[re_label[i]]))
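
In these snippets, test_model.layers['top_5'] evaluates to a pair of tensors, unpacked above as pred[0] (top-5 probabilities) and pred[1] (top-5 class indices). One plausible way such a node could be built, assuming the model exposes class logits (build_top_5 and the logits argument are illustrative names, not part of the original models):

import tensorflow as tf

def build_top_5(logits):
    # Convert class logits to probabilities, then keep the five best classes.
    # tf.nn.top_k returns (values, indices), which matches the
    # (probabilities, labels) unpacking used in the display loops above.
    probs = tf.nn.softmax(logits)
    return tf.nn.top_k(probs, k=5, sorted=True)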
Example #3
def detect():
    config = parscfg.ConfigParser('configs/config_path.cfg',
                                  'configs/coco80.cfg')

    label_dict, category_index = loader.load_coco80_label_yolo()
    # Create a Dataflow object for test images
    image_data = loader.read_image(im_name=config.im_name,
                                   n_channel=config.n_channel,
                                   data_dir=config.data_dir,
                                   batch_size=config.test_bsize,
                                   rescale=config.im_rescale)

    test_model = YOLOv3(
        bsize=config.test_bsize,
        n_channel=config.n_channel,
        n_class=config.n_class,
        anchors=config.anchors,
        feature_extractor_trainable=False,
        detector_trainable=False,
        pre_trained_path=config.coco_pretrained_path,
    )
    test_model.create_test_model()

    sessconfig = tf.ConfigProto()
    sessconfig.gpu_options.allow_growth = True
    with tf.Session(config=sessconfig) as sess:
        sess.run(tf.global_variables_initializer())

        test_model.predict_epoch_or_step(sess,
                                         image_data,
                                         config.im_rescale,
                                         config.obj_score_thr,
                                         config.nms_iou_thr,
                                         label_dict,
                                         category_index,
                                         config.save_path,
                                         run_type='epoch')
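
Example #3 enables allow_growth so TensorFlow claims GPU memory incrementally rather than reserving the whole device at session creation. If a fixed budget is preferred instead, TF1 also accepts a per-process memory fraction; a minimal sketch (the 0.5 fraction is an arbitrary example value):

import tensorflow as tf

# Reserve at most half of the GPU memory for this process
# instead of growing allocations on demand.
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
sessconfig = tf.ConfigProto(gpu_options=gpu_options)
with tf.Session(config=sessconfig) as sess:
    sess.run(tf.global_variables_initializer())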
Example #4
def predict():
    FLAGS = get_args()
    # load class id and the corresponding class name
    label_dict = loader.load_imagenet1k_label_darknet()
    # create a Dataflow object for test images
    image_data = loader.read_image(
        im_name=FLAGS.im_name,
        n_channel=3,
        batch_size=1,
        rescale=FLAGS.rescale,
        data_dir=FLAGS.data_dir)

    # create test model
    test_model = DarkNet53(
        n_channel=3,
        n_class=1000,
        pre_trained_path=FLAGS.pretrained_path,
        trainable=False)
    test_model.create_valid_model()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        while image_data.epochs_completed < 1:
            # read batch files
            batch_data = image_data.next_batch_dict()
            # get batch file names
            batch_file_name = image_data.get_batch_file_name()[0]
            # get prediction results
            pred = sess.run(test_model.layers['top_5'],
                            feed_dict={test_model.image: batch_data['image']})
            # display results
            for re_prob, re_label, file_name in zip(pred[0], pred[1], batch_file_name):
                print('===============================')
                print('[image]: {}'.format(file_name))
                for i in range(5):
                    print('{}: probability: {:.02f}, label: {}'
                          .format(i+1, re_prob[i], label_dict[re_label[i]]))
Example #5
def test_pre_trained():
    FLAGS = get_args()
    label_dict = loader.load_label_dict()
    image_data = loader.read_image(
        im_name=FLAGS.im_name, n_channel=IM_CHANNEL,
        data_dir=FLAGS.data_path, batch_size=1)

    test_model = VGG19(
        n_channel=IM_CHANNEL, n_class=1000, pre_trained_path=FLAGS.vgg_path)
    test_model.create_test_model()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        while image_data.epochs_completed < 1:
            # read batch files
            batch_data = image_data.next_batch_dict()
            # get batch file names
            batch_file_name = image_data.get_batch_file_name()[0]
            # get prediction results
            pred = sess.run(test_model.layers['top_5'],
                            feed_dict={test_model.image: batch_data['image']})

            for re_prob, re_label, file_name in zip(pred[0], pred[1], batch_file_name):
                print('===============================')
                print('[image]: {}'.format(file_name))
                for i in range(5):
                    print('{}: probability: {:.02f}, label: {}'
                          .format(i+1, re_prob[i], label_dict[re_label[i]]))
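
Each example defines a top-level function that the original scripts presumably invoke from a main guard; a minimal usage sketch, assuming the function is defined in the same script:

if __name__ == '__main__':
    test_pre_trained()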