def evaluate(label_indices=None,
             channel_means=None,
             data_path='../data_trial',
             minibatch_size=32,
             num_batches_to_test=10,
             checkpoint_dir='tf_data/sample_model'):
    """Evaluate a trained model on batches of test images and return accuracy.

    Parameters
    ----------
    label_indices : dict, optional
        Mapping from class-label string to output index. Defaults to the
        five-class mapping {'1': 0, ..., '5': 4}.
    channel_means : np.ndarray, optional
        Per-channel means subtracted from the images by the data loader.
    data_path : str
        Directory containing the evaluation data.
    minibatch_size : int
        Number of images per generated batch.
    num_batches_to_test : int
        Number of minibatches to run through the model.
    checkpoint_dir : str
        Directory holding the model checkpoint to restore.

    Returns
    -------
    float
        Fraction of correctly classified images, rounded to 4 decimals.
    """
    # Avoid mutable default arguments (shared across calls): build the
    # default dict/array fresh on each invocation instead.
    if label_indices is None:
        label_indices = {'1': 0, '2': 1, '3': 2, '4': 3, '5': 4}
    if channel_means is None:
        channel_means = np.array([147.12697, 160.21092, 167.70029])

    print("1. Loading data")
    data = data_loader(label_indices=label_indices,
                       channel_means=channel_means,
                       train_test_split=0.5,
                       data_path=data_path)

    print("2. Instantiating the model")
    M = Model(mode='test')

    # Evaluate on test images:
    GT = Generator(data.test.X, data.test.y, minibatch_size=minibatch_size)

    num_correct = 0
    num_total = 0

    print("3. Evaluating on test images")
    for _ in range(num_batches_to_test):
        GT.generate()
        yhat = M.predict(X=GT.X, checkpoint_dir=checkpoint_dir)
        # Compare predicted class index against the one-hot ground truth.
        correct_predictions = (np.argmax(yhat, axis=1) == np.argmax(GT.y, axis=1))
        num_correct += np.sum(correct_predictions)
        num_total += len(correct_predictions)

    return round(num_correct / num_total, 4)
# ---- Example 2 ----
    def do_run(self):
        """Run the configured number of simulations, recording each result."""
        for _ in range(self.num_simulations):
            # A fresh model instance per simulation run.
            model = Model(self.model_config)
            model.initialize_agents()
            self.results.append(model.do_update())
            self.write_results(model)
# ---- Example 3 ----
def evaluate_with_path(image_path):
    """Load one image from disk, preprocess it, and run the model on it.

    Parameters
    ----------
    image_path : str
        Path to an image readable by OpenCV.

    Returns
    -------
    Whatever Model.predict returns for a single-image batch.

    Raises
    ------
    ValueError
        If the image cannot be read from ``image_path``.
    """
    im = cv2.imread(image_path)
    if im is None:
        # cv2.imread returns None (no exception) for a missing/unreadable
        # file; fail loudly here instead of crashing later in cv2.resize.
        raise ValueError(f"Could not read image: {image_path}")

    input_image_size = (227, 227)
    im = cv2.resize(im, (input_image_size[0], input_image_size[1]))

    # Per-channel means to subtract (same constants used by evaluate()).
    channel_means = np.array([147.12697, 160.21092, 167.70029])

    # Single-image batch of shape (1, H, W, 3), float32.
    X = np.zeros((1, input_image_size[0], input_image_size[1], 3),
                 dtype='float32')
    X[0, :, :, :] = im - channel_means

    M = Model(mode='test')
    return M.predict(X)
# ---- Example 4 ----
def evaluate(label_indices=None,
             channel_means=None,
             data_path='test_data',
             minibatch_size=32,
             num_batches_to_test=10,
             checkpoint_dir='tf_data/sample_model'):
    """Evaluate the two-class bike classifier on the test dataset.

    Parameters
    ----------
    label_indices : dict, optional
        Mapping from class-label string to output index. Defaults to
        {'mountain_bikes': 1, 'road_bikes': 0}.
    channel_means : np.ndarray, optional
        Per-channel means subtracted from the images by the data loader.
    data_path : str
        Directory containing the evaluation data.
    minibatch_size : int
        Kept for interface compatibility; not used here (Model.test
        consumes the whole data object).
    num_batches_to_test : int
        Kept for interface compatibility; not used here.
    checkpoint_dir : str
        Kept for interface compatibility; not used here.

    Returns
    -------
    Accuracy as reported by Model.test(data).
    """
    # Avoid mutable default arguments (shared across calls): build the
    # default dict/array fresh on each invocation instead.
    if label_indices is None:
        label_indices = {'mountain_bikes': 1, 'road_bikes': 0}
    if channel_means is None:
        channel_means = np.array([147.12697, 160.21092, 167.70029])

    print("1. Loading data")
    data = data_loader(label_indices=label_indices,
                       channel_means=channel_means,
                       train_test_split=0,
                       data_path=data_path)

    print("2. Instantiating the model")
    M = Model(mode='test')

    # Evaluate on test images:
    accuracy = M.test(data)
    return accuracy
# ---- Example 5 ----
##
## Simple Training Script
##
## Instantiates the sample model in training mode and runs its training
## loop. All configuration lives inside Model itself.

from sample_model import Model

# Build the model in 'train' mode and start training immediately.
M = Model(mode='train')
M.train()
# ---- Example 6 ----
# Ad-hoc prediction script: restores a checkpointed model and runs a
# single prediction.
import numpy as np
import time
from sample_model import Model
from data_loader import data_loader
from generator import Generator  


# Directory holding the trained model checkpoint to restore.
checkpoint_dir='tf_data/sample_model'
# NOTE(review): X here is a filesystem path string passed straight to
# Model.predict, whereas elsewhere in this file predict() receives a
# preprocessed numpy batch. Presumably Model.predict also accepts a path
# (or this was never run) -- confirm against sample_model.Model.
X='C:/Users/Karthick/Desktop/cvproject/data/5/00000_00000.ppmspeed_2_.ppm'
M = Model(mode = 'test')
yhat = M.predict(X = X, checkpoint_dir = checkpoint_dir)
	


# Dead code below: a manual TensorFlow session/checkpoint-restore attempt
# superseded by Model.predict above. Kept commented out by the author.
# save_dir="C:/Users/Karthick/Desktop/cvproject/speedlimitckp/"
# #saver = tf.train.Saver()
# sess = tf.Session()
# saver = tf.train.import_meta_graph('C:/Users/Karthick/Desktop/cvproject/src/tf_data/sample_model/model_epoch70.ckpt.meta')
# saver.restore(sess,tf.train.latest_checkpoint('C:/Users/Karthick/Desktop/cvproject/src/tf_data/sample_model/'))
# #checkpoint_name = tf.train.latest_checkpoint(save_dir)
# #saver.restore(sess, checkpoint_name)
# yhat_numpy = sess.run(yhat, feed_dict = {X : X, keep_prob: 1.0})
# print(yhat_numpy)

# #C:/Users/Karthick/Desktop/cvproject/src/tf_data/sample_model