示例#1
1
def train(holdout=False, holdout_list=None):
    """Train a CNN on the loaded dataset and evaluate it on the test split.

    Parameters
    ----------
    holdout : bool
        When True, drop every train/dev sample whose description appears
        in ``holdout_list`` before fitting.
    holdout_list : list or None
        Descriptions to exclude.  ``None`` (the default) means no
        exclusions; using None instead of ``[]`` avoids the shared
        mutable-default-argument pitfall.
    """
    if holdout_list is None:
        holdout_list = []

    # load data
    X, Y = load_X_and_Y()
    x_train, x_dev, x_test = X
    y_train, y_dev, y_test = Y

    # holdout training and dev data if requested
    if holdout:
        x_train_all_descr, x_dev_all_descr, _ = load_X_descr()

        # Keep only samples whose description is NOT held out.
        kept_train = [(x, y) for x, y, d
                      in zip(x_train, y_train, x_train_all_descr)
                      if d not in holdout_list]
        kept_dev = [(x, y) for x, y, d
                    in zip(x_dev, y_dev, x_dev_all_descr)
                    if d not in holdout_list]

        # Re-pack as arrays with the shapes the CNN expects.
        x_train = np.array([x for x, _ in kept_train]).reshape((-1, 224, 224, 3))
        y_train = np.array([y for _, y in kept_train]).reshape((-1, 1))

        x_dev = np.array([x for x, _ in kept_dev]).reshape((-1, 224, 224, 3))
        y_dev = np.array([y for _, y in kept_dev]).reshape((-1, 1))

    # train model
    model = CNN()
    model.fit(x_train, y_train, x_dev, y_dev, save=True)
    model.evaluate(x_test, y_test)
示例#2
0
def train(holdout=False, holdout_list=None):
    """Train a CNN and evaluate it on the held-back test split.

    Parameters
    ----------
    holdout : bool
        When True, exclude any train/dev sample whose description appears
        in ``holdout_list`` before fitting.
    holdout_list : list or None
        Descriptions to hold out.  Defaults to no exclusions; ``None``
        replaces the original mutable default ``[]``, which is shared
        across calls in Python.
    """
    if holdout_list is None:
        holdout_list = []

    # load data
    X, Y = load_X_and_Y()
    x_train, x_dev, x_test = X
    y_train, y_dev, y_test = Y

    # holdout training and dev data if requested
    if holdout:
        X_descr = load_X_descr()
        x_train_all_descr, x_dev_all_descr, _ = X_descr

        # Keep only the samples whose description was not held out.
        holdout_x_train = []
        holdout_y_train = []
        for x, y, descr in zip(x_train, y_train, x_train_all_descr):
            if descr not in holdout_list:
                holdout_x_train.append(x)
                holdout_y_train.append(y)

        holdout_x_dev = []
        holdout_y_dev = []
        for x, y, descr in zip(x_dev, y_dev, x_dev_all_descr):
            if descr not in holdout_list:
                holdout_x_dev.append(x)
                holdout_y_dev.append(y)

        # Re-pack as arrays shaped for the CNN (224x224 RGB inputs).
        x_train = np.array(holdout_x_train).reshape((-1, 224, 224, 3))
        y_train = np.array(holdout_y_train).reshape((-1, 1))

        x_dev = np.array(holdout_x_dev).reshape((-1, 224, 224, 3))
        y_dev = np.array(holdout_y_dev).reshape((-1, 1))

    # train model
    model = CNN()
    model.fit(x_train, y_train, x_dev, y_dev, save=True)
    model.evaluate(x_test, y_test)
示例#3
0
def train():
    """Fit a CNN on the train/dev splits and report test performance."""
    # Unpack the (train, dev, test) splits for inputs and labels in one go.
    (x_train, x_dev, x_test), (y_train, y_dev, y_test) = load_X_and_Y()

    # Fit with dev-set monitoring, persist the weights, then score on test.
    model = CNN()
    model.fit(x_train, y_train, x_dev, y_dev, save=True)
    model.evaluate(x_test, y_test)
def test_train():
    """Smoke-test CNN training on MNIST and check loss/accuracy values."""
    from tensorflow.keras.datasets import mnist
    import tensorflow.keras as keras

    batch_size = 128
    num_classes = 10
    n_epochs = 1
    rows, cols = 28, 28

    # Load MNIST, add the channel axis, and one-hot encode the labels.
    (train_x, train_y), (test_x, test_y) = mnist.load_data()
    train_x = train_x.reshape(60000, 28, 28, 1)
    test_x = test_x.reshape(10000, 28, 28, 1)
    train_y = keras.utils.to_categorical(train_y, num_classes)
    test_y = keras.utils.to_categorical(test_y, num_classes)

    # Two conv layers, one pooling layer, and a dense classifier head.
    net = CNN()
    net.add_input_layer(shape=(28, 28, 1), name="Input")
    net.append_conv2d_layer(32, kernel_size=(3, 3))
    net.append_conv2d_layer(64, kernel_size=(3, 3))
    net.append_maxpooling2d_layer(pool_size=(2, 2))
    net.append_flatten_layer()
    net.append_dense_layer(128, activation="relu")
    net.append_dense_layer(num_classes, activation="softmax")
    net.set_loss_function("categorical_crossentropy")
    net.set_metric("accuracy")
    net.set_optimizer("Adagrad")
    net.train(train_x, train_y, batch_size=batch_size, num_epochs=n_epochs)

    scores = net.evaluate(test_x, test_y)
    expected = np.array([0.06997422293154523, 0.9907])

    # Loss and accuracy should match the reference run to two decimals.
    np.testing.assert_almost_equal(expected[0], scores[0], decimal=2)
    np.testing.assert_almost_equal(expected[1], scores[1], decimal=2)
示例#5
0
	def test(self):
		'''
		Evaluate a CNN (rebuilt with weights from 'model.h5') on the test
		images/labels stored on self, print the accuracy, and return it.

		NOTE(review): the original docstring mentioned drawing a plot, but
		this method does not draw anything.
		'''
		# Reshape flat test images to (N, 1, 45, 45) (channels-first).
		img_rows,img_columns = 45,45
		test_data = self.test_img.reshape((self.test_img.shape[0], img_rows,img_columns))
		test_data = test_data[:, np.newaxis, :, :]
		# Map each class-folder name under ./data to an integer label.
		# NOTE(review): os.listdir order is platform-dependent; this assumes
		# the same ordering that was used at training time — confirm.
		label_map={}
		count = 0
		for folder in os.listdir("./data"):
			label_map[folder]=count
			count+=1
		# Convert string labels to ints in place (mutates self.test_labels).
		for j in range(len(self.test_labels)):
			self.test_labels[j]=label_map[self.test_labels[j]]
		total_classes = count
		# One-hot encode for categorical cross-entropy.
		test_labels = np_utils.to_categorical(self.test_labels,total_classes)
		sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
		# Rebuild the network and load saved weights from 'model.h5'.
		clf = CNN().build(img_rows,img_columns,1,total_classes,'model.h5')
		clf.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"])
		loss, accuracy = clf.evaluate(test_data, test_labels, batch_size=self.b_size, verbose=1)
		print('Accuracy of Model: {:.2f}%'.format(accuracy * 100))
		return accuracy
示例#6
0
def test_evaluate():
    """Compare the CNN wrapper's evaluate() with an identically-configured
    raw Keras Sequential model.

    Both models use all-zero initializers, so with the same data, optimizer,
    loss, and epoch count their evaluation results should agree closely.
    """
    from tensorflow.keras.datasets import cifar10
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    # Small subsets keep the test fast.
    X_train = X_train[:100, :]
    y_train = y_train[:100]
    X_test=X_test[:100,:]
    y_test = y_test[:100]
    # np.random.seed(100)
    # Zero initializer makes both models start from identical weights.
    initilizer = tensorflow.keras.initializers.Zeros()
    # initilizer = tensorflow.keras.initializers.RandomUniform(minval=-0.05, maxval=0.05, seed=20)
    model_testing = CNN()
    # Reference model: plain Keras Sequential mirroring the wrapper below.
    model = Sequential()
    model.add(Conv2D(filters=64, kernel_size=3, strides=1, padding='same', activation='relu', trainable=True,
                     input_shape=(32, 32, 3), kernel_initializer=initilizer, bias_initializer=initilizer))
    model.add(Conv2D(filters=70, kernel_size=3, strides=1, padding='same', activation='relu', trainable=True,
                     kernel_initializer=initilizer, bias_initializer=initilizer))
    model.add(Conv2D(filters=75, kernel_size=3, strides=1, padding='same', activation='relu', trainable=True,
                     kernel_initializer=initilizer, bias_initializer=initilizer))
    model.add(Conv2D(filters=90, kernel_size=3, strides=1, padding='same', activation='relu', trainable=True,
                     kernel_initializer=initilizer, bias_initializer=initilizer))
    model.add(MaxPool2D(pool_size=2, padding='same', strides=1))
    model.add(Flatten())
    model.add(
        Dense(units=256, activation='relu', trainable=True, kernel_initializer=initilizer, bias_initializer=initilizer))
    model.add(
        Dense(units=256, activation='relu', trainable=True, kernel_initializer=initilizer, bias_initializer=initilizer))
    model.add(Dense(units=256, activation='sigmoid', trainable=True, kernel_initializer=initilizer,
                    bias_initializer=initilizer))
    model.compile(optimizer='Adagrad', loss='hinge', metrics=['mse'])
    # shuffle=False keeps the data order identical across the two runs.
    history = model.fit(x=X_train, y=y_train, batch_size=32, epochs=5, shuffle=False)




    # Wrapper model: same architecture built through the CNN helper API.
    model_testing.add_input_layer(shape=(32, 32, 3), name="")
    model_testing.append_conv2d_layer(num_of_filters=64, kernel_size=3, padding='same', strides=1, activation='relu',
                                      name="1")
    model_testing.append_conv2d_layer(num_of_filters=70, kernel_size=3, padding='same', strides=1, activation='relu',
                                      name="2")
    model_testing.append_conv2d_layer(num_of_filters=75, kernel_size=3, padding='same', strides=1, activation='relu',
                                      name="3")
    model_testing.append_conv2d_layer(num_of_filters=90, kernel_size=3, padding='same', strides=1, activation='relu',
                                      name="4")
    model_testing.append_maxpooling2d_layer(pool_size=2, padding='same', strides=1, name="5")
    model_testing.append_flatten_layer(name='6')
    model_testing.append_dense_layer(num_nodes=256, activation='relu', name='7')
    model_testing.append_dense_layer(num_nodes=256, activation='relu', name='8')
    model_testing.append_dense_layer(num_nodes=256, activation='sigmoid', name='9')
    model_testing.set_optimizer(optimizer='adagrad')
    model_testing.set_loss_function(loss='hinge')
    model_testing.set_metric(metric='mse')
    loss = model_testing.train(X_train=X_train, y_train=y_train, batch_size=32, num_epochs=5)

    model_evaluate = model.evaluate(X_test,y_test)

    model_testing_evaluate = model_testing.evaluate(X_test,y_test)

    #assert model_testing_evaluate == model_evaluate
    # Allow small numeric drift between the two implementations.
    assert np.allclose(model_testing_evaluate,model_evaluate,rtol=1e-2,atol=1e-2)
def test_evaluate():
    """Train a tiny CNN on a CIFAR-10 subset and sanity-check evaluate().

    Asserts only loose upper bounds on loss and metric, since training is
    stochastic.
    """
    cnn = CNN()
    num_epochs = 10
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()

    # Use small subsets so the test runs quickly; scale pixels to [0, 1].
    samples = 100
    X_train = X_train[0:samples, :]
    X_train = X_train.astype('float32') / 255

    test_samples = 10
    X_test = X_test[0:test_samples, :]
    X_test = X_test.astype('float32') / 255

    from tensorflow.keras.utils import to_categorical

    # One-hot encode, then trim labels to the same subset sizes.
    y_train = to_categorical(y_train, 10)
    y_train = y_train[0:samples, :]
    y_test = to_categorical(y_test, 10)
    y_test = y_test[0:test_samples, :]

    # Two conv layers feeding a 10-way dense head.
    cnn.add_input_layer(shape=(32, 32, 3))
    cnn.append_conv2d_layer(num_of_filters=64, kernel_size=(3, 3), activation='relu', name="conv1")
    cnn.append_conv2d_layer(num_of_filters=32, kernel_size=(3, 3), activation='relu', name="conv2")
    cnn.append_flatten_layer(name="flat1")
    cnn.append_dense_layer(num_nodes=10, activation="relu", name="dense1")
    cnn.set_optimizer(optimizer="SGD")
    cnn.set_loss_function(loss="hinge")
    cnn.set_metric(metric='accuracy')
    # batch_size=None lets the framework pick its default batch size; the
    # returned per-epoch losses are not needed by the assertions below.
    cnn.train(X_train=X_train, y_train=y_train, batch_size=None, num_epochs=num_epochs)

    (loss, metric) = cnn.evaluate(X=X_test, y=y_test)

    assert loss < 5
    assert metric < 2
def test_evaluate():
    """Train a CNN on a CIFAR-10 subset from deterministic random weights
    and check the evaluated result against a known reference value."""
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    number_of_train_samples_to_use = 500
    number_of_test_samples_to_use = 200
    X_train = X_train[0:number_of_train_samples_to_use, :]
    y_train = y_train[0:number_of_train_samples_to_use]
    X_test = X_test[0:number_of_test_samples_to_use, :]
    y_test = y_test[0:number_of_test_samples_to_use]

    # Conv/pool stack followed by a softmax classifier head.
    my_cnn = CNN()
    my_cnn.add_input_layer(shape=(32, 32, 3), name="input")
    my_cnn.append_conv2d_layer(num_of_filters=32,
                               kernel_size=(3, 3),
                               padding="same",
                               activation='relu',
                               name="conv1")
    my_cnn.append_maxpooling2d_layer(pool_size=(2, 2), name="pool1")
    my_cnn.append_conv2d_layer(num_of_filters=64,
                               kernel_size=3,
                               activation='relu',
                               name="conv2")
    my_cnn.append_maxpooling2d_layer(pool_size=(2, 2), name="pool2")
    my_cnn.append_conv2d_layer(num_of_filters=64,
                               kernel_size=3,
                               activation='relu',
                               name="conv3")
    my_cnn.append_flatten_layer(name="flat1")
    my_cnn.append_dense_layer(num_nodes=64, activation="relu", name="dense1")
    my_cnn.append_dense_layer(num_nodes=10,
                              activation="softmax",
                              name="dense2")

    # Deterministically initialize each trainable layer.  Re-seed before
    # every draw so the values per layer match the original code, which
    # reset the seed for each layer individually.
    for layer_name in ("conv1", "conv2", "conv3", "dense1", "dense2"):
        np.random.seed(seed=1)
        weights = my_cnn.get_weights_without_biases(layer_name=layer_name)
        my_cnn.set_weights_without_biases(np.random.rand(*weights.shape),
                                          layer_name=layer_name)

    my_cnn.set_loss_function()
    my_cnn.set_optimizer(optimizer="SGD", learning_rate=0.01, momentum=0.0)
    my_cnn.set_metric(metric="accuracy")
    my_cnn.train(X_train, y_train, 60, 10)
    acc = my_cnn.evaluate(X_test, y_test)
    # NOTE(review): exact float equality is brittle; the expected value is
    # kept from the original test.
    de = np.float32(0.07)
    assert (acc == de)
示例#9
0
def test_train_and_evaluate():
    """Train a small CNN on CIFAR-10 for one epoch, save the model, and
    check that training/evaluation metrics fall in sane ranges."""
    from tensorflow.keras.datasets import cifar10
    batch_size = 32
    num_classes = 10
    epochs = 1
    model_name = 'keras_cifar10_trained_model.h5'
    (train_images, train_labels), (test_images,
                                   test_labels) = cifar10.load_data()
    # One-hot encode the labels for categorical cross-entropy.
    train_labels = keras.utils.to_categorical(train_labels, num_classes)
    test_labels = keras.utils.to_categorical(test_labels, num_classes)

    # conv -> pool -> conv -> pool -> flatten -> softmax head.
    my_cnn = CNN()
    my_cnn.append_conv2d_layer(num_of_filters=32,
                               kernel_size=(3, 3),
                               padding="same",
                               activation='relu',
                               name="conv1",
                               input_shape=train_images.shape[1:])
    my_cnn.append_maxpooling2d_layer(pool_size=2,
                                     padding="same",
                                     strides=2,
                                     name="pool1")
    my_cnn.append_conv2d_layer(num_of_filters=64,
                               kernel_size=(3, 3),
                               padding="same",
                               activation='relu',
                               name="conv4")
    my_cnn.append_maxpooling2d_layer(pool_size=2,
                                     padding="same",
                                     strides=2,
                                     name="pool2")
    my_cnn.append_flatten_layer(name="flat1")
    my_cnn.append_dense_layer(num_nodes=10,
                              activation="softmax",
                              name="dense2")

    my_cnn.set_metric('accuracy')
    my_cnn.set_optimizer('RMSprop')
    my_cnn.set_loss_function('categorical_crossentropy')
    loss = my_cnn.train(train_images, train_labels, batch_size, epochs)

    # Save the trained model into the current working directory.
    file_path = os.path.join(os.getcwd(), model_name)
    my_cnn.save_model(model_file_name=file_path)

    print("loss :{0}".format(loss))
    # One epoch of training should yield exactly one recorded loss value.
    assert len(loss) == 1

    test_loss, test_acc = my_cnn.evaluate(test_images, test_labels)
    assert test_loss < 5
    assert test_acc < 1
示例#10
0
def test_evaluate():
    """Train the CNN wrapper on MNIST and compare evaluate() output with
    reference loss/accuracy values (to two decimals)."""
    from tensorflow.keras.datasets import mnist
    import tensorflow.keras as keras

    batch_size = 128
    num_classes = 10
    epochs = 3

    # the data, split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Add the single-channel dimension expected by Conv2D.
    x_train = x_train.reshape(60000, 28, 28, 1)
    x_test = x_test.reshape(10000, 28, 28, 1)

    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    # Two conv layers, one pooling layer, dense classifier head.
    model = CNN()
    model.add_input_layer(shape=(28, 28, 1), name="Input")
    model.append_conv2d_layer(32, kernel_size=(3, 3))
    model.append_conv2d_layer(64, kernel_size=(3, 3))
    model.append_maxpooling2d_layer(pool_size=(2, 2))
    model.append_flatten_layer()
    model.append_dense_layer(128, activation="relu")
    model.append_dense_layer(num_classes, activation="softmax")

    model.set_loss_function("categorical_crossentropy")
    model.set_metric("accuracy")
    model.set_optimizer("Adagrad")

    model.train(x_train, y_train, batch_size=batch_size, num_epochs=epochs)

    score = model.evaluate(x_test, y_test)

    actual = np.array([0.05093684684933396, 0.9907])

    # loss
    np.testing.assert_almost_equal(actual[0], score[0], decimal=2)

    # accuracy
    np.testing.assert_almost_equal(actual[1], score[1], decimal=2)
示例#11
0
def gridSearch(xTrain, yTrain, xDev, yDev, options):
    """Exhaustively search CNN hyper-parameter combinations.

    Trains a CNN for every combination produced by ``myProduct(options)``
    and scores it on the dev set, tracking the best cross-entropy seen.

    Returns a dict mapping each combo (as ``tuple(combo.items())``) to its
    (cross-entropy, accuracy) on the dev set.
    """
    paramCombos = myProduct(options)
    bestCombo, bestCrossEntropy = None, float('inf')
    scores = {}

    for combo in paramCombos:
        cnn = CNN(numFilters=combo['numFilters'], windowSize=combo['windowSize'])
        cnn.fit(xTrain[:combo['numTrain']], yTrain[:combo['numTrain']], numEpochs=combo['numEpochs'],
                batchSize=combo['batchSize'], verbose=True)
        crossEntropy, accuracy = cnn.evaluate(xDev, yDev, showAccuracy=True)
        scores[tuple(combo.items())] = (crossEntropy, accuracy)
        if crossEntropy < bestCrossEntropy:
            bestCombo, bestCrossEntropy = combo, crossEntropy
        # print() call form works under both Python 2 and Python 3
        # (the original used the Python-2-only print statement).
        print('Combo: {}, CE: {}, accuracy: {}'.format(combo, crossEntropy, accuracy))
    return scores
示例#12
0
	def train(self):
		"""Train a CNN on self.train_img/self.train_labels, evaluate it on
		the test split, save the weights to 'model.h5', plot the training
		history, and return the test accuracy.
		"""

		#Change data to required format
		# Reshape flat images to (N, 1, 45, 45) (channels-first).
		img_rows,img_columns = 45,45
		train_data = self.train_img.reshape((self.train_img.shape[0], img_rows,img_columns))
		train_data = train_data[:, np.newaxis, :, :]
		test_data = self.test_img.reshape((self.test_img.shape[0], img_rows,img_columns))
		test_data = test_data[:, np.newaxis, :, :]
		# Map each class-folder name to an integer label.
		# NOTE(review): os.listdir order is platform-dependent — the label
		# mapping is only stable if the directory contents do not change.
		label_map={}
		count = 0
		for folder in os.listdir("../src/data"):
			label_map[folder]=count
			count+=1
		# Convert string labels to ints in place (mutates self.*_labels).
		for i in range(len(self.train_labels)):
			self.train_labels[i]=label_map[self.train_labels[i]]
		for j in range(len(self.test_labels)):
			self.test_labels[j]=label_map[self.test_labels[j]]

		# Transform training and testing data to 10 classes in range [0,classes] ; num. of classes = 0 to 9 = 10 classes
		total_classes = count
		train_labels = np_utils.to_categorical(self.train_labels, total_classes)
		test_labels = np_utils.to_categorical(self.test_labels,total_classes)


		# Defing and compile the SGD optimizer and CNN model
		print('\n Compiling model...')
		# NOTE(review): `lr=` and `nb_epoch=` below are legacy Keras argument
		# names — confirm the installed Keras version still accepts them.
		sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
		clf = CNN().build(img_rows,img_columns,1,total_classes)
		clf.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"])

		# Initially train and test the model; If weight saved already, load the weights using arguments.
		num_epoch = self.num_epoch		# Number of epochs
		verb = 1			# Verbose
		print('\nTraining the Model...')
		model_info=clf.fit(train_data, train_labels, batch_size=self.b_size, nb_epoch=num_epoch,verbose=verb)

		# Evaluate accuracy and loss function of test data
		print('Evaluating Accuracy and Loss Function...')
		loss, accuracy = clf.evaluate(test_data, test_labels, batch_size=self.b_size, verbose=1)
		print('Accuracy of Model',(accuracy * 100))
		clf.save_weights('model.h5', overwrite=True)
		print(model_info.history)
		self.plot_model_history(model_info)
		return accuracy
示例#13
0
def gridSearch(xTrain, yTrain, xDev, yDev, options):
    """Grid-search CNN hyper-parameters against the dev set.

    For every combination yielded by ``myProduct(options)``, trains a CNN
    and records its (cross-entropy, accuracy) on (xDev, yDev), keeping
    track of the lowest cross-entropy seen.

    Returns a dict mapping ``tuple(combo.items())`` to
    (crossEntropy, accuracy).
    """
    paramCombos = myProduct(options)
    bestCombo, bestCrossEntropy = None, float('inf')
    scores = {}

    for combo in paramCombos:
        cnn = CNN(numFilters=combo['numFilters'],
                  windowSize=combo['windowSize'])
        cnn.fit(xTrain[:combo['numTrain']],
                yTrain[:combo['numTrain']],
                numEpochs=combo['numEpochs'],
                batchSize=combo['batchSize'],
                verbose=True)
        crossEntropy, accuracy = cnn.evaluate(xDev, yDev, showAccuracy=True)
        scores[tuple(combo.items())] = (crossEntropy, accuracy)
        if crossEntropy < bestCrossEntropy:
            bestCombo, bestCrossEntropy = combo, crossEntropy
        # print() call form is valid in both Python 2 and Python 3
        # (the original used the Python-2-only print statement).
        print('Combo: {}, CE: {}, accuracy: {}'.format(combo, crossEntropy,
                                                       accuracy))
    return scores
示例#14
0
def test_train_and_evaluate():
    """Train a small CNN on CIFAR-10 and sanity-check the length of the
    training history and the evaluation metrics."""
    # Initializing and adding layers
    print("*********** PLEASE WAIT FOR DATA TO LOAD ***********")
    (train_images, train_labels), (test_images,
                                   test_labels) = cifar10.load_data()
    # Scale pixel values to [0, 1].
    train_images, test_images = train_images / 255.0, test_images / 255.0
    new_cnn = CNN()
    new_cnn.add_input_layer(shape=(32, 32, 3), name="input")
    # NOTE(review): `strides=3` on the conv layers below looks like it may
    # have been meant as kernel_size=3 — confirm against the CNN API.
    new_cnn.append_conv2d_layer(32,
                                strides=3,
                                activation="relu",
                                name="conv2d_1")
    new_cnn.append_maxpooling2d_layer(pool_size=2, name="maxpool_1")
    new_cnn.append_conv2d_layer(64,
                                strides=3,
                                activation="relu",
                                name="conv2d_2")
    new_cnn.append_maxpooling2d_layer(pool_size=2, name="maxpool_2")
    new_cnn.append_conv2d_layer(64,
                                strides=3,
                                activation="relu",
                                name="conv2d_3")
    new_cnn.append_flatten_layer(name="flatten")
    new_cnn.append_dense_layer(64, activation="relu", name="dense_1")
    new_cnn.append_dense_layer(10, activation="softmax", name="dense_2")
    # Setting Compiler values
    new_cnn.set_loss_function(loss="SparseCategoricalCrossentropy")
    new_cnn.set_optimizer(optimizer="SGD")
    new_cnn.set_metric('accuracy')
    # Entering Num Epoch
    batch_size = 1000
    num_epoch = 10
    history = new_cnn.train(train_images,
                            train_labels,
                            batch_size=batch_size,
                            num_epochs=num_epoch)
    # Expect one history entry per batch per epoch.
    assert len(history) == len(train_images) / batch_size * num_epoch
    # Accuracy is a fraction <= 1; loss should be small for a trained model.
    evaluate = new_cnn.evaluate(train_images, train_labels)
    assert evaluate[1] <= 1
    assert evaluate[0] <= 3
示例#15
0
# Build the conv stack: two 32-filter conv layers, pool, two 64-filter conv
# layers, pool, then a dense classifier head.
model.append_conv2d_layer(num_of_filters=32,
                          kernel_size=3,
                          padding='same',
                          activation='relu')
model.append_conv2d_layer(num_of_filters=32,
                          kernel_size=3,
                          padding='same',
                          activation='relu')
model.append_maxpooling2d_layer(pool_size=2)
model.append_conv2d_layer(num_of_filters=64,
                          kernel_size=3,
                          padding='same',
                          activation='relu')
model.append_conv2d_layer(num_of_filters=64,
                          kernel_size=3,
                          padding='same',
                          activation='relu')
model.append_maxpooling2d_layer(pool_size=2)
model.append_flatten_layer()
model.append_dense_layer(num_nodes=512, activation='relu')
model.append_dense_layer(num_nodes=num_classes, activation='softmax')
model.set_optimizer(optimizer='RMSprop', learning_rate=0.0001)
model.set_loss_function('categorical_crossentropy')
model.set_metric(['accuracy'])
# Scale pixel values to [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# NOTE(review): `num_classes` is passed in the third positional slot of
# train() — confirm that slot is not batch_size in the CNN wrapper.
model.train(x_train, y_train, num_classes, epochs)
model.evaluate(x_test, y_test)
示例#16
0
import numpy as np

# Load the network with pre-trained weights from disk.
convNet = CNN("AlphaOneB.h5")

# Hand-crafted board fixtures: per-colour lists of coordinate pairs
# (presumably hex-board positions — confirm against the game module).
testBoardState = {"red": [(-3,2),(0,-1),(-2,0),(1,1)],
                    "blue": [(-1,0),(1,-1),(-2,2)],
                    "green": [(1,-3),(0,0),(-3,1),(2,1)]}
testExitState = {"red": [(-1,-1),(-2,0),(-3,3),(-1,1)],
                    "blue": [],
                    "green": [(-2,1),(0,0)]}
# Number of pieces each colour has already exited.
testExits = {"red": 1, "blue": 0, "green": 0}
testCurrentPlayer = "red"

# Manual debug harness: flip the literal False to True to exercise the
# network's evaluate() and print the adjusted prior for each legal move.
if False: # Switch this to test the eval function of the CNN

    v, p = convNet.evaluate(boardState=testExitState, colour=testCurrentPlayer, exits=testExits)
    nextMoves = getNextMoves(testExitState, testCurrentPlayer)
    P = adjustNNPriors(p, testExitState, testCurrentPlayer)

    for move in nextMoves:
        print("{}: {}".format(move, P[encodeMove(move)]))

# Second harness: progressively shrink red's piece list and re-evaluate,
# to inspect how the value output v responds.
if False: # Switch this to test v outputs

    full = [(-3,2),(0,-1),(-2,0),(1,1)]
    for i in range(0, len(full)+1):
        bs = deepcopy(testBoardState)
        bs["red"] = full[0:i]

        v, p = convNet.evaluate(boardState=bs, colour=testCurrentPlayer, exits=testExits)
示例#17
0
# Build and compile the classifier, optionally loading saved weights.
clf = CNN(width=img_rows, height=img_columns, depth=1, total_classes=total_classes, weightsPath=args["weights"])
clf.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"])

# Initially train and test the model; If weight saved already, load the weights using arguments.
b_size = 128		# Batch size
num_epoch = 20		# Number of epochs
verb = 1			# Verbose

# If weights saved and argument load_model; Load the pre-trained model.
if args["load_model"] < 0:
	print('\nTraining the Model...')
	clf.fit(trainData, trainLabels, batch_size=b_size, nb_epoch=num_epoch,verbose=verb)

	# Evaluate accuracy and loss function of test data
	print('Evaluating Accuracy and Loss Function...')
	loss, accuracy = clf.evaluate(test_img, test_labels, batch_size=128, verbose=1)
	print('Accuracy of Model: {:.2f}%'.format(accuracy * 100))


# Save the pre-trained model.
if args["save_model"] > 0:
	print('Saving weights to file...')
	clf.save_weights(args["weights"], overwrite=True)


# Show the images using OpenCV and making random selections.
for num in np.random.choice(np.arange(0, len(test_labels)), size=(5,)):
	# Predict the label of digit using CNN.
	# BUG FIX: the original indexed with `i`, which is not the loop
	# variable here; `num` is the randomly chosen sample index.
	probs = clf.predict(test_img[np.newaxis, num])
	prediction = probs.argmax(axis=1)
示例#18
0
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape, 'x_test.shape:', x_test.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # Convert class vectors to binary class matrices
    y_train = np_utils.to_categorical(y_train, num_classes)
    y_test = np_utils.to_categorical(y_test, num_classes)

    # Build the model
    model = CNN(num_classes, input_shape, ngpus)
    model.fit(x_train,
              y_train,
              batch_size=batch_size * ngpus,
              epochs=nb_epochs,
              verbose=1,
              validation_data=(x_test, y_test))

    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])