Example #1
0
def PredictionMatrix():
    """Run the trained model on the validation split and report accuracy.

    Loads the validation data via LARGEload, scales pixel values to
    [0, 1], predicts classes with the module-level ``model``, prints
    per-class counts for both the ground truth and the predictions,
    prints and plots the confusion matrix, and reports accuracy.

    Returns:
        tuple: ``(accuracy, correct)`` where ``correct`` is the number
        of predictions matching ``y_valid`` and ``accuracy`` is the
        matching fraction.
    """
    print('Loading valid data...')
    (X_valid, y_valid, the_images) = LARGEload.load_valid_data()
    X_valid = X_valid.astype("float32")
    X_valid /= 255  # normalize pixels to [0, 1]
    print('Generating prediction...')
    print ("The actual values are:")
    # Per-class counts of the ground-truth labels.
    y = np.bincount(y_valid)
    ii = np.nonzero(y)[0]
    # BUG FIX: zip() is lazy in Python 3 — materialize it so the
    # (class, count) pairs actually print instead of "<zip object ...>".
    print (list(zip(ii, y[ii])))
    y_pred = model.predict_classes(X_valid, batch_size=20)
    # Per-class counts of the predicted labels.
    y = np.bincount(y_pred)
    ii = np.nonzero(y)[0]
    print ("The predicted values are:")
    print (list(zip(ii, y[ii])))
    print ("Our confusion matrix is:")
    cm = confusion_matrix(y_valid, y_pred)
    print(cm)
    plot_confusion_matrix(cm, weights=save_name[:-5])
    # How many samples match the ground-truth validation labels?
    # Generalized: use the actual validation-set size instead of a
    # hard-coded 5000, so the report stays correct for any split size.
    total = len(y_valid)
    correct = np.sum(y_pred == y_valid)
    print ("Number of correct classifications is %d/%d" % (correct, total))
    # accuracy = number correct / total number
    accuracy = float(correct) / total
    print ("Accuracy of %f" % accuracy)
    return accuracy, correct
Example #2
0
def test():
    """Evaluate the trained model on the held-out test set.

    Loads the test data via LARGEload, one-hot encodes the labels into
    5 classes, scales pixels to [0, 1], and prints the evaluation score
    produced by the module-level ``model``.
    """
    features, labels = LARGEload.load_test_data()
    one_hot = np_utils.to_categorical(labels, 5)
    print(features.shape[0], 'test samples')
    features = features.astype("float32")
    features /= 255  # normalize pixels to [0, 1]
    print('Testing...')
    result = model.evaluate(features, one_hot, batch_size=20)
    print('Test score:', result)
Example #3
0
def BatchGenerator():
    """Yield normalized training batches, one per data split.

    For each split index up to the module-level ``splits``, loads that
    slice of the training data via LARGEload, one-hot encodes the labels
    into 5 classes, scales pixels to [0, 1], and yields
    ``(X, Y, next_split, class_weight)`` where ``next_split`` is the
    1-based index of the split just produced.
    """
    for idx in range(splits):  # splits * split_size = training_size
        features, labels, class_weight = LARGEload.load_train_data(idx)
        one_hot = np_utils.to_categorical(labels, 5)
        features = features.astype("float32") / 255
        yield (features, one_hot, idx + 1, class_weight)