Code Example #1
import sys
import logging

import mxnet as mx

from reader import load_mnist

MODEL_DIR = "/models"
model_prefix = "ece408"
dataset_size = float("inf")

if len(sys.argv) > 1:
    dataset_size = int(sys.argv[1])
if len(sys.argv) > 2:
    print("Usage:", sys.argv[0], "<dataset_size>")
    print("    <dataset_size> = [0 - 10000]")
    sys.exit(-1)

# Log to stdout for MXNet
logging.getLogger().setLevel(logging.DEBUG)

print "Loading fashion-mnist data...",
test_images, test_labels = load_mnist(
    path="/fashion-mnist", rows=70, cols=70, kind="t10k-70")
print "done"

# Reduce the size of the dataset, if desired
dataset_size = max(0, min(dataset_size, 10000))
test_images = test_images[:dataset_size]
test_labels = test_labels[:dataset_size]

# Cap batch size at the size of our test data
batch_size = len(test_images)

# Get iterators that cover the dataset
test_iter = mx.io.NDArrayIter(
    test_images, test_labels, batch_size)

# Evaluate the network
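
The excerpt stops at the evaluation step. A minimal sketch of how it could continue, following the Module pattern of Code Example #2 below; the checkpoint epoch and the choice of mx.metric.Accuracy are assumptions, not taken from this project:

lenet_model = mx.mod.Module.load(prefix=MODEL_DIR + "/" + model_prefix,
                                 epoch=1,  # assumed checkpoint epoch
                                 context=mx.gpu())
lenet_model.bind(data_shapes=test_iter.provide_data,
                 label_shapes=test_iter.provide_label,
                 for_training=False)
acc = mx.metric.Accuracy()
lenet_model.score(test_iter, acc)
print(acc)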
Code Example #2
#!/usr/bin/env python

import mxnet as mx
import logging
from reader import load_mnist

# Log to stdout for MXNet
logging.getLogger().setLevel(logging.DEBUG)

print "Loading fashion-mnist data...",
test_images, test_labels = load_mnist(path="/fashion-mnist", kind="t10k")
# Reshape the data to the format expected by MXNet's default convolutional layers
test_images = test_images.reshape((10000, 1, 28, 28))
test_labels = test_labels.reshape(10000)
# You can reduce the size of the train or test datasets by uncommenting the following lines
# test_images = test_images[:1000]
# test_labels = test_labels[:1000]
print "done"

# Do everything in a single batch
batch_size = len(test_images)

# Get iterators that cover the dataset
test_iter = mx.io.NDArrayIter(test_images, test_labels, batch_size)

# Evaluate the network
print "Loading model...",
lenet_model = mx.mod.Module.load(prefix='/models/baseline',
                                 epoch=1,
                                 context=mx.gpu())
lenet_model.bind(data_shapes=test_iter.provide_data,
                 label_shapes=test_iter.provide_label)
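
The reader module itself is not included in these excerpts. For the plain kind="t10k" signature used here, a loader consistent with the canonical fashion-mnist reader looks like the following sketch (it assumes the standard gzipped idx file layout and is not necessarily the project's actual reader.py):

import os
import gzip
import numpy as np

def load_mnist(path, kind='train'):
    """Load fashion-mnist images and labels from gzipped idx files."""
    labels_path = os.path.join(path, '%s-labels-idx1-ubyte.gz' % kind)
    images_path = os.path.join(path, '%s-images-idx3-ubyte.gz' % kind)
    with gzip.open(labels_path, 'rb') as lbpath:
        labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)
    with gzip.open(images_path, 'rb') as imgpath:
        images = np.frombuffer(imgpath.read(), dtype=np.uint8,
                               offset=16).reshape(len(labels), 784)
    return images, labels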
Code Example #3
File: train_keras.py Project: power1997312/-20201
import reader
import tensorflow as tf
import keras
import matplotlib.pyplot as plt
import os

from visible import plot_image, plot_value_array

# Prepare the data
train_img, train_label = reader.load_mnist(
    'datasets', kind='train')  # shapes: (60000, 784), (60000,)
test_img, test_label = reader.load_mnist(
    'datasets', kind='t10k')   # shapes: (10000, 784), (10000,)
"""show img[0] in colors
plt.figure()
plt.imshow(train_img[0].reshape((28,28)))
plt.colorbar()
plt.grid(False)
plt.show()"""

# Scale the grey-level pixel values into [0, 1]
train_img = train_img / 255.0
test_img = test_img / 255.0
"""show img [0..24] in grey-level pic
plt.figure(figsize=(10,10))
for i in range(25):
    plt.subplot(5,5,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_img[i].reshape((28,28)), cmap=plt.cm.binary)
plt.show()"""
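
The excerpt ends inside the disabled preview block. A minimal sketch of the model-building step such a script typically continues with, operating on the flat (N, 784) arrays prepared above; the layer sizes, optimizer, and epoch count are assumptions, not taken from this project:

model = keras.models.Sequential([
    keras.layers.Dense(128, activation='relu', input_shape=(784,)),
    keras.layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_img, train_label, epochs=5)
test_loss, test_acc = model.evaluate(test_img, test_label)
print('test accuracy:', test_acc)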
Code Example #4
File: final.py Project: xyu335/ece408_project
model_prefix = "ece408"
dataset_size = float("inf")

if len(sys.argv) > 1:
    dataset_size = int(sys.argv[1])
if len(sys.argv) > 2:
    print "Usage:", sys.argv[0], "<dataset size>"
    print "    <dataset_size> = [0 - 10000]"
    sys.exit(-1)

# Log to stdout for MXNet
logging.getLogger().setLevel(logging.DEBUG)

print "Loading fashion-mnist data..."
test_images, test_labels = load_mnist(path="/fashion-mnist",
                                      rows=48,
                                      cols=48,
                                      kind="t10k-48")
print "done"

# Reduce the size of the dataset, if desired
dataset_size = max(0, min(dataset_size, 10000))
test_images = test_images[:dataset_size]
test_labels = test_labels[:dataset_size]

# Cap batch size at the size of our test data
batch_size = len(test_images)

# Get iterators that cover the dataset
test_iter = mx.io.NDArrayIter(test_images, test_labels, batch_size)

# Evaluate the network
Code Example #5
import sys
import logging

import mxnet as mx

from reader import load_mnist

MODEL_DIR = "/models"
model_prefix = "ece408"
dataset_size = float("inf")

if len(sys.argv) > 1:
    dataset_size = int(sys.argv[1])
if len(sys.argv) > 2:
    print("Usage:", sys.argv[0], "<dataset_size>")
    print("    <dataset_size> = [0 - 10000]")
    sys.exit(-1)

# Log to stdout for MXNet
logging.getLogger().setLevel(logging.DEBUG)

print "Loading fashion-mnist data..."
test_images, test_labels = load_mnist(
    path="/fashion-mnist", rows=64, cols=64, kind="t10k-64")
print "done"

# Reduce the size of the dataset, if desired
dataset_size = max(0, min(dataset_size, 10000))
test_images = test_images[:dataset_size]
test_labels = test_labels[:dataset_size]

# Cap batch size at the size of our test data
batch_size = len(test_images)

# Get iterators that cover the dataset
test_iter = mx.io.NDArrayIter(
    test_images, test_labels, batch_size)
Code Example #6
File: submission.py Project: subramanian1998/GPU498
model_prefix = "eecs498"
dataset_size = float("inf")

if len(sys.argv) > 1:
    dataset_size = int(sys.argv[1])
if len(sys.argv) > 2:
    print "Usage:", sys.argv[0], "<dataset size>"
    print "    <dataset_size> = [0 - 10000]"
    sys.exit(-1)

# Log to stdout for MXNet
logging.getLogger().setLevel(logging.DEBUG)

print "Loading fashion-mnist data...",
test_images, test_labels = load_mnist(path="fashion-mnist",
                                      rows=72,
                                      cols=72,
                                      kind="t10k-72")
print "done"

# Reduce the size of the dataset, if desired
dataset_size = max(0, min(dataset_size, 10000))
test_images = test_images[:dataset_size]
test_labels = test_labels[:dataset_size]

# Cap batch size at the size of our test data
batch_size = len(test_images)

# Get iterators that cover the dataset
test_iter = mx.io.NDArrayIter(test_images, test_labels, batch_size)

# Evaluate the network
Code Example #7
        return int(sum(y_predict == y)) / y.shape[0]

    def get_accuraccy(self, X, y):
        # Forward-propagate X, take the output layer activations (dropping
        # the bias column), and return the fraction of correct predictions.
        a, z = self.forward_propagation(X)
        y_predict = self.predict(a[-1][:, 1:])
        return int(sum(y_predict == y)) / y.shape[0]

    def load(self, a):
        # Restore pickled parameters: the file holds Theta followed by a
        # second object, which is printed as it is read.
        fin = open(a, "rb")
        self.Theta = pickle.load(fin)
        print(pickle.load(fin))
        fin.close()
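
    def save(self, a):
        # Hypothetical counterpart to load(), not present in the original
        # excerpt: writes Theta plus one extra pickled object, mirroring
        # the two pickle.load calls above. The second object here is only
        # a placeholder for whatever the project actually stores.
        fout = open(a, "wb")
        pickle.dump(self.Theta, fout)
        pickle.dump("saved", fout)
        fout.close()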

if __name__ == '__main__':
    # read data
    X_train, y_train = reader.load_mnist('data/number', kind='train')
    X_test, y_test = reader.load_mnist('data/number', kind='t10k')

    # Standardize both splits with the training set's mean and std
    mean = np.mean(X_train)
    std = np.std(X_train)

    X_test = (X_test - mean) / std
    X_train = (X_train - mean) / std

    # Keep the first 5000 training samples; split the 10000 test samples
    # into disjoint cross-validation and test halves.
    X_train = X_train[:5000]
    y_train = y_train[:5000]
    X_cv = X_test[5000:10000]
    y_cv = y_test[5000:10000]
    X_test = X_test[:5000]
    y_test = y_test[:5000]
Code Example #8
    """Filter number 0 and 1"""

    X = []
    y = []
    for i in range(X_test.shape[0]):
        if y_test[i][0] == 0 or y_test[i][0] == 1:
            X.append(X_test[i])
            y.append(y_test[i])
    X = np.array(X)
    y = np.array(y)
    return X, y


if __name__ == "__main__":
    # Read data
    (X_train, y_train) = reader.load_mnist('data', kind='train')
    (X_test, y_test) = reader.load_mnist('data', kind='t10k')
    X_train = X_train[:10000]
    y_train = y_train[:10000]

    # X_train, y_train = filter_array(X_train, y_train)
    # X_test, y_test = filter_array(X_test, y_test)

    ex1b = LogisticRegression(X_train, y_train)
    y_guess = ex1b.predict(X_test)

    # Get accuracy: count predictions that match the labels
    correct = 0
    for i in range(y_test.shape[0]):
        if y_guess[i] == y_test[i]:
            correct += 1
    print("accuracy:", float(correct) / y_test.shape[0])
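
Equivalently, since y_guess and y_test are numpy arrays of the same length, the counting loop above collapses into a single vectorized expression:

    print("accuracy:", np.mean(y_guess == y_test))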