Code example #1
    def __init__(self, train=True):
        self.train = train
        # Load all of MNIST once; labels stay integer class indices
        # (onehot=False) and images keep their 2-D shape (flatten=False).
        x_train, y_train, x_test, y_test = mnist.load(onehot=False,
                                                      flatten=False)

        self.x_train = torch.from_numpy(x_train).float()
        self.y_train = torch.from_numpy(y_train).long()
        self.x_test = torch.from_numpy(x_test).float()
        self.y_test = torch.from_numpy(y_test).long()

        # The reported length depends on which split this instance serves.
        self.len = self.x_train.size(0) if train else self.x_test.size(0)
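
The constructor above belongs to a torch.utils.data.Dataset subclass whose name and remaining methods are not shown. A minimal sketch of the rest of the Dataset protocol, assuming the hypothetical class name MnistDataset:

import torch
import common.mnist as mnist


class MnistDataset(torch.utils.data.Dataset):  # hypothetical name; not in the fragment

    # __init__ as in the fragment above.

    def __getitem__(self, index):
        # Serve (image, label) pairs from the split chosen at construction.
        if self.train:
            return self.x_train[index], self.y_train[index]
        return self.x_test[index], self.y_test[index]

    def __len__(self):
        return self.len


# Usage sketch, once the constructor above is merged in:
loader = torch.utils.data.DataLoader(MnistDataset(train=True),
                                     batch_size=64, shuffle=True)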
Code example #2
import sys, os
sys.path.append(os.pardir)

import numpy as np
import tensorflow as tf
from datetime import datetime
import common.mnist as mnist


# Set hyper-parameters:
n_epoch, batch_size, lr = 10, 64, 0.001
shuffle, verbose = True, True

# Load data:
x_train, y_train, x_test, y_test = mnist.load()

x = tf.placeholder(tf.float32, [None, 784])
y = tf.placeholder(tf.float32, [None, 10])

# Setup a model:
tf.set_random_seed(0)
lin1 = tf.layers.dense(x, 200, activation=tf.nn.relu)
lin2 = tf.layers.dense(lin1, 200, activation=tf.nn.relu)
lin3 = tf.layers.dense(lin2, 10)
output = lin3

# softmax_cross_entropy_with_logits is deprecated in TF 1.x; use the _v2 form
# (identical here, since the labels are placeholders, not trained variables).
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
           logits=output, labels=y))
optimizer = tf.train.AdamOptimizer(lr).minimize(loss)

correct = tf.equal(tf.argmax(output, 1), tf.argmax(y, 1))
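
The snippet stops before the accuracy tensor and the session loop. A minimal TF1-style sketch of the missing driver, using only names defined above; the loop structure itself is an assumption:

accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    n_data = x_train.shape[0]
    for epoch in range(n_epoch):
        perm = np.random.permutation(n_data) if shuffle else np.arange(n_data)
        for i in range(0, n_data, batch_size):
            batch = perm[i:i + batch_size]
            sess.run(optimizer, feed_dict={x: x_train[batch],
                                           y: y_train[batch]})
        if verbose:
            acc = sess.run(accuracy, feed_dict={x: x_test, y: y_test})
            print("epoch {}: test accuracy {:.4f}".format(epoch + 1, acc))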
Code example #3
import torch
import torch.nn.functional as F
from datetime import datetime
import common.mnist as mnist

if __name__ == "__main__":

    # Use the GPU when available, otherwise fall back to the CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Set hyper-parameters:
    n_epoch, batch_size, lr = 10, 64, 0.01
    shuffle, verbose = True, True

    # Load data:
    x_train, y_train, x_test, y_test = mnist.load(onehot=False)

    x_train = torch.from_numpy(x_train).float().to(device)
    y_train = torch.from_numpy(y_train).long().to(device)
    x_test = torch.from_numpy(x_test).float().to(device)
    y_test = torch.from_numpy(y_test).long().to(device)

    # Setup a model:
    torch.manual_seed(0)
    model = torch.nn.Sequential(torch.nn.Linear(784, 200), torch.nn.Sigmoid(),
                                torch.nn.Linear(200, 10)).to(device)
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)

    # Train the model:
    n_data = x_train.shape[0]
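
The script breaks off right before the minibatch loop. A minimal sketch of the rest, assuming torch.randperm shuffling and a per-epoch accuracy check, neither of which appears in the fragment:

    for epoch in range(n_epoch):
        perm = (torch.randperm(n_data, device=device) if shuffle
                else torch.arange(n_data, device=device))
        for i in range(0, n_data, batch_size):
            idx = perm[i:i + batch_size]
            optimizer.zero_grad()
            loss = criterion(model(x_train[idx]), y_train[idx])
            loss.backward()
            optimizer.step()

        if verbose:
            with torch.no_grad():
                acc = (model(x_test).argmax(dim=1) == y_test).float().mean().item()
            print("epoch {}: test accuracy {:.4f}".format(epoch + 1, acc))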
Code example #4
import keras
from keras import optimizers
#from keras.datasets import mnist
import common.mnist as mnist


# To prevent CUDA_ERROR_OUT_OF_MEMORY:
config = keras.backend.tf.ConfigProto()
config.gpu_options.allow_growth = True
session = keras.backend.tf.Session(config=config)
keras.backend.set_session(session)  # register it so Keras actually uses it

# Set hyper-parameters:
n_epoch, batch_size, lr = 10, 64, 0.001
verbose = True

# Load data:
x_train, y_train, x_test, y_test = mnist.load(flatten=False)
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = x_test.reshape(-1, 28, 28, 1)

#(x_train, y_train), (x_test, y_test) = mnist.load_data()
#x_train = x_train.reshape(-1, 28, 28, 1)/255.
#y_train = keras.utils.to_categorical(y_train, 10)
#x_test = x_test.reshape(-1, 28, 28, 1)/255.
#y_test = keras.utils.to_categorical(y_test, 10)

# Setup a model:
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(32, (3, 3), activation='relu',
          input_shape=(28, 28, 1)))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(keras.layers.Dropout(0.5))
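
The model definition is cut off after the first pooling/dropout block. A minimal sketch of one way to finish and train it, mirroring the layer stack of code example #5 (a second 64-filter conv block and a 256-unit dense head); the actual tail is not shown in the fragment:

model.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(256, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=optimizers.Adam(lr=lr),
              metrics=['accuracy'])

history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=n_epoch,
                    verbose=verbose,
                    validation_data=(x_test, y_test))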
Code example #5
class Flatten:  # reconstructed header; base class, if any, is not shown
    # Reshape (N, C, H, W) activations to (N, C*H*W); undo it on backward.
    def forward(self, x):
        self.x = x
        return x.reshape(x.shape[0], -1)

    def backward(self, dy):
        return dy.reshape(*self.x.shape)


if __name__ == "__main__":

    # Set hyper-parameters:
    n_epoch, batch_size, lr = 1, 64, 0.001
    shuffle, verbose = True, True

    # Load data:
    x_train, y_train, x_test, y_test = mnist.load(onehot=True, flatten=False)

    # Setup a model:
    np.random.seed(0)
    layers = [
        Convolution(1, 32, name='1', kernel_size=(3, 3)),
        np_nn.Relu(),
        MaxPooling(),
        Dropout(),
        Convolution(32, 64, name='2', kernel_size=(3, 3)),
        MaxPooling(),
        Dropout(),
        Flatten(),
        np_nn.Linear(64 * 7 * 7, 256, name='3', activation='relu'),
        np_nn.Relu(),
        Dropout(),
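        # The original list is truncated here. A minimal sketch of a plausible
        # tail: a 10-way np_nn.Linear output layer (hypothetical; the real
        # closing layers are not shown), then a forward pass through the stack.
        np_nn.Linear(256, 10, name='4'),
    ]

    # Forward-pass sketch: thread one minibatch through the layers in order.
    h = x_train[:batch_size]
    for layer in layers:
        h = layer.forward(h)  # final h has shape (batch_size, 10)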
Code example #6
        # Compile with an MSE reconstruction loss (tail of the AutoEncoder class).
        self.compile(loss='mean_squared_error',
                     optimizer=optimizers.Adam(lr=lr),
                     metrics=['accuracy'])


# To prevent CUDA_ERROR_OUT_OF_MEMORY:
config = keras.backend.tf.ConfigProto()
config.gpu_options.allow_growth = True
session = keras.backend.tf.Session(config=config)
keras.backend.set_session(session)  # register it so Keras actually uses it

# Set hyper-parameters:
n_epoch, batch_size, lr = 10, 64, 0.0001
verbose = True

# Load data:
data, _, _, _ = mnist.load()

# Setup a model:
#model = Sequential()
#model.add(Dense(50, activation='relu', input_shape=(784,)))
#model.add(Dense(10, activation='sigmoid'))

model = AutoEncoder()
model.summary()

#model.compile(loss='mean_squared_error',
#              optimizer=optimizers.adam(lr=lr),
#              metrics=['accuracy'])

# Train the model
#history = model.fit(data, data,
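
The commented-out fit call above is truncated. A minimal training sketch for the autoencoder, reconstructing its own input with the hyperparameters defined above:

history = model.fit(data, data,
                    batch_size=batch_size,
                    epochs=n_epoch,
                    verbose=verbose)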
Code example #7
import keras
import common.mnist as mnist


# To prevent CUDA_ERROR_OUT_OF_MEMORY:
config = keras.backend.tf.ConfigProto()
config.gpu_options.allow_growth = True
session = keras.backend.tf.Session(config=config)
keras.backend.set_session(session)  # register it so Keras actually uses it

# Set hyper-parameters:
n_epoch, batch_size, lr = 10, 64, 0.01
verbose = True

# Load data:
x_train, y_train, x_test, y_test = mnist.load(onehot=True)

# Setup a model:
model = keras.models.Sequential()
model.add(keras.layers.Dense(200, activation='sigmoid', input_shape=(784,)))
model.add(keras.layers.Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer=keras.optimizers.SGD(lr=lr),
              metrics=['accuracy'])

# Train the model
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=n_epoch,
                    verbose=verbose)
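
A short follow-up sketch, assuming the standard Keras evaluate call on the held-out test split:

score = model.evaluate(x_test, y_test, verbose=0)
print("test loss {:.4f}, test accuracy {:.4f}".format(score[0], score[1]))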