Example #1
import tensorflow as tf

from load_mnist import save_and_load_mnist
from model import Model  # import path assumed; Model is defined elsewhere in the project


def main():
    # Soft placement lets ops without a GPU kernel fall back to CPU;
    # allow_growth claims GPU memory incrementally instead of all at once.
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    dataset = save_and_load_mnist("./data/mnist/")

    x_train = dataset['train_data']
    x_test = dataset['test_data']
    m = Model(sess)
    m.fit(x_train, x_test)


if __name__ == '__main__':
    main()
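
The Model class used in Example #1 is not shown in this listing. As a rough sketch of the interface the driver expects (a constructor that takes a session, and a fit method that consumes only images, which suggests an unsupervised model such as an autoencoder), something along these lines would work; every layer size, the learning rate, and the epoch count below are assumptions, not values from the original.

import tensorflow as tf


class Model:
    """Minimal sketch: a one-hidden-layer autoencoder (all sizes assumed)."""

    def __init__(self, sess):
        self.sess = sess
        self.X = tf.placeholder(tf.float32, [None, 784])
        h = tf.layers.dense(self.X, 64, activation=tf.nn.relu)  # encoder
        self.out = tf.layers.dense(h, 784)                      # decoder
        self.loss = tf.reduce_mean(tf.square(self.out - self.X))
        self.train_op = tf.train.AdamOptimizer(1e-3).minimize(self.loss)
        sess.run(tf.global_variables_initializer())

    def fit(self, x_train, x_test, epochs=5, batch_size=100):
        for epoch in range(epochs):
            for i in range(0, len(x_train), batch_size):
                batch = x_train[i:i + batch_size]
                self.sess.run(self.train_op, feed_dict={self.X: batch})
            test_loss = self.sess.run(self.loss, feed_dict={self.X: x_test})
            print('epoch %d, test loss %.4f' % (epoch + 1, test_loss))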
Example #2
import tensorflow as tf
import numpy as np
from load_mnist import save_and_load_mnist

dataset = save_and_load_mnist("./data/mnist/")

x_train = dataset['train_data']
y_train = dataset['train_target']
x_test = dataset['test_data']
y_test = dataset['test_target']

# global_step is initialized to 0 and marked non-trainable, so the optimizer
# never updates it through gradients. It counts how many training steps the
# optimizer has run, and is incremented automatically when passed to
# minimize() (see the sketch after this listing).
global_step = tf.Variable(0, trainable=False, name='global_step')

X = tf.placeholder(dtype=tf.float32, shape=[None, 784], name='X')
Y = tf.placeholder(dtype=tf.int32, shape=[None, 1], name='Y')
Y_one_hot = tf.reshape(tf.one_hot(Y, 10), [-1, 10], name='Y_one_hot')

W1 = tf.get_variable(name='W1',
                     shape=[784, 256],
                     initializer=tf.glorot_uniform_initializer())
b1 = tf.get_variable(name='b1',
                     shape=[256],
                     initializer=tf.zeros_initializer())
h1 = tf.nn.relu(tf.nn.bias_add(tf.matmul(X, W1), b1), name='h1')

W2 = tf.get_variable(name='W2',
                     shape=[256, 128],
                     initializer=tf.glorot_uniform_initializer())
b2 = tf.get_variable(name='b2',
                     shape=[128],
                     initializer=tf.zeros_initializer())
h2 = tf.nn.relu(tf.nn.bias_add(tf.matmul(h1, W2), b2), name='h2')
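
Example #2 breaks off in the middle of the second layer. A minimal continuation, assuming an output layer on top of h2 with softmax cross-entropy and an Adam optimizer (the layer size and learning rate are assumptions, not the original's values), shows how global_step is wired in: passing it to minimize() makes the optimizer increment it once per training step.

W3 = tf.get_variable(name='W3',
                     shape=[128, 10],
                     initializer=tf.glorot_uniform_initializer())
b3 = tf.get_variable(name='b3',
                     shape=[10],
                     initializer=tf.zeros_initializer())
logits = tf.nn.bias_add(tf.matmul(h2, W3), b3, name='logits')

loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y_one_hot,
                                               logits=logits))
# Passing global_step here is what makes the optimizer add 1 to it
# on every training step.
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss,
                                                 global_step=global_step)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, step = sess.run([train_op, global_step],
                       feed_dict={X: x_train[:100],
                                  Y: y_train[:100].reshape(-1, 1)})
    print('global_step after one update:', step)  # prints 1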
Example #3
import torch
from torch.utils.data import Dataset
from load_mnist import save_and_load_mnist

class MnistDataset(Dataset):  # class name assumed; the listing shows only __init__
    def __init__(self, data_name):
        dataset = save_and_load_mnist("./data/mnist/")
        x = dataset[data_name]
        self.len = len(x)
        self.x_data = torch.from_numpy(x)
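
The listing shows only __init__; for a torch.utils.data.DataLoader to consume the dataset, the class also needs __getitem__ and __len__. A minimal sketch, continuing the MnistDataset class above (the batch size below is an arbitrary choice):

    # Continued inside the MnistDataset class above.
    def __getitem__(self, index):
        return self.x_data[index]

    def __len__(self):
        return self.len

# Usage: a DataLoader yields shuffled mini-batches from the dataset.
from torch.utils.data import DataLoader

train_loader = DataLoader(dataset=MnistDataset('train_data'),
                          batch_size=100, shuffle=True)
for x_batch in train_loader:
    print(x_batch.shape)  # e.g. torch.Size([100, 784])
    break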