Example #1
 def __init__(self):
     self.numClass = 10
     self.batchSize = 256
     (self.x_train, self.y_train), (self.x_test, self.y_test) = load_data()
     self.dataSize = len(self.y_train)
     self.y_train_one_hot = self.onehot(self.y_train, self.numClass)
     self.y_test_one_hot = self.onehot(self.y_test, self.numClass)
     self.pool = util.Parallel()
     self.thread = util.ThreadBuffer()
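The `self.onehot` method called above is not shown in this snippet. A minimal NumPy sketch of a method with that call signature (dense integer labels to one-hot vectors) might look like this; it is an assumption about the original helper, not its actual code:

import numpy as np

def onehot(self, labels, num_classes):
    """Convert integer labels of shape (N,) or (N, 1) to one-hot vectors of shape (N, num_classes)."""
    labels = np.asarray(labels).reshape(-1)                # flatten (N, 1) -> (N,)
    out = np.zeros((labels.shape[0], num_classes), dtype=np.float32)
    out[np.arange(labels.shape[0]), labels] = 1.0          # mark the class column for each sample
    return out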
Example #2
    def inference(self, model_kind):
        with self.graph.as_default():
            ### load CIFAR-10
            (x_train, y_train), (x_test, y_test) = load_data()

            y_train = tf.squeeze(tf.one_hot(y_train, 10), axis=1)
            y_test = tf.squeeze(tf.one_hot(y_test, 10), axis=1)

            X = tf.placeholder(
                tf.float32,
                [None, x_train.shape[1], x_train.shape[2], x_train.shape[3]])
            Y = tf.placeholder(tf.float32, [None, 10])
            is_training = tf.placeholder(tf.bool)

            train_op, pred, loss, logit = self.train(X, Y, is_training,
                                                     model_kind)
            correct_prediction = tf.equal(pred, tf.argmax(Y, axis=1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

            sess = tf.Session()

            saver = tf.train.Saver(tf.global_variables())

            model_folder = './model' + str(model_kind)
            ckpt = tf.train.get_checkpoint_state(model_folder)
            if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                sess.run(tf.global_variables_initializer())

            test_accuracy = 0

            start_test_time = time.time()
            for i in range(x_test.shape[0] // 100):
                input_batch, label_batch = self.test_sort(
                    100 * i, 100 * (i + 1), x_test,
                    y_test.eval(session=sess))
                tmpacc = sess.run(accuracy,
                                  feed_dict={
                                      X: input_batch,
                                      Y: label_batch,
                                      is_training: False
                                  })
                test_accuracy = test_accuracy + tmpacc / (x_test.shape[0] //
                                                          100)

            print('test accuracy %g' % test_accuracy)
            test_time = time.time() - start_test_time
            print('test time %g' % test_time)
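The `self.test_sort` helper used in the evaluation loop is defined elsewhere in this class. A minimal sketch of a method with that signature, assuming it simply slices a contiguous evaluation batch out of the arrays:

def test_sort(self, start, end, images, labels):
    """Return the contiguous evaluation slice [start, end) of images and one-hot labels."""
    return images[start:end], labels[start:end]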
Example #3
def read_CIFAR10_subset():
    """
    Load the CIFAR-10 data subset from keras helper module
    and perform preprocessing for training ResNet.
    :return: X_set: np.ndarray, shape: (N, H, W, C).
             y_set: np.ndarray, shape: (N, num_channels) or (N,).
    """

    # Download CIFAR-10 data and load data
    (x_train, y_train), (x_test, y_test) = load_data()

    y_train_oh = np.zeros((len(y_train), 10), dtype=np.uint8)
    for i in range(len(y_train)):
        y_train_oh[i, y_train[i]] = 1
    y_train_one_hot = y_train_oh

    y_test_oh = np.zeros((len(y_test), 10), dtype=np.uint8)
    for i in range(len(y_test)):
        y_test_oh[i, y_test[i]] = 1
    y_test_one_hot = y_test_oh

    x_train = x_train / 255.0
    x_test = x_test / 255.0

    cifar_mean = np.array([0.4914, 0.4822, 0.4465])
    cifar_std = np.array([0.2470, 0.2435, 0.2616])

    for i in range(len(x_train)):
        x_train[i] -= cifar_mean
        x_train[i] /= cifar_std

    for j in range(len(x_test)):
        x_test[j] -= cifar_mean
        x_test[j] /= cifar_std

    print('x_train shape : ', x_train.shape, end='\n')
    print('x_test shape : ', x_test.shape, end='\n')
    print('y_train_one_hot shape : ', y_train_one_hot.shape, end='\n')
    print('y_test_one_hot shape : ', y_test_one_hot.shape, end='\n')
    print('\nDone')

    return x_train, x_test, y_train_one_hot, y_test_one_hot
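The per-image normalization loops above can be collapsed into a single broadcasted operation; a minimal equivalent sketch using the same cifar_mean / cifar_std values, assuming the arrays have already been scaled to [0, 1]:

# Broadcast the channel-wise mean/std over the whole (N, H, W, C) array at once.
x_train = (x_train - cifar_mean) / cifar_std
x_test = (x_test - cifar_mean) / cifar_std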
Example #4
def readCifar10Keras():

    import numpy as np
    import matplotlib.pyplot as plt
    from tensorflow.python.keras._impl.keras.datasets.cifar10 import load_data
    from tensorflow.python.keras._impl.keras.utils.data_utils import get_file

    # Download and extract the CIFAR-10 archive, then load it as NumPy arrays.
    dirname = 'cifar-10-batches-py'
    origin = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
    path = get_file(dirname, origin=origin, untar=True)
    print(path)
    (x_train, y_train), (x_test, y_test) = load_data()

    # Show the first 100 training images in a 10x10 grid, annotated with
    # the raw label and the grid position.
    fig, ax = plt.subplots(10, 10, figsize=(10, 10))
    index = 0
    for i in range(10):
        for j in range(10):
            ax[i][j].imshow(x_train[index])
            ax[i][j].annotate(str(y_train[index]) + str(i) + "," + str(j),
                              xy=(0, 0), xytext=(0, -1), arrowprops=None, fontsize=8)
            index += 1

    plt.show()
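The annotations above show only the raw label index. Mapping indices to the standard CIFAR-10 class names makes the grid easier to read; a small sketch of the annotate call inside the loop, using the standard CIFAR-10 class ordering:

cifar10_classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
                   'dog', 'frog', 'horse', 'ship', 'truck']
label = int(y_train[index])          # y_train entries are 1-element arrays like [6]
ax[i][j].annotate(cifar10_classes[label], xy=(0, 0), xytext=(0, -1), fontsize=8)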
Example #5
L4_flat = tf.reshape(L4, [-1, 2 * 2 * 128])
print("L4 Reshape: ", L4)

W5 = tf.get_variable("W5", shape=[2 * 2 * 128, 384], initializer=tf.contrib.layers.xavier_initializer())
b5 = tf.Variable(tf.random_normal([384]))
L5 = tf.nn.relu(tf.matmul(L4_flat, W5) + b5)
print("L5 Relu: ", L5)
L5 = tf.nn.dropout(L5, keep_prob=keep_prob)
print("L5 Dropout: ", L5)

W6 = tf.get_variable("W6", shape=[384, 10], initializer=tf.contrib.layers.xavier_initializer())
b6 = tf.Variable(tf.random_normal([10]))
logits = tf.matmul(L5, W6) + b6
y_hat = tf.nn.softmax(logits)

(x_train, y_train), (x_test, y_test) = load_data()
y_train_one_hot = tf.squeeze(tf.one_hot(y_train, 10), axis=1)
y_test_one_hot = tf.squeeze(tf.one_hot(y_test, 10), axis=1)

cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

correct_prediction = tf.equal(tf.argmax(y_hat, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

print('Started')

for epoch in range(training_epochs):
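    # --- The original example is truncated at this point. What follows is a minimal
    # --- sketch of how such a TF1 epoch loop typically continues; the names batch_size
    # --- and X, and the keep_prob value fed here, are assumptions, not the original code.
    y_train_labels = sess.run(y_train_one_hot)   # materialize one-hot labels as a NumPy array
    avg_cost = 0
    total_batch = x_train.shape[0] // batch_size
    for i in range(total_batch):
        # Slice a contiguous mini-batch from the training set.
        batch_x = x_train[i * batch_size:(i + 1) * batch_size]
        batch_y = y_train_labels[i * batch_size:(i + 1) * batch_size]
        _, c = sess.run([optimizer, cost],
                        feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.7})
        avg_cost += c / total_batch
    print('Epoch: %04d  cost = %.9f' % (epoch + 1, avg_cost))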
Example #6
	def load_label_file(self):		
		# Download the CIFAR-10 data and load it.
		(x_train, y_train), (x_test, y_test) = load_data()
	
		return x_train,x_test,y_train,y_test
Example #7
print(mnist.train.labels.shape)
print(mnist.validation.images.shape)
print(mnist.validation.labels.shape)
print(mnist.test.images.shape)
print(mnist.test.labels.shape)

#import tensorflow.contrib.learn.python.learn.datasets.base as base
from tensorflow.contrib.learn.python.learn.datasets import base
iris_data,iris_label = base.load_iris()
house_data,house_label = base.load_boston()

print(iris_data.shape)
print(house_data.shape)

from tensorflow.python.keras._impl.keras.datasets import cifar10
# load_data() returns (train, test) tuples of NumPy arrays; inspect their shapes.
(images, labels), (test_images, test_labels) = cifar10.load_data()
print(images.shape)
print(labels.shape)

Example #8
    def load_label_file(self):
        (x_train, y_train), (x_test, y_test) = load_data()

        return x_train, x_test, y_train, y_test
Example #9
def load_cifar10_data():
    (x_train, y_train), (x_test, y_test) = load_data()
    y_train_one_hot = dense_to_one_hot(y_train, number_classes)
    y_test_one_hot = dense_to_one_hot(y_test, number_classes)
    return (x_train, y_train_one_hot), (x_test, y_test_one_hot)
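`dense_to_one_hot` and `number_classes` are defined elsewhere in this project. If the project already depends on Keras, the same conversion can be done without a custom helper; a small sketch (depending on the TensorFlow version, `to_categorical` may also live under `keras.utils.np_utils`):

import tensorflow as tf

# to_categorical flattens (N, 1) integer labels and returns (N, 10) one-hot vectors.
y_train_one_hot = tf.keras.utils.to_categorical(y_train, num_classes=10)
y_test_one_hot = tf.keras.utils.to_categorical(y_test, num_classes=10)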
Example #10
P_W2 = tf.Variable(xavier_init([h_dim, X_dim]))
P_b2 = tf.Variable(tf.zeros(shape=[X_dim]))

theta_P = [P_W1, P_W2, P_b1, P_b2]

#D(z)
D_W1 = tf.Variable(xavier_init([z_dim + eps_dim, h_dim]))
D_b1 = tf.Variable(tf.zeros(shape=[h_dim]))

D_W2 = tf.Variable(xavier_init([h_dim, 1]))
D_b2 = tf.Variable(tf.zeros(shape=[1]))

theta_D = [D_W1, D_W2, D_b1, D_b2]

#Training
x = load_data()

z_sample = Q(X, eps)
z_sample_fake = Q(X_eps, eps)
p_prob = P(z_sample)
X_samples = P(z_sample_fake)

#Adversarial loss to approx. Q(z|X)
D_real = D(X, z_sample)
D_fake = D(X, z_sample_fake)

G_loss = -(tf.reduce_mean(D_real) + tf.reduce_mean(tf.log(p_prob)))
g_psi = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=D_real,
                                            logits=tf.ones_like(D_real)) +
    tf.nn.sigmoid_cross_entropy_with_logits(labels=D_fake,
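This example is cut off mid-expression, and `xavier_init`, `Q`, `P`, and `D` are defined elsewhere in its source. A minimal sketch of a `xavier_init` helper in the style commonly paired with such TF1 GAN/AVB code; the 1/sqrt(fan_in/2) scaling is an assumption about the original:

import tensorflow as tf

def xavier_init(size):
    """Draw initial weights with standard deviation scaled by the input dimension."""
    in_dim = size[0]
    stddev = 1.0 / tf.sqrt(in_dim / 2.0)
    return tf.random_normal(shape=size, stddev=stddev)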
Example #11
def data_loader(dataset):
    if dataset == 'mnist':
        mb_size = 256
        X_dim = 784
        width = 28
        height = 28
        channels = 1
        len_x_train = 60000
        len_x_test = 10000
        x_train = input_data.read_data_sets('data/MNIST_data', one_hot=True)
        x_test, _ = x_train.test.next_batch(len_x_test, shuffle=False)
        x_test = np.reshape(x_test, [-1, 28, 28, 1])

    if dataset == 'svhn':
        mb_size = 256
        X_dim = 1024
        width = 32
        height = 32
        channels = 3
        len_x_train = 604388
        len_x_test = 26032

        train_location = 'data/SVHN/train_32x32.mat'
        extra_location = 'data/SVHN/extra_32x32.mat'
        test_location = 'data/SVHN/test_32x32.mat'

        train_dict = sio.loadmat(train_location)
        x_ = np.asarray(train_dict['X'])
        x_train = []
        for i in range(x_.shape[3]):
            x_train.append(x_[:, :, :, i])
        x_train = np.asarray(x_train)

        extra_dict = sio.loadmat(extra_location)
        x_ex = np.asarray(extra_dict['X'])
        x_extra = []
        for i in range(x_ex.shape[3]):
            x_extra.append(x_ex[:, :, :, i])
        x_extra = np.asarray(x_extra)
        x_train = np.concatenate((x_train, x_extra), axis=0)
        x_train = normalize(x_train)

        test_dict = sio.loadmat(test_location)
        x_ = np.asarray(test_dict['X'])
        x_test = []
        for i in range(x_.shape[3]):
            x_test.append(x_[:, :, :, i])
        x_test = np.asarray(x_test)

    if dataset == 'cifar10':
        mb_size = 256
        X_dim = 1024
        len_x_train = 50000
        len_x_test = 10000
        width = 32
        height = 32
        channels = 3
        (x_train, y_train), (x_test, y_test) = load_data()
        x_train = normalize(x_train)
        x_test = normalize(x_test)

    if dataset == 'celebA':
        mb_size = 256
        X_dim = 4096
        width = 64
        height = 64
        channels = 3
        download_celeb_a("data")
        data_files = glob(os.path.join("data/celebA/*.jpg"))
        len_x_train = 200000
        len_x_test = 2599
        sample = [
            get_image(sample_file, 128, True, 64, is_grayscale=0)
            for sample_file in data_files
        ]
        sample_images = np.array(sample).astype(np.float32)
        x_train = sample_images[:200000]
        x_test = sample_images[200000:]

    if dataset == 'lsun':
        mb_size = 256
        X_dim = 4096
        width = 64
        height = 64
        channels = 3
        download_lsun("data")
        lsun_train = Lsun("data/lsun/bedroom_train_lmdb")
        lsun_test = Lsun("data/lsun/bedroom_val_lmdb")
        len_x_train = 3033042
        len_x_test = 300
        sample_images = lsun_test.load_data(len_x_test)
        x_train = lsun_train
        x_test = sample_images[:len_x_test]

    return mb_size, X_dim, width, height, channels, len_x_train, x_train, len_x_test, x_test
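`normalize`, `download_celeb_a`, `get_image`, `download_lsun`, and `Lsun` are project-level helpers that this listing does not show. A minimal sketch of a `normalize` using the usual GAN convention of scaling uint8 images into [-1, 1]; the exact output range used by the original project is an assumption:

import numpy as np

def normalize(images):
    """Scale uint8 pixel values from [0, 255] into [-1, 1]."""
    return images.astype(np.float32) / 127.5 - 1.0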
Example #12
    def run(self, max_iter, model_kind, input_size, lr, wd, momentum,
            done_epoch, sampling_step):
        self.input_size = input_size
        self.lr = lr
        self.wd = wd
        self.momentum = momentum
        self.done_epoch = done_epoch
        self.acc_sampling_step = sampling_step

        self.graph = tf.Graph()

        with self.graph.as_default():
            ### load CIFAR-10
            (x_train, y_train), (x_test, y_test) = load_data()

            y_train = tf.squeeze(tf.one_hot(y_train, 10), axis=1)
            y_test = tf.squeeze(tf.one_hot(y_test, 10), axis=1)

            X = tf.placeholder(
                tf.float32,
                [None, x_train.shape[1], x_train.shape[2], x_train.shape[3]])
            Y = tf.placeholder(tf.float32, [None, 10])
            is_training = tf.placeholder(tf.bool)

            train_op, pred, loss, logit = self.train(X, Y, is_training,
                                                     model_kind)
            correct_prediction = tf.equal(pred, tf.argmax(Y, axis=1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

            sess = tf.Session()

            saver = tf.train.Saver(tf.global_variables())

            model_folder = './model' + str(model_kind)
            ckpt = tf.train.get_checkpoint_state(model_folder)
            if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                sess.run(tf.global_variables_initializer())

            data_size = y_train.shape[0].value
            batch_num = data_size // self.input_size

            start_time = time.time()
            for epoch in range(max_iter):
                for itr in range(batch_num):
                    input_batch, label_batch = self.next_batch(
                        self.input_size, x_train, y_train.eval(session=sess))
                    _, loss_ = sess.run([train_op, loss],
                                        feed_dict={
                                            X: input_batch,
                                            Y: label_batch,
                                            is_training: True
                                        })

                    if itr % self.loss_sampling_step == 0:
                        progress_view = 'progress : ' + '%7.6f' % (
                            itr / batch_num *
                            100) + '%  loss :' + '%7.6f' % loss_
                        print(progress_view)
                        self.metric_list['losses'].append(loss_)

                with open('loss.txt', 'a') as wf:
                    epoch_time = time.time() - start_time
                    loss_info = '\nepoch: ' + '%7d' % (
                        epoch + 1 + self.done_epoch
                    ) + '  batch loss: ' + '%7.6f' % loss_ + '  time elapsed: ' + '%7.6f' % epoch_time
                    wf.write(loss_info)

                if epoch % self.acc_sampling_step == 0:
                    test_accuracy = 0

                    start_test_time = time.time()
                    for i in range(x_test.shape[0] // 100):
                        input_batch, label_batch = self.test_sort(
                            100 * i, 100 * (i + 1), x_test,
                            y_test.eval(session=sess))
                        tmpacc = sess.run(accuracy,
                                          feed_dict={
                                              X: input_batch,
                                              Y: label_batch,
                                              is_training: False
                                          })
                        test_accuracy = test_accuracy + tmpacc / (
                            x_test.shape[0] // 100)

                    inference_time_test = time.time() - start_test_time

                    train_accuracy = 0
                    start_test_time = time.time()
                    for i in range(x_train.shape[0] // 100):
                        input_batch, label_batch = self.test_sort(
                            100 * i, 100 * (i + 1), x_train,
                            y_train.eval(session=sess))
                        tmpacc = sess.run(accuracy,
                                          feed_dict={
                                              X: input_batch,
                                              Y: label_batch,
                                              is_training: False
                                          })
                        train_accuracy = train_accuracy + tmpacc / (
                            x_train.shape[0] // 100)

                    inference_time_train = time.time() - start_test_time

                    self.reg_acc(test_accuracy, train_accuracy,
                                 inference_time_test, inference_time_train)

                    if epoch % 20 == 0:
                        model_dir = './model' + str(
                            model_kind) + '_epoch' + str(
                                epoch + 1 + self.done_epoch) + '/model.ckpt'
                        saver.save(sess, model_dir)

            model_dir = './model' + str(model_kind) + '/model.ckpt'
            saver.save(sess, model_dir)
            sess.close()
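`self.next_batch`, `self.test_sort`, `self.reg_acc`, and the sampling-step attributes are defined elsewhere in this class (a slicing sketch of `test_sort` is given under Example #2 above). A minimal sketch of a `next_batch` matching how it is called here, assuming it draws a random mini-batch of `input_size` samples from the NumPy arrays:

import numpy as np

def next_batch(self, batch_size, images, labels):
    """Sample a random mini-batch of matching images and one-hot labels."""
    idx = np.random.choice(len(images), batch_size, replace=False)
    return images[idx], labels[idx]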