Example #1
def load_data(filename):

    # load mnist data set
    if filename == "data/mnist.train":
        
        (X_train, y_train), (X_test, y_test) = mnist.load_data()

        X_train = X_train.reshape(60000, 784)
        X_train = X_train.astype('float32')
        X_train /= 255

        Y_train = np_utils.to_categorical(y_train, 10)

        return X_train, Y_train

    if filename == "data/mnist2d.train":
        (X_train, y_train), (X_test, y_test) = mnist.load_data()

        X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
        X_train = X_train.astype('float32')
        X_train /= 255

        Y_train = np_utils.to_categorical(y_train, 10)

        return X_train, Y_train
        
    
    if filename == "data/mnist.test":
        (X_train, y_train), (X_test, y_test) = mnist.load_data()

        X_test = X_test.reshape(10000, 784)
        X_test = X_test.astype('float32')
        X_test /= 255

        Y_test = np_utils.to_categorical(y_test, 10)

        return X_test, Y_test 
    
    if filename == "data/mnist2d.test":
        (X_train, y_train), (X_test, y_test) = mnist.load_data()

        X_test = X_test.reshape(10000, 28, 28, 1)
        X_test = X_test.astype('float32')
        X_test /= 255

        Y_test = np_utils.to_categorical(y_test, 10)

        return X_test, Y_test 
    

    # load sensor data from csv file 
    data = pd.read_csv(filename, sep=';')
    X = data[data.columns[:-1]].values
    y = data[data.columns[-1]].values
    y = y.reshape(y.shape[0], 1)
    return X, y 
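
# Usage sketch for the loader above (the CSV path below is a hypothetical placeholder;
# only the "data/mnist*.train" / "data/mnist*.test" strings are handled specially).
X_flat, Y_onehot = load_data("data/mnist.train")        # (60000, 784) inputs, one-hot labels
X_img, Y_onehot_2d = load_data("data/mnist2d.train")    # (60000, 28, 28, 1) inputs
X_csv, y_csv = load_data("sensor_readings.csv")         # semicolon-separated CSV, last column is the target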
Example #2
def main():
    """ Test SVM from scikit learn on mnist data set.""" 

    (X_train, Y_train), (X_test, Y_test) =  mnist.load_data() 
  
    # preprocess data
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255

    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')


    model = SVC(kernel='rbf', gamma=0.02, C=10) 
    model.fit(X_train, Y_train)
    
    train_yy = model.predict(X_train)
    test_yy = model.predict(X_test) 

    train_err = 100*mean_squared_error(train_yy, Y_train) 
    test_err = 100*mean_squared_error(test_yy, Y_test) 
    
    print("Train. err:", train_err) 
    print("Test err:", test_err) 

    train_acc = accuracy_score(Y_train, train_yy)  
    test_acc = accuracy_score(Y_test, test_yy) 

    with open("svm_rbf", "wb") as f:
        pickle.dump(model, f)
Example #3
 def test_mnist(self):
     print('mnist')
     (X_train, y_train), (X_test, y_test) = mnist.load_data()
     print(X_train.shape)
     print(X_test.shape)
     print(y_train.shape)
     print(y_test.shape)
Example #4
def reference():
    from keras.datasets import mnist
    from keras.models import Sequential
    from keras.layers import Flatten, Dense, Activation, Convolution2D, MaxPooling2D
    from keras.utils import np_utils
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    x_train = x_train.astype('float32').reshape(x_train.shape + (1, )) / 255.
    x_test = x_test.astype('float32').reshape(x_test.shape + (1, )) / 255.
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)
    rows, cols = x_train.shape[1:3]

    model = Sequential()
    model.add(Convolution2D(9, 3, 3, border_mode='same',
                            dim_ordering='tf', activation='relu',
                            batch_input_shape=(16, rows, cols, 1)))
    model.add(MaxPooling2D((2, 2), dim_ordering='tf'))
    model.add(Convolution2D(16, 3, 3, border_mode='same',
                            dim_ordering='tf', activation='relu'))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=16, show_accuracy=True,
              validation_data=(x_test, y_test), nb_epoch=20)
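
# This example uses the Keras 1 API (Convolution2D, border_mode, dim_ordering,
# show_accuracy, nb_epoch). A minimal sketch of the same network written against the
# Keras 2 API (an assumption about the intended upgrade, not part of the original):
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Flatten, Dense, Conv2D, MaxPooling2D
from keras.utils import np_utils

def reference_keras2():
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.astype('float32').reshape(x_train.shape + (1,)) / 255.
    x_test = x_test.astype('float32').reshape(x_test.shape + (1,)) / 255.
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)
    rows, cols = x_train.shape[1:3]

    model = Sequential()
    model.add(Conv2D(9, (3, 3), padding='same', activation='relu',
                     input_shape=(rows, cols, 1)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Conv2D(16, (3, 3), padding='same', activation='relu'))
    model.add(Flatten())
    model.add(Dense(10, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop',
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=16, epochs=20,
              validation_data=(x_test, y_test))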
Example #5
    def __init__(self, **kwargs):
        super(MNIST, self).__init__(**kwargs)
        # data_path = osp.abspath( osp.join(__file__, osp.pardir, osp.pardir, osp.pardir, osp.pardir,
        #     'datasets/mnist/keras/mnist.pkl.gz') )
        # with gzip.open(data_path, 'rb') as f:
        #     (X_train, y_train), (X_test, y_test) = pickle.load(f)
        #
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
        if self.data_set == 'train':
            X = X_train
            y = y_train
        elif self.data_set == 'train-small':
            X = X_train[:2000]
            y = y_train[:2000]
        elif self.data_set == 'test':
            X = X_test
            y = y_test
        elif self.data_set == 'test-small':
            X = X_test[:1000]
            y = y_test[:1000]
        elif self.data_set == 'all':
            X = np.vstack((X_train, X_test))
            y = np.concatenate((y_train, y_test))  # labels are 1-D, so concatenate rather than vstack
        else:
            raise ValueError('MNIST: unsupported data_set: {}'.format(self.data_set))

        # normalization
        if self.norm:
            X = X.astype(np.float32) / 255
        X = X[:,np.newaxis,:,:]
        X = self.init_layout_X(X)
        y = self.init_layout_y(y)
        self.X = X
        self.y = y
Example #6
def load(dataset, inside_labels, unknown_labels, with_unknown):
    if dataset == 'mnist':
        from keras.datasets import mnist as data
    elif dataset == 'cifar10':
        from keras.datasets import cifar10 as data
    elif dataset == 'svhn':
        data = SVHN()

    inside_labels.sort()
    unknown_labels.sort()

    (X_train, y_train), (X_test, y_test) = data.load_data()
    y_train = y_train.reshape(-1)
    y_test = y_test.reshape(-1)

    X_train_all = X_train.copy()
    y_train_all = y_train.copy()

    idxs_labels = defaultdict(list)
    for i, label in enumerate(y_train):
        idxs_labels[label].append(i)

    idxs_train = []
    if with_unknown:
        total = sum(len(idxs_labels[label]) for label in idxs_labels if label in inside_labels)
        per_labels = total//(len(inside_labels) + len(unknown_labels))
        for label in idxs_labels:
            if label in inside_labels + unknown_labels:
                idxs_train += idxs_labels[label][:per_labels]
    else:
        for label in idxs_labels:
            if label in inside_labels:
                idxs_train += idxs_labels[label]

    X_train = X_train[idxs_train]/255
    X_train_all = X_train_all/255
    if len(X_train.shape[1:]) == 2:
        X_train = np.expand_dims(X_train, axis=1)
        X_train_all = np.expand_dims(X_train_all, axis=1)

    y_train = y_train[idxs_train]
    y_train = y_train.reshape(y_train.shape[0])

    idxs_inside = [i for i, l in enumerate(y_train) if l in inside_labels]
    idxs_unknown = [i for i, l in enumerate(y_train) if l in unknown_labels]

    label_key = {l: i for i, l in enumerate(inside_labels + unknown_labels)}
    y_train_int = np.array([label_key[l] for l in y_train])
    y_train = np.zeros([len(y_train_int), len(inside_labels)])
    y_train[idxs_inside, :] = np_utils.to_categorical(y_train_int[idxs_inside])
    y_train[idxs_unknown, :] = np.array([1.0/len(inside_labels)]*len(inside_labels))

    X_test = X_test/255
    if len(X_test.shape[1:]) == 2:
        X_test = np.expand_dims(X_test, axis=1)
    y_test = y_test.reshape(y_test.shape[0])

    assert y_train.shape[1] == len(inside_labels)

    return (X_train_all, y_train_all), (X_train, y_train), (X_test, y_test)
Example #7
def get_mnist_data(train_size, binarize, add_noise, noise_proportion,
        test=False):
    print('Loading MNIST dataset')
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    print('Splitting data into training and validation')
    X_val = np.copy(X_train[train_size:])
    y_val = np.copy(y_train[train_size:])
    X_train = np.copy(X_train[:train_size])
    y_train = np.copy(y_train[:train_size])
    print('Training shape = {}, Validation shape = {}, Test shape = {}'.format(
        X_train.shape, X_val.shape, X_test.shape))

    print('Preprocessing data: classes = {}, binarize = {}, add noise = {}'.format(
           nb_classes, binarize, add_noise))
    X_train, Y_train = preprocess_data(X_train, y_train, nb_classes=nb_classes,
            binarize=binarize, noise=add_noise, proportion=noise_proportion)
    X_val, Y_val = preprocess_data(X_val, y_val, nb_classes=nb_classes,
            binarize=binarize, noise=add_noise, proportion=noise_proportion)
    if test:
        X_test, Y_test = preprocess_data(X_test, y_test, nb_classes=nb_classes,
            binarize=binarize, noise=add_noise, proportion=noise_proportion)
        return (X_train, Y_train), (X_val, Y_val), (X_test, Y_test)
    else:
        return (X_train, Y_train), (X_val, Y_val)
Example #8
def main():
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Transform data to a list of 1D arrays
    dim_product = x_train.shape[1] * x_train.shape[2]
    x_train = x_train.reshape(x_train.shape[0], dim_product)
    x_test = x_test.reshape(x_test.shape[0], dim_product)

    # Normalize data so that every point is between 0 and 1
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # Turn labels to categories
    y_train = np_utils.to_categorical(y_train, 10)
    y_test = np_utils.to_categorical(y_test, 10)

    model = Sequential()
    model.add(Dense(1200, input_dim=dim_product, init="normal",
                    activation='tanh'))
    # model.add(Dense(400, init="normal", activation="relu"))
    model.add(Dense(10, init="normal", activation="softmax"))

    model.compile(loss="categorical_crossentropy", optimizer="SGD",
                  metrics=['accuracy'])

    print(f"Models summary: {model.summary()}")

    model.fit(x_train, y_train, batch_size=200, nb_epoch=60,
              validation_split=0.3, verbose=1)

    score = model.evaluate(x_test, y_test, verbose=0)
    print(f"Final score: {score[1]*100}")
    model.save('simple-mnist.h5')
Example #9
def load_mnist_rnn(max_train_items=None, max_test_items=None, normalize=True):
    #(X_train, y_train), (X_test, y_test) = cifar10.load_data()
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    if max_train_items is not None:
        skip_every_trn = int(X_train.shape[0] / max_train_items)
        X_train = X_train[::skip_every_trn,:,:]
        y_train = y_train[::skip_every_trn]
    if max_test_items is not None:
        skip_every_tst = int(X_test.shape[0] / max_test_items)
        X_test  = X_test[::skip_every_tst,:,:]
        y_test  = y_test[::skip_every_tst]

    X_train = X_train.astype('float32') / 255.0
    if normalize:
        X_train -= X_train.mean(axis=0)[None,:]
    X_test  = X_test.astype('float32') / 255.0
    if normalize:
        X_test -= X_test.mean(axis=0)[None,:]
    
    # drop the top 8 rows, then group the remaining 20 rows into 5 time steps of 4 rows (4*28 = 112 features each)
    X_train = X_train[:,8:,:].reshape([len(X_train), -1, 28*4])
    trn=RegressionData(X=np.squeeze(X_train[:,0,:]), Y=X_train)
    trn.ids = y_train

    X_test = X_test[:,8:,:].reshape([len(X_test), -1, 28*4])
    tst=RegressionData(X=np.squeeze(X_test[:,0,:]), Y=X_test)
    tst.ids = y_test
    
    return Datasets(trn, tst)
Example #10
def keras_mnist_data():
    """
    retrieve the MNIST database for keras
    """
    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    img_rows, img_cols = 28, 28    # should be computed from the data

    if K.image_dim_ordering() == 'th':
        X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
        X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
    else:
        X_train = X_train.reshape(X_train.shape[0], img_rows, img_cols, 1)
        X_test = X_test.reshape(X_test.shape[0], img_rows, img_cols, 1)

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255

    # convert class vectors to binary class matrices
    nb_classes = len(set(y_train))
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    return (X_train, Y_train), (X_test, Y_test)
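
# Note: K.image_dim_ordering() and the 'th'/'tf' values are the Keras 1 spelling of
# this backend check. A small sketch of the Keras 2 equivalent (an assumption about
# the Keras version being targeted):
from keras import backend as K

def channels_first():
    # Keras 2 name; corresponds to image_dim_ordering() == 'th' in Keras 1
    return K.image_data_format() == 'channels_first'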
Example #11
def tsting_and_show(no_images = 100):
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    X_test = to_normal(X_test)

    generator = generator_model()
    generator.compile(loss='binary_crossentropy', optimizer="SGD")
    generator.load_weights('generator')

    noise = X_test[:,:,::2,::2]
    generated_images = generator.predict(noise, verbose=1)
    im = combine_images(generated_images[:no_images])
    im_org = combine_images(X_test[:no_images])
    im_dec = combine_images(noise[:no_images])

    figure(figsize=(4*3+2,4))
    subplot(1,3,1)
    imshow(im_org, cmap='gray')
    axis('off')
    title('Original')

    subplot(1,3,2)
    imshow(im_dec, cmap='gray')
    axis('off')
    title('Input 1/2x1/2 with Interpol')

    subplot(1,3,3)
    imshow(im, cmap='gray')
    axis('off')
    title('Expansion by AE-GANS')
Example #12
def load_mnist(image_dim_ordering):

    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    if image_dim_ordering == 'th':
        X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
        X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
    else:
        X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
        X_test = X_test.reshape(X_test.shape[0], 28, 28, 1)

    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')

    X_train = normalization(X_train)
    X_test = normalization(X_test)

    nb_classes = len(np.unique(np.hstack((y_train, y_test))))

    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)

    print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)

    return X_train, Y_train, X_test, Y_test
Example #13
def evaluate(lr, pos):
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    X_train = (X_train.astype("float32")).reshape((60000, 784))
    X_test = (X_test.astype("float32")).reshape((10000, 784))
    X_train /= 255
    X_test /= 255

    Y_train = np_utils.to_categorical(y_train, 10)
    Y_test = np_utils.to_categorical(y_test, 10)

    model = Sequential()
    model.add(Dense(output_dim=layer1, input_dim=784))
    if pos == 0:
        model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(Dense(output_dim=layer2, input_dim=layer1))
    if pos == 1:
        model.add(BatchNormalization())
    model.add(Activation("relu"))
    model.add(Dense(output_dim=10, input_dim=layer2))
    if pos == 2:
        model.add(BatchNormalization())
    model.add(Activation("softmax"))

    model.compile(
        loss="categorical_crossentropy", optimizer=SGD(lr=lr, momentum=0.9, nesterov=True), metrics=["accuracy"]
    )

    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=(X_test, Y_test))
    score = model.evaluate(X_test, Y_test, verbose=0)
    return score[1]
Example #14
 def __init__(self):
     """
     By invoking run(), all of the code is executed.
     """
     (X_train, y_train), (X_test, y_test) = mnist.load_data()
     self.Org = (X_train, y_train), (X_test, y_test)
     self.Data = self.Org
Example #15
def test():
    (X_train, Y_train), (X_test, Y_test) =  mnist.load_data() 
  
    # preprocess data
    X_train = X_train.reshape(60000, 784)
    X_test = X_test.reshape(10000, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255

    with open("svm_rbf.pickle", "rb") as f:
        model = pickle.load(f)
    
    train_yy = model.predict(X_train)
    test_yy = model.predict(X_test) 

    train_err = 100*mean_squared_error(train_yy, Y_train) 
    test_err = 100*mean_squared_error(test_yy, Y_test) 
    
    print("Train. err:", train_err) 
    print("Test err:", test_err) 

    train_acc = accuracy_score(Y_train, train_yy)  
    test_acc = accuracy_score(Y_test, test_yy) 
    
    print("Train acc:", train_acc)
    print("Test acc:", test_acc)
Example #16
def reconstruct():

	inputs = tf.placeholder(
		shape=(None, 28, 28),
		dtype=tf.float32,
		name='inputs',
	)

	encoder = Encoder(tf.reshape(inputs, [-1, 784]))
	latent = encoder.outputs
	decoder = Decoder(latent)
	reconstruction = tf.reshape(decoder.outputs, [-1, 28, 28])

	sess = tf.Session()
	encoder.load(sess, 'saved_models/encoder/', verbose=True)
	decoder.load(sess, 'saved_models/decoder/', verbose=True)

	(x_train, _), (x_test, _) = mnist.load_data()

	sample = np.expand_dims(x_test[np.random.choice(len(x_test))], axis=0)

	generated_sample = sess.run(reconstruction, {inputs: sample / 256.}) * 256.

	image = np.concatenate([sample[0], generated_sample[0]], axis=1)

	PIL.Image.fromarray(image).resize((200, 100)).show()
Example #17
def construct_randomly_split_mnist(nb_splits=10, mode='train'):
    """Split MNIST dataset by labels.

        Args:
                nb_splits: numer of splits
                mode: whether to use train or testing data

        Returns:
            List of (X, y) tuples representing each dataset
    """
    # Load MNIST data and normalize
    nb_classes = 10
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(-1, 784)
    X_test = X_test.reshape(-1, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255

    if mode == 'train':
        X, y = X_train, y_train
    else:
        X, y = X_test, y_test

    return split_dataset_randomly(X, y, nb_splits, nb_classes)
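
# split_dataset_randomly is not shown in this example. A plausible sketch of such a
# helper (an assumption about its behaviour, not the original implementation): shuffle
# the indices and cut them into nb_splits parts, one-hot encoding the labels of each.
import numpy as np
from keras.utils import np_utils

def split_dataset_randomly(X, y, nb_splits, nb_classes):
    idx = np.random.permutation(len(X))
    datasets = []
    for part in np.array_split(idx, nb_splits):
        datasets.append((X[part], np_utils.to_categorical(y[part], nb_classes)))
    return datasets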
Example #18
def train(batch_size):
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = (X_train - 127.5) / 127.5
    X_train = X_train[:, :, :, None]
    X_test = X_test[:, :, :, None]

    d = discriminator_model()
    g = generator_model()

    d_on_g = generator_containing_discriminator(g, d)
    d_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True) # nesterov?
    g_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
    g.compile(loss='binary_crossentropy', optimizer='SGD')
    d_on_g.compile(loss='binary_crossentropy', optimizer=g_optim)
    d.trainable = True
    d.compile(loss='binary_crossentropy', optimizer=d_optim)
    for epoch in range(100):
        for index in range(X_train.shape[0]//batch_size):
            noise = np.random.uniform(-1, 1, size=(batch_size, 100))
            image_batch = X_train[index*batch_size:(index+1)*batch_size]
            generated_image = g.predict(noise, verbose=0)
            X = np.concatenate((image_batch, generated_image)) # original images + generated images
            y = [1] * batch_size + [0] * batch_size # label 1 for real images, 0 for generated images
            # the discriminator is trained to score real images as 1 and generated images as 0
            d_loss = d.train_on_batch(X, y)
            noise = np.random.uniform(-1, 1, (batch_size, 100))
            d.trainable = False
            # the generator is trained so that the discriminator labels its output as real (1)
            g_loss = d_on_g.train_on_batch(noise, [1] * batch_size)
            print("batch {} d_loss {}".format(index, d_loss))
            d.trainable = True
            print("batch {} g_loss: {}".format(index, g_loss))
            if index % 10 == 9:
                g.save_weights('generator', True)
                d.save_weights('discriminator', True)
Example #19
def construct_split_mnist(task_labels,  split='train', multihead=False):
    """Split MNIST dataset by labels.

        Args:
                task_labels: list of list of labels, one for each dataset
                split: whether to use train or testing data

        Returns:
            List of (X, y) tuples representing each dataset
    """
    # Load MNIST data and normalize
    nb_classes = 10
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(-1, 784)
    X_test = X_test.reshape(-1, 784)
    X_train = X_train.astype('float32')
    X_test = X_test.astype('float32')
    X_train /= 255
    X_test /= 255

    if split == 'train':
        X, y = X_train, y_train
    else:
        X, y = X_test, y_test

    return split_dataset_by_labels(X, y, task_labels, nb_classes, multihead)
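
# As above, split_dataset_by_labels is not included in the listing. A sketch of what
# such a helper might do (the multihead handling is an assumption): build one (X, Y)
# tuple per task, keeping only the samples whose label belongs to that task.
import numpy as np
from keras.utils import np_utils

def split_dataset_by_labels(X, y, task_labels, nb_classes, multihead=False):
    datasets = []
    for labels in task_labels:
        mask = np.isin(y, labels)
        if multihead:
            # each task gets its own output head, so remap labels to 0..len(labels)-1
            remap = {label: i for i, label in enumerate(labels)}
            Y = np_utils.to_categorical([remap[label] for label in y[mask]], len(labels))
        else:
            Y = np_utils.to_categorical(y[mask], nb_classes)
        datasets.append((X[mask], Y))
    return datasets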
Example #20
def load_mnist():
    (X_train, Y_train), (X_test, Y_test) = mnist.load_data()
    X_train = X_train.reshape(-1, 784)
    X_test = X_test.reshape(-1, 784)
    X_train = X_train.astype("float32") / 255.0
    X_test = X_test.astype("float32") / 255.0
    return (X_train, Y_train), (X_test, Y_test)
Example #21
 def data_mix(self):
     
     # randomly choose dataset
     dataset = random.choice(['mnist', 'cifar10', 'cifar100'])
     
     n_labels = 10
     
     if dataset == "mnist":
         data = mnist.load_data()
     
     if dataset == "cifar10":
         data = cifar10.load_data()
     
     if dataset == "cifar100":
         data = cifar100.load_data()
         n_labels = 100
     
     # Choose dataset size. This affects regularization needed
     r = np.random.rand()
     
     # not using full dataset to make regularization more important and 
     # speed up testing a little bit
     data_size = int( 2000 * (1-r) + 40000 * r )
     
     # the test data is not used for validation; instead the last 10000 training
     # instances are held out, so trained models can be compared to results in the literature
     (CX, CY), (CXt, CYt) = data
     
     if dataset == "mnist":
         CX = np.expand_dims(CX, axis=1)
     
     data = CX[:data_size], CY[:data_size], CX[-10000:], CY[-10000:]
      
     return data, n_labels
Example #22
def use_tflearn():
    import tflearn

    # Data loading and preprocessing
    import tflearn.datasets.mnist as mnist
    X, Y, testX, testY = mnist.load_data(one_hot=True)

    # Building deep neural network
    input_layer = tflearn.input_data(shape=[None, 784])
    dense1 = tflearn.fully_connected(input_layer, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout1 = tflearn.dropout(dense1, 0.8)
    dense2 = tflearn.fully_connected(dropout1, 64, activation='tanh',
                                     regularizer='L2', weight_decay=0.001)
    dropout2 = tflearn.dropout(dense2, 0.8)
    softmax = tflearn.fully_connected(dropout2, 10, activation='softmax')

    # Regression using SGD with learning rate decay and Top-3 accuracy
    sgd = tflearn.SGD(learning_rate=0.1, lr_decay=0.96, decay_step=1000)
    top_k = tflearn.metrics.Top_k(3)
    net = tflearn.regression(softmax, optimizer=sgd, metric=top_k,
                             loss='categorical_crossentropy')

    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(X, Y, n_epoch=20, validation_set=(testX, testY),
              show_metric=True, run_id="dense_model")
Example #23
def load_mnist_dataset():
    from keras.datasets import mnist

    num_classes = 10
    # input image dimensions
    img_rows, img_cols = 28, 28

    # the data, shuffled and split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # here the shape is changed depending on whether the channels are the 2nd or the 4th dimension
    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255  # scale pixel values to [0, 1]
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    return x_train, y_train, x_test, y_test
Example #24
def get_mnist_data():
	# Use keras dataset instead.
	# conda install keras
	(X_train, y_train), (X_test, y_test) = mnist.load_data()

	# Reshape data once for training, again for test.
	# X_train: 60K x 28x28 images -> list of 60K 784x1 column vectors
	# y_train: 60K labels -> list of 60K 10x1 one-hot vectors
	images_train = [x.reshape(784,1) / 255.0 for x in X_train]
	labels_encoded_train = [np.zeros((10,1)) for y in y_train]
	for i, l in enumerate(y_train): labels_encoded_train[i][l] = 1

	# Reshape for test, same as training
	images_test = [x.reshape(784,1) / 255.0 for x in X_test]
	labels_encoded_test = [np.zeros((10,1)) for y in y_test]
	for i, l in enumerate(y_test): labels_encoded_test[i][l] = 1

	# train and test are lists of (image, one-hot label vector) tuples.
	# (a variant of test with raw integer labels is kept commented out below.)
	train = [(x,y) for x,y in zip(images_train, labels_encoded_train)]
	test =[(x,y) for x,y in zip(images_test, labels_encoded_test)]
	# test = [(x,y) for x,y in zip(images_test, y_test)]
	validate = test[8000:]
	test = test[:8000]

	return train, test, validate
Example #25
    def setUpClass(cls):
        # MNIST dataset used for building pre_trained_models/mnist_cnn/model_mnist_cnn_epoch_3
        cls.batch_size = 128
        cls.num_classes = 10
        cls.epochs = 2
        # input image dimensions
        cls.img_rows, cls.img_cols = 28, 28
        # shuffled and split between train and test sets
        (cls.x_train, cls.y_train), (cls.x_test, cls.y_test) = mnist.load_data()
        if K.image_data_format() == 'channels_first':
            cls.x_train = cls.x_train.reshape(cls.x_train.shape[0], 1, cls.img_rows, cls.img_cols)
            cls.x_test = cls.x_test.reshape(cls.x_test.shape[0], 1, cls.img_rows, cls.img_cols)
            cls.input_shape = (1, cls.img_rows, cls.img_cols)
        else:
            cls.x_train = cls.x_train.reshape(cls.x_train.shape[0], cls.img_rows, cls.img_cols, 1)
            cls.x_test = cls.x_test.reshape(cls.x_test.shape[0], cls.img_rows, cls.img_cols, 1)
            cls.input_shape = (cls.img_rows, cls.img_cols, 1)

        cls.x_train = cls.x_train.astype('float32')
        cls.x_test = cls.x_test.astype('float32')
        cls.x_train /= 255
        cls.x_test /= 255
        cls.x_train = (cls.x_train - 0.5) * 2
        cls.x_test = (cls.x_test - 0.5) * 2

        # convert class vectors to binary class matrices
        cls.y_train = keras.utils.to_categorical(cls.y_train, cls.num_classes)
        cls.y_test = keras.utils.to_categorical(cls.y_test, cls.num_classes)
Example #26
def mnist_dataset():
    (X_train, _), (X_test, _) = mnist.load_data()
    X_train = X_train.astype('float32') / 256.
    X_test = X_test.astype('float32') / 256.
    X_train = X_train.reshape((len(X_train), np.prod(X_train.shape[1:])))
    X_test = X_test.reshape((len(X_test), np.prod(X_test.shape[1:])))
    return X_train, X_test
Example #27
def run_mlp(n_epochs=20):
    np.random.seed(1234)

    nb_inputs = 28*28
    nb_classes = 10

    model = Sequential()
    model.add(Dense(512, input_shape=(nb_inputs,), activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Dense(nb_classes, activation='softmax'))

    # RMSProp works way faster than SGD
    model.compile(loss='categorical_crossentropy', optimizer=RMSprop())

    (X_train, Y_train), (X_test, Y_test) = mnist.load_data()
    X_train = X_train.reshape(60000, 28*28).astype("float32")/255
    X_test = X_test.reshape(10000, 28*28).astype("float32")/255
    Y_train = np_utils.to_categorical(Y_train, nb_classes)
    Y_test = np_utils.to_categorical(Y_test, nb_classes)

    model.fit(X_train, Y_train, nb_epoch=n_epochs, batch_size=128,
              show_accuracy=True, verbose=2,
              validation_split=0.1)
    loss, accuracy = model.evaluate(X_test, Y_test,
                                    show_accuracy=True, verbose=0)
    return accuracy
Example #28
    def train(self, epochs, batch_size=128, save_interval=50):

        # Load the dataset
        (X_train, _), (_, _) = mnist.load_data()

        # Rescale -1 to 1
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_train = np.expand_dims(X_train, axis=3)

        half_batch = int(batch_size / 2)

        for epoch in range(epochs):


            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Sample noise and generate img
            z = np.random.normal(size=(half_batch, self.latent_dim))
            imgs_ = self.generator.predict(z)

            # Select a random half batch of images and encode
            idx = np.random.randint(0, X_train.shape[0], half_batch)
            imgs = X_train[idx]
            z_ = self.encoder.predict(imgs)

            valid = np.ones((half_batch, 1))
            fake = np.zeros((half_batch, 1))

            # Train the discriminator (img -> z is valid, z -> img is fake)
            d_loss_real = self.discriminator.train_on_batch([z_, imgs], valid)
            d_loss_fake = self.discriminator.train_on_batch([z, imgs_], fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # Sample gaussian noise
            z = np.random.normal(size=(batch_size, self.latent_dim))

            # Select a random batch of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]

            valid = np.ones((batch_size, 1))
            fake = np.zeros((batch_size, 1))

            # Train the generator (z -> img is valid and img -> z is invalid)
            g_loss = self.bigan_generator.train_on_batch([z, imgs], [valid, fake])

            # Plot the progress
            print ("%d [D loss: %f, acc: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss[0]))

            # If at save interval => save generated image samples
            if epoch % save_interval == 0:
                # Save generated image samples
                self.save_imgs(epoch)
Example #29
def train_aegans(BATCH_SIZE, disp=True):
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = (X_train.astype(np.float32) - 127.5) / 127.5
    X_train = X_train.reshape((X_train.shape[0], 1) + X_train.shape[1:])
    discriminator = discriminator_model()
    generator = generator_model()
    discriminator_on_generator = \
        generator_containing_discriminator_ae(generator, discriminator)
    d_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
    g_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
    generator.compile(loss='binary_crossentropy', optimizer="SGD")
    discriminator_on_generator.compile(loss=merge_bc_mse, optimizer=g_optim)
    discriminator.trainable = True
    discriminator.compile(loss='binary_crossentropy', optimizer=d_optim)
    noise = np.zeros((BATCH_SIZE, 100))
    for epoch in range(100):
        if disp:
            print("Epoch is", epoch)
            print("Number of batches", int(X_train.shape[0] / BATCH_SIZE))

        for index in range(int(X_train.shape[0] / BATCH_SIZE)):
            # for i in range(BATCH_SIZE):
                #noise[i, :] = np.random.uniform(-1, 1, 100)
            #    noise[i, :] = X_train[i, :].reshape(-1)[np.round(np.linspace(0,783,100)).astype(int)]

            image_batch = X_train[index * BATCH_SIZE:(index + 1) * BATCH_SIZE]
            noise = image_batch[:, :, ::2, ::2].copy()
            generated_images = generator.predict(noise, verbose=0)

            if index % 30 == 0:
                image = combine_images(generated_images)
                image = image * 127.5 + 127.5
                Image.fromarray(image.astype(np.uint8)).save(
                    str(epoch) + "_" + str(index) + ".png")

            X = np.concatenate((image_batch, generated_images))
            y = [1] * BATCH_SIZE + [0] * BATCH_SIZE
            d_loss = discriminator.train_on_batch(X, y)
            if disp and index % 30 == 0:
                print("batch %d d_loss : %f" % (index, d_loss))

            noise = image_batch[:, :, ::2, ::2].copy()
            discriminator.trainable = False

            target_left = np.array([1] * BATCH_SIZE).reshape(BATCH_SIZE, 1)
            target_right = image_batch.reshape(BATCH_SIZE, -1)
            target = np.concatenate([target_left, target_right], axis=1)
            # Debuging code
            # print("epoch, index, target.shape -->", epoch, index, target.shape)
            g_loss = discriminator_on_generator.train_on_batch(
                noise, target)

            discriminator.trainable = True            
            if disp and index % 30 == 0:
                print("batch %d g_loss : %f" % (index, g_loss))

            if index % 10 == 9:
                generator.save_weights('generator', True)
                discriminator.save_weights('discriminator', True)
Example #30
    def train(self, epochs, batch_size=128, save_interval=50):

        # Load the dataset
        (X_train, _), (_, _) = mnist.load_data()

        # Rescale -1 to 1
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_train = np.expand_dims(X_train, axis=3)

        half_batch = int(batch_size / 2)

        for epoch in range(epochs):


            # ---------------------
            #  Train Discriminator
            # ---------------------

            # Select a random half batch of images
            idx = np.random.randint(0, X_train.shape[0], half_batch)
            imgs = X_train[idx]

            # Generate a half batch of embedded images
            latent_fake = self.encoder.predict(imgs)

            latent_real = np.random.normal(size=(half_batch, self.encoded_dim))

            valid = np.ones((half_batch, 1))
            fake = np.zeros((half_batch, 1))

            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(latent_real, valid)
            d_loss_fake = self.discriminator.train_on_batch(latent_fake, fake)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)


            # ---------------------
            #  Train Generator
            # ---------------------

            # Select a random batch of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]

            # Generator wants the discriminator to label the generated representations as valid
            valid_y = np.ones((batch_size, 1))

            # Train the generator
            g_loss = self.adversarial_autoencoder.train_on_batch(imgs, [imgs, valid_y])

            # Plot the progress
            print ("%d [D loss: %f, acc: %.2f%%] [G loss: %f, mse: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss[0], g_loss[1]))

            # If at save interval => save generated image samples
            if epoch % save_interval == 0:
                # Select 25 random images to pass to save_imgs
                idx = np.random.randint(0, X_train.shape[0], 25)
                imgs = X_train[idx]
                self.save_imgs(epoch, imgs)
Example #31
from keras.utils import np_utils
from keras.datasets import mnist
(train_feature, train_label), (test_feature, test_label) = mnist.load_data()

print(train_label[0:5])
train_label_onehot = np_utils.to_categorical(train_label)
print(train_label_onehot[0:5])
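
# The shapes make the effect of to_categorical visible: each integer label becomes a
# length-10 one-hot row.
print(train_label.shape)          # (60000,)
print(train_label_onehot.shape)   # (60000, 10)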
Example #32
def model_train(bot, update):
    import keras
    from keras.datasets import mnist
    from keras.models import Sequential
    from keras.layers import Dense, Dropout, Flatten
    from keras.layers import Conv2D, MaxPooling2D
    from keras import backend as K

    ##################################################################################################
    from dl_bot import DLBot
    from telegram_bot_callback import TelegramCallback
    
    # bot = DLBot(token=telegram_token, user_id=telegram_user_id)
    telegram_callback = TelegramCallback(bot, update)
    ###################################################################################################

    batch_size = 128
    num_classes = 10
    epochs = 5

    img_rows, img_cols = 28, 28

    (x_train, y_train), (x_test, y_test) = mnist.load_data()


    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)


    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3),
                    activation='relu',
                    input_shape=input_shape))
    model.add(Conv2D(64, (3, 3), activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                optimizer=keras.optimizers.Adadelta(),
                metrics=['accuracy'])

    model.fit(x_train[:1000], y_train[:1000],
            batch_size=batch_size,
            epochs=epochs,
            verbose=1,
            validation_data=(x_test, y_test),
            callbacks=[telegram_callback])

    score = model.evaluate(x_test, y_test, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    return score
Example #33
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Flatten
from keras.utils import to_categorical
import wandb
from wandb.keras import WandbCallback

# logging code
run = wandb.init()
config = run.config

# load data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
img_width = X_train.shape[1]
img_height = X_train.shape[2]

# one hot encode outputs
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
labels = range(10)

num_classes = y_train.shape[1]

# create model
model = Sequential()
model.add(Flatten(input_shape=(img_width, img_height)))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
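
# The listing stops after compile(), and the imported WandbCallback is never used.
# A sketch of how training typically continues (the epoch count is an assumption,
# not a value taken from the original script):
model.fit(X_train, y_train,
          validation_data=(X_test, y_test),
          epochs=10,
          callbacks=[WandbCallback(data_type="image", labels=list(labels))])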
Example #34
def train(self, epochs, batch_size=128, sample_interval=50):

    # Load the dataset
    (X_train, y_train), (_, _) = mnist.load_data()

    # Rescale -1 to 1
    X_train = (X_train.astype(np.float32) - 127.5) / 127.5
    X_train = np.expand_dims(X_train, axis=3)
    y_train = y_train.reshape(-1, 1)

    half_batch = int(batch_size / 2)

    for epoch in range(epochs):

        # ---------------------
        #  Train Discriminator
        # ---------------------

        # Select a random half batch of images
        idx = np.random.randint(0, X_train.shape[0], half_batch)
        imgs = X_train[idx]

        noise = np.random.normal(0, 1, (half_batch, 100))

        # The labels of the digits that the generator tries to create an
        # image representation of
        sampled_labels = np.random.randint(0, 10, half_batch).reshape(-1, 1)

        # Generate a half batch of new images
        gen_imgs = generator.predict([noise, sampled_labels])
        valid = np.ones((half_batch, 1))
        fake = np.zeros((half_batch, 1))

        # Image labels. 0-9 if image is valid or 10 if it is generated (fake)
        img_labels = y_train[idx]
        fake_labels = 10 * np.ones(half_batch).reshape(-1, 1)

        # Train the discriminator
        d_loss_real = discriminator.train_on_batch(imgs, [valid, img_labels])
        d_loss_fake = discriminator.train_on_batch(gen_imgs,
                                                   [fake, fake_labels])
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

        # ---------------------
        #  Train Generator
        # ---------------------

        # Sample generator input
        noise = np.random.normal(0, 1, (batch_size, 100))
        valid = np.ones((batch_size, 1))
        # Generator wants discriminator to label the generated images as the intended
        # digits
        sampled_labels = np.random.randint(0, 10, batch_size).reshape(-1, 1)

        # Train the generator
        g_loss = combined.train_on_batch([noise, sampled_labels],
                                         [valid, sampled_labels])

        # Plot the progress
        print("%d [D loss: %f, acc.: %.2f%%, op_acc: %.2f%%] [G loss: %f]" %
              (epoch, d_loss[0], 100 * d_loss[3], 100 * d_loss[4], g_loss[0]))

        # If at save interval => save generated image samples
        if epoch % sample_interval == 0:
            save_model()
            sample_images(epoch)

    def sample_images(self, epoch):
        r, c = 10, 10
        noise = np.random.normal(0, 1, (r * c, 100))
        sampled_labels = np.array([num for _ in range(r) for num in range(c)])

        gen_imgs = generator.predict([noise, sampled_labels])

        # Rescale images 0 - 1
        gen_imgs = 0.5 * gen_imgs + 0.5

        fig, axs = plt.subplots(r, c)
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt, :, :, 0], cmap='gray')
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig("images/%d.png" % epoch)
        plt.close()

    def save_model(self):
        def save(model, model_name):
            model_path = "saved_model/%s.json" % model_name
            weights_path = "saved_model/%s_weights.hdf5" % model_name
            options = {"file_arch": model_path, "file_weight": weights_path}
            json_string = model.to_json()
            open(options['file_arch'], 'w').write(json_string)
            model.save_weights(options['file_weight'])

        save(generator, "mnist_acgan_generator")
        save(discriminator, "mnist_acgan_discriminator")
        save(combined, "mnist_acgan_adversarial")
Example #35
def load_mnist_data():
    return mnist.load_data()
"""Program 2&3.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/13xtHNr4RITYpAKZwHeN7gdXYXUjEPf6-
"""

from keras import Sequential
from keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Dense
from keras.utils import to_categorical

(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
#display the first image in the training data
plt.imshow(train_images[0, :, :], cmap='gray')
plt.title('Ground Truth : {}'.format(train_labels[0]))
plt.show()

#process the data
#1. convert each image of shape 28*28 to 784 dimensional
dimData = np.prod(train_images.shape[1:])
print("Dim data:", dimData)
train_data = train_images.reshape(train_images.shape[0], dimData)
test_data = test_images.reshape(test_images.shape[0], dimData)

#convert data and scale values between 0 and 1
train_data = train_data.astype('float')
test_data = test_data.astype('float')
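
# The cell is cut off here. The comment above announces scaling to [0, 1], so the
# missing tail presumably divides by 255 and one-hot encodes the labels, roughly:
train_data /= 255.0
test_data /= 255.0
train_labels_one_hot = to_categorical(train_labels)
test_labels_one_hot = to_categorical(test_labels)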
Example #37
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 17:14:02 2020

@author: yurifarod
"""

import matplotlib.pyplot as plt
import numpy as np
from keras.datasets import mnist
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D, Flatten, Reshape

(previsores_treinamento, _), (previsores_teste, _) = mnist.load_data()
previsores_treinamento = previsores_treinamento.reshape(
    (len(previsores_treinamento), 28, 28, 1))
previsores_teste = previsores_teste.reshape((len(previsores_teste), 28, 28, 1))

previsores_treinamento = previsores_treinamento.astype('float32') / 255
previsores_teste = previsores_teste.astype('float32') / 255

autoencoder = Sequential()

# Encoder
autoencoder.add(
    Conv2D(filters=16,
           kernel_size=(3, 3),
           activation='relu',
           input_shape=(28, 28, 1)))
autoencoder.add(MaxPooling2D(pool_size=(2, 2)))
Example #38
def train_model(batch_size=1000,
                epochs=1005,
                num_samples=10000,
                var1=0.99,
                var2=0.01):
    num_classes = 10
    print('var1:', var1)
    print('var2:', var2)
    # input image dimensions
    img_rows, img_cols = 28, 28

    # the data, split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255
    x_train = x_train[0:num_samples]
    print('x_train shape:', x_train.shape)
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)[0:num_samples]
    y_test = keras.utils.to_categorical(y_test, num_classes)

    model = Sequential()
    model.add(Flatten(input_shape=(28, 28, 1)))
    model.add(Dense(64, activation='tanh'))
    model.add(Dense(64, activation='tanh'))
    model.add(Dense(64, activation='tanh'))
    model.add(Dense(64, activation='tanh'))
    model.add(Dense(64, activation='tanh'))
    model.add(Dense(64, activation='tanh'))
    model.add(Dense(64, activation='tanh'))
    model.add(Dense(64, activation='tanh'))
    model.add(Dense(64, activation='tanh'))
    model.add(Dense(num_classes, activation='softmax'))

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.sgd(),
                  metrics=['accuracy'])
    model.summary()
    n_pop = 10
    n_para = 84170
    fit = np.zeros((n_pop, 1))
    pop = np.random.normal(0, 0.01, (n_pop, n_para))
    count_update = 0

    flag_add = 0
    flag_stop = 0
    minibatches = random_mini_batches(x_train, y_train, batch_size)
    len_batch = len(minibatches)
    record = np.zeros((epochs, 4))

    count_non_update = 0
    for i in range(epochs):
        print(i,
              'th iteration------------------------------------------------')
        count_batches = 0
        for minibatch in minibatches:
            (minibatch_X, minibatch_Y) = minibatch
            model.train_on_batch(minibatch_X, minibatch_Y)

            for n in range(10):
                a = np.array(model.get_weights())
                score_old = model.evaluate(minibatch_X, minibatch_Y,
                                           verbose=0)[0]
                b = np.array(model.get_weights())
                if flag_add == 0:
                    j = np.random.randint(0, len(b))
                    adding_random = np.random.normal(0, 0.01, b[j].shape)
                b[j] = b[j] + adding_random
                model.set_weights(b)
                score_new = model.evaluate(minibatch_X, minibatch_Y,
                                           verbose=0)[0]
                if score_new < score_old * (var1 + i * var2 / epochs):
                    a = copy.deepcopy(b)
                    score_old = score_new
                    count_update = count_update + 1
                    flag_add = 1
                    break
                else:
                    model.set_weights(a)
                    flag_add = 0
            count_batches = count_batches + 1
            score = model.evaluate(x_train, y_train, verbose=0)
            # with K.Session() as sess:
            #
            #     outputTensor = model.output  # Or model.layers[index].output
            #     listOfVariableTensors = model.trainable_weights
            #     gradients =K.gradients(outputTensor, listOfVariableTensors)

        record[i, 0], record[i, 1] = model.evaluate(x_train,
                                                    y_train,
                                                    verbose=0)
        record[i, 2], record[i, 3] = model.evaluate(x_test, y_test, verbose=0)
        print('Train loss:', record[i, 0])
        print('Train accuracy:', record[i, 1])
        print('Test loss:', record[i, 2])
        print('Test accuracy', record[i, 3])
        print('count_update:', count_update)
        if i > 999 and i % 500 == 0:
            df = pd.DataFrame(
                record,
                columns=['train_loss', 'train_acc', 'test_loss', 'test_acc'])
            df.to_csv('minist_ga' + str(i) + str(num_samples) + str(var1) +
                      str(var2) + '.csv',
                      index=None,
                      header=True)
Example #39
from keras.layers import Input, Dense
from keras.models import Model
from keras.datasets import mnist
from keras import regularizers
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from keras import backend as K

(x_train, _), (x_test, _) = mnist.load_data()

x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))  # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))  # adapt this if using `channels_first` image data format

print(x_train.shape)
print(x_test.shape)

input_img = Input(shape=(28, 28, 1))

x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)

# at this point the representation is (4, 4, 8) i.e. 128-dimensional
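
# The listing ends with the encoder. In the well-known Keras convolutional
# autoencoder this encoder is followed by a mirrored decoder and the usual
# compile/fit calls; a sketch of that continuation (an assumption, since the rest
# of the file is not shown):
x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(16, (3, 3), activation='relu')(x)  # valid padding: 16x16 -> 14x14, so the next upsample restores 28x28
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.fit(x_train, x_train, epochs=50, batch_size=128, shuffle=True,
                validation_data=(x_test, x_test))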
Example #40
#loading the dataset
from keras.datasets import mnist

dataset = mnist.load_data('mymnist.db')
train, test = dataset
X_train, y_train = train
X_test, y_test = test
X_train = X_train.reshape(60000, 28, 28, 1)
X_test = X_test.reshape(10000, 28, 28, 1)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
from keras.utils.np_utils import to_categorical

y_train_cat = to_categorical(y_train)
y_test = to_categorical(y_test)
#Adding layers
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.models import Sequential
from keras.optimizers import adam

model = Sequential()
i = 1
n = 6
for i in range(i):
    model.add(
        Convolution2D(filters=n,
                      kernel_size=(3, 3),
                      activation='relu',
Example #41
import keras
from tflearn.layers.core import fully_connected
from keras.datasets import mnist
from keras.layers import Input, Dense
from keras.models import Model
from keras import backend as K

num_classes = 10
img_rows, img_cols = 28, 28

(trainX, trainY), (testX, testY) = mnist.load_data()

trainX = trainX.reshape(trainX.shape[0], img_rows * img_cols)
testX = testX.reshape(testX.shape[0], img_rows * img_cols)

# convert the image pixels to real numbers between 0 and 1
trainX = trainX.astype('float32')
testX = testX.astype('float32')
trainX /= 255.0
testX /= 255.0

# convert the ground-truth labels to the required format (one-hot encoding)
trainY = keras.utils.to_categorical(trainY, num_classes)
testY = keras.utils.to_categorical(testY, num_classes)

# define two inputs: one for the raw image data, the other for the correct answer
input1 = Input(shape=(784, ), name='input1')
input2 = Input(shape=(10, ), name='input2')

# define a fully connected network with only one hidden node
x = Dense(1, activation='relu')(input1)
Example #42
def load_data(op_path, vid_path):
    """Load training data from MNIST, the given operators image and the digits/operators in the video.

    Args:
        op_path: Path to the operator image.
        vid_path: Path to the video.

    Return:
        tuple: training data.
        tuple: testing data.
    """
    # load MNIST data
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # load the given operators image, crop it for each operator and generate more data
    operators = cv2.imread(op_path)
    operators = cv2.cvtColor(operators, cv2.COLOR_RGB2GRAY)

    plus = cv2.resize(~operators[:, :316], (28, 28)).reshape(1, 28, 28, 1)
    equal = cv2.resize(~operators[:, 340:340 + 316], (28, 28)).reshape(1, 28, 28, 1)
    minus = cv2.resize(~operators[:, 710:710 + 316], (28, 28)).reshape(1, 28, 28, 1)
    divide = cv2.resize(~operators[:, 1079:1079 + 316], (28, 28)).reshape(1, 28, 28, 1)
    multiply = cv2.resize(~operators[:, 1420:1420 + 316], (28, 28)).reshape(1, 28, 28, 1)

    op_datagen = ImageDataGenerator(
        rotation_range=360,
        zoom_range=[0.9, 1.6],
        vertical_flip=True,
        horizontal_flip=True)
    plus_numpy = np.array([list(op_datagen.flow(plus)) for _ in range(10000)])
    equal_numpy = np.array([list(op_datagen.flow(equal)) for _ in range(10000)])
    minus_numpy = np.array([list(op_datagen.flow(minus)) for _ in range(10000)])
    divide_numpy = np.array([list(op_datagen.flow(divide)) for _ in range(10000)])
    multiply_numpy = np.array([list(op_datagen.flow(multiply)) for _ in range(10000)])

    x_op = np.concatenate((plus_numpy, minus_numpy, multiply_numpy, divide_numpy, equal_numpy))
    y_op = np.concatenate((9 * np.ones(len(plus)), 10 * np.ones(len(minus)), 11 * np.ones(len(multiply)), 12 * np.ones(len(divide)), 13 * np.ones(len(equal))))

    rand_perm = np.random.permutation(len(y_op))
    x_op = x_op[rand_perm]
    y_op = y_op[rand_perm]

    x_op_train = x_op[:27000]
    x_op_test = x_op[27000:]
    y_op_train = y_op[:27000]
    y_op_test = y_op[27000:]

    # load digits and operators in the video
    cap = cv2.VideoCapture(vid_path)
    frames = []
    while cap.isOpened():
        ret, frame = cap.read()
        # if frame is read correctly ret is True
        if not ret:
            break
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frames.append(frame)

    cap.release()

    two = []
    three = []
    seven = []
    plus = []
    multiply = []
    divide = []
    equal = []

    angles = [0, 45, 100, 185, 228, 274, 350]
    for ang in angles:
        M = cv2.getRotationMatrix2D((360, 240), ang, 1.0)
        tmp = cv2.warpAffine(frames[0], M, (720, 480))
        tip, center = find_red_arrow(tmp)
        elements, _ = find_math_elements(tmp, center, 18)
        plus.append(255. - normalize(elements[0]))
        three.append(255. - normalize(elements[1]))
        two.append(255. - normalize(elements[2]))
        divide.append(255. - normalize(elements[3]))
        seven.append(255. - normalize(elements[4]))
        seven.append(255. - normalize(elements[5]))
        equal.append(255. - normalize(elements[6]))
        multiply.append(255. - normalize(elements[7]))
        three.append(255. - normalize(elements[8]))
        two.append(255. - normalize(elements[9]))

    two_numpy = np.array([np.array(img) for img in two])
    three_numpy = np.array([np.array(img) for img in three])
    seven_numpy = np.array([np.array(img) for img in seven])
    plus_numpy = np.array([np.array(img) for img in plus])
    multiply_numpy = np.array([np.array(img) for img in multiply])
    divide_numpy = np.array([np.array(img) for img in divide])
    equal_numpy = np.array([np.array(img) for img in equal])

    x_vid = np.concatenate((two_numpy, three_numpy, seven_numpy, plus_numpy, multiply_numpy, divide_numpy, equal_numpy))
    y_vid = np.concatenate((2 * np.ones(len(two)), 3 * np.ones(len(three)), 7 * np.ones(len(seven)), 9 * np.ones(len(plus)), 11 * np.ones(len(multiply)), 12 * np.ones(len(divide)), 13 * np.ones(len(equal))))

    rand_perm = np.random.permutation(len(y_vid))
    x_vid = x_vid[rand_perm]
    y_vid = y_vid[rand_perm]

    x_vid_train = x_vid[:50]
    x_vid_test = x_vid[50:]
    y_vid_train = y_vid[:50]
    y_vid_test = y_vid[50:]

    x_train = np.concatenate((x_train, x_op_train, x_vid_train))
    y_train = np.concatenate((y_train, y_op_train, y_vid_train))
    x_test = np.concatenate((x_test, x_op_test, x_vid_test))
    y_test = np.concatenate((y_test, y_op_test, y_vid_test))

    return (x_train, y_train), (x_test, y_test)
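A hedged usage sketch for the loader above; 'load_video_mnist_data' and the video path are placeholder names, since the function signature sits above this excerpt:

# Hedged usage sketch, not part of the original code.
from keras.utils import to_categorical

(x_train, y_train), (x_test, y_test) = load_video_mnist_data('math_video.mp4')  # hypothetical name/path
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
y_train = to_categorical(y_train, 14)  # labels run 0-13 above, i.e. 14 output classes
y_test = to_categorical(y_test, 14)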
Beispiel #43
0
    model_file = sys.argv[1]
    nums_to_check = [float(n) for n in sys.argv[2].strip('[]').split(',')]
    ds = sys.argv[3]

    # Load original model
    full_model = load_model(os.path.join('./models',model_file), compile=False)
    full_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])

    # Load appropriate dataset
    if(ds == 'fashion_mnist'):
        (train_data, train_labels), (test_data, test_labels) = fashion_mnist.load_data()
        train_data = train_data.reshape((train_data.shape[0], 28, 28, 1)).astype('float32')/255
        test_data = test_data.reshape((test_data.shape[0], 28, 28, 1)).astype('float32')/255
    elif(ds == 'mnist'):
        (train_data, train_labels), (test_data, test_labels) = mnist.load_data()
        train_data = train_data.reshape((train_data.shape[0], 28, 28, 1)).astype('float32')/255
        test_data = test_data.reshape((test_data.shape[0], 28, 28, 1)).astype('float32')/255
    else:
        raise NotImplementedError('Dataset not understood.')

    # Seed the generator.
    np.random.seed()

    # Evaluate original model.
    test_labels = to_categorical(test_labels)
    results_full = full_model.evaluate(test_data, test_labels)

    # Keep as oracle all layers but the last two.
    oracle_model = Sequential()
    for layer in full_model.layers[:-2]:
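        # Hedged continuation sketch -- not in the original excerpt: copy each kept
        # layer into the oracle; the layer objects carry their trained weights.
        oracle_model.add(layer)

    # Assumed follow-up (illustrative only): compile the truncated oracle the same
    # way as the full model so it can be evaluated or extended.
    oracle_model.compile(loss='categorical_crossentropy', optimizer='adam',
                         metrics=['categorical_accuracy'])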
Beispiel #44
0
# MNIST classifier.

# Download the MNIST dataset.
from tensorflow import keras
from PIL import ImageTk, Image
from numpy import asarray, argmax
from keras.datasets import mnist
(X_train_raw, Y_train_raw), (X_test_raw, Y_test_raw) = mnist.load_data()

# Normalize the dataset.
X_train = X_train_raw.reshape(60000, 784)
X_test = X_test_raw.reshape(10000, 784)
X_train = X_train / 255
X_test = X_test / 255

# Convert the labels to one-hot vectors.
Y_train = keras.utils.to_categorical(Y_train_raw, 10)
Y_test = keras.utils.to_categorical(Y_test_raw, 10)

# Build the neural network.
from keras.layers.core import Dense, Dropout, Activation
model = keras.Sequential()
model.add(Dense(100, activation="relu"))
model.add(Dense(10, activation="softmax"))

# Compile and train the neural network.
print("Training the neural network...")
model.compile(loss='categorical_crossentropy', metrics=['accuracy'])
result = model.fit(X_train,
                   Y_train,
                   batch_size=200,
Beispiel #45
0
    def train(self, epochs, batch_size=128, sample_interval=50):
        # Load the dataset
        (X_train, y_train), (_, _) = mnist.load_data()

        # Rescale MNIST to 32x32
        X_train = np.array([
            scipy.misc.imresize(x, [self.img_rows, self.img_cols])
            for x in X_train
        ])

        # Rescale -1 to 1
        X_train = (X_train.astype(np.float32) - 127.5) / 127.5
        X_train = np.expand_dims(X_train, axis=3)
        y_train = y_train.reshape(-1, 1)

        half_batch = int(batch_size / 2)

        for epoch in range(epochs):

            # ------------------------
            # Train Discriminator
            # ------------------------

            # Sample half batch of images
            idx = np.random.randint(0, X_train.shape[0], half_batch)
            imgs = X_train[idx]
            labels = y_train[idx]

            masked_imgs = self.mask_randomly(imgs)

            # Generate a half batch of reconstructed images from the masked inputs
            gen_imgs = self.generator.predict(masked_imgs)

            valid = np.ones((half_batch, 4, 4, 1))
            fake = np.zeros((half_batch, 4, 4, 1))

            labels = to_categorical(labels, num_classes=self.num_classes + 1)
            fake_labels = to_categorical(np.full((half_batch, 1),
                                                 self.num_classes),
                                         num_classes=self.num_classes + 1)

            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(
                imgs, [valid, labels])
            d_loss_fake = self.discriminator.train_on_batch(
                gen_imgs, [fake, fake_labels])
            d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)

            # ----------------------
            # Train Generator
            # ----------------------

            # Select a random half batch of images
            idx = np.random.randint(0, X_train.shape[0], batch_size)
            imgs = X_train[idx]

            masked_imgs = self.mask_randomly(imgs)

            # Generator wants the discriminator to label the generated images as valid
            valid = np.ones((batch_size, 4, 4, 1))

            # Train the generator
            # print("------------------------------")
            # print(masked_imgs.shape)
            # print(valid.shape)
            # print("------------------------------")
            g_loss = self.combined.train_on_batch(masked_imgs, valid)

            # Plot the progress
            print("%d [D loss: %f, op_acc: %.2f%% [G loss: %f]" %
                  (epoch, d_loss[0], 100 * d_loss[4], g_loss))

            # If at save interval => save generated image samples
            if epoch % sample_interval == 0:
                # Select a random half batch of images
                idx = np.random.randint(0, X_train.shape[0], 6)
                imgs = X_train[idx]
                self.sample_images(epoch, imgs)
                self.save_model()
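mask_randomly() is used throughout the loop but is not shown in this excerpt. A minimal sketch of what such a helper might look like, purely illustrative, assuming square patches are simply zeroed out (the patch size and the numpy import are assumptions):

    def mask_randomly(self, imgs, mask_size=8):
        # Hypothetical helper, not the original implementation: blank out one
        # random mask_size x mask_size patch per image (assumes numpy as np).
        masked = np.copy(imgs)
        h, w = imgs.shape[1], imgs.shape[2]
        for k in range(imgs.shape[0]):
            y1 = np.random.randint(0, h - mask_size)
            x1 = np.random.randint(0, w - mask_size)
            masked[k, y1:y1 + mask_size, x1:x1 + mask_size, :] = 0
        return masked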
Beispiel #46
0
parser.add_argument('weight_nc', help="weight hyperparm to control neuron coverage", type=float)
parser.add_argument('step', help="step size of gradient descent", type=float)
parser.add_argument('seeds', help="number of seeds of input", type=int)
parser.add_argument('grad_iterations', help="number of iterations of gradient descent", type=int)
parser.add_argument('threshold', help="threshold for determining neuron activated", type=float)
parser.add_argument('-t', '--target_model', help="target model that we want it predicts differently",
                    choices=[0, 1, 2], default=0, type=int)
parser.add_argument('-sp', '--start_point', help="occlusion upper left corner coordinate", default=(0, 0), type=tuple)
parser.add_argument('-occl_size', '--occlusion_size', help="occlusion size", default=(10, 10), type=tuple)

args = parser.parse_args()

# input image dimensions
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(_, _), (x_test, _) = mnist.load_data()

x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)

x_test = x_test.astype('float32')
x_test /= 255

# define input tensor as a placeholder
input_tensor = Input(shape=input_shape)

# load multiple models sharing same input tensor
model1 = Model1(input_tensor=input_tensor)
model2 = Model2(input_tensor=input_tensor)
model3 = Model3(input_tensor=input_tensor)
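With all three models sharing the same input tensor, the usual next step in this kind of script is to look for inputs on which they disagree. A hedged sketch, assuming Model1/Model2/Model3 return compiled Keras models:

# Hedged sketch, not part of the original script: count test images the models disagree on.
import numpy as np

preds1 = model1.predict(x_test).argmax(axis=1)
preds2 = model2.predict(x_test).argmax(axis=1)
preds3 = model3.predict(x_test).argmax(axis=1)
disagree = np.where((preds1 != preds2) | (preds1 != preds3))[0]
print('models disagree on %d of %d test images' % (len(disagree), len(x_test)))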
Beispiel #47
0
import pandas as pd
import numpy as np
from keras.preprocessing import image
from keras.models import Sequential
from keras.layers import Conv2D, Lambda
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense, Dropout
from keras.layers.normalization import BatchNormalization
from keras.utils.np_utils import to_categorical
from keras.datasets import mnist

#Load data from MNIST database
(train_x, train_y), (test_x, test_y) = mnist.load_data()
train_x = train_x.reshape(train_x.shape[0], 28, 28, 1).astype("float32")
test_x = test_x.reshape(test_x.shape[0], 28, 28, 1).astype("float32")

#one-hot encoding
train_y = to_categorical(train_y)
test_y = to_categorical(test_y)
num_classes = test_y.shape[1]

#augment data
newData = image.ImageDataGenerator()
batches = newData.flow(train_x, train_y, batch_size=64)

#normalize data
m = np.mean(train_x)
s = np.std(train_x)
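The excerpt stops right after computing the training mean and standard deviation. A hedged sketch of one common way to use them, standardizing inputs inside the network with the already-imported Lambda layer (the layer sizes are assumptions, not the original model):

# Hedged sketch, not the original continuation: standardize inputs with m and s.
def standardize(x):
    return (x - m) / (s + 1e-7)

model = Sequential([
    Lambda(standardize, input_shape=(28, 28, 1)),
    Conv2D(32, (3, 3), activation='relu'),
    MaxPooling2D(),
    Flatten(),
    Dense(num_classes, activation='softmax'),
])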

Beispiel #48
0
from keras.datasets import mnist
from keras.preprocessing.image import ImageDataGenerator
from matplotlib import pyplot as plt
from keras import backend

backend.set_image_data_format('channels_first')

# Load the data
(X_train, y_train), (X_validation, y_validation) = mnist.load_data()

X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')
X_validation = X_validation.reshape(X_validation.shape[0], 1, 28,
                                    28).astype('float32')

# ZCA whitening
imgGen = ImageDataGenerator(zca_whitening=True)
imgGen.fit(X_train)

for X_batch, y_batch in imgGen.flow(X_train, y_train, batch_size=9):
    for i in range(0, 9):
        plt.subplot(331 + i)
        plt.imshow(X_batch[i].reshape(28, 28), cmap=plt.get_cmap('gray'))
    plt.show()
    break
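To keep the whitened samples rather than only display them, the same flow() call can write its batches to disk. A small sketch with an assumed output directory:

# Hedged sketch: persist whitened samples instead of plotting them.
import os
os.makedirs('zca_out', exist_ok=True)  # assumed output directory
for X_batch, y_batch in imgGen.flow(X_train, y_train, batch_size=9,
                                    save_to_dir='zca_out',
                                    save_prefix='zca', save_format='png'):
    break  # one batch is enough for a quick inspection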
Beispiel #49
0
import numpy as np
import keras
from keras import backend
from keras.datasets import mnist
from keras.models import load_model
from keras import backend as K
from cleverhans.attacks import FastGradientMethod
from cleverhans.utils_keras import KerasModelWrapper
from sklearn.decomposition import PCA

#Perturbation magnitude. Adjust and run to observe behavior for various bounds.
eta = 0.25

#Load training and testing data and normalize in [0, 1]
(data_train, labels_train), (data_test, labels_test) = mnist.load_data()
data_train = data_train / 255.0
data_test = data_test / 255.0

#Flatten dataset (New shape for training and testing set is (60000,784) and (10000, 784))
data_train = data_train.reshape(
    (len(data_train), np.prod(data_train.shape[1:])))
data_test = data_test.reshape((len(data_test), np.prod(data_test.shape[1:])))

#Create labels as one-hot vectors
labels_train = keras.utils.np_utils.to_categorical(labels_train,
                                                   num_classes=10)
labels_test = keras.utils.np_utils.to_categorical(labels_test, num_classes=10)

#Import trained classifiers
backend.set_learning_phase(False)
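The excerpt ends before the classifier is loaded and attacked. As a rough illustration of an FGSM perturbation bounded by eta, here is a backend-only sketch that sidesteps the cleverhans wrapper imported above; the model file name is a placeholder:

# Hedged sketch, not the original code: plain Keras-backend FGSM on the flattened test set.
model = load_model('classifier.h5')  # hypothetical file name
loss = keras.losses.categorical_crossentropy(labels_test, model.output)
grads = K.gradients(loss, model.input)[0]
grad_fn = K.function([model.input], [K.sign(grads)])
perturbation = eta * grad_fn([data_test])[0]
data_test_adv = np.clip(data_test + perturbation, 0.0, 1.0)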
Beispiel #50
0
import os
import os.path
import tempfile
import matplotlib.pyplot as plt
import keras
from keras.datasets import mnist

# Construct a location in /tmp dir to hold cached data
dataPath = os.path.join(tempfile.gettempdir(), str(os.getuid()))
print(dataPath)
if not os.path.exists(dataPath):
    os.mkdir(dataPath)
filenameWithPath = os.path.join(dataPath, "mnist")

# Get training and testing sets
(x_train, y_train), (x_test, y_test) = mnist.load_data(path=filenameWithPath)

# Save single image to a file to view
plt.imshow(x_train[0], cmap='Greys')
plt.savefig('digit.png')
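A small hedged extension of the same idea: write a labelled 3x3 grid of training digits to a second file.

# Hedged sketch: save a labelled 3x3 grid of training digits.
fig, axes = plt.subplots(3, 3, figsize=(6, 6))
for i, ax in enumerate(axes.flat):
    ax.imshow(x_train[i], cmap='Greys')
    ax.set_title(str(y_train[i]))
    ax.axis('off')
fig.savefig('digit_grid.png')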
Beispiel #51
0
import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist  # example dataset provided with keras

mnist.load_data()  # load the mnist files (downloads and caches them on first call)

(x_train, y_train), (
    x_test,
    y_test) = mnist.load_data()  # mnist already comes split into train and test sets

print(x_train[0])  # values from 0 to 255 (pixel intensities)
print('y_train: ', y_train[0])  # 5

print(x_train.shape)  # (60000, 28, 28)
print(x_test.shape)  # (10000, 28, 28)
print(y_train.shape)  # (60000,)        : a 1-D vector of 60000 scalars
print(y_test.shape)  # (10000,)

print(x_train[0].shape)  # (28, 28)
# plt.imshow(x_train[0], 'gray')                          # pass the 2-D array to view the numeric data as an image
# # plt.imshow(x_train[0])                                  # rendered with a color map instead of grayscale
# plt.show()                                              # display the figure

# Data preprocessing 1: one-hot encoding => applied to the y values
from keras.utils import np_utils

y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
print(y_train.shape)  #  (60000, 10)

# Data preprocessing 2: normalization (MinMaxScaler) => applied to the x values
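The excerpt cuts off at the x-preprocessing step it announces. A hedged sketch of the usual scaling for MNIST pixels, equivalent in effect to min-max scaling of the 0-255 range:

# Hedged sketch of preprocessing step 2: scale pixels from [0, 255] to [0, 1].
x_train = x_train.reshape(60000, 28 * 28).astype('float32') / 255
x_test = x_test.reshape(10000, 28 * 28).astype('float32') / 255
print(x_train.shape)  # (60000, 784)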
Beispiel #52
0
from keras.datasets import mnist
from keras.utils import to_categorical
from keras.models import Sequential
from keras.initializers import RandomUniform
from keras.layers import Dense, Activation
from keras.optimizers import SGD

TRN_SIZE = 60000
TST_SIZE = 10000
BATCH_SIZE = 128
CLASSES = 10
EPOCHS = 5
IMAGE_SIZE = 28

(trn_images, trn_labels), (tst_images, tst_labels) = mnist.load_data()

trn_images = trn_images.reshape(TRN_SIZE, IMAGE_SIZE**2)
tst_images = tst_images.reshape(TST_SIZE, IMAGE_SIZE**2)
tst_labels = to_categorical(tst_labels, CLASSES)
trn_labels = to_categorical(trn_labels, CLASSES)
trn_images = trn_images.astype("float32")
tst_images = tst_images.astype("float32")
trn_images /= 255
tst_images /= 255

ru = RandomUniform(minval=-0.1, maxval=0.1, seed=None)
model = Sequential()
model.add(
    Dense(256,
          input_shape=(IMAGE_SIZE**2, ),
          kernel_initializer=ru,
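The listing breaks off inside the first Dense layer. A hedged sketch of how such a model is typically finished and trained with the constants defined above; it rebuilds the model from scratch, and the hidden activation and output layer are assumptions:

# Hedged sketch, not the original continuation.
model = Sequential()
model.add(Dense(256, input_shape=(IMAGE_SIZE**2,), kernel_initializer=ru,
                activation='relu'))
model.add(Dense(CLASSES, kernel_initializer=ru, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer=SGD(),
              metrics=['accuracy'])
model.fit(trn_images, trn_labels, batch_size=BATCH_SIZE, epochs=EPOCHS,
          validation_data=(tst_images, tst_labels))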
Beispiel #53
0
import numpy as np

from keras.datasets import mnist
from keras.layers.wrappers import TimeDistributed
from keras.layers.convolutional import Convolution2D
from keras.layers.core import Dense, Flatten
from keras.layers.recurrent import GRU
from keras.utils.np_utils import to_categorical

# for reproducibility
np.random.seed(2017)

SEQ_LENGTH = 8
EPOCHS = 30
EXAMPLES_PER_EPOCH = 500
BATCH_SIZE = 32

# the data, shuffled and split between train and test sets
(X_train_raw, y_train_temp), (X_test_raw, y_test_temp) = mnist.load_data()

# basic image processing
# convert images to float
X_train_raw = X_train_raw.astype('float32')
X_test_raw = X_test_raw.astype('float32')
X_train_raw /= 255
X_test_raw /= 255

# encode output
y_train_raw = to_categorical(y_train_temp, 10)
y_test_raw = to_categorical(y_test_temp, 10)

train_size, height, width = X_train_raw.shape
depth = 1
Beispiel #54
0
def mnist_data():
    data = mnist.load_data()
    return [mnist_process(*d) for d in data]
Beispiel #55
0
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten

pixel_width = 28
pixel_height = 28
batch_size = 32
epochs = 10

num_of_classes = 10

(features_train, labels_train), (features_test,
                                 labels_test) = mnist.load_data()

features_train = features_train.reshape(features_train.shape[0], pixel_width,
                                        pixel_height, 1)
features_test = features_test.reshape(features_test.shape[0], pixel_width,
                                      pixel_height, 1)

input_shape = (pixel_width, pixel_height, 1)

features_train = features_train.astype('float32')
features_test = features_test.astype('float32')

features_train /= 255
features_test /= 255

labels_train = keras.utils.to_categorical(labels_train, num_of_classes)
labels_test = keras.utils.to_categorical(labels_test, num_of_classes)
Beispiel #56
0
import numpy as np
import tensorflowjs as tfjs
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.datasets import mnist
BATCH_SIZE = 64
EPOCHS = 6

IMAGE_LENGTH = 28
INPUT_NODE = 784
OUTPUT_NODE = 10
NUM_CHANNELS = 1
LEARNING_RATE = 0.15

# data, split between train and validate sets
(x_train, y_train), (x_eval, y_eval) = mnist.load_data("mnist")
x_train = x_train.reshape(x_train.shape[0], INPUT_NODE)
x_eval = x_eval.reshape(x_eval.shape[0], INPUT_NODE)
x_train = x_train.astype(np.float32) / 255 - 0.5
x_eval = x_eval.astype(np.float32) / 255 - 0.5

# convert class vector to binary class matrices (one-hot representation)
y_train = keras.utils.to_categorical(y_train, OUTPUT_NODE)
y_eval = keras.utils.to_categorical(y_eval, OUTPUT_NODE)

model = Sequential()


def init(num, size):
    model.add(Dense(input_shape=(INPUT_NODE, ), units=size, activation="relu"))
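The helper stops after adding a single hidden layer. A hedged sketch of how the script might finish, compiling with the declared learning rate and exporting for TensorFlow.js via the imported tensorflowjs package; the layer choices and output directory are assumptions:

# Hedged sketch (assumed continuation, not the original code).
init(0, 512)  # one hidden layer; the 'num' argument is unused in the excerpt
model.add(Dense(units=OUTPUT_NODE, activation='softmax'))
model.compile(loss='categorical_crossentropy',
              optimizer=keras.optimizers.SGD(lr=LEARNING_RATE),
              metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS,
          validation_data=(x_eval, y_eval))
tfjs.converters.save_keras_model(model, 'tfjs_model')  # assumed output directory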
Beispiel #57
0
def main(args):
    # parameters
    batch_size = 128
    epochs = 30
    learning_rate = 5e-4
    train_size_data = 2000

    # load data
    (X_train,
     y_train), (X_valid,
                y_valid) = mnist.load_data()  # (60000, 28, 28) (10000,)

    if K.image_data_format() == "channels_first":
        # X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
        X_train = X_train.reshape(X_train.shape[0], 1 * 28 * 28)
    else:
        # X_train = X_train.reshape(X_train.shape[0], 28, 28, 1)
        X_train = X_train.reshape(X_train.shape[0], 1 * 28 * 28)

    X_train = X_train.astype("float32") / 255.
    y_train = tf.keras.utils.to_categorical(y_train, 10)

    # random data order
    random_order = np.arange(len(X_train))
    np.random.shuffle(random_order)
    X_train = X_train[random_order][0:train_size_data]
    y_train = y_train[random_order][0:train_size_data]
    print("x_train shape:", X_train.shape)
    print(X_train.shape[0], "train samples")

    # define graph
    with tf.name_scope("Inputs"):
        X_placeholder = tf.placeholder(tf.float32, [None, 28 * 28 * 1])
        Y_placeholder = tf.placeholder(tf.float32, [None, 10])

    predict = dnn_medium_model(X_placeholder)

    with tf.name_scope("Cross_entropy_loss"):
        cross_entropy = tf.reduce_mean(-tf.reduce_sum(
            Y_placeholder * tf.log(predict), reduction_indices=[1]))
        tf.summary.scalar("cross_entropy",
                          cross_entropy)  # In tensorboard event

    with tf.name_scope("train"):
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(
            cross_entropy)

    # train
    with tf.Session() as sess:
        # Merge all the summaries and write them out to ./logs/Tensorboard/train/
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(
            os.path.join(args.LOG_DIR_PATH,
                         "Visulize_grads/Tensorboard/train/"), sess.graph)

        sess.run(tf.global_variables_initializer())
        nbrof_batch = int(len(X_train) / batch_size)

        cross_entropy_steps = []
        accuracy_steps = []
        grads_steps = []
        for e in range(epochs):
            for i in range(nbrof_batch):
                step_start = time.time()

                batch_x, batch_y = X_train[i * batch_size:(i + 1) *
                                           batch_size], y_train[i * batch_size:
                                                                (i + 1) *
                                                                batch_size]
                feed_dict = {X_placeholder: batch_x, Y_placeholder: batch_y}

                _, summary, cross_entropy_ = sess.run(
                    [train_step, merged, cross_entropy], feed_dict=feed_dict)

                y_predict_ = sess.run(predict, feed_dict=feed_dict)
                correct_prediction_ = tf.equal(tf.argmax(y_predict_, 1),
                                               tf.argmax(batch_y, 1))
                tensor_accuracy = tf.reduce_mean(
                    tf.cast(correct_prediction_, tf.float32))
                accuracy = sess.run(tensor_accuracy, feed_dict=feed_dict)

                train_writer.add_summary(summary, e)

                grads_total = 0
                for variable in tf.trainable_variables():
                    [grad] = sess.run(tf.gradients(ys=cross_entropy,
                                                   xs=variable),
                                      feed_dict=feed_dict)  # [grad] unpacks the single array from the returned list
                    grad_norm = np.sum(grad**2)
                    grads_total += grad_norm
                grads_total = grads_total**0.5

                step_end = time.time()
                if i % 10 == 0:
                    print(
                        "time:{} sec, epochs:{}, steps:{}, loss={}, accuracy={}, grads_norm={}"
                        .format(step_end - step_start, e,
                                (e * nbrof_batch) + i, cross_entropy_,
                                accuracy, grads_total))

                cross_entropy_steps.append(cross_entropy_)
                accuracy_steps.append(accuracy)
                grads_steps.append(grads_total)

        # save loss process
        cross_entropy_steps = np.asarray(cross_entropy_steps)
        loss_save_path = os.path.join(args.LOG_DIR_PATH, "Visulize_grads",
                                      "loss.npy")
        if not os.path.exists(os.path.dirname(loss_save_path)):
            os.makedirs(os.path.dirname(loss_save_path))
        np.save(loss_save_path, cross_entropy_steps)
        print("Loss process to path: ", loss_save_path)

        # save accuracy process
        accuracy_steps = np.asarray(accuracy_steps)
        accuracy_save_path = os.path.join(args.LOG_DIR_PATH, "Visulize_grads",
                                          "accuracy.npy")
        if not os.path.exists(os.path.dirname(accuracy_save_path)):
            os.makedirs(os.path.dirname(accuracy_save_path))
        np.save(accuracy_save_path, accuracy_steps)
        print("Accuracy process to path: ", accuracy_save_path)

        # save grads process
        grads_steps = np.asarray(grads_steps)
        grads_save_path = os.path.join(args.LOG_DIR_PATH, "Visulize_grads",
                                       "grads.npy")
        if not os.path.exists(os.path.dirname(grads_save_path)):
            os.makedirs(os.path.dirname(grads_save_path))
        np.save(grads_save_path, grads_steps)
        print("Accuracy process to path: ", grads_save_path)

        # save model
        saver = tf.train.Saver()
        save_model_dir_path = os.path.join(args.SAVE_MODLE_DIR_PATH,
                                           "Visulize_grads", "model")
        if not os.path.exists(os.path.dirname(save_model_dir_path)):
            os.makedirs(os.path.dirname(save_model_dir_path))
        save_path = saver.save(sess, save_model_dir_path)
        print("Save model to path: {}".format(
            os.path.dirname(save_model_dir_path)))
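main() expects an args namespace exposing LOG_DIR_PATH and SAVE_MODLE_DIR_PATH. A hedged sketch of the kind of entry point that could drive it; the flag names mirror the attributes used above, the defaults are assumptions:

# Hedged sketch of a possible entry point (not shown in the original excerpt).
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--LOG_DIR_PATH", default="./logs")
    parser.add_argument("--SAVE_MODLE_DIR_PATH", default="./checkpoints")
    main(parser.parse_args())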
Beispiel #58
0
def load_mnist(max_shift):
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
    x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')

    y_train = utils.to_categorical(y_train, 10)
    y_test = utils.to_categorical(y_test, 10)

    # np.random.shuffle cannot shuffle a range object in Python 3; use a permutation instead
    idx_tr = np.random.permutation(len(x_train))
    X_train = np.zeros(x_train.shape)
    X_train0 = np.zeros(x_train.shape)
    X_train1 = np.zeros(x_train.shape)
    for i in range(x_train.shape[0]):
        shifti = np.random.randint(-max_shift, max_shift, size=[2, 2])
        x_train_0 = shift_2d(x_train[i, :, :, 0], shifti[0], max_shift)
        x_train_1_bis = shift_2d(x_train[idx_tr[i], :, :, 0], shifti[1],
                                 max_shift)
        x_train_1 = x_train[idx_tr[i], :, :, 0]
        X_train[i] = np.minimum(
            np.expand_dims(np.add(x_train_0, x_train_1), axis=2), 255)
        X_train0[i] = np.expand_dims(x_train_0, -1)
        X_train1[i] = np.expand_dims(x_train_1, -1)
    Y_train1 = np.vstack([y_train.argmax(1), y_train[idx_tr].argmax(1)]).T
    X_train = X_train[Y_train1[:, 0] != Y_train1[:, 1]]
    X_train0 = X_train0[Y_train1[:, 0] != Y_train1[:, 1]]
    X_train1 = X_train1[Y_train1[:, 0] != Y_train1[:, 1]]
    Y_train1 = Y_train1[Y_train1[:, 0] != Y_train1[:, 1]]
    Y_train = K.eval(K.one_hot(Y_train1, 10))

    idx_te = np.random.permutation(len(x_test))
    X_test = np.zeros(x_test.shape)
    X_test0 = np.zeros(x_test.shape)
    X_test1 = np.zeros(x_test.shape)
    for i in range(x_test.shape[0]):
        shifti = np.random.randint(-max_shift, max_shift, size=[2, 2])
        x_test_0 = shift_2d(x_test[i, :, :, 0], shifti[0], max_shift)
        x_test_1_bis = shift_2d(x_test[idx_te[i], :, :, 0], shifti[1],
                                max_shift)
        x_test_1 = x_test[idx_te[i], :, :, 0]
        X_test[i] = np.minimum(
            np.expand_dims(np.add(x_test_0, x_test_1), axis=2), 255)
        X_test0[i] = np.expand_dims(x_test_0, -1)
        X_test1[i] = np.expand_dims(x_test_1, -1)
    Y_test1 = np.vstack([y_test.argmax(1), y_test[idx_te].argmax(1)]).T
    X_test = X_test[Y_test1[:, 0] != Y_test1[:, 1]]
    X_test0 = X_test0[Y_test1[:, 0] != Y_test1[:, 1]]
    X_test1 = X_test1[Y_test1[:, 0] != Y_test1[:, 1]]
    Y_test1 = Y_test1[Y_test1[:, 0] != Y_test1[:, 1]]
    Y_test = K.eval(K.one_hot(Y_test1, 10))

    X_train /= 255
    X_test /= 255
    X_train0 /= 255
    X_train1 /= 255
    X_test0 /= 255
    X_test1 /= 255
    return (X_train, Y_train), (X_test,
                                Y_test), (X_train0,
                                          X_train1), (X_test0,
                                                      X_test1), (Y_train1,
                                                                 Y_test1)
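A brief usage sketch for load_mnist; the shift amount is an arbitrary example value:

# Hedged usage sketch for the loader above.
(X_tr, Y_tr), (X_te, Y_te), (X_tr0, X_tr1), (X_te0, X_te1), (Y_tr1, Y_te1) = load_mnist(max_shift=2)
print(X_tr.shape, Y_tr.shape)  # overlapping-digit images and their label pairs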
Beispiel #59
0
def mnist_data():
    (xtrain, ytrain), (xtest, ytest) = mnist.load_data()
    return mnist_process(xtrain), mnist_process(xtest)
Beispiel #60
0
def LoadData():
    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = mnist.load_data()

    return X_train, y_train, X_test, y_test
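A matching usage sketch for the plain loader; the flattening and scaling below are assumptions, not part of the original function:

# Hedged usage sketch.
X_train, y_train, X_test, y_test = LoadData()
X_train = X_train.reshape(-1, 28 * 28).astype('float32') / 255
X_test = X_test.reshape(-1, 28 * 28).astype('float32') / 255
print(X_train.shape, X_test.shape)  # (60000, 784) (10000, 784)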