Example 1
import numpy as np
import matplotlib.pyplot as plt
from deepL_module.datasets.mnist import load_mnist
from deepL_module.nn.multi_layer_nn import Neural_net
from deepL_module.nn.optimizers import *
from deepL_module.base import *
import copy

# === loading data ===
(X_train, train_t), (X_test, test_t) = load_mnist(normalize=True)

# data reduction
X_train = X_train[:1000]
train_t = train_t[:1000]
train_t = to_categorical(train_t)

# setting parameters
max_epochs = 20
train_size = X_train.shape[0]
batch_size = 200
learning_rate = 0.01
scale = 0.005
iter_per_epoch = max(train_size / batch_size, 1)
max_iter = int(max_epochs * iter_per_epoch)

# constructing model
model = Neural_net(n_input=784,
                   n_hidden=[50, 100, 70, 100],
                   n_output=10,
                   w_std=scale,
                   batch_norm=False)
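
# A minimal sketch of the mini-batch loop such a script typically continues
# with. `Adam` is imported above from deepL_module.nn.optimizers; the
# model.gradient() / optimizer.update() calls are ASSUMED to follow the
# usual deepL_module API and are not confirmed by this listing.
optimizer = Adam(lr=learning_rate)

for it in range(max_iter):
    batch_mask = np.random.choice(train_size, batch_size)  # sample a mini-batch
    x_batch = X_train[batch_mask]
    t_batch = train_t[batch_mask]
    grads = model.gradient(x_batch, t_batch)  # hypothetical: backprop gradients
    optimizer.update(model.params, grads)     # hypothetical: parameter update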
Example 2
import itertools
import numpy as np
import matplotlib.pyplot as plt
from deepL_module.datasets.mnist import load_mnist


def get_energy(loc):
    # Header and neighbour sum reconstructed (the original listing starts
    # mid-function): Del is assumed to be the sum of the four nearest
    # neighbours of the pixel at `loc` in the current output image.
    i, j = loc
    Del = X_out[i - 1, j] + X_out[i + 1, j] + X_out[i, j - 1] + X_out[i, j + 1]
    E = h * X_out.sum() - eta * (X_in * X_out).sum() - beta * Del * X_out[loc]
    return E


def ICM(loc):
    # Iterated Conditional Modes: try both spin states of the pixel at
    # `loc` and keep whichever gives the lower energy.
    global X_out
    E = []
    X_out[loc] = 1
    E.append(get_energy(loc))
    X_out[loc] = -1
    E.append(get_energy(loc))
    # E[0] is the energy with the pixel at +1 and E[1] with it at -1, so
    # 2 * argmax(E) - 1 flips the pixel away from the higher-energy state.
    X_out[loc] = 2 * np.argmax(E) - 1
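

# `addNoise` (used in step #1 below) is not defined in this listing; a
# minimal sketch, assuming it flips a random fraction of the {-1, +1} pixels:
def addNoise(image, ratio=0.1):
    noisy = image.copy()
    flip = np.random.rand(*image.shape) < ratio  # select ~10 % of the pixels
    noisy[flip] *= -1
    return noisy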


#1 Preparing image data
(data, _), _ = load_mnist(normalize=True)
origin = np.where(data[7] > 0.5, 1, -1).reshape(28, 28)
X_in = addNoise(origin)
X_out = X_in.copy()

#2 Setting Hamiltonian params
h = 0.2
beta = 0.5
eta = 2
#3 ICM algorithm
for _ in range(10):
    for loc in itertools.product(range(1, 27), range(1, 27)):
        ICM(loc)

#4 display images
padding = np.pad(np.ones((26, 26)), (1, 1), 'constant')
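
# A sketch of the display step the listing breaks off before; how `padding`
# (1 on the 26x26 interior, 0 on the border ICM never visits) is applied
# here is an assumption.
for k, (img, name) in enumerate(zip([origin, X_in, X_out * padding],
                                    ('original', 'noisy', 'denoised'))):
    ax = plt.subplot(1, 3, k + 1)
    ax.imshow(img, cmap='gray')
    ax.set_title(name)
    ax.axis('off')
plt.show()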
Example 3
import numpy as np
import matplotlib.pyplot as plt
from deepL_module.datasets.mnist import load_mnist
from deepL_module.nn.sequential import Sequential
from deepL_module.nn.optimizers import *
from deepL_module.base import *
from deepL_module.nn.layers import *

max_epochs = 50
'''#0 loading data '''
(X_train, train_t), (X_test, test_t) = load_mnist(normalize=True,
                                                  flatten=False)
X_train, train_t = X_train[:10000], train_t[:10000]
train_t = to_categorical(train_t)
'''#1 config for NN '''
model = Sequential()
model.add(Conv2D(16, (5, 5), input_shape=(1, 28, 28)))
model.add(Activation('relu'))
model.add(Maxpooling(pool_h=2, pool_w=2, stride=2))
model.add(Conv2D(16, (3, 3)))
model.add(Activation('relu'))
model.add(Maxpooling(pool_h=2, pool_w=2, stride=2))
model.add(Dense(100, activation='relu'))
model.add(Dense(10))
# optimizer
routine = Adam(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=routine)
'''#2 learning '''
import time
start = time.time()
hist = model.fit(X_train, train_t,
                 batch_size=128, epochs=max_epochs)  # keyword names assumed; the original listing is truncated here
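# Presumably the script then reports the time measured above:
print('elapsed : {:.2f} sec'.format(time.time() - start))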
Example 4
import numpy as np
import matplotlib.pyplot as plt
from deepL_module.datasets.mnist import load_mnist
from deepL_module.nn.sequential import Sequential
from deepL_module.nn.optimizers import *
from deepL_module.base import *
from deepL_module.nn.layers import *


def show_filters(filters, n_row=6, fig_num=1, title=''):
    # Header reconstructed from the call in step #2 below; the `n_row`
    # default of 6 is an assumption (30 kernels -> a 5 x 6 grid).
    filter_num = filters.shape[0]
    n_col = int(np.ceil(filter_num / n_row))
    fig = plt.figure(fig_num, figsize=(8, 5))

    for i in range(filter_num):
        ax = fig.add_subplot(n_col, n_row, i + 1)
        ax.imshow(filters[i, 0], cmap=plt.cm.gray_r, interpolation='nearest')
        ax.tick_params(bottom=False, left=False,
                       labelbottom=False, labelleft=False)
    fig.suptitle(title, fontsize=20)
    plt.show()


'''#0 loading data '''
(X_train, train_t), (X_test, test_t) = load_mnist(flatten=False)
X_train, train_t = X_train[:5000], train_t[:5000]
train_t = to_categorical(train_t)
'''#1 config for NN '''
model = Sequential(w_std=0.01)
model.add(Conv2D(30, (5, 5), input_shape=(1, 28, 28)))
model.add(Activation('relu'))
model.add(Maxpooling(pool_h=2, pool_w=2, stride=2))
model.add(Dense(100, activation='relu'))
model.add(Dense(10))

# optimizer setting
routine = Adam(lr=0.001)
model.compile(loss='categorical_crossentropy', optimizer=routine)
'''#2 visualizing initial params '''
show_filters(model.params['W1'], fig_num=1, title='before learning')
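
# A sketch of the steps the listing breaks off before: training, then
# re-plotting the same kernels (the arguments to fit are assumed).
hist = model.fit(X_train, train_t, batch_size=128, epochs=20)
show_filters(model.params['W1'], fig_num=2, title='after learning')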
Example 5
import numpy as np
from deepL_module.datasets.mnist import load_mnist


def fetch_mnist(fetch_nums: list):
    (x, y), (_, _) = load_mnist(normalize=True)
    X = np.array([x[np.where(y == i)[0][:200]] for i in fetch_nums])
    X = (1 - X).reshape(200 * len(fetch_nums), 784)  # binary inversion
    return (X > .5).astype(float)  # np.float was removed from modern NumPy
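
# Illustrative use: 400 binarized, colour-inverted images of the digits
# 0 and 1, shaped (400, 784).
X = fetch_mnist([0, 1])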