def linear_regression(a=1.0, b=0.0):
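    # sample 200 points of the line y = a*x + b and split 80/20 into train/test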
    X = np.linspace(-100, 100, 200)
    X = X.reshape((-1, 1))
    [train_x, test_x] = split_data(X, ratio=0.8, random=True)
    train_y = a * train_x + b
    test_y = a * test_x + b

    i = Input(1)
    x = Dense(1)(i)

    # define trainer
    trainer = Trainer(loss='mse',
                      optimizer=Adam(learning_rate=0.2),
                      batch_size=50,
                      epochs=50)

    # create model
    model = Sequential(i, x, trainer)

    model.summary()

    # training process
    model.fit(train_x, train_y)

    # predict
    y_hat = model.predict(test_x)
    plt.plot(test_x, test_y, 'b')
    plt.plot(test_x, y_hat, 'r')
    plt.show()
Example #2
def main():
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    print('Imported MNIST data: training input %s and training labels %s.' % (
        x_train.shape, y_train.shape))
    print('Imported MNIST data: test input %s and test labels %s.' % (
        x_test.shape, y_test.shape))

    N, H, W = x_train.shape
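    # flatten each 28x28 image into a 784-vector and scale pixels to [0, 1]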
    x = x_train.reshape((N, H * W)).astype('float') / 255
    y = to_categorical(y_train, num_classes=10)

    model = Sequential()
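    # 784 -> 300 -> 100 -> 10 MLP (weight_scale presumably scales the random weight init)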
    model.add(Dense(), ReLU(), layer_dim=(28 * 28, 300), weight_scale=1e-2)
    model.add(Dense(), ReLU(), layer_dim=(300, 100), weight_scale=1e-2)
    model.add(Dense(), Softmax(), layer_dim=(100, 10), weight_scale=1e-2)

    model.compile(optimizer=GradientDescent(learning_rate=1e-2),
                  loss_func=categorical_cross_entropy)
    model.fit(x, y, epochs=10, batch_size=50, verbose=False)

    N, H, W = x_test.shape
    x = x_test.reshape((N, H * W)).astype('float') / 255
    y = to_categorical(y_test, num_classes=10)

    model.evaluate(x, y)
Example #3
def linear_classification(a=1.0, b=0.0, graph=False):

    # prepare data
    x = np.linspace(-100, 100, 200)
    y = a * x + b
    X = np.array(list(zip(x, y))) + np.random.randn(200, 2) * 100
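    # label is 1 where the noisy point sits below the line (a*x + b > y), then one-hot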
    Y = to_one_hot(np.where(a * X[:, 0] + b > X[:, 1], 1, 0))
    (train_x, train_y), (test_x, test_y) = split_data(X,
                                                      Y,
                                                      ratio=0.8,
                                                      random=True)

    # build simple FNN
    i = Input(2)
    x = Dense(2, activation='softmax')(i)

    # define trainer
    trainer = Trainer(loss='cross_entropy',
                      optimizer=Adam(learning_rate=0.05),
                      batch_size=50,
                      epochs=50,
                      metrics=['accuracy'])

    # create model
    model = Sequential(i, x, trainer)

    model.summary()

    # training process
    model.fit(train_x, train_y)
    print(model.evaluate(test_x, test_y))

    if graph:
        plt.plot(model.history['loss'])
        plt.show()

        # predict
        y_hat = model.predict(test_x)
        y_hat = np.argmax(y_hat, axis=1)
        simple_plot(test_x, y_hat, a, b)
Example #4
def binary_classification():
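  # helper: split ARFF rows into normalized 2-D features and 0/1 labels (b'black' -> 0)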
  def separate_label(data):
    X = normalize(data[:, :2].astype('float32'))
    Y = np.where(data[:, 2] == b'black', 0, 1)
    return X, Y

  # prepare train data
  data_dir = "data/examples/binary_classification"
  train_data_path = os.path.join(data_dir, 'training.arff')
  train_data = load_arff(train_data_path)
  train_x, train_y = separate_label(train_data)
  train_y = to_one_hot(train_y)

  # build simple FNN
  i = Input(2)
  x = Dense(30, activation='relu')(i)
  x = Dense(30, activation='relu')(x)
  x = Dense(2, activation='softmax')(x)

  # define trainer
  trainer = Trainer(loss='cross_entropy',
                    optimizer=Adam(clipvalue=1.0),
                    batch_size=256,
                    epochs=500,
                    metrics=['accuracy'])

  # create model
  model = Sequential(i, x, trainer)

  model.summary()

  # training process
  model.fit(train_x, train_y)

  plt.plot(range(len(model.history['loss'])), model.history['loss'])
  plt.show()

  # predict
  test_data_path = os.path.join(data_dir, 'test.arff')
  test_data = load_arff(test_data_path)
  test_x, _ = separate_label(test_data)

  y_hat = model.predict(test_x)
  simple_plot(test_x, y_hat)
Example #5
def universal_approximation(f, x):
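    # approximate an arbitrary target function f with a single hidden layer of 50 ReLU units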
    [train_x, test_x] = split_data(x, ratio=0.8, random=True)
    train_y = f(train_x)

    test_x = np.sort(test_x, axis=0)
    test_y = f(test_x)

    # build simple FNN
    i = Input(1)
    x = Dense(50, activation='relu')(i)
    x = Dense(1)(x)

    # define trainer
    schedule = ExponentialDecay(initial_learning_rate=0.01, decay_rate=0.75)
    trainer = Trainer(loss='mse',
                      optimizer=Adam(learning_rate=schedule),
                      batch_size=50,
                      epochs=750)

    # create model
    model = Sequential(i, x, trainer)

    model.summary()

    # training process
    start = time.time()
    model.fit(train_x, train_y)
    print(time.time() - start)

    plt.plot(range(len(model.history['loss'])), model.history['loss'])
    plt.show()

    # predict
    y_hat = model.predict(test_x)
    plt.plot(test_x, test_y, 'b-', label='original')
    plt.plot(test_x, y_hat, 'r-', label='predicted')
    plt.legend()
    plt.show()
Example #6
model.add(RepeatVector(DIGITS + 1))
# The decoder RNN could be multiple layers stacked or a single layer
for _ in range(LAYERS):
    model.add(RNN(HIDDEN_SIZE, return_sequences=True))

# For each step of the output sequence, decide which character should be chosen
model.add(TimeDistributed(Dense(convertor.get_dim())))
model.add(Activation("softmax"))

model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])

# Train the model each generation and show predictions against the validation dataset
for iteration in range(1, 200):
    print()
    print("-" * 50)
    print("Iteration", iteration)
    model.fit(X_train, y_train, batch_size=BATCH_SIZE, epochs=1, validation_data=(X_val, y_val))
    ###
    # Select 10 samples from the validation set at random so we can visualize errors
    for i in range(10):
        ind = np.random.randint(0, len(X_val))
        rowX, rowy = X_val[np.array([ind])], y_val[np.array([ind])]
        preds = np.argmax(model.predict(rowX, verbose=0), axis=-1)
        q = convertor.decode(rowX[0], invert=True)
        correct = convertor.decode(rowy[0])
        guess = convertor.decode(preds[0], calc_argmax=False)
        print("Q", q)
        print("T", correct)
        print(colors.ok + "☑" + colors.close if correct == guess else colors.fail + "☒" + colors.close, guess)
        print("---")
Example #7
train_y = convert_to_one_hot(train_y, num_classes)
test_x = np.reshape(test_x, (len(test_x), 1, img_rows, img_cols)).astype(skml_config.config.i_type)
test_y = convert_to_one_hot(test_y, num_classes)

train_x, valid_x, train_y, valid_y = train_test_split(train_x, train_y)


filters = 64
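# two Conv/BatchNorm/ReLU blocks; global average pooling replaces Flatten before the classifier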
model = Sequential()
model.add(Convolution(filters, 3, input_shape=input_shape))
model.add(BatchNormalization())
model.add(ReLU())
model.add(MaxPooling(2))
model.add(Convolution(filters, 3))
model.add(BatchNormalization())
model.add(ReLU())
model.add(GlobalAveragePooling())
model.add(Affine(num_classes))
model.compile(SoftmaxCrossEntropy(), Adam())

train_batch_size = 100
valid_batch_size = 1
print("Training started: {}".format(datetime.now().strftime("%Y/%m/%d %H:%M")))
model.fit(train_x, train_y, train_batch_size, 20, validation_data=(valid_batch_size, valid_x, valid_y), validation_steps=1)
print("Training finished: {}".format(datetime.now().strftime("%Y/%m/%d %H:%M")))

model.save(save_path)

loss, acc = model.evaluate(test_x, test_y)
print("Test loss: {}".format(loss))
print("Test acc: {}".format(acc))
Example #8
with gzip.open(PATH.as_posix(), "rb") as f:
    ((x_train, y_train), (x_test, y_test), _) = pickle.load(f,
                                                            encoding="latin-1")

im_size = (28, 28)
input_dim = np.prod(im_size)

# %% Auto Encoder
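# symmetric 784-30-10-30-784 bottleneck trained to reproduce its input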
autoencoder = Sequential(input_dim,
                         30,
                         10,
                         30,
                         input_dim,
                         activation='logistic')
autoencoder.fit(x_train, x_train, epochs=10)
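# sample 8 random test digits and reconstruct them with the trained autoencoder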

x_sample = x_test[np.random.randint(1000, size=8)]
output = autoencoder.forward(x_sample)

for i in range(8):
    # plot original image
    ax = plt.subplot(8, 2, 2 * i + 1)
    ax.axis('off')
    ax.imshow(x_sample[i].reshape(im_size), cmap='gray')

    # plot reconstructed image
    ax = plt.subplot(8, 2, 2 * i + 2)
    ax.axis('off')
    ax.imshow(output[i].reshape(im_size), cmap='gray')
Example #9
y = np.array([[1],
              [0],
              [0],
              [1]])

print("Prediction")
p = model.predict(X)
print(p)
print("Error")
print(p-y)

loss_function = SquaredError()
custom_loss = CustomLoss()
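# note: fit below uses custom_loss; loss_function is left unused here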

print("Training")
loss_history = model.fit(X, y, epochs=100, batch_size=4, steps_per_epoch=1000, halt=False, loss=custom_loss)
print("Prediction")
p = model.predict(X)
print(p)
print("Error")
print(p-y)
print("Weights in first dense layer")
print(model.layers[0].weights)
print("Weights in second dense layer")
print(model.layers[2].weights)

plt.plot(np.arange(0, 100), loss_history[:, 0])
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.show()
Example #10
X = np.array([[0, 0],
              [0, 1],
              [1, 0],
              [1, 1]])
y = np.array([[0, 1],
              [1, 0],
              [1, 0],
              [1, 0]])

print("Prediction")
p = model.predict(X)
print(p)
print("Error")
print(p-y)

loss_function = SquaredError()

print("Training")
loss_history = model.fit(X, y, epochs=100, batch_size=2, steps_per_epoch=1000, halt=False, loss=loss_function)
print("Prediction")
p = model.predict(X)
print(p)
print("Error")
print(p-y)
print("Weights")
print(model.layers[0].weights)

plt.plot(np.arange(0, 100), loss_history[:, 0])
plt.plot(np.arange(0, 100), loss_history[:, 1])
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.show()
Example #11
#%%
import init

from models import Sequential, Dense
from utils import *

xtr, ytr = np.loadtxt('data/xt'), np.loadtxt('data/yt')
nn = Sequential(2, Dense(8, dropout=0.01), 1, activation='tanh')
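# 2 -> 8 -> 1 tanh network with light dropout on the hidden layer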

#%%
nn.fit(xtr, ytr, lr=5e-3, epochs=50, callbacks=[train_anim(xtr, ytr)])
Example #12
    """
    mndata = MNIST('./samples')
    images, labels = mndata.load_training()

    vocab = set()
    for label in labels:
        vocab.add(label)
    vocab = sorted(vocab)
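    # one-hot encode each label against the sorted label vocabulary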
    Y = []
    for label in labels:
        one_hot = [0] * len(vocab)
        one_hot[label] = 1
        Y.append(one_hot)
    X = np.array(images).T / 255
    Y = np.array(Y).T
    return (X, Y)


X, Y = parse_data()

model = Sequential()
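# three wide 1024-unit hidden layers; sigmoid output to match the one-hot targets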
model.add(Dense(1024, n_inputs=X.shape[0]))
model.add(Dense(1024))
model.add(Dense(1024))
model.add(Dense(Y.shape[0], activation='sigmoid'))
model.compile()

# model = pickle.load(open('model.p', 'rb'))

model.fit(X, Y, 1, learning_rate=0.003)
Example #13
import init
from models import Sequential
from layers import Dense
from utils import onehot
import numpy as np
from tensorflow.keras.datasets import mnist

(x_tr, y_tr), (x_ts, y_ts) = mnist.load_data()
im_shape = x_tr[0].shape
im_size = np.prod(im_shape)

def accuracy(model):
    return np.mean(np.argmax(model(x_ts), axis=-1) == y_ts)

nn = Sequential(im_size, Dense(30, activation='tanh'), Dense(10, activation='logistic'))
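# flatten the images to 784-vectors to match the Dense input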
x_tr = x_tr.reshape(-1, im_size)
x_ts = x_ts.reshape(-1, im_size)
nn.fit(x_tr, onehot(y_tr, 10), epochs=10)

print(accuracy(nn))
Example #14
test_labels_one_hot = data[5]

model = Sequential()
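# 784 -> 16 -> 10 sigmoid MLP with truncated-normal weights and zero biases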
model.add(
    Dense(16, 784, kernel_initializer=truncated_normal,
          bias_initializer=zeros))
model.add(Sigmoid())
model.add(
    Dense(10, 16, kernel_initializer=truncated_normal, bias_initializer=zeros))
model.add(Sigmoid())

loss = SquaredError()

loss_history = model.fit(train_imgs,
                         train_labels_one_hot,
                         batch_size=32,
                         epochs=10,
                         loss=loss,
                         halt=False)
pred = model.predict(test_imgs)
pred_labels = pred.argmax(1)
print("MSE", loss.evaluate(pred, test_labels_one_hot).mean(0))
print("Percentage correct", np.mean(pred_labels == test_labels) * 100)
print("Prediction for first 5 images")
print(pred[0:5, :].argmax(1))
print("True labels")
print(test_labels[0:5])

plt.plot(np.arange(0, 10), loss_history.mean(1))
plt.title("Graph of mean loss over all one-hot outputs")
plt.xlabel("Epoch")
plt.ylabel("Mean loss")
Example #15
x, inputs_shape, t, outputs_shape = create_data_numeric(3)
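# create_data_numeric presumably returns synthetic inputs x, targets t, and their dimensions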
loss = "mean_squared_error"
metric = "rmse"
last_layer_activation = "identity"

seed = 15
model = Sequential(seed=seed)
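# two 10-unit ReLU hidden layers and a linear (identity) output for regression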
model.add(Dense(10, activation="relu", inputs_shape=inputs_shape))
model.add(Dense(10, activation="relu"))
model.add(Dense(outputs_shape, activation=last_layer_activation))
model.compile(loss=loss, optimizer=Adam(), metric=metric)

train_x, test_x, train_t, test_t = train_test_split(x,
                                                    t,
                                                    test_size=0.3,
                                                    random_state=seed)
model.fit(train_x, train_t, test_x, test_t, epochs=1000, batch_size=50)

# plot the loss curves
import matplotlib.pyplot as plt

plt.plot(model.history_train[0])
plt.plot(model.history_test[0])
plt.title("loss")
plt.legend(["train", "test"])
plt.show()

plt.plot(model.history_train[1])
plt.plot(model.history_test[1])
plt.title(model.metric_name)
plt.legend(["train", "test"])
plt.show()
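
# separate snippet from the same example: a small CNN on image data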
model.add(Conv2D(32, (5, 5), activation="relu",
                 inputs_shape=x_train.shape[1:]))
model.add(Pooling((2, 2)))
model.add(Conv2D(16, (3, 3), activation="relu"))
model.add(Pooling((2, 2)))
model.add(Dense(10, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(10, activation="softmax"))
model.compile(loss="categorical_crossentropy",
              optimizer=Adam(),
              metric="accuracy")

model.fit(x_train=x_train,
          t_train=y_train,
          x_test=x_test,
          t_test=y_test,
          batch_size=128,
          epochs=10,
          output_num=1)

# plot the loss curves
plt.plot(model.history_train[0])
plt.plot(model.history_test[0])
plt.title("loss")
plt.legend(["train", "test"])
plt.show()

plt.plot(model.history_train[1])
plt.plot(model.history_test[1])
plt.title(model.metric_name)
plt.legend(["train", "test"])
plt.show()