Example #1
def build_model(parameters):
    # NOTE: Sequential, Dense and stochastic_gradient_descent come from the
    # surrounding project; their imports are not shown in this excerpt.
    # parameters[0]: list of hidden-layer sizes;
    # parameters[1], parameters[2], parameters[3]: learning rate, decay, momentum.
    model = Sequential(
        optimizer=stochastic_gradient_descent(learning_rate=parameters[1],
                                              decay=parameters[2],
                                              momentum=parameters[3]))
    for layer_units in parameters[0]:
        model.add(Dense(layer_units))
    model.add(Dense(1))  # single-unit output layer
    return model
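A hedged usage sketch: the call below assumes parameters packs the hidden-layer sizes first and the optimizer hyperparameters after them; the concrete values are illustrative, not taken from the original project.

# Hypothetical call: two hidden layers (64 and 32 units),
# learning rate 0.01, decay 1e-6, momentum 0.9.
model = build_model([[64, 32], 0.01, 1e-6, 0.9])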
Example #2
from links import *          # custom layer definitions (Linear, ...)
from model import Sequential
from dqn import DQN
import numpy as np
from numpy.random import *
import gym
from copy import deepcopy

env = gym.make("CartPole-v0")
obs = env.reset()

# Q-network: 4 observation inputs -> 400 ReLU units -> 2 action values
model = Sequential()
model.add(Linear(4, 400, activation="relu", initialization="HeNormal"))
#model.add(Linear(400, 400, activation="relu", initialization="HeNormal"))
#model.add(Linear(100, 100, activation="relu", initialization="HeNormal"))
model.add(Linear(400, 2, initialization="zeros"))
model.compile(optimizer="MomentumSGD")
target_model = deepcopy(model)   # frozen target network for stable Q-targets
Memory = DQN()                   # experience replay handler
initial_exploration = 100        # random steps before learning starts
replay_size = 32                 # minibatch size drawn from replay memory

epsilon = 0.3   # epsilon-greedy exploration rate
gamma = 0.95    # reward discount factor
time = 0
episode = 0
last_obs = deepcopy(obs)

#ReplayMemory = [None for i in range(10**5)]
#m_size = 0
step = 0
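The excerpt ends before the interaction loop. Below is a minimal epsilon-greedy step sketch, assuming the custom Sequential returns Q-values when called on a batch (the same model(inp) pattern appears in Example #5); it is illustrative, not part of the original source.

# Illustrative continuation: one epsilon-greedy environment step.
q_values = model(np.asarray(obs).reshape(1, 4))
if np.random.rand() < epsilon:
    action = env.action_space.sample()   # explore
else:
    action = int(np.argmax(q_values))    # exploit current Q-estimates
obs, reward, done, info = env.step(action)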
Example #3
import numpy as np
# NOTE: this excerpt omits its other imports; Sequential, Layer_Dense,
# Activation_ReLU, create_data_mnist, etc. come from the surrounding project.
# Create dataset
X, y, X_test, y_test = create_data_mnist('fashion_mnist_images')
# Shuffle the training dataset
keys = np.array(range(X.shape[0]))
np.random.shuffle(keys)
X = X[keys]
y = y[keys]
# Scale and reshape samples
X = (X.reshape(X.shape[0], -1).astype(np.float32) - 127.5) / 127.5
X_test = (X_test.reshape(X_test.shape[0], -1).astype(np.float32) -
          127.5) / 127.5
# Instantiate the model
model = Sequential()
# Add layers
model.add(Layer_Dense(X.shape[1], 128))
model.add(Activation_ReLU())
model.add(Layer_Dense(128, 128))
model.add(Activation_ReLU())
model.add(Layer_Dense(128, 10))
model.add(Activation_Softmax())
# Set loss, optimizer and accuracy objects
model.compile(loss=Loss_CategoricalCrossentropy(),
              optimizer=Optimizer_Adam(decay=1e-4),
              metrics=Accuracy_Categorical())
# model.fit(X, y, validation_data=(X_test, y_test), epochs=10, batch_size=128, steps_per_epoch=100)
# model.save('fashion_mnist.model')
# model.evaluate(X_test, y_test)
# Load the previously trained and saved model instead of retraining it
model = model.load('fashion_mnist.model')
model.evaluate(X_test, y_test)
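An inference step could follow the evaluation; whether the forward pass is spelled predict, forward, or a plain call depends on this particular Sequential implementation, so the method name below is an assumption.

# Hypothetical inference call; `predict` is assumed, not confirmed above.
confidences = model.predict(X_test[:5])
predictions = np.argmax(confidences, axis=1)
print(predictions, y_test[:5])   # predicted vs. true class indices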
Example #4
from layer import Dense, Conv2D, MaxPool2D, Flatten
from loss import BinaryCrossEntropy
from activation import Sigmoid, ReLU
from optimizer import GradientDescentOptimizer   # imported but unused in this excerpt
# NOTE: the original snippet does not show where Sequential is imported from;
# an import such as `from model import Sequential` is needed for it to run.

if __name__ == "__main__":
    from sklearn.datasets import load_digits
    data = load_digits(n_class=2)
    X = data['data'].reshape(-1, 8, 8, 1) / 16   # scale 0-16 pixel values to [0, 1]
    y = data['target'].reshape(-1, 1)

    model = Sequential()
    model.add(Conv2D,
              ksize=3,
              stride=1,
              activation=ReLU(),
              input_size=(8, 8, 1),
              filters=7,
              padding=0)
    model.add(MaxPool2D, ksize=2, stride=1, padding=0)
    model.add(Conv2D,
              ksize=2,
              stride=1,
              activation=ReLU(),
              filters=5,
              padding=0)
    model.add(Flatten)
    model.add(Dense, units=1, activation=Sigmoid())
    model.summary()

    model.compile(BinaryCrossEntropy())
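The example stops after compiling. A hedged continuation follows, where the fit/predict names and signatures are assumptions rather than part of the original snippet.

    model.fit(X, y, epochs=10)            # hypothetical training call
    predictions = model.predict(X) > 0.5  # threshold the sigmoid outputs
    print("training accuracy:", (predictions == y).mean())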
Example #5
from links import *
from model import Sequential
import numpy as np
from numpy.random import *
import chainer
from tqdm import tqdm

model = Sequential()
model.add(Linear(784, 500, activation="relu", initialization="HeNormal"))
model.add(Linear(500, 500, activation="relu", initialization="HeNormal"))
model.add(Linear(500, 10, activation="softmax"))
model.compile(optimizer="Adam")

train, test = chainer.datasets.get_mnist()
train_data, train_label = train._datasets
test_data, test_label = test._datasets
# print(train_label[0:100])

count = 0
count2 = 0
loss = 0
# 6,000,000 iterations = 100 passes over the 60,000 MNIST training images
for i in tqdm(range(6000000)):
    #if train_label[i%60000]>1:
    #    continue
    #count2 += 1
    #inp = randint(0,2,(1,2))
    inp = np.zeros((1, 784))
    inp[0] = train_data[i % 60000]   # one flattened 28x28 image per step
    y = model(inp)                   # forward pass through the network
    t = np.zeros((1, 10))            # target buffer for this sample
    #t[0][0] = train_label[i%60000]