Exemplo n.º 1
0
from model import Sequential
from dqn import DQN
import numpy as np
from numpy.random import *
import gym
from copy import deepcopy

# --- DQN setup for CartPole-v0 (the training loop that follows this
# block is truncated in this view) ---

# Create the environment and grab the initial observation (4 features:
# cart position/velocity, pole angle/angular velocity).
env = gym.make("CartPole-v0")
obs = env.reset()

# Q-network: 4 inputs -> 400 ReLU units -> 2 action-values.
# NOTE(review): `Linear` is not imported by the visible import block —
# presumably brought in by `from model import ...` machinery; confirm.
model = Sequential()
model.add(Linear(4, 400, activation="relu", initialization="HeNormal"))
#model.add(Linear(400,400,activation="relu",initialization="HeNormal"))
#model.add(Linear(100,100,activation="relu",initialization="HeNormal"))
model.add(Linear(400, 2, initialization="zeros"))
model.compile(optimizer="MomentumSGD")
# Separate frozen copy used as the target network for stable Q targets.
target_model = deepcopy(model)
Memory = DQN()              # replay-memory / DQN helper (project-local)
initial_exploration = 100   # steps of pure exploration before learning starts
replay_size = 32            # minibatch size sampled from replay memory

epsilon = 0.3   # epsilon-greedy exploration probability
gamma = 0.95    # reward discount factor
time = 0        # NOTE: shadows the stdlib `time` module name
episode = 0
last_obs = deepcopy(obs)    # previous observation, used to build transitions

#ReplayMemory = [None for i in range(10**5)]
#m_size = 0
step = 0
while True:
Exemplo n.º 2
0
from model import Sequential
from dqn import DQN
import numpy as np
from numpy.random import *
import gym
from copy import deepcopy

# --- Second CartPole-v0 variant (training loop below is truncated in
# this view). epsilon is 0. here, i.e. a purely greedy policy —
# presumably an evaluation/inference run; confirm against the full file.

env = gym.make("CartPole-v0")
obs = env.reset()

# Q-network: 4 inputs -> 500 ReLU units -> 2 action-values, Adam optimizer.
# NOTE(review): `Linear` is not imported by the visible import block — confirm.
model = Sequential()
model.add(Linear(4,500,activation="relu",initialization="HeNormal"))
#model.add(Linear(200,200,activation="relu",initialization="HeNormal"))
#model.add(Linear(100,100,activation="relu",initialization="HeNormal"))
model.add(Linear(500,2,initialization="zeros"))
model.compile(optimizer="Adam")
Memory = DQN()              # replay-memory / DQN helper (project-local)

epsilon = 0.    # no exploration: actions are always greedy w.r.t. Q
gamma = 0.95    # reward discount factor
time = 0        # NOTE: shadows the stdlib `time` module name
episode = 0
last_obs = deepcopy(obs)    # previous observation, used to build transitions

#ReplayMemory = [None for i in range(10**5)]
#m_size = 0
while True:
    time += 1
    #env.render()
    Q = model(obs)
Exemplo n.º 3
0
    # Build a small CNN for 8x8 single-channel inputs with a single
    # sigmoid output unit, then train it with binary cross-entropy.
    # NOTE(review): `X`, `y`, `Conv2D`, `ReLU`, etc. are defined outside
    # this view (the enclosing function/module header is not visible).
    model = Sequential()
    # Conv stage 1: 3x3 kernels, 7 filters, no padding -> 6x6x7 feature maps.
    model.add(Conv2D,
              ksize=3,
              stride=1,
              activation=ReLU(),
              input_size=(8, 8, 1),
              filters=7,
              padding=0)
    # 2x2 max-pooling with stride 1.
    model.add(MaxPool2D, ksize=2, stride=1, padding=0)
    # Conv stage 2: 2x2 kernels, 5 filters.
    model.add(Conv2D,
              ksize=2,
              stride=1,
              activation=ReLU(),
              filters=5,
              padding=0)
    model.add(Flatten)
    # Single sigmoid unit -> probability for the binary target.
    model.add(Dense, units=1, activation=Sigmoid())
    model.summary()

    model.compile(BinaryCrossEntropy())

    # Report loss before and after training to show learning progress.
    print("Initial Loss", model.evaluate(X, y)[0])
    model.fit(X,
              y,
              n_epochs=100,
              batch_size=300,
              learning_rate=0.001,
              optimizer=GradientDescentOptimizer(),
              verbose=1)
    print("Final Loss", model.evaluate(X, y)[0])
Exemplo n.º 4
0
#!/usr/bin/python3
from keras.datasets import cifar10
from model import Sequential
from layers.pool import MaxPool
from one_hot import one_hot

# Load CIFAR-10 and scale raw pixel values from [0, 255] into [0, 1].
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

x_train = x_train / 255
x_test = x_test / 255
# NOTE(review): only the training labels are one-hot encoded; confirm
# y_test is intentionally left as integer labels.
y_train = one_hot(y_train)

model = Sequential(x_train, y_train)

# Three conv -> activation -> pool stages with 32, 32 and 64 filters.
for n_filters in (32, 32, 64):
    model.add_Conv(n_filters, (3, 3))
    model.add_Activation()
    model.add_Pool()

# Fully-connected head: 512 hidden units, 10-way output for the classes.
model.add_Dense(512)
model.out(10)

# NOTE(review): positional args look like (epochs, batch_size) — confirm
# against the project's Sequential.compile signature.
model.compile(1, 32)
Exemplo n.º 5
0
# Load the Fashion-MNIST dataset via the project-local loader.
X, y, X_test, y_test = create_data_mnist('fashion_mnist_images')

# Shuffle samples and labels together with one shared permutation.
perm = np.array(range(X.shape[0]))
np.random.shuffle(perm)
X, y = X[perm], y[perm]

# Flatten each image and map pixel values from [0, 255] into [-1, 1].
X = (X.reshape(len(X), -1).astype(np.float32) - 127.5) / 127.5
X_test = (X_test.reshape(len(X_test), -1).astype(np.float32) - 127.5) / 127.5

# Classifier: two hidden ReLU layers of 128 units, softmax over 10 classes.
model = Sequential()
model.add(Layer_Dense(X.shape[1], 128))
model.add(Activation_ReLU())
model.add(Layer_Dense(128, 128))
model.add(Activation_ReLU())
model.add(Layer_Dense(128, 10))
model.add(Activation_softmax())
model.compile(loss=Loss_CategoricalCrossentropy(),
              optimizer=Optimizer_Adam(decay=1e-4),
              metrics=Accuracy_Categorical())
# model.fit(X, y, validation_data = (X_test, y_test), epochs = 10 , batch_size = 128 , steps_per_epoch = 100 )
# model.save('fashion_mnist.model')
# model.evaluate(X_test, y_test)
# NOTE(review): `load` is called on an instance and its return value
# replaces the freshly built model — confirm it returns a new model
# rather than loading in place.
model = model.load('fashion_mnist.model')
model.evaluate(X_test, y_test)