Example #1
def __init__(self, name, color, g):
    self.type = "AI"
    self.name = name
    self.color = color
    self.ctr = 0
    self.g = g
    # Pre-allocate the replay buffer (MEM_SIZE is a module-level constant).
    self.replay_memory = [0] * MEM_SIZE
    self.reward = 0
    self.model = nn.create_model()
    # Resume from a previous run's weights if the checkpoint file exists.
    try:
        self.model.load_weights('my_model_weights.h5')
    except (OSError, IOError):
        pass
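
The load attempt above resumes training across runs. A matching save helper would use the standard Keras `save_weights` call with the same filename; a minimal sketch (the method name `save` is a placeholder, not from the source):

def save(self):
    # Persist weights so the next run's load_weights() call in __init__
    # finds them. 'save' is a hypothetical method name.
    self.model.save_weights('my_model_weights.h5')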
Example #2
from DataGenerator import import_data
from nn import create_model

import numpy as np
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
import matplotlib.pyplot as plt

if __name__ == '__main__':

    data = import_data()

    data = np.expand_dims(data, axis=-1)
    print(data.shape)
    model = create_model()
    batch_size = 16
    epochs = 100
    callbacks = [TensorBoard(), ModelCheckpoint('model.h5')]

    # model.load_weights('model.h5')

    # The inputs double as targets: the network is trained as an autoencoder.
    model.fit(data,
              data,
              batch_size=batch_size,
              epochs=epochs,
              callbacks=callbacks,
              verbose=1)
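
Since the model is fit with `data` as both input and target, this is autoencoder-style training. A hedged follow-up to sanity-check the result, assuming the checkpoint written by ModelCheckpoint above:

    # Hypothetical follow-up: reload the checkpoint and report the mean
    # reconstruction error over the training data.
    model.load_weights('model.h5')
    reconstruction = model.predict(data)
    print('mean reconstruction error:', np.mean((reconstruction - data) ** 2))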
Example #3
import nn

model = nn.create_model()
nn.train_model(model)
Example #4
        return np.clip(reward, 0, 1.) - 1/60  # clipped reward minus a small per-frame time penalty

processor = SMBProcessor()

class SMBCallback(Callback):
    def __init__(self, processor):
        super().__init__()
        self.processor = processor

    def on_episode_begin(self, episode, logs):
        # Reset the processor's frame counter at the start of each episode.
        self.processor.frame = 0


model = nn.create_model(input_shape, nb_actions)

memory = SequentialMemory(limit=1000000, window_length=WINDOW_LENGTH)

policy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps',
                              value_max=.5, value_min=.3,
                              # value_max=.5, value_min=.1,
                              value_test=.05, nb_steps=2000000)
                              # value_test=.05, nb_steps=5000000)

dqn = DQNAgent(model=model, nb_actions=nb_actions, policy=policy,
               memory=memory, processor=processor, nb_steps_warmup=50000,
               gamma=.99, target_model_update=10000, train_interval=4,
               delta_clip=1.)

dqn.compile(Adam(lr=.00025), metrics=['mae'])
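
For reference, `LinearAnnealedPolicy` interpolates `eps` linearly from `value_max` down to `value_min` over `nb_steps` and clamps it afterwards; a standalone sketch of that schedule with the values used above:

def eps_at(t, value_max=.5, value_min=.3, nb_steps=2000000):
    # Same linear rule keras-rl applies to the wrapped EpsGreedyQPolicy.
    slope = (value_min - value_max) / nb_steps
    return max(value_min, value_max + slope * t)

print(eps_at(0))        # 0.5 at the start of training
print(eps_at(1000000))  # 0.4 halfway through the anneal
print(eps_at(3000000))  # 0.3, clamped once nb_steps is exceeded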
Example #5
    "best_value": -1,
    "samples": [],
    "hidden_activation": -1,
    "hidden_layer_neurons": -1,
    "loss_function": -1,
    "model": None
}

nn_scores_arr = np.zeros((len(hidden_activations), len(hidden_layer_neurons),
                          len(loss_functions), n_splits * n_repeats))

for i, hidden_activation in enumerate(hidden_activations):
    for j, hid_layer_neurons in enumerate(hidden_layer_neurons):
        for k, loss in enumerate(loss_functions):
            # build_fn must return a fresh model on every call; binding the
            # loop variables as default arguments avoids late binding and keeps
            # each CV fit from reusing weights trained in an earlier fold.
            def make_nn(act=hidden_activation, neurons=hid_layer_neurons,
                        loss_fn=loss):
                return create_model(X.shape[1],
                                    hidden_layer_neurons=neurons,
                                    hidden_activation=act,
                                    loss=loss_fn)
            clf = KerasRegressor(build_fn=make_nn, epochs=25, verbose=0)
            score = cross_val_score(clf,
                                    X,
                                    Y,
                                    scoring='neg_mean_squared_error',
                                    cv=cv)
            nn_scores["NN, activation={0}, neurons={1}, loss={2}".format(
                hidden_activation, hid_layer_neurons, loss)] = score
            nn_scores_arr[i, j, k, :] = score
            mean_of_scores = np.mean(score)
            if mean_of_scores > best_NN["best_value"]:
                best_NN["best_value"] = mean_of_scores
                best_NN["samples"] = score
                best_NN["hidden_activation"] = hidden_activation
Example #6
                        default=1600,
                        help='input image size')
    parser.add_argument("-v",
                        "--verbose",
                        action='count',
                        default=0,
                        help="level of debug messages")
    args = parser.parse_args()

    if args.model:
        print('Load torch model')
        model = load_model(args.model)
    else:
        print('Create torch model')
        model = create_model(arch=args.arch,
                             classnames=['background', 'pos'],
                             basenet=args.basenet)

    print('Trace torch model')
    # Input to the model
    batch_size = 1
    x = torch.randn(batch_size,
                    3,
                    args.image_size,
                    args.image_size,
                    requires_grad=True)
    model.eval()
    output = model(x)
    print(output.shape)

    print('Export onnx model')
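
The snippet is cut off right after this print; the export step it announces typically uses `torch.onnx.export` (the output filename and opset version below are assumptions, not from the source):

    # Hypothetical continuation: trace the model with the dummy input x and
    # write an ONNX graph. 'model.onnx' and opset 11 are illustrative choices.
    torch.onnx.export(model,
                      x,
                      'model.onnx',
                      export_params=True,
                      opset_version=11,
                      input_names=['input'],
                      output_names=['output'])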
Example #7
def build_model():
    # Load the unreal_GPU.csv data set and fit a model on it.
    file_name = 'unreal_GPU'
    X, y = get_dataset(file_name + '.csv')
    create_model(X, y, file_name)
Example #8
with tf.Session() as sess:

    table = initialize_lookup_table()

    features_batch, labels_batch = input.get_features_and_labels('./data/train/PC1.csv', table, batch_size)

    print("getting test_features and labels")
    test_features_batch, test_labels_batch = input.get_features_and_labels('./data/test/test.csv', table, batch_size)
    
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)

    test_features, test_labels = sess.run([test_features_batch, test_labels_batch], feed_dict={batch_size: testing_batch_size})

    train_z, test_x, test_y, test_prediction = nn.create_model(features_batch, labels_batch)
    loss_a = tf.reduce_mean(compute_loss(train_z))

    train_prediction = tf.nn.sigmoid(train_z)
    train_accuracy = compute_accuracy(train_prediction, labels_batch)
    test_accuracy = compute_accuracy(test_prediction, test_y)

    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    train_step = optimizer.minimize(loss_a)
    
    sess.run(tf.global_variables_initializer())

    writer = tf.summary.FileWriter('./graph')
    writer.add_graph(sess.graph)

    for _ in range(2000):
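        # (The source is truncated at this loop header; the body below is a
        # sketch of a typical TF1 queue-runner training step, not the
        # original code.)
        _, loss_value = sess.run([train_step, loss_a])

    # Standard queue-runner shutdown once training finishes.
    coord.request_stop()
    coord.join(threads)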