Example #1
import time

import cv2
import numpy as np
from PIL import ImageGrab


def main():
    model = alexnet(WIDTH, HEIGHT, LR)
    model.load(MODEL_NAME)
    while True:
        # 800x600 windowed mode
        screen_grab = ImageGrab.grab(bbox=(123, 499, 408, 570))
        screen_arr = np.array(screen_grab)
        last_time = time.time()  # timestamp, useful for frame-rate logging
        # ImageGrab returns RGB, so convert RGB (not BGR) to grayscale
        screen_gray = cv2.cvtColor(screen_arr, cv2.COLOR_RGB2GRAY)
        # resize to something a bit more acceptable for a CNN
        screen_resized = cv2.resize(screen_gray, (80, 60))
        moves = list(
            np.around(model.predict([screen_resized.reshape(80, 60, 1)])[0]))
        print(moves)
        if moves == [1, 0]:
            jump()
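
The jump() helper is not part of the snippet. A minimal sketch, assuming the game is the browser T-rex runner and that pyautogui is available (both are assumptions, not from the original):

import pyautogui  # assumption: the original may use a different input method


def jump():
    # a single spacebar press triggers a jump in the T-rex runner
    pyautogui.press('space')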
Example #2
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.platform import gfile


def export(output_file, input_nodes):

    with tf.Graph().as_default() as graph:
        # 'input_nodes' is the name given to the input placeholder
        input_tensor = tf.compat.v1.placeholder(name=input_nodes,
                                                dtype=tf.float32,
                                                shape=[None, 224, 224, 3])
        # building the network registers its ops in the graph
        network = alexnet(net_in=input_tensor,
                          classes=2,
                          drop_rate=0.0,
                          is_training=False)

        graph_def = graph.as_graph_def()

        # write the graph out in human-readable text format (.pbtxt)
        with gfile.GFile(output_file, 'w') as f:
            f.write(text_format.MessageToString(graph_def))
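
A hypothetical call; the output file name and the input node name here are illustrative, not from the original:

export('alexnet_inference.pbtxt', 'images_in')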
Example #3
import numpy as np
from AlexNet import alexnet
WIDTH = 80
HEIGHT = 60
LR = 1e-3
EPOCHS = 3
MODEL_NAME = 'T-rex{}-{}-{}-epochs.model'.format(LR, 'alexnetv2', EPOCHS)


def main():
    model = alexnet(WIDTH, HEIGHT, LR)

    # Set up the training data:
    train_data = np.load('training_data.npy', allow_pickle=True)

    # hold out the last 500 samples for validation
    train = train_data[:-500]
    test = train_data[-500:]

    X = np.array([i[0] for i in train]).reshape(-1, WIDTH, HEIGHT, 1)
    Y = [i[1] for i in train]

    test_x = np.array([i[0] for i in test]).reshape(-1, WIDTH, HEIGHT, 1)
    test_y = [i[1] for i in test]

    # Now we can actually train the model:
    model.fit({'input': X}, {'targets': Y},
              n_epoch=EPOCHS,
              validation_set=({'input': test_x}, {'targets': test_y}),
              snapshot_step=500,
              show_metric=True,
              run_id=MODEL_NAME)

    # tensorboard --logdir=foo:C:/Users/H/Desktop/ai-gaming/log

    model.save(MODEL_NAME)


if __name__ == '__main__':
    main()
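
For context, training_data.npy is expected to contain [image, one_hot_label] pairs. A minimal sketch of a file in that format (the 80x60 grayscale shape follows from WIDTH and HEIGHT above; the single all-zero frame is purely illustrative):

import numpy as np

frame = np.zeros((80, 60), dtype=np.uint8)  # one dummy 80x60 frame
training_data = [[frame, [1, 0]]]           # [image, one-hot label] pairs
np.save('training_data.npy', np.array(training_data, dtype=object))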
Example #4
import os
import random

import numpy as np
import tensorflow as tf


def train(target_acc, dataset_dir, input_ckpt, epochs, batchsize, init_lr,
          output_ckpt, tboard_logs, input_height, input_width, input_chan):

    # if an input checkpoint is specified, we are doing pruning fine-tuning
    if input_ckpt != '':
        # set_pruning_mode() comes from a pruning-patched TensorFlow build,
        # not stock TF
        tf.set_pruning_mode()
        print('Doing fine-tuning', flush=True)

    # unpack dataset from npz files
    npz_file = np.load(os.path.join(dataset_dir,'trainData.npz'))
    x_train = npz_file['x']
    y_train = npz_file['y']

    npz_file = np.load(os.path.join(dataset_dir,'testData.npz'))
    x_test = npz_file['x']
    y_test = npz_file['y']

    train_batches = len(x_train) // batchsize
    test_batches = len(x_test) // batchsize

    print('Train batches:', train_batches, flush=True)
    print('Test batches:', test_batches, flush=True)


    # define placeholders for the input images, labels, training mode and dropout rate
    images_in = tf.compat.v1.placeholder(tf.float32, shape=[None,input_height,input_width,input_chan], name='images_in')
    labels_ph = tf.compat.v1.placeholder(tf.float32, shape=[None,2], name='labels_ph')
    train_ph = tf.compat.v1.placeholder(tf.bool, shape=None, name='train_ph')
    drop_ph = tf.compat.v1.placeholder(tf.float32, shape=None, name='drop_ph')
    
    # build the network, input comes from the 'images_in' placeholder
    logits = alexnet(images_in, classes=2, drop_rate=drop_ph, is_training=train_ph)
  
    # softmax cross entropy loss function - needs one-hot encoded labels
    loss = tf.reduce_mean(tf.compat.v1.losses.softmax_cross_entropy(logits=logits, onehot_labels=labels_ph),name='loss')

    # global step
    global_step=tf.compat.v1.train.get_or_create_global_step()
    
    # stepped learning rate - divides the learning rate by 10 (decay_rate=0.1)
    # every decay_steps steps; n sets how many decay events occur over the run
    n = 2
    decay_steps = int((epochs/n)*(len(x_train)/batchsize))
    decay_lr = tf.compat.v1.train.exponential_decay(learning_rate=init_lr,
                                                    global_step=global_step,
                                                    decay_steps=decay_steps,
                                                    decay_rate=0.1,
                                                    staircase=True,
                                                    name='decay_lr')
    
    # Adaptive Momentum optimizer - minimize the loss
    update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=decay_lr)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(loss, global_step=global_step)

    predicted_logit = tf.argmax(input=logits, axis=1, output_type=tf.int32,name='predicted_logit')
    ground_truth = tf.argmax(input=labels_ph, axis=1, output_type=tf.int32, name='ground_truth')

    # Accuracy
    tf_acc, tf_acc_update = tf.compat.v1.metrics.accuracy(ground_truth, predicted_logit, name='accuracy')
    acc_var = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.LOCAL_VARIABLES, scope='accuracy')
    acc_var_initializer = tf.compat.v1.variables_initializer(var_list=acc_var)

    # TensorBoard data collection
    tf.compat.v1.summary.scalar('loss', loss)
    tf.compat.v1.summary.scalar('decay_lr', decay_lr)
    tf.compat.v1.summary.image('images_in', images_in)

    # set up saver object
    saver = tf.compat.v1.train.Saver()

    # Run the graph in a Session
    with tf.compat.v1.Session() as sess:
        best_epoch = 0
        best_acc = 0    
        sess.run(tf.compat.v1.initializers.global_variables())

        # TensorBoard writer
        writer = tf.compat.v1.summary.FileWriter(tboard_logs, sess.graph)
        tb_summary = tf.compat.v1.summary.merge_all()

        # if input checkpoint specified, restore it
        if input_ckpt != '':
            saver.restore(sess, input_ckpt)
   
        # Training and test over specified number of epochs
        for epoch in range(epochs):
            print('\nEpoch', epoch+1, '/', epochs, ':', flush=True)

            # Training for 1 epoch
            for i in range(train_batches):
                # copy the batch so augmentation does not modify the dataset
                x_batch = x_train[i*batchsize:(i+1)*batchsize].copy()
                y_batch = y_train[i*batchsize:(i+1)*batchsize]
                # random left-right flip; np.fliplr returns a new array, so
                # the result must be assigned back for the flip to take effect
                for j in range(batchsize):
                    if random.randint(0, 1) == 1:
                        x_batch[j] = np.fliplr(x_batch[j])

                # Run graph for optimization - i.e. do the training
                _, s, dlr = sess.run([train_op, tb_summary, decay_lr],
                                     feed_dict={images_in: x_batch,
                                                labels_ph: y_batch,
                                                train_ph: True, drop_ph: 0.5})
            writer.add_summary(s, global_step=(((epoch+1)*train_batches)-1))

            # test after every epoch            
            sess.run(acc_var_initializer)
            for i in range(test_batches):   
                # fetch a batch from test dataset
                x_batch, y_batch = x_test[i*batchsize:(i+1)*batchsize], y_test[i*batchsize:(i+1)*batchsize]
    
                # Run graph for accuracy
                sess.run([tf_acc_update], feed_dict={images_in: x_batch, labels_ph: y_batch, train_ph: False, drop_ph: 0.0})

            print(' - Learning rate:', dlr, flush=True)
            score = sess.run(tf_acc)
            print(' - Accuracy with test set:', score, flush=True)

            # save checkpoint if accuracy improved
            if score > best_acc:
                saver.save(sess, output_ckpt)
                print(' - Saved checkpoint to %s' % output_ckpt, flush=True)
                best_acc = score
                best_epoch = epoch+1

            # exit early once the target accuracy is reached
            if best_acc >= target_acc:
                print('Accuracy of', best_acc, 'meets the target accuracy of',
                      target_acc, '- exiting train/fine-tune', flush=True)
                break

    print('\nBest epoch:', best_epoch, ' Accuracy:', best_acc, flush=True)
    writer.flush()
    writer.close()
    print('Run `tensorboard --logdir=%s --port 6006 --host localhost` to see the results.' % tboard_logs)

    return best_acc, best_epoch
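
A hypothetical invocation; all paths and hyperparameters below are illustrative, not from the original:

best_acc, best_epoch = train(target_acc=0.95,
                             dataset_dir='./dataset',
                             input_ckpt='',  # '' = train from scratch
                             epochs=10,
                             batchsize=50,
                             init_lr=1e-4,
                             output_ckpt='./ckpt/alexnet.ckpt',
                             tboard_logs='./tb_logs',
                             input_height=224,
                             input_width=224,
                             input_chan=3)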
Example #5
import os

import numpy as np
import tensorflow as tf


def eval_ckpt(dataset_dir, input_ckpt, batchsize):

    # unpack dataset
    npz_file = np.load(os.path.join(dataset_dir, 'testData.npz'))
    x_test = npz_file['x']
    y_test = npz_file['y']

    test_batches = len(x_test) // batchsize

    # define placeholders for the input images and labels
    images_in = tf.compat.v1.placeholder(tf.float32,
                                         shape=[None, 224, 224, 3],
                                         name='images_in')
    labels = tf.compat.v1.placeholder(tf.uint8, shape=[None, 2], name='labels')

    # build the network, input comes from the 'images_in' placeholder
    logits = alexnet(images_in, classes=2, drop_rate=0.0, is_training=False)

    predicted_logit = tf.argmax(input=logits,
                                axis=1,
                                output_type=tf.int32,
                                name='predicted_logit')
    ground_truth = tf.compat.v1.argmax(labels,
                                       1,
                                       output_type=tf.int32,
                                       name='ground_truth')

    # Define the metric and update operations
    tf_acc, tf_acc_update = tf.compat.v1.metrics.accuracy(ground_truth,
                                                          predicted_logit,
                                                          name='accuracy')
    acc_var = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.LOCAL_VARIABLES, scope="accuracy")
    acc_var_initializer = tf.compat.v1.variables_initializer(var_list=acc_var)

    # create saver object
    saver = tf.compat.v1.train.Saver()

    with tf.compat.v1.Session() as sess:

        sess.run(tf.compat.v1.initializers.global_variables())
        sess.run(acc_var_initializer)

        # restore checkpoint to be evaluated
        saver.restore(sess, input_ckpt)

        for i in range(test_batches):

            # fetch a batch from the test dataset
            x_batch = x_test[i * batchsize:(i + 1) * batchsize]
            y_batch = y_test[i * batchsize:(i + 1) * batchsize]

            # Run graph for accuracy node
            sess.run(tf_acc_update,
                     feed_dict={
                         images_in: x_batch,
                         labels: y_batch
                     })

        score = sess.run(tf_acc)
        print('Checkpoint accuracy with test dataset:', score)

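A hypothetical call; the paths are illustrative, not from the original:

eval_ckpt(dataset_dir='./dataset',
          input_ckpt='./ckpt/alexnet.ckpt',
          batchsize=50)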
Example #6
test_image, test_label = cifar10.inputs(eval_data=True,
                                        data_dir=cifar10_dir,
                                        batch_size=FLAGS.test_size)

ckpt_dir = 'ckpt/'

if not os.path.exists(ckpt_dir):
    os.makedirs(ckpt_dir)

with tf.name_scope('input'):
    x_image = train_image
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    y_ = train_label

with tf.name_scope('prediction'):
    alex_net = alexnet(x_image, keep_prob)
    y = alex_net.prediction

with tf.name_scope('cross_entropy'):
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=y_, name="cross_entropy_per_example")
    cross_entropy = tf.reduce_mean(cross_entropy)

with tf.name_scope('train_step'):
    train_step = tf.train.AdagradOptimizer(FLAGS.lr).minimize(cross_entropy)
    #train_step= tf.train.GradientDescentOptimizer(FLAGS.lr).minimize(cross_entropy)

saver = tf.train.Saver(max_to_keep=5)

correct_prediction = tf.nn.in_top_k(y, y_, 1)
# mean over the batch, so the batch size is not hard-coded
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
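
The snippet only builds the graph. A minimal sketch of the training session it implies, assuming cifar10.inputs uses the TF1 queue-based input pipeline (so queue runners must be started) and a hypothetical FLAGS.max_steps:

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # start the queue-runner threads that feed train_image/train_label
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for step in range(FLAGS.max_steps):  # FLAGS.max_steps is an assumption
            _, loss_val = sess.run([train_step, cross_entropy],
                                   feed_dict={keep_prob: 0.5})
            if step % 1000 == 0:
                print('step %d, loss %.4f' % (step, loss_val))
                saver.save(sess, os.path.join(ckpt_dir, 'model.ckpt'),
                           global_step=step)
    finally:
        coord.request_stop()
        coord.join(threads)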