Example #1
import os
import sys

import tensorflow as tf

# Doom, Net, and play_episodes come from the project's own modules.

def main(args):
    # Create the save directory if it does not exist yet.
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    tf.reset_default_graph()

    # Build the evaluation environment with rendering on ('visiable' is the
    # project's own spelling of the flag).
    env = Doom(visiable=True)

    network = Net(env.state_dim, env.action_dim, 'global', None)

    # Collect the global network's variables only after Net() has built them;
    # collecting before construction would yield an empty list.
    global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')

    saver = tf.train.Saver(global_vars)

    with tf.Session() as sess:
        if args.model_path is not None:
            print('Loading model...')
            ckpt = tf.train.get_checkpoint_state(args.model_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            sys.exit('No model path found...')

        # Evaluate the restored policy indefinitely, one episode per call.
        while True:
            play_episodes(sess, env, network)
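
play_episodes is called but never defined in this example. Below is a minimal sketch of what such an evaluation helper could look like, assuming Net exposes an inputs placeholder and a policy output tensor and that Doom follows a gym-like reset/step API; all of these attribute names are assumptions, not the project's actual interface.

import numpy as np

def play_episodes(sess, env, network):
    # Hypothetical helper: run one episode greedily under the current policy.
    state = env.reset()
    done = False
    total_reward = 0.0
    while not done:
        # 'network.inputs' and 'network.policy' are assumed tensor handles.
        policy = sess.run(network.policy,
                          feed_dict={network.inputs: [state]})[0]
        action = int(np.argmax(policy))
        state, reward, done = env.step(action)
        total_reward += reward
    print('Episode reward: %.2f' % total_reward)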
Example #2
import os
import threading
import time

import tensorflow as tf

# Doom, Net, Worker, and print_net_params_number come from the project's
# own modules.

def main(args):
    # Create the save directory if it does not exist yet.
    if args.save_path is not None and not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    tf.reset_default_graph()

    # Shared, non-trainable episode counter incremented across all workers.
    global_ep = tf.Variable(0,
                            dtype=tf.int32,
                            name='global_ep',
                            trainable=False)

    # Build the global network once, without rendering; each worker keeps a
    # local copy that syncs with it ('visiable' is the project's spelling).
    env = Doom(visiable=False)
    Net(env.state_dim, env.action_dim, 'global', None)
    num_workers = args.parallel
    workers = []

    # Create one worker per parallel slot, each with its own Doom instance.
    for i in range(num_workers):
        w = Worker(i, Doom(), global_ep, args)
        workers.append(w)

    print('%d workers in total.\n' % num_workers)
    saver = tf.train.Saver(max_to_keep=3)

    with tf.Session() as sess:
        coord = tf.train.Coordinator()
        if args.model_path is not None:
            print('Loading model...')
            ckpt = tf.train.get_checkpoint_state(args.model_path)
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            print('Initializing a new model...')
            sess.run(tf.global_variables_initializer())
        print_net_params_number()

        # Start each worker's run loop in its own thread. Bind the worker as
        # a default argument: a plain 'lambda: w.run(...)' is late-bound, so
        # slow-starting threads could all see the loop's final worker.
        worker_threads = []
        for w in workers:
            t = threading.Thread(target=lambda worker=w: worker.run(sess, coord, saver))
            t.start()
            time.sleep(0.5)
            worker_threads.append(t)
        coord.join(worker_threads)
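
The thread management in Example #2 rests on tf.train.Coordinator from TensorFlow 1.x. The standalone sketch below shows the same lifecycle with dummy workers (start the threads, poll should_stop(), call request_stop(), then join()); worker_loop, its steps parameter, and the print format are illustrative stand-ins for the project's Worker.run, not its actual code.

import threading
import time

import tensorflow as tf

def worker_loop(coord, worker_id, steps=5):
    # Stand-in for Worker.run: do a bounded amount of "work", checking the
    # shared coordinator before each step, then ask all threads to stop.
    for step in range(steps):
        if coord.should_stop():
            break
        print('worker %d, step %d' % (worker_id, step))
        time.sleep(0.1)
    coord.request_stop()

coord = tf.train.Coordinator()
threads = [threading.Thread(target=lambda i=i: worker_loop(coord, i))
           for i in range(3)]
for t in threads:
    t.start()
coord.join(threads)

Note the same default-argument binding (i=i) as in the fix above, needed for the same late-binding reason.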