Code Example #1
#args['out_length'] = 25
#args['grid_size'] = (13,3)
#args['soc_conv_depth'] = 64
#args['conv_3x1_depth'] = 16
#args['dyn_embedding_size'] = 32
#args['input_embedding_size'] = 32
#args['num_lat_classes'] = 3
#args['num_lon_classes'] = 2
#args['use_maneuvers'] = True

args = {}
args['train_flag'] = True
args['model_dir'] = 'experiments/' + cmd_args.experiment
args['restore_file'] = cmd_args.restore_file  # or 'last' or 'best'

utils.set_logger(os.path.join(args['model_dir'], 'train.log'))

json_path = os.path.join(args['model_dir'], 'params.json')
assert os.path.isfile(
    json_path), "No json configuration file found at {}".format(json_path)
params = utils.Params(json_path)
params.grid_size = (params.grid_size_lon, params.grid_size_lat)

# use GPU if available
params.use_cuda = torch.cuda.is_available()
params.train_flag = args['train_flag']
params.model_dir = args['model_dir']

print("\nEXPERIMENT:", args['model_dir'], "\n")

if "transformer" in cmd_args.experiment:
Code Example #2
#args['in_length'] = 16
#args['out_length'] = 25
#args['grid_size'] = (13,3)
#args['soc_conv_depth'] = 64
#args['conv_3x1_depth'] = 16
#args['dyn_embedding_size'] = 32
#args['input_embedding_size'] = 32
#args['num_lat_classes'] = 3
#args['num_lon_classes'] = 2
#args['use_maneuvers'] = True

args = {}
args['train_flag'] = False
args['model_dir'] = 'experiments/' + cmd_args.experiment

utils.set_logger(os.path.join(args['model_dir'], 'evaluate.log'))

json_path = os.path.join(args['model_dir'], 'params.json')
assert os.path.isfile(
    json_path), "No json configuration file found at {}".format(json_path)
params = utils.Params(json_path)
params.grid_size = (params.grid_size_lon, params.grid_size_lat)

# use GPU if available
params.use_cuda = torch.cuda.is_available()
#params.use_cuda = False
params.train_flag = args['train_flag']
params.model_dir = args['model_dir']

params.create_onnx = True
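
Examples #1 and #2 both load hyperparameters through utils.Params and then attach extra attributes (use_cuda, train_flag, model_dir) to the same object. A minimal sketch of a Params class with that behavior, assuming it simply maps the top-level JSON keys onto instance attributes:

import json

class Params:
    """Load hyperparameters from a JSON file as instance attributes.

    Sketch only; the actual utils.Params may offer more (e.g. save/update).
    """
    def __init__(self, json_path):
        with open(json_path) as f:
            # Each top-level JSON key becomes an attribute, e.g.
            # params.grid_size_lon for {"grid_size_lon": 13, ...}.
            self.__dict__.update(json.load(f))

# Usage mirrors the examples above:
# params = Params('experiments/my_experiment/params.json')
# params.grid_size = (params.grid_size_lon, params.grid_size_lat)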
Code Example #3
File: test_algo.py Project: rongrong1314/MCTS-NNET
        "{:.6f} sec, hardbrakes: {:.2f}, steps_to_goal: {:.2f}, "
        "steps_to_collision {:.2f}, "
        "speed_at_collision {:.2f}".format(np.mean(metric_scores),
                                           success / len(metric_scores),
                                           np.mean(metric_runtime),
                                           np.mean(metric_hardbrakes),
                                           np.mean(metric_steps_to_goal),
                                           np.mean(metric_steps_to_collision),
                                           np.mean(metric_speed_at_collision)))

    plot(metric_steps_to_goal, metric_steps_to_collision, args.algo)


### MAIN starts here ###

utils.set_logger('logs/test.log')
# Parser
# python3 test_algo.py
# python3 test_algo.py --algo dqn
parser = argparse.ArgumentParser()
parser.add_argument(
    '--algo',
    default='baseline',
    help="baseline, qlearning, dqn, mcts, mcts-nnet, mpc, human")
parser.add_argument('--nn', default='dnn', help="dnn or cnn")
parser.add_argument('--restore',
                    default=None,
                    help="Optional, file in models containing weights to load")
# --visu is a bare boolean flag (store_true); if it took a string value,
# even "--visu False" would evaluate as truthy.
parser.add_argument('--visu',
                    action='store_true',
                    help="Optional, used to debug and visualize tests")
Code Example #4
    # 		done = mdp.isEnd(sp)[0]

    # 		#agent.step(s, a, r, sp, done)
    # 		memory.add(s, a, r, sp, done)
    # 		if len(memory) > BATCH_SIZE:
    # 			samples = memory.sample()
    # 			for sample in samples:
    # 				state, action, reward, next_state, isDone = sample
    # 				rl.incorporateFeedback(state, action, reward, next_state, isDone)
    # 		else:
    # 			rl.incorporateFeedback(s, a, r, sp, done)

    # 		score += r
    # 		if done:
    # 			break
    # 		s = sp
    # 	scores_window.append(score)
    # 	eps = max(eps_end, eps_decay*eps)
    # 	avg_sliding_score = np.mean(scores_window)
    # 	print("Episode {} Average sliding score: {:.2f}".format(i_episode, avg_sliding_score))
    # 	if avg_sliding_score > -10:
    # 		weightsFile.write("Episode {} Average sliding score: {:.2f}\n".format(i_episode, avg_sliding_score))
    # 		rl.dumpWeights()


utils.set_logger('qlearning.log')

weightsFile = open("models/qlearning.weights", "a")
mdp = ActMDP()
qlearning(mdp)
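
The commented-out block at the top of this example sketches experience replay: transitions are pushed into a memory buffer, and once len(memory) exceeds BATCH_SIZE, feedback is incorporated from random samples instead of only the latest transition. A minimal replay buffer with the add/sample/__len__ interface that code assumes (a hypothetical stand-in, not the project's own class):

import random
from collections import deque

class ReplayMemory:
    """Fixed-size buffer of (s, a, r, sp, done) transitions."""
    def __init__(self, capacity=10000, batch_size=64):
        self.buffer = deque(maxlen=capacity)  # old transitions drop off
        self.batch_size = batch_size

    def add(self, s, a, r, sp, done):
        self.buffer.append((s, a, r, sp, done))

    def sample(self):
        # Uniform random minibatch; decorrelates consecutive updates.
        return random.sample(self.buffer, self.batch_size)

    def __len__(self):
        return len(self.buffer)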
Code Example #5
                sp, r = mdp.sampleSuccReward(s, a)
                done = mdp.isEnd(sp)[0]
                score += r
                if done:
                    break
                s = sp
            dev_scores_window.append(score)
        dev_mean_score = np.mean(dev_scores_window)
        logging.info("Epoch no {}: dev_mean_score: {:.2f}".format(
            num_epoch, dev_mean_score))
        if dev_mean_score > best_score:
            agent.save(num_epoch, dev_mean_score)
            best_score = dev_mean_score


utils.set_logger('logs/train.log')

# run python3 dqn.py or python3 dqn.py --restore best or python3 dqn.py --nn cnn
parser = argparse.ArgumentParser()
parser.add_argument('--nn', default='dnn', help="dnn or cnn")
parser.add_argument(
    '--restore',
    default=None,
    help="Optional, file in models containing weights to reload before training"
)

args = parser.parse_args()

mdp = ActMDP()
train_dqn(mdp, args)
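
Examples #4 and #5 both drive an ActMDP through the same two calls: sampleSuccReward(s, a) returns a (next_state, reward) pair, and isEnd(state) returns a tuple whose first element is the terminal flag. A stub showing just that interface, with placeholder state and dynamics (the real environment is specific to the MCTS-NNET project):

import random

class ActMDP:
    """Interface stub matching the calls in the loops above."""
    def sampleSuccReward(self, s, a):
        # Returns (next_state, reward); these dynamics are placeholders.
        sp = s + a + random.choice([-1, 0, 1])
        r = -1.0
        return sp, r

    def isEnd(self, state):
        # First element is the done flag, matching mdp.isEnd(sp)[0] above.
        return (abs(state) >= 10,)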