Example #1
# Optionally override the objective with the command-line value.
if args.obj is not None:
    obj = args.obj

# Build one NetworkEnv per (graph, seed set) pair.
envs = []
for g, seeds in zip(graphs, e_seeds_list):
    env = NetworkEnv(fullGraph=g,
                     seeds=seeds,
                     opt_reward=0,
                     nop_r=args.nop_reward,
                     times_mean=args.times_mean_env,
                     bad_reward=args.bad_reward,
                     clip_max=args.max_reward,
                     clip_min=args.min_reward,
                     normalize=args.norm_reward)
    envs.append(env)
# Prioritized experience replay buffer for the training transitions.
replay = PriortizedReplay(BUFF_SIZE, 10, beta=0.6)

# state_dim is set equal to action_dim below, so both logs report the same value.
logging.info('State Dimensions: ' + str(action_dim))
logging.info('Action Dimensions: ' + str(action_dim))

# DQN trainer built on GCN layers with graph pooling.
acmodel = DQNTrainer(input_dim=input_dim,
                     state_dim=action_dim,
                     action_dim=action_dim,
                     replayBuff=replay,
                     lr=LR,
                     use_cuda=use_cuda,
                     gamma=args.gamma,
                     eta=eta,
                     gcn_num_layers=gcn_layers,
                     num_pooling=num_pooling,
                     assign_dim=assign_dim,
                     assign_hidden_dim=assign_hidden_dim)
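
The PriortizedReplay(BUFF_SIZE, 10, beta=0.6) call above is project-specific and its constructor is not shown here. As a rough, generic illustration of what a prioritized replay buffer with a beta importance-sampling exponent typically does, here is a minimal sketch; the class name, the alpha parameter, and the proportional-priority scheme are assumptions for illustration, not this repository's implementation:

import numpy as np

class SimplePrioritizedReplay:
    """Minimal proportional prioritized replay, for illustration only."""

    def __init__(self, capacity, alpha=0.6, beta=0.6, eps=1e-6):
        self.capacity = capacity
        self.alpha = alpha      # how strongly TD error shapes the sampling distribution
        self.beta = beta        # strength of the importance-sampling correction
        self.eps = eps
        self.data = []
        self.priorities = []

    def add(self, transition, td_error=1.0):
        # New transitions get a priority derived from their TD error (or a default).
        if len(self.data) >= self.capacity:
            self.data.pop(0)
            self.priorities.pop(0)
        self.data.append(transition)
        self.priorities.append((abs(td_error) + self.eps) ** self.alpha)

    def sample(self, batch_size):
        p = np.asarray(self.priorities)
        probs = p / p.sum()
        idx = np.random.choice(len(self.data), size=batch_size, p=probs)
        # Importance-sampling weights correct the bias from non-uniform sampling.
        weights = (len(self.data) * probs[idx]) ** (-self.beta)
        weights /= weights.max()
        batch = [self.data[i] for i in idx]
        return batch, idx, weights

    def update_priorities(self, idx, td_errors):
        for i, err in zip(idx, td_errors):
            self.priorities[i] = (abs(err) + self.eps) ** self.alpha

With this scheme, transitions with larger TD error are replayed more often, and the returned weights down-weight their gradient contribution so the Q-learning update stays unbiased.
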
Example #2
    goals.append([min_goal, mid_goal, max_goal])
    print('Goals is:', goals)
# logging.info('Goals setting is:' + str(goals))
print('goal setting over')


if args.obj is not None:
    obj = args.obj

envs = []
for g, seeds in zip(graphs, e_seeds_list):
    env = NetworkEnv(fullGraph=g, seeds=seeds, opt_reward=0, nop_r=args.nop_reward,
                     times_mean=args.times_mean_env, bad_reward=args.bad_reward, clip_max=args.max_reward,
                     clip_min=args.min_reward, normalize=args.norm_reward)
    envs.append(env)
replay = PriortizedReplay(BUFF_SIZE, 10, beta=0.6)

logging.info('State Dimensions: ' + str(action_dim))
logging.info('Action Dimensions: ' + str(action_dim))

acmodel = DQNTrainer(input_dim=input_dim, state_dim=action_dim, action_dim=action_dim, replayBuff=replay, lr=LR,
                     use_cuda=use_cuda, gamma=args.gamma,
                     eta=eta, gcn_num_layers=gcn_layers, num_pooling=num_pooling, assign_dim=assign_dim,
                     assign_hidden_dim=assign_hidden_dim)

noise = OrnsteinUhlenbeckActionNoise(action_dim, theta=noise_momentum, sigma=noise_magnitude)

# NOTE: writer.add_graph doesn't support nested models
# writer.add_graph(acmodel.actor_critic)
rws = []
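
The OrnsteinUhlenbeckActionNoise(action_dim, theta=noise_momentum, sigma=noise_magnitude) line constructs temporally correlated exploration noise. A minimal sketch of the standard Ornstein-Uhlenbeck update, assuming the usual DDPG-style discretization with a unit time step; the class below is illustrative, not the repository's implementation:

import numpy as np

class OUNoiseSketch:
    """Illustrative OU noise: x += theta * (mu - x) + sigma * N(0, 1)."""

    def __init__(self, action_dim, mu=0.0, theta=0.15, sigma=0.2):
        self.mu = mu * np.ones(action_dim)
        self.theta = theta   # pulls the noise back toward mu (the "momentum" term)
        self.sigma = sigma   # scales the random perturbation (the "magnitude" term)
        self.state = self.mu.copy()

    def reset(self):
        self.state = self.mu.copy()

    def sample(self):
        dx = self.theta * (self.mu - self.state) + self.sigma * np.random.randn(len(self.state))
        self.state = self.state + dx
        return self.state

Because each sample depends on the previous one, successive perturbations are correlated, which tends to give smoother exploration of the action space than independent Gaussian noise.
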