Example #1
from dqn import DQNTrainer  # import assumed from Example #3, which uses the same class


def parallelize(game, params):
    """Train a DQN agent on one game file with the given hyperparameters."""
    print(params)
    #game = "/home/eilab/Raj/tw-drl/Games/obj_20_qlen_5_room_10/train/game_" + str(10) + ".ulx"
    trainer = DQNTrainer(game, params)
    trainer.train()
    #del trainer.model
    #del trainer
    #gc.collect()
    """
Example #2
def parallelize(game, params):
    print(params)
    trainer = DQNTrainer(game, params)
    trainer.train()
Example #3
from dqn import DQNTrainer
from joblib import Parallel, delayed
import multiprocessing
import gc


def parallelize(game, params):
    """Train a DQN agent on one game file with the given hyperparameters."""
    print(params)
    trainer = DQNTrainer(game, params)
    trainer.train()


if __name__ == "__main__":
    # Run the QA training routine with the trainer's default settings.
    trainer = DQNTrainer()
    trainer.train_QA()
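
Example #3 imports Parallel and delayed from joblib (plus multiprocessing) but never calls them in the visible excerpt; presumably the full script fans parallelize out across a set of games. A minimal dispatch sketch under that assumption, with hypothetical games and params values:

from joblib import Parallel, delayed
import multiprocessing

from dqn import DQNTrainer


def parallelize(game, params):
    trainer = DQNTrainer(game, params)
    trainer.train()


if __name__ == "__main__":
    # Hypothetical inputs: a handful of game files and one shared hyperparameter dict.
    games = ["train/game_1.ulx", "train/game_2.ulx"]
    params = {"batch_size": 32}

    # One process per CPU core; each call trains an independent agent on one game.
    Parallel(n_jobs=multiprocessing.cpu_count())(
        delayed(parallelize)(game, params) for game in games
    )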
Example #4
if args.obj is not None:
    obj = args.obj

envs = []
for g, seeds in zip(graphs, e_seeds_list):
    env = NetworkEnv(fullGraph=g, seeds=seeds, opt_reward=0, nop_r=args.nop_reward,
                     times_mean=args.times_mean_env, bad_reward=args.bad_reward, clip_max=args.max_reward,
                     clip_min=args.min_reward, normalize=args.norm_reward)
    envs.append(env)
replay = PriortizedReplay(BUFF_SIZE, 10, beta=0.6)

# In this setup the state dimension equals the action dimension
# (state_dim=action_dim in the DQNTrainer call below), so both lines log action_dim.
logging.info('State Dimensions: ' + str(action_dim))
logging.info('Action Dimensions: ' + str(action_dim))

acmodel = DQNTrainer(input_dim=input_dim, state_dim=action_dim, action_dim=action_dim, replayBuff=replay, lr=LR,
                     use_cuda=use_cuda, gamma=args.gamma,
                     eta=eta, gcn_num_layers=gcn_layers, num_pooling=num_pooling, assign_dim=assign_dim,
                     assign_hidden_dim=assign_hidden_dim)

noise = OrnsteinUhlenbeckActionNoise(action_dim, theta=noise_momentum, sigma=noise_magnitude)

# ! Doesn't Support nested models
# writer.add_graph(acmodel.actor_critic)
rws = []


def make_const_attrs(graph, input_dim):
    """Return an (n_nodes, input_dim) matrix of constant (all-ones) node attributes."""
    n = len(graph)
    mat = np.ones((n, input_dim))
    # mat = np.random.rand(n,input_dim)
    return mat
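
make_const_attrs returns an all-ones node-feature matrix, presumably used to seed the GCN input features (its input_dim matches the model's input_dim argument). A quick usage sketch, assuming make_const_attrs and its numpy import are in scope and that the graphs are networkx graphs, for which len() gives the node count:

import networkx as nx

toy_graph = nx.path_graph(4)                      # hypothetical 4-node graph
feats = make_const_attrs(toy_graph, input_dim=8)
print(feats.shape)                                # (4, 8): one constant feature row per node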
Example #5
# The excerpt begins mid-way through the environment-construction loop; the
# opening lines here are assumed to mirror the equivalent call in Example #4.
for g, seeds in zip(graphs, e_seeds_list):
    env = NetworkEnv(fullGraph=g,
                     seeds=seeds,
                     opt_reward=0,
                     nop_r=args.nop_reward,
                     times_mean=args.times_mean_env,
                     bad_reward=args.bad_reward,
                     clip_max=args.max_reward,
                     clip_min=args.min_reward,
                     normalize=args.norm_reward)
    envs.append(env)
replay = PriortizedReplay(BUFF_SIZE, 10, beta=0.6)

logging.info('State Dimensions: ' + str(action_dim))
logging.info('Action Dimensions: ' + str(action_dim))

acmodel = DQNTrainer(input_dim=input_dim,
                     state_dim=action_dim,
                     action_dim=action_dim,
                     replayBuff=replay,
                     lr=LR,
                     use_cuda=use_cuda,
                     gamma=args.gamma,
                     eta=eta,
                     gcn_num_layers=gcn_layers,
                     num_pooling=num_pooling,
                     assign_dim=assign_dim,
                     assign_hidden_dim=assign_hidden_dim)

noise = OrnsteinUhlenbeckActionNoise(action_dim,
                                     theta=noise_momentum,
                                     sigma=noise_magnitude)

# ! Doesn't Support nested models
# writer.add_graph(acmodel.actor_critic)
rws = []
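
OrnsteinUhlenbeckActionNoise, constructed above with theta=noise_momentum and sigma=noise_magnitude, provides temporally correlated exploration noise. The repo's class definition is not shown in these excerpts, so the following is a generic sketch of the Ornstein-Uhlenbeck update rather than that exact API:

import numpy as np


class OUNoiseSketch:
    """Generic Ornstein-Uhlenbeck process: x <- x + theta*(mu - x) + sigma*N(0, I)."""

    def __init__(self, action_dim, mu=0.0, theta=0.15, sigma=0.2):
        self.mu = mu * np.ones(action_dim)
        self.theta = theta
        self.sigma = sigma
        self.state = self.mu.copy()

    def sample(self):
        # Mean-reverting drift plus Gaussian perturbation; successive samples are correlated.
        self.state = self.state + self.theta * (self.mu - self.state) \
                     + self.sigma * np.random.randn(len(self.state))
        return self.state


# Usage sketch: perturb an action during exploration (names are hypothetical).
# noise = OUNoiseSketch(action_dim, theta=noise_momentum, sigma=noise_magnitude)
# noisy_action = chosen_action + noise.sample()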