Example #1
import torch.multiprocessing as mp  # the snippet's mp is assumed to be torch.multiprocessing

# Project-local names assumed by this snippet: Params, A3C_LSTM_GA,
# SharedAdam, run_sim, and test.


def main():
    params = Params()

    mp.set_start_method('spawn')   # must be set before any worker is created
    count = mp.Value('i', 0)       # shared step counter across all workers
    best_acc = mp.Value('d', 0.0)  # best test accuracy seen so far
    lock = mp.Lock()               # guards updates to the shared values

    shared_model = A3C_LSTM_GA()
    shared_model.share_memory()  # moves parameters into shared memory; returns self

    shared_optimizer = SharedAdam(shared_model.parameters(),
                                  lr=params.lr,
                                  amsgrad=params.amsgrad,
                                  weight_decay=params.weight_decay)
    shared_optimizer.share_memory()
    #run_sim(0, params, shared_model, None,  count, lock)
    #test(params, shared_model, count, lock, best_acc)

    processes = []

    train_process = 0
    test_process = 0

    # one test worker plus two training workers per iteration
    for rank in range(params.n_process):

        p = mp.Process(target=test,
                       args=(
                           test_process,
                           params,
                           shared_model,
                           count,
                           lock,
                           best_acc,
                       ))
        p.start()
        processes.append(p)
        test_process += 1

        for i in range(2):
            p = mp.Process(target=run_sim,
                           args=(
                               train_process,
                               params,
                               shared_model,
                               shared_optimizer,
                               count,
                               lock,
                           ))
            p.start()
            processes.append(p)
            train_process += 1  # inside the loop so every trainer gets a unique rank

    for p in processes:
        p.join()
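
All three examples rely on a SharedAdam optimizer whose state tensors live in shared memory, but its definition is not part of the snippets. The sketch below is a hypothetical reconstruction modeled on the pattern popularized by open-source A3C implementations; the class actually used above may differ in its details.

import torch
import torch.optim as optim


class SharedAdam(optim.Adam):
    """Hypothetical sketch: Adam whose state tensors live in shared memory,
    so every worker process updates the same running statistics."""

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        super().__init__(params, lr=lr, betas=betas, eps=eps,
                         weight_decay=weight_decay, amsgrad=amsgrad)
        # Create the per-parameter state up front so the tensors already
        # exist when the optimizer is handed to spawned workers.
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'] = torch.zeros(1)
                state['exp_avg'] = torch.zeros_like(p.data)
                state['exp_avg_sq'] = torch.zeros_like(p.data)
                if amsgrad:
                    state['max_exp_avg_sq'] = torch.zeros_like(p.data)

    def share_memory(self):
        # Move every state tensor into shared memory.
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'].share_memory_()
                state['exp_avg'].share_memory_()
                state['exp_avg_sq'].share_memory_()
                if group['amsgrad']:
                    state['max_exp_avg_sq'].share_memory_()

    def step(self, closure=None):
        # Reimplement the Adam update so it reads and writes the shared state.
        loss = closure() if closure is not None else None
        for group in self.param_groups:
            beta1, beta2 = group['betas']
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]
                state['step'] += 1
                step = state['step'].item()
                if group['weight_decay'] != 0:
                    grad = grad.add(p.data, alpha=group['weight_decay'])
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if group['amsgrad']:
                    v_hat = state['max_exp_avg_sq']
                    torch.maximum(v_hat, exp_avg_sq, out=v_hat)
                    denom = v_hat.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_c1 = 1 - beta1 ** step
                bias_c2 = 1 - beta2 ** step
                p.data.addcdiv_(exp_avg, denom,
                                value=-group['lr'] * (bias_c2 ** 0.5) / bias_c1)
        return loss

Pre-creating the state in __init__ matters: with the 'spawn' start method, only tensors that already exist when the workers receive the optimizer end up shared; state created lazily after the workers start would stay process-local.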
Example #2
                                 map_location=lambda storage, loc: storage)
        shared_model.load_state_dict(saved_state)
    shared_model.share_memory()

    if args.shared_optimizer:
        if args.optimizer == 'RMSprop':
            optimizer = SharedRMSprop(shared_model.parameters(), lr=args.lr)
        elif args.optimizer == 'Adam':
            optimizer = SharedAdam(shared_model.parameters(),
                                   lr=args.lr,
                                   amsgrad=args.amsgrad)
            # separate optimizer for the reward sub-network, with its own lr
            optimizer_r = SharedAdam(shared_model.r_net.parameters(),
                                     lr=args.rl_r,
                                     amsgrad=args.amsgrad)
            # optimizer_r exists only on the Adam path, so share it here to
            # avoid a NameError when RMSprop is selected
            optimizer_r.share_memory()
        optimizer.share_memory()
    else:
        optimizer = None

    processes = []
    counter = mp.Value('i', 0)
    lock = mp.Lock()

    p = mp.Process(target=test,
                   args=(args, shared_model, env_conf, lock, counter))
    p.start()
    processes.append(p)
    # p = mp.Process(target=train_rep, args=(args, shared_model, env_conf))
    # p.start()
    # processes.append(p)
    time.sleep(0.1)
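
The worker functions passed to mp.Process above (run_sim in Example #1; test and the commented-out train_rep here) are defined elsewhere. The sketch below shows the usual A3C-style worker loop that such functions implement; make_model and compute_rollout_loss are assumed placeholder helpers, not names from the original code.

def ensure_shared_grads(local_model, shared_model):
    # Point the shared parameters' .grad at the locally computed gradient
    # tensors once; after that, every backward pass on the local model
    # accumulates straight into the memory the shared optimizer reads.
    for param, shared_param in zip(local_model.parameters(),
                                   shared_model.parameters()):
        if shared_param.grad is not None:
            return
        shared_param._grad = param.grad


def worker(rank, shared_model, optimizer, counter, lock):
    local_model = make_model()  # placeholder: builds the same architecture
    while True:
        # 1. Sync: start the rollout from the latest shared weights.
        local_model.load_state_dict(shared_model.state_dict())
        loss = compute_rollout_loss(local_model)  # placeholder rollout/loss
        # 2. Compute gradients on the local copy.
        optimizer.zero_grad()
        loss.backward()
        # 3. Publish the gradients and take a step on the shared weights.
        ensure_shared_grads(local_model, shared_model)
        optimizer.step()
        with lock:
            counter.value += 1  # the mp.Value('i', 0) counter from the examples

Note that the lock only protects the bookkeeping counter; the parameter updates themselves are lock-free, Hogwild-style, which is exactly what the shared-memory tensors make possible.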
Example #3
    # Get the number of conditions as a hyperparameter.
    # `args` delivers the hyperparameters to `ggm.ggm`.
    args.N_conditions = (len(next(iter(id_to_whole_conditions.values())))
                         + len(next(iter(id_to_scaffold_conditions.values()))))

    # lines for multiprocessing
    mp.set_start_method('spawn')
    torch.manual_seed(1)

    # model
    shared_model = ggm(args)
    shared_model.share_memory()  # torch.nn.Module.share_memory

    # shared optimizer
    shared_optimizer = SharedAdam(shared_model.parameters(), lr=args.lr, amsgrad=True)
    shared_optimizer.share_memory()
    print("Model #Params: %dK" % (sum(x.nelement() for x in shared_model.parameters()) // 1000,))
    
    # initialize model parameters, resuming epoch/cycle from the checkpoint name
    if save_fpath:
        initial_epoch, initial_cycle = [
            int(value) for value in re.findall(r'\d+', os.path.basename(save_fpath))
        ]
        shared_model = utils.initialize_model(shared_model, save_fpath)
    else:
        initial_epoch = initial_cycle = 0
        shared_model = utils.initialize_model(shared_model, False)

    # each cycle processes ncpus * item_per_cycle molecules
    num_cycles = int(len(id_to_smiles) / args.ncpus / args.item_per_cycle)
    print(f"""\
ncpus             : {args.ncpus}
OMP_NUM_THREADS   : {os.environ.get('OMP_NUM_THREADS')}
Number of data    : {len(id_to_smiles)}