Example #1
    start = time.time()
    """Training Loop"""
    rl_model.train()
    # Make a baseline model
    bl_model = Solver(args.num_procs,
                      args.embedding_size,
                      args.hidden_size,
                      args.num_tasks,
                      use_deadline=False,
                      use_cuda=True)
    if args.use_cuda:
        bl_model.cuda()
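    # Initialize the baseline as a frozen copy of the RL policy: same weights, eval mode.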
    bl_model.load_state_dict(rl_model.state_dict())
    bl_model.eval()
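    # Adam with step-wise LR decay: the learning rate is multiplied by 0.9 every args.lr_decay_step scheduler steps.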
    optimizer = optim.Adam(rl_model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                args.lr_decay_step,
                                                gamma=0.9,
                                                last_epoch=-1)
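    # Bookkeeping for the training loop (the names suggest: last baseline score, update count, epochs without an update, best score so far).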
    last_rl_model_sum = -1
    updates = 0
    noupdateinarow = 0
    _max = -1
    for epoch in range(args.num_epochs):
        loss_ = 0
        avg_hit = []
        for batch_idx, (_, sample_batch) in enumerate(train_loader):
            if use_cuda:
                sample_batch = sample_batch.cuda()
            num_samples = sample_batch.shape[0]
Example #2
            _sample, num_proc, None,
            use_deadline)  # No test function is passed to OPA here, but digging into it shows that a DA-style test is used internally.

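    # Evaluate the OPA heuristic on every test sample in parallel over 4 worker processes
    # (wrap is presumably a picklable wrapper around heu.OPA so it can be used with executor.map).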
    with ProcessPoolExecutor(max_workers=4) as executor:
        inputs = []
        res_opa = np.zeros(len(test_dataset), dtype=int).tolist()
        for i, sample in test_dataset:
            #ret = heu.OPA(sample, args.num_procs, heu.test_DA_LC, use_deadline)
            inputs.append((sample, args.num_procs, use_deadline))
        for i, ret in tqdm(enumerate(executor.map(wrap, inputs))):
            res_opa[i] = ret
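        # Assuming each ret is a 0/1 schedulability flag, opares is the number of task sets OPA schedules.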
        opares = np.sum(res_opa)

    print("[before training][OPA generates %d]" % opares)
    model.train()
    optimizer = optim.Adam(model.parameters(), lr=0.5 * args.lr)
    loss_ = 0
    avg_hit = []
    updates = 0
    prev_ = -1
    require_pt = True
    if require_pt:
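        # Warm-start phase: for each training sample, compute a DM-based task ordering
        # (DM presumably stands for Deadline Monotonic) to guide the model before RL training.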
        for batch_idx, (_, sample_batch) in enumerate(train_data_loader):
            guide = []
            as_np = sample_batch.numpy()
            for r in as_np:
                guide.append(
                    np.argsort(
                        py_heu.get_DM_scores(r, args.num_procs, use_deadline)))
            guide = torch.from_numpy(np.array(guide, dtype=np.int64))
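            # guide: int64 tensor of shape (batch, num_tasks) holding each sample's task indices in ascending DM-score order.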
            if use_cuda: