def wrap(x):
    """Pool-friendly adapter: unpack one work item and run OPA on it.

    ``x`` is a ``(sample, num_proc, use_deadline)`` triple.  The
    schedulability-test argument is passed as ``None``, so OPA falls back
    to whatever default test it uses internally.
    """
    task_sample, proc_count, deadline_flag = x
    return heu.OPA(task_sample, proc_count, None, deadline_flag)
# NOTE(review): whitespace-mangled fragment — many statements collapsed onto one
# physical line by a bad paste/extraction, and the text is TRUNCATED mid-call
# (it ends with an unfinished `p = scores_to_priority(`).  Left byte-identical;
# it cannot be safely reformatted from this view without guessing the missing tail.
#
# What the visible code does: for each (num_proc, num_tasks, util) combination it
# loads a pickled dataset from "eval/%d-%d/%s", then for up to 10000 samples tallies
# heuristic schedulability results into res_map: OPA with the DA_LC test
# ('OPA_DA_LC'), and DkC- and DM-scored priority orderings each checked with both
# RTA_LC and DA_LC tests.
#
# NOTE(review): the sample counter `i` is initialized and compared against 10000,
# but no `i += 1` appears in this view — confirm the increment exists in the
# original source, otherwise the 10000-sample cap never triggers.
res_map = defaultdict(lambda: defaultdict(lambda: 0)) for num_tasks in [args.num_task]: if num_proc >= num_tasks: continue util_range = get_util_range(num_proc) for util in util_range: print(num_tasks, util) res = res_map[(num_proc, num_tasks, util)] with open("eval/%d-%d/%s" % (num_proc, num_tasks, util), 'rb') as f: train_dataset = pickle.load(f) i = 0 for x, y in train_dataset: if i == 10000: break opa_da_lc_res_ = cy_heu.OPA(y, num_proc, heu.test_DA_LC, use_deadline) #res['OPA_DA'] += opa_da_res res['OPA_DA_LC'] += opa_da_lc_res_ p = scores_to_priority( heu.get_DkC_scores(y, num_proc, use_deadline=use_deadline)) res['DkC_RTA_LC'] += (heu.test_RTA_LC(y, num_proc, p, use_deadline)) res['DkC_DA_LC'] += (heu.test_DA_LC(y, num_proc, p, use_deadline)) p = scores_to_priority( heu.get_DM_scores(y, num_proc, use_deadline=use_deadline)) res['DM_RTA_LC'] += (heu.test_RTA_LC(y, num_proc, p, use_deadline)) res['DM_DA_LC'] += (heu.test_DA_LC(y, num_proc, p, use_deadline)) p = scores_to_priority(
def wrap(x):
    """Unpack one ``(sample, num_proc, use_deadline)`` work item and run OPA.

    NOTE (translated from the original Korean comment): no schedulability
    test is passed to OPA here, yet digging into the implementation shows
    it ends up using a DA-style test anyway.
    """
    work_sample, n_procs, with_deadline = x
    return heu.OPA(work_sample, n_procs, None, with_deadline)
# NOTE(review): whitespace-mangled fragment — statements collapsed onto one
# physical line, and TRUNCATED mid-loop (ends at `if args.use_cuda:` with the
# body outside this view).  Left byte-identical; cannot be reformatted or
# repaired without the missing tail.
#
# BUG(review): the Solver(...) call is missing a comma between
# `use_deadline=use_deadline` and `nimp=bool(args.model)` — as written this is
# a SyntaxError.  Fix when the full original line is available.
#
# What the visible code does: build the Solver model (optionally moved to CUDA),
# compute an OPA + DA_LC baseline over test_dataset and print its sum, then set
# up Adam with a StepLR scheduler (gamma=0.9 every args.lr_decay_step steps) and
# begin the training epoch/batch loop.
# Calculating heuristics model = Solver(args.num_procs, args.embedding_size, args.hidden_size, args.num_tasks, use_deadline=use_deadline nimp=bool(args.model)) if args.use_cuda: model = model.cuda() res_opa = [] for i, sample in tqdm(test_dataset): ret = heu.OPA(sample, args.num_procs, heu.test_DA_LC, use_deadline) res_opa.append(ret) opares = np.sum(res_opa) print("[before training][OPA generates %d]" % opares) model.train() optimizer = optim.Adam(model.parameters(), lr=args.lr) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_decay_step, gamma=0.9, last_epoch=-1) last_rl_model_sum = -1 updates = 0 noupdateinarow = 0 _max = -1 for epoch in range(args.num_epochs): loss_ = 0 avg_hit = [] for batch_idx, (_, sample_batch) in enumerate(train_data_loader): if args.use_cuda: