    # for i, _batch in eval_loader:
    #     if use_cuda:
    #         _batch = _batch.cuda()
    #     R, log_prob, actions = model(_batch, argmax=True)
    #     for j, chosen in enumerate(actions.cpu().numpy()):
    #         order = np.zeros_like(chosen)
    #         for p in range(args.num_tasks):
    #             order[chosen[p]] = args.num_tasks - p - 1
    #         if use_cuda:
    #             ret.append(test_module(_batch[j].cpu().numpy(), args.num_procs, order, use_deadline, False))
    #         else:
    #             ret.append(test_module(_batch[j].numpy(), args.num_procs, order, use_deadline, False))

    # print("[Before training][RL model generates %d]" % (np.sum(ret)))

    linear_model = LinearSolver(args.num_procs, args.num_tasks,
                                args.use_deadline, use_cuda)

    # TRAIN LOOP
    if use_cuda:
        linear_model = linear_model.to("cuda:0")
        rl_model = rl_model.to("cuda:0")

    linear_model = linear_model.train()
    criterion = nn.MSELoss()
    optimizer = optim.Adam(linear_model.parameters(), lr=5e-3)

    start = time.time()
    for epoch in range(args.num_epochs):
        loss_ = 0
        avg_hit = []
        for batch_idx, (_, sample_batch) in enumerate(train_loader):
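            # The example is truncated here. Given the MSE criterion and the
            # Adam optimizer set up above, a plausible loop body (an assumed
            # sketch, not the author's actual code) regresses the linear
            # model's scores onto priorities derived from the RL policy:
            if use_cuda:
                sample_batch = sample_batch.cuda()
            with torch.no_grad():
                _, _, actions = rl_model(sample_batch, argmax=True)
            target = torch.zeros(actions.size(), dtype=torch.float,
                                 device=sample_batch.device)
            for p in range(args.num_tasks):
                # the task chosen at step p gets priority (num_tasks - p - 1)
                target.scatter_(1, actions[:, p:p + 1],
                                float(args.num_tasks - p - 1))
            scores = linear_model(sample_batch)
            loss = criterion(scores, target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_ += loss.item()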
Example #2
    ret = []
    for i, _batch in eval_loader:
        if use_cuda:
            _batch = _batch.cuda()
        R, log_prob, actions = model(_batch, argmax=True)
        for j, chosen in enumerate(actions.cpu().numpy()):
            order = np.zeros_like(chosen)
            for p in range(args.num_tasks):
                order[chosen[p]] = args.num_tasks - p - 1
            if use_cuda:
                ret.append(
                    test_module(_batch[j].cpu().numpy(), args.num_procs, order,
                                use_deadline, False))
            else:
                ret.append(
                    test_module(_batch[j].numpy(), args.num_procs, order,
                                use_deadline, False))
            rl_order = order

    print("[Before training][RL model generates %d]" % (np.sum(ret)))

    temp_fname = "LIN-p%d-t%d-d%d-l[%s, %s].torchmodel" % (
        args.num_procs, args.num_tasks, int(use_deadline), args.range_l,
        args.range_r)
    try:
        model = torch.load("linearmodels/" + temp_fname).cuda()
    except Exception as e:
        raise AssertionError("Loading Error") from e
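
    # torch.load also accepts map_location, which avoids the hard .cuda()
    # failure on CPU-only machines; a sketch of an equivalent, safer load:
    #
    # model = torch.load("linearmodels/" + temp_fname,
    #                    map_location="cuda:0" if use_cuda else "cpu")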
    linear_model = LinearSolver(args.num_procs, args.num_tasks,
                                args.use_deadline, args.use_cuda)

    linear_model.load_state_dict(model.state_dict())
    linear_model = linear_model.to("cuda:0")

    # EVALUATE
    linear_model.eval()
    lin_ret = []
    for i, _batch in eval_loader:
        if use_cuda:
            _batch = _batch.to("cuda:0")
        ev_linear_score = linear_model(_batch)

        _, ev_linear_score_idx = torch.sort(ev_linear_score, descending=True)
        np_linear_score = ev_linear_score_idx.cpu().detach().numpy()
        for j, chosen in enumerate(np_linear_score):
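            # The example is cut off here; mirroring the RL evaluation loop
            # (the commented-out block in the next example), the body would
            # plausibly convert the descending-sorted indices into priorities
            # and score the resulting order (a sketch, not the original code):
            order = np.zeros_like(chosen)
            for p in range(args.num_tasks):
                order[chosen[p]] = args.num_tasks - p - 1
            if use_cuda:
                lin_ret.append(
                    test_module(_batch[j].cpu().numpy(), args.num_procs, order,
                                use_deadline, False))
            else:
                lin_ret.append(
                    test_module(_batch[j].numpy(), args.num_procs, order,
                                use_deadline, False))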
Example #3
    # for i, _batch in eval_loader:
    #     if use_cuda:
    #         _batch = _batch.cuda()
    #     R, log_prob, actions = model(_batch, argmax=True)
    #     for j, chosen in enumerate(actions.cpu().numpy()):
    #         order = np.zeros_like(chosen)
    #         for p in range(args.num_tasks):
    #             order[chosen[p]] = args.num_tasks - p - 1
    #         if use_cuda:
    #             ret.append(test_module(_batch[j].cpu().numpy(), args.num_procs, order, use_deadline, False))
    #         else:
    #             ret.append(test_module(_batch[j].numpy(), args.num_procs, order, use_deadline, False))
    #
    # print("[Before training][RL model generates %d]" % (np.sum(ret)))

    linear_model = LinearSolver(args.num_procs, args.num_tasks,
                                args.use_deadline, args.use_cuda)

    # linear_name = "LIN-p%d-t%d-d%d-l[%s, %s].torchmodel" % (args.num_procs, args.num_tasks, int(use_deadline), args.range_l, args.range_r)
    # with open("../Pandamodels/linearmodels/" + linear_name, "rb") as f:
    #     tmp = torch.load(f)
    # linear_model.load_state_dict(tmp.state_dict())

    # To train, just uncomment the block below and run it.
    # ## TRAIN LOOP ##
    if args.use_cuda:
        linear_model = linear_model.to("cuda:0")
        rl_model = rl_model.to("cuda:0")

    linear_model = linear_model.train()
    optimizer = optim.Adam(linear_model.parameters(), lr=1e-4)
Example #4
    # (the snippet is truncated above; the loader name below is an assumption)
    test_loader = DataLoader(
        test_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        pin_memory=True
    )
    eval_loader = DataLoader(
        test_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        pin_memory=True
    )
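
    # The "for i, _batch in eval_loader" unpacking used throughout implies the
    # dataset yields (index, tensor) pairs. A minimal Dataset with that shape
    # (a sketch; the real test_dataset construction is not shown here):
    #
    # class TaskSetDataset(torch.utils.data.Dataset):
    #     def __init__(self, tensors):
    #         self.tensors = tensors
    #     def __len__(self):
    #         return len(self.tensors)
    #     def __getitem__(self, idx):
    #         return idx, self.tensors[idx]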

    dist_file_name = "LIN-p%d-t%d-d%d-l[%s, %s]" % (
            args.num_procs, args.num_tasks, int(use_deadline), args.range_l, args.range_r)

    Distilation = LinearSolver(args.num_procs, args.num_tasks,
                               args.use_deadline, False)

    with open("../Pandamodels/linearmodels/" + dist_file_name + ".torchmodel", "rb") as f:
        tmp = torch.load(f)
    Distilation.load_state_dict(tmp.state_dict())
    Distilation.eval()

    start = time.time()
    distil_ret = []
    for i, batch in eval_loader:
        linear_score = Distilation(batch)
        gumbel_score = sample_gumbel(linear_score, sampling_number=5)      # [batch_size x num_gumbel_sample x num_tasks]
        gumbel_rank = get_rank(gumbel_score)        # [batch_size x num_gumbel_sample x num_tasks]
        val = 0
        for j, order in enumerate(gumbel_rank):     # j : ~batch size
            tmp_ret = []
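
    # sample_gumbel and get_rank are not defined in this snippet. Minimal
    # sketches consistent with the shapes noted in the comments above
    # (assumptions, not the project's actual implementations):
    #
    # def sample_gumbel(scores, sampling_number):
    #     # perturb each score with i.i.d. Gumbel(0, 1) noise, drawn
    #     # sampling_number times per task set
    #     noise = -torch.log(-torch.log(
    #         torch.rand(scores.size(0), sampling_number, scores.size(-1))))
    #     return scores.unsqueeze(1) + noise
    #
    # def get_rank(gumbel_score):
    #     # rank of each task under each perturbed score vector (0 = highest)
    #     return torch.argsort(
    #         torch.argsort(gumbel_score, dim=-1, descending=True), dim=-1)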