Code example #1
File: localtrain.py  Project: jsw7460/PANDAAndLinear
            ret.append(test_module(batch[j].numpy(), args.num_procs, order,
                                   args.use_deadline, False))
    print("Before training, global model generates", sum(ret))

    start = time.time()
    """Training Loop"""
    rl_model.train()
    # Make a baseline model (a frozen copy of the current policy)
    bl_model = Solver(args.num_procs,
                      args.embedding_size,
                      args.hidden_size,
                      args.num_tasks,
                      use_deadline=False,
                      use_cuda=True)
    if args.use_cuda:
        bl_model.cuda()
    bl_model.load_state_dict(rl_model.state_dict())
    bl_model.eval()
    optimizer = optim.Adam(rl_model.parameters(), lr=args.lr)
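    # StepLR decays the learning rate by a factor of 0.9 every args.lr_decay_step scheduler steps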
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                args.lr_decay_step,
                                                gamma=0.9,
                                                last_epoch=-1)
    last_rl_model_sum = -1
    updates = 0
    noupdateinarow = 0
    _max = -1
    for epoch in range(args.num_epochs):
        loss_ = 0
        avg_hit = []
        for batch_idx, (_, sample_batch) in enumerate(train_loader):
            if use_cuda:
                sample_batch = sample_batch.cuda()
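
Both snippets set up the same training scaffold: the current policy is copied into a frozen baseline model, and the policy is then optimized with Adam under a StepLR schedule. Below is a minimal, self-contained sketch of that scaffold, assuming a toy nn.Sequential in place of Solver and a placeholder reward in place of the project's scheduling objective; it illustrates the pattern, not the project's code.

import torch
import torch.nn as nn
import torch.optim as optim

torch.manual_seed(0)
policy = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))
baseline = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 1))
baseline.load_state_dict(policy.state_dict())   # copy the current policy weights
baseline.eval()                                 # the baseline itself is never optimized

optimizer = optim.Adam(policy.parameters(), lr=1e-3)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.9)

for epoch in range(5):
    batch = torch.randn(32, 8)                  # stand-in for sample_batch
    reward = policy(batch).squeeze(-1)          # placeholder reward signal
    with torch.no_grad():
        bl = baseline(batch).squeeze(-1)        # baseline estimate, no gradient
    loss = -(reward - bl).mean()                # advantage-style surrogate loss
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    scheduler.step()                            # decay lr by 0.9 every 10 steps
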
Code example #2
File: encoder_rl.py  Project: jsw7460/PANDAAndLinear
                order[chosen[k]] = args.num_tasks - k - 1  # the more important the task, the higher the number
            if use_cuda:
                ret.append(test_module(batch[j].cpu().numpy(), args.num_procs, order, args.use_deadline, False))
            else:
                ret.append(test_module(batch[j].numpy(), args.num_procs, order, args.use_deadline, False))
    print("Before training, global model generates", sum(ret))

    start = time.time()
    """Training Loop"""
    rl_model.train()
    # Make a baseline model
    bl_model = Solver(args.num_procs, args.embedding_size, args.hidden_size,
                      args.num_tasks, use_deadline=False, use_cuda=True, only_encoder=True)
    if args.use_cuda:
        bl_model.cuda()
    bl_model.load_state_dict(rl_model.state_dict(), strict=False)
    bl_model.eval()
    optimizer = optim.Adam(rl_model.parameters(), lr=args.lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_decay_step, gamma=0.9, last_epoch=-1)
    last_rl_model_sum = -1
    updates = 0
    noupdateinarow = 0
    _max = -1
    for epoch in range(args.num_epochs):
        loss_ = 0
        avg_hit = []
        for batch_idx, (_, sample_batch) in enumerate(train_loader):
            if use_cuda:
                sample_batch = sample_batch.cuda()
            num_samples = sample_batch.shape[0]
            optimizer.zero_grad()
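
Compared with localtrain.py, this variant builds the baseline with only_encoder=True and loads the policy weights with strict=False, so only the keys present in both models are copied. The sketch below shows that partial-loading behavior in isolation; Full and EncoderOnly are hypothetical toy modules, not the project's classes.

import torch.nn as nn

class Full(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = nn.Linear(8, 16)   # shared part
        self.decoder = nn.Linear(16, 4)   # extra part, absent below

class EncoderOnly(nn.Module):
    def __init__(self):
        super().__init__()
        self.encoder = nn.Linear(8, 16)   # same shape as Full.encoder

src, dst = Full(), EncoderOnly()
# strict=False copies the matching encoder.* tensors and reports the
# non-matching keys instead of raising a RuntimeError.
missing, unexpected = dst.load_state_dict(src.state_dict(), strict=False)
print(missing)     # []
print(unexpected)  # ['decoder.weight', 'decoder.bias']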