Code example #1
0
    # --- training: one pass over the training set with quantized + noisy weights ---
    # NOTE(review): this fragment runs inside an outer (epoch) loop not visible here;
    # `mnist_mlp`, `trainloader`, `testloader`, `testset`, `optimizer`, `criterian`,
    # `w_bits`, `cells`, `best_acc` and the quantize/noise/store helpers are defined
    # elsewhere in the file.
    running_acc = 0.
    for (img, label) in trainloader:
        # Legacy pre-0.4 PyTorch API: wrap tensors in Variables, move to GPU.
        img = torch.autograd.Variable(img).cuda()
        label = torch.autograd.Variable(label).cuda()
        # Quantize the model weights in place; presumably `save_param` is a
        # full-precision backup and `st` the quantization step — TODO confirm
        # against quantize_weight's definition.
        save_param, st = quantize_weight(mnist_mlp, w_bits, cells)
        # Perturb the quantized weights (simulated device noise) before the forward pass.
        noise(mnist_mlp, st)
        optimizer.zero_grad()
        output = mnist_mlp(img)
        loss = criterian(output, label)
        loss.backward()
        optimizer.step()
        # Restore the saved (full-precision) weights after the optimizer update.
        store_param(mnist_mlp, save_param)
    # --- evaluation: accuracy of the quantized + noisy model on the test set ---
    save_param, st = quantize_weight(mnist_mlp, w_bits, cells)
    for (img, label) in testloader:
        noise(mnist_mlp, st)
        img = torch.autograd.Variable(img).cuda()
        label = torch.autograd.Variable(label).cuda()
        output = mnist_mlp(img)
        _, predict = torch.max(output, 1)
        correct_num = (predict == label).sum()
        # NOTE(review): `.data[0]` is the legacy (<0.4) scalar accessor; modern
        # PyTorch would use `.item()`.
        running_acc += correct_num.data[0]
        # NOTE(review): restoring full-precision weights INSIDE the loop means
        # every batch after the first is evaluated on noisy full-precision
        # weights (quantize_weight ran only once, before the loop) — verify
        # this is intended.
        store_param(mnist_mlp, save_param)
    running_acc /= len(testset)
    print(running_acc)
    if running_acc > best_acc:
        best_acc = running_acc
        # NOTE(review): `save_param` here is whatever quantize_weight returned,
        # which is likely always truthy — confirm this guard does what was meant.
        if save_param:
            print('saving')
            torch.save(mnist_mlp.state_dict(), 'mlp_mnist.t7')
Code example #2
0
def main():
    """Train an attribute-selection policy with REINFORCE.

    The policy (``main_model``, an MLP) maps a random state vector to a
    distribution over attribute values. Sampled attributes drive a Unity
    environment that renders synthetic images; a task model (FCN8s) is trained
    from scratch on each synthetic batch, and its score on a real validation
    set is the reward. A moving baseline reduces the variance of the policy
    gradient.

    NOTE(review): relies on module-level helpers defined elsewhere in the
    project (get_unity_envs, get_arguments, set_seeds, get_validation_dataset,
    get_images_by_attributes, get_training_dataset, train_task_model,
    compute_returns, MLP, FCN8s_sourceonly, VGG16).
    """
    # get unity environment
    env, brain = get_unity_envs()

    # get arguments
    args = get_arguments()
    print(args)

    # set gpu environment
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    cudnn.enabled = True
    cudnn.benchmark = True
    cuda = torch.cuda.is_available()

    # set random seed
    # NOTE(review): the returned value `rn` is never used below — confirm it
    # is safe to ignore.
    rn = set_seeds(args.random_seed, cuda)

    # make directory
    os.makedirs(args.snapshot_dir, exist_ok=True)

    # get validation dataset
    val_set = get_validation_dataset(args)
    print("len of test set: ", len(val_set))
    val_loader = data.DataLoader(val_set,
                                 batch_size=args.real_batch_size,
                                 shuffle=False,
                                 num_workers=args.num_workers,
                                 pin_memory=True)

    # generate training list: 1-based image ids, skipping every 10th
    # (i % 10 == 0), i.e. 90% of the synthetic images.
    with open(args.syn_list_path, "w") as fp:
        for i in range(args.syn_img_num):
            if i % 10 != 0:
                fp.write(str(i + 1) + '\n')

    # get main model (the attribute policy); optionally resume from a checkpoint
    main_model = MLP(args.num_inputs, args.num_outputs, args.hidden_size)
    if args.resume != "":
        main_model.load_state_dict(torch.load(args.resume))

    # get task model — only FCN8s is supported, initialized from pretrained VGG16
    if args.task_model_name == "FCN8s":
        task_model = FCN8s_sourceonly(n_class=args.num_classes)
        vgg16 = VGG16(pretrained=True)
        task_model.copy_params_from_vgg16(vgg16)
    else:
        raise ValueError("Specified model name: FCN8s")

    # save initial task model so each step can retrain it from the same state
    torch.save(task_model.state_dict(),
               os.path.join(args.snapshot_dir, "task_model_init.pth"))

    if cuda:
        main_model = main_model.cuda()
        task_model = task_model.cuda()

    # get optimizer (Adam for the policy, SGD for the task model)
    main_optimizer = optim.Adam(main_model.parameters(), lr=args.main_lr)
    task_optimizer = optim.SGD(task_model.parameters(),
                               lr=args.task_lr,
                               momentum=0.9,
                               weight_decay=1e-4)

    frame_idx = 0
    whole_start_time = time.time()
    while frame_idx < args.max_frames:

        log_probs = []
        rewards = []

        start_time = time.time()

        for i_step in range(1, args.step_each_frame + 1):

            # get initial attribute list: a fresh random state each step
            state = np.random.rand(1, args.num_inputs)
            state = torch.from_numpy(state).float()

            if cuda:
                state = state.cuda()

            # get modified attribute list: sample an action from the policy
            dist = main_model(state)
            action = dist.sample()

            action_actual = action.float() / 10.0  # [0, 0.9]

            # generate images by attribute list
            print("action: " + str(action_actual.cpu().numpy()))
            get_images_by_attributes(args, i_step, env, brain,
                                     action_actual[0].cpu().numpy())

            train_set = get_training_dataset(args, i_step)
            train_loader = data.DataLoader(train_set,
                                           batch_size=args.syn_batch_size,
                                           shuffle=True,
                                           num_workers=args.num_workers,
                                           pin_memory=True)

            # train the task model using synthetic dataset, restarting from
            # the saved initial weights so every step is comparable
            task_model.load_state_dict(
                torch.load(
                    os.path.join(args.snapshot_dir, "task_model_init.pth")))

            # reward = validation performance of the freshly trained task model
            reward = train_task_model(train_loader, val_loader, task_model,
                                      task_optimizer, args, cuda)
            log_prob = dist.log_prob(action)[0]

            log_probs.append(log_prob)
            rewards.append(torch.FloatTensor([reward]))

            frame_idx += 1

            # Seed the moving baseline with the very first reward; afterwards
            # it is carried across outer iterations (updated below).
            if frame_idx == 1:
                moving_start = torch.FloatTensor([reward])

        baseline = compute_returns(rewards, moving_start)
        moving_start = baseline[-1]

        log_probs = torch.cat(log_probs)
        baseline = torch.cat(baseline).detach()
        rewards = torch.cat(rewards).detach()

        # REINFORCE with baseline: maximize E[log_prob * advantage]
        advantage = rewards - baseline
        if cuda:
            advantage = advantage.cuda()

        loss = -(log_probs * advantage.detach()).mean()

        # NOTE(review): `state` and `action` logged here are from the LAST
        # inner-loop step only, while rewards/baseline cover all steps.
        with open(os.path.join(args.snapshot_dir, "logs.txt"), 'a') as fp:
            fp.write(
                "frame idx: {0:4d}, state: {1:s}, action: {2:s}, reward: {3:s}, baseline: {4:s}, loss: {5:.2f} \n"
                .format(frame_idx, str(state.cpu()[0].numpy()),
                        str(action.cpu()[0].numpy()), str(rewards.numpy()),
                        str(baseline.numpy()), loss.item()))

        print("optimize the main model parameters")
        main_optimizer.zero_grad()
        loss.backward()
        main_optimizer.step()

        elapsed_time = time.time() - start_time
        print("[frame: {0:3d}], [loss: {1:.2f}], [time: {2:.1f}]".format(
            frame_idx, loss.item(), elapsed_time))

        # snapshot the policy after every outer iteration
        torch.save(
            main_model.state_dict(),
            os.path.join(args.snapshot_dir, "main_model_%d.pth" % frame_idx))

    elapsed_time = time.time() - whole_start_time
    print("whole time: {0:.1f}".format(elapsed_time))
    env.close()
Code example #3
0
File: run_pytorch.py  Project: PoeWunLee/hlb-datathon
def main(args):
    """Train and evaluate an MLP classifier on the HLB dataset.

    Splits the dataset 80/20 into train/test, trains for a fixed number of
    epochs with a class-weighted cross-entropy loss, decays the learning rate
    by 1% every 50 epochs, then saves the final weights and the loss/accuracy
    curves.

    Args:
        args: parsed CLI namespace; uses ``args.dataset_path[0]`` (input data
            file) and ``args.weights_path[0]`` (output ``.pth`` path).
    """
    input_size = 5
    output_size = 3
    epochs = 100

    # Make the training and testing set to be less bias (80/20 random split).
    hlb_dataset = HLBDataset(args.dataset_path[0])

    hlb_train, hlb_test = random_split(
        hlb_dataset,
        (round(0.8 * len(hlb_dataset)), round(0.2 * len(hlb_dataset))))

    print(args.weights_path[0])
    print(f'Number of training examples: {len(hlb_train)}')
    print(f'Number of testing examples: {len(hlb_test)}')

    trainloader = DataLoader(hlb_train,
                             batch_size=2048,
                             shuffle=True,
                             num_workers=2)
    testloader = DataLoader(hlb_test,
                            batch_size=1024,
                            shuffle=False,
                            num_workers=2)

    save_weights_pth = args.weights_path[0]

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = MLP(input_size=input_size, output_size=output_size)
    print(model)

    model.to(device)

    # Per-class loss weights, presumably to counter class imbalance —
    # NOTE(review): values look hand-tuned; confirm against class counts.
    # Move them to `device` so the loss lives where the model does.
    weights = torch.tensor([0.85, 0.25, 1.0]).to(device)

    # BUG FIX: the original called `.cuda()` unconditionally, which crashes on
    # CPU-only machines even though `device` correctly falls back to "cpu".
    criterion = nn.CrossEntropyLoss(weight=weights).to(device)
    learning_rate = 1e-6
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    train_acc_array = []
    train_loss_array = []
    test_acc_array = []
    test_loss_array = []
    for i in range(epochs):  # loop over the dataset multiple times
        train_loss, train_acc = do_train(model, device, trainloader, criterion,
                                         optimizer)

        print('Epoch {} Train loss: {} Train acc: {}'.format(
            i, train_loss, train_acc))
        train_acc_array.append(train_acc)
        train_loss_array.append(train_loss)

        test_loss, test_acc = do_test(model, device, testloader, criterion)
        test_acc_array.append(test_acc)
        test_loss_array.append(test_loss)

        print('Test loss: {} Test acc: {}'.format(test_loss, test_acc))

        # Decay the learning rate by 1% every 50 epochs.
        # BUG FIX: guard `i > 0` so the rate is not decayed right after epoch 0
        # (the original `i % 50 == 0` also fired at i == 0).
        if i > 0 and i % 50 == 0:
            learning_rate = learning_rate * 0.99
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate

    # Use the path computed above (the original computed `save_weights_pth`
    # but never used it).
    torch.save(model.state_dict(), save_weights_pth)
    save_data(train_loss_array, train_acc_array, test_loss_array,
              test_acc_array)
    plot_graph(train_loss_array, train_acc_array, test_loss_array,
               test_acc_array)