Example #1

# Imports needed by this example. torch is the only external dependency;
# Mytransforms and the per-dataset modules are project-local.
import torch
import torch.utils.data

import Mytransforms
import bbc_data
import lsp_lspet_data
import mpii_data
import ntid_data
import penn_action
import posetrack_data
def getDataloader(dataset, train_dir, val_dir, test_dir, sigma, stride,
                  workers, batch_size):
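    """Build (train, val, test) DataLoaders for the requested pose dataset.

    `sigma` and `stride` parameterise the ground-truth heatmaps (Gaussian
    spread and output downsampling, where the dataset uses them); every
    split is resized to 368x368 inputs. Splits a dataset does not provide
    come back as None.
    """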
    if dataset == 'LSP':
        train_loader = torch.utils.data.DataLoader(lsp_lspet_data.LSP_Data(
            'lspet', train_dir, sigma, stride,
            Mytransforms.Compose([
                Mytransforms.RandomHorizontalFlip(),
            ])),
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   num_workers=workers,
                                                   pin_memory=True)

        val_loader = torch.utils.data.DataLoader(lsp_lspet_data.LSP_Data(
            'lsp', val_dir, sigma, stride,
            Mytransforms.Compose([
                Mytransforms.TestResized(368),
            ])),
                                                 batch_size=1,
                                                 shuffle=True,
                                                 num_workers=1,
                                                 pin_memory=True)

        test_loader = None  # LSP has no separate test split here

    elif dataset == 'MPII':
        train_loader = torch.utils.data.DataLoader(mpii_data.mpii(
            train_dir, sigma, "Train",
            Mytransforms.Compose([
                Mytransforms.TestResized(368),
            ])),
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   num_workers=workers,
                                                   pin_memory=True)

        val_loader = torch.utils.data.DataLoader(mpii_data.mpii(
            val_dir, sigma, "Val",
            Mytransforms.Compose([
                Mytransforms.TestResized(368),
            ])),
                                                 batch_size=1,
                                                 shuffle=True,
                                                 num_workers=1,
                                                 pin_memory=True)

        test_loader = torch.utils.data.DataLoader(mpii_data.mpii(
            test_dir, sigma, "Val",  # note: the test split is loaded in "Val" mode
            Mytransforms.Compose([
                Mytransforms.TestResized(368),
            ])),
                                                  batch_size=1,
                                                  shuffle=True,
                                                  num_workers=1,
                                                  pin_memory=True)


#     elif dataset == 'COCO':
#         train_loader = torch.utils.data.DataLoader(
#                                             coco_data.COCO_Data(True, train_dir, sigma, stride,
#                                             Mytransforms.Compose([Mytransforms.RandomResized(),
#                                             Mytransforms.RandomRotate(40),
#                                             #Mytransforms.RandomCrop(368),
#                                             Mytransforms.SinglePersonCrop(368),
#                                             Mytransforms.RandomHorizontalFlip(),])),
#                                             batch_size  = batch_size, shuffle=True,
#                                             num_workers = workers, pin_memory=True)

#         val_loader   = torch.utils.data.DataLoader(
#                                             coco_data.COCO_Data(False, val_dir, sigma, stride,
#                                             Mytransforms.Compose([Mytransforms.TestResized(368),
#                                             Mytransforms.SinglePersonCrop(368),])),
#                                             batch_size  = 1, shuffle=True,
#                                             num_workers = workers, pin_memory=True)

    elif dataset == 'Penn_Action':
        train_loader = torch.utils.data.DataLoader(penn_action.Penn_Action(
            train_dir, sigma, batch_size, True,  # batch_size is consumed by the dataset itself; the loader below uses batch_size=1
            Mytransforms.Compose([
                Mytransforms.TestResized(368),
            ])),
                                                   batch_size=1,
                                                   shuffle=True,
                                                   num_workers=workers,
                                                   pin_memory=True)

        val_loader = torch.utils.data.DataLoader(penn_action.Penn_Action(
            val_dir, sigma, batch_size, False,
            Mytransforms.Compose([
                Mytransforms.TestResized(368),
            ])),
                                                 batch_size=1,
                                                 shuffle=True,
                                                 num_workers=1,
                                                 pin_memory=True)

        test_loader = None

    elif dataset == 'NTID':
        train_loader = torch.utils.data.DataLoader(ntid_data.NTID(
            train_dir, sigma, "Train",
            Mytransforms.Compose([
                Mytransforms.TestResized(368),
                Mytransforms.RandomHorizontalFlip_NTID(),
            ])),
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   num_workers=workers,
                                                   pin_memory=True)

        val_loader = torch.utils.data.DataLoader(ntid_data.NTID(
            val_dir, sigma, "Val",
            Mytransforms.Compose([
                Mytransforms.TestResized(368),
            ])),
                                                 batch_size=1,
                                                 shuffle=True,
                                                 num_workers=1,
                                                 pin_memory=True)

        test_loader = torch.utils.data.DataLoader(ntid_data.NTID(
            test_dir,
            sigma,
            "Test",
        ),
                                                  batch_size=1,
                                                  shuffle=True,
                                                  num_workers=1,
                                                  pin_memory=True)

    elif dataset == 'PoseTrack':
        train_loader = torch.utils.data.DataLoader(
            posetrack_data.PoseTrack_Data(
                True, train_dir, sigma, stride,
                Mytransforms.Compose([
                    Mytransforms.TestResized(368),
                ])),
            batch_size=batch_size,
            shuffle=True,
            num_workers=workers,
            pin_memory=True)

        val_loader = torch.utils.data.DataLoader(posetrack_data.PoseTrack_Data(
            False, val_dir, sigma, stride,
            Mytransforms.Compose([
                Mytransforms.TestResized(368),
            ])),
                                                 batch_size=1,
                                                 shuffle=True,
                                                 num_workers=1,
                                                 pin_memory=True)

        test_loader = None

    elif dataset == "BBC":
        train_loader = torch.utils.data.DataLoader(bbc_data.BBC(
            train_dir, sigma, "Train",
            Mytransforms.Compose([
                Mytransforms.TestResized(368),
                Mytransforms.RandomHorizontalFlip_NTID(),
            ])),
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   num_workers=workers,
                                                   pin_memory=True)

        val_loader = torch.utils.data.DataLoader(bbc_data.BBC(
            val_dir, sigma, "Val",
            Mytransforms.Compose([
                Mytransforms.TestResized(368),
            ])),
                                                 batch_size=1,
                                                 shuffle=True,
                                                 num_workers=1,
                                                 pin_memory=True)

        test_loader = torch.utils.data.DataLoader(bbc_data.BBC(
            val_dir,  # note: the validation directory is reused for the test split
            sigma,
            "Test",
        ),
                                                  batch_size=1,
                                                  shuffle=True,
                                                  num_workers=1,
                                                  pin_memory=True)

    else:
        raise ValueError('Unknown dataset: {}'.format(dataset))

    return train_loader, val_loader, test_loader
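
For reference, a minimal call sketch. The directory paths and hyperparameter values below are hypothetical placeholders, with sigma and stride set to values typical for CPM-style heatmap training:

train_loader, val_loader, test_loader = getDataloader(
    dataset='MPII', train_dir='data/mpii/train', val_dir='data/mpii/val',
    test_dir='data/mpii/test', sigma=3, stride=8, workers=4, batch_size=16)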
Example #2

# Imports needed by this example. CPNFolder (the dataset), Mytransforms,
# AverageMeter, get_parameters and L2_loss are project-local helpers and are
# assumed importable from the surrounding codebase.
import time

import torch
import torch.backends.cudnn as cudnn
import torch.utils.data
def train_val(model, device, args):
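    """Iteration-driven training loop with periodic validation.

    Trains a CPN-style model (global net + refine net heads) with SGD,
    printing running loss meters every `args.display` iterations and, when
    validation data is configured, evaluating every `args.test_interval`
    iterations until `args.max_iter` is reached.
    """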
    data_root = args.data_root
    train_txt = data_root + args.train_txt
    val_txt = data_root + args.val_txt

    cudnn.benchmark = True

    train_loader = torch.utils.data.DataLoader(CPNFolder(
        train_txt, args.output_shape,
        Mytransforms.Compose([
            Mytransforms.RandomResized(),
            Mytransforms.RandomRotate(40),
            Mytransforms.RandomCrop(320),
            Mytransforms.RandomHorizontalFlip(),
        ])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    if args.test_interval != 0 and args.val_txt is not None:
        val_loader = torch.utils.data.DataLoader(CPNFolder(
            val_txt, args.output_shape,
            Mytransforms.Compose([
                Mytransforms.TestResized(320),
            ])),
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=args.workers,
                                                 pin_memory=True)

    params, multiple = get_parameters(model, args, False)
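    # `multiple` (returned above) presumably carries per-group learning-rate
    # multipliers for the (commented-out) adjust_learning_rate call below.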

    optimizer = torch.optim.SGD(params,
                                args.base_lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    global_losses = [AverageMeter() for i in range(4)]
    refine_losses = AverageMeter()

    end = time.time()
    iters = args.start_iters
    learning_rate = args.base_lr

    model.train()

    while iters < args.max_iter:

        for i, (input, label15, label11, label9, label7,
                valid) in enumerate(train_loader):

            #learning_rate = adjust_learning_rate(optimizer, iters, args.base_lr, policy=args.lr_policy, policy_parameter=args.policy_parameter, multiple=multiple)
            data_time.update(time.time() - end)

            input = input.to(device)
            label15 = label15.to(device)
            label11 = label11.to(device)
            label9 = label9.to(device)
            label7 = label7.to(device)
            valid = valid.to(device)

            labels = [label15, label11, label9, label7]

            global_out, refine_out = model(input)

            global_loss, refine_loss = L2_loss(global_out, refine_out, labels,
                                               valid, args.top_k,
                                               args.batch_size,
                                               args.num_points)

            loss = 0.0

            for i, global_loss1 in enumerate(global_loss):
                loss += global_loss1
                global_losses[i].update(global_loss1.item(), input.size(0))

            loss += refine_loss
            losses.update(loss.item(), input.size(0))
            refine_losses.update(refine_loss.item(), input.size(0))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            batch_time.update(time.time() - end)
            end = time.time()

            iters += 1
            if iters % args.display == 0:
                print(
                    'Train Iteration: {0}\t'
                    'Time {batch_time.sum:.3f}s / {1}iters, ({batch_time.avg:.3f})\t'
                    'Data load {data_time.sum:.3f}s / {1}iters, ({data_time.avg:.3f})\n'
                    'Learning rate = {2}\n'
                    'Loss = {loss.val:.8f} (ave = {loss.avg:.8f})\n'.format(
                        iters,
                        args.display,
                        learning_rate,
                        batch_time=batch_time,
                        data_time=data_time,
                        loss=losses))
                for cnt in range(0, 4):
                    print(
                        'Global Net Loss{0} = {loss1.val:.8f} (ave = {loss1.avg:.8f})'
                        .format(cnt + 1, loss1=global_losses[cnt]))
                print(
                    'Refine Net Loss = {loss1.val:.8f} (ave = {loss1.avg:.8f})'
                    .format(loss1=refine_losses))

                print(
                    time.strftime(
                        '%Y-%m-%d %H:%M:%S -----------------------------------------------------------------------------------------------------------------\n',
                        time.localtime()))

                batch_time.reset()
                data_time.reset()
                losses.reset()
                for cnt in range(4):
                    global_losses[cnt].reset()
                refine_losses.reset()

            if args.test_interval != 0 and args.val_txt is not None and iters % args.test_interval == 0:

                model.eval()
                with torch.no_grad():  # no gradients needed for validation
                    for j, (input, label15, label11, label9, label7,
                            valid) in enumerate(val_loader):

                        input = input.to(device)
                        label15 = label15.to(device)
                        label11 = label11.to(device)
                        label9 = label9.to(device)
                        label7 = label7.to(device)
                        valid = valid.to(device)

                        labels = [label15, label11, label9, label7]

                        global_out, refine_out = model(input)

                        global_loss, refine_loss = L2_loss(
                            global_out, refine_out, labels, valid,
                            args.top_k, args.batch_size, args.num_points)

                        loss = 0.0

                        for i, global_loss1 in enumerate(global_loss):
                            loss += global_loss1
                            global_losses[i].update(global_loss1.item(),
                                                    input.size(0))

                        loss += refine_loss
                        losses.update(loss.item(), input.size(0))
                        refine_losses.update(refine_loss.item(),
                                             input.size(0))

                batch_time.update(time.time() - end)
                end = time.time()
                # save_checkpoint({
                #     'iter': iters,
                #     'state_dict': model.state_dict(),
                #     }, 'cpn_fashion')

                print(
                    'Test Time {batch_time.sum:.3f}s, ({batch_time.avg:.3f})\t'
                    'Loss {loss.avg:.8f}\n'.format(batch_time=batch_time,
                                                   loss=losses))
                for cnt in range(0, 4):
                    print(
                        'Global Net Loss{0} = {loss1.val:.8f} (ave = {loss1.avg:.8f})'
                        .format(cnt + 1, loss1=global_losses[cnt]))
                print(
                    'Refine Net Loss = {loss1.val:.8f} (ave = {loss1.avg:.8f})'
                    .format(loss1=refine_losses))
                print(
                    time.strftime(
                        '%Y-%m-%d %H:%M:%S -----------------------------------------------------------------------------------------------------------------\n',
                        time.localtime()))

                batch_time.reset()
                losses.reset()
                for cnt in range(4):
                    global_losses[cnt].reset()
                refine_losses.reset()

                model.train()

            if iters == args.max_iter:
                break
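
A minimal invocation sketch. Every value below is a hypothetical placeholder, and build_cpn_model stands in for the project's actual model constructor:

from argparse import Namespace

args = Namespace(
    data_root='data/', train_txt='train.txt', val_txt='val.txt',
    output_shape=(80, 80), batch_size=16, workers=4,
    base_lr=5e-4, momentum=0.9, weight_decay=1e-5,
    start_iters=0, max_iter=90000, display=100, test_interval=1000,
    top_k=8, num_points=15)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = build_cpn_model().to(device)  # hypothetical constructor
train_val(model, device, args)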