Example 1
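All of the snippets below omit their imports and the precomputed normalization tables (e.g. train_upscaled_static_mean). A minimal, assumed preamble is sketched here; apart from datasets.cvtransforms, which Example 12 imports explicitly, the module paths for ImageFolderDCT, the enhance helpers, and the mean/std constants are guesses and depend on the actual repository layout.

import os
import os.path as osp

import torch
import torch.utils.data

import datasets.cvtransforms as transforms   # DCT-aware transform ops used throughout
# Assumed locations; adjust to the actual package layout:
# from datasets.imagefolder_dct import ImageFolderDCT
# from datasets import enhance
# from datasets.stats import train_upscaled_static_mean, train_upscaled_static_std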
def trainloader_upscaled_static(args, model='mobilenet'):
    traindir = os.path.join(args.data, 'train')

    if model == 'mobilenet':
        input_size1 = 1024
        input_size2 = 896
    elif model == 'resnet':
        input_size1 = 512
        input_size2 = 448
    else:
        raise NotImplementedError
    if int(args.subset) == 0 or int(args.subset) == 192:
        transform = transforms.Compose([
            enhance.random_crop(),
            enhance.horizontal_flip(),
            enhance.vertical_flip(),
            enhance.random_rotation(),
            enhance.tocv2(),
            transforms.Resize(input_size1),
            transforms.CenterCrop(input_size2),
            transforms.Upscale(upscale_factor=2),
            transforms.TransformUpscaledDCT(),
            transforms.ToTensorDCT(),
            transforms.Aggregate(),
            transforms.NormalizeDCT(
                train_upscaled_static_mean,
                train_upscaled_static_std,
            )
        ])
    else:
        transform = transforms.Compose([
            enhance.random_crop(),
            enhance.horizontal_flip(),
            enhance.vertical_flip(),
            enhance.random_rotation(),
            enhance.tocv2(),
            transforms.Resize(input_size1),
            transforms.CenterCrop(input_size2),
            transforms.Upscale(upscale_factor=2),
            transforms.TransformUpscaledDCT(),
            transforms.ToTensorDCT(),
            transforms.SubsetDCT(channels=args.subset),
            transforms.Aggregate(),
            transforms.NormalizeDCT(train_upscaled_static_mean,
                                    train_upscaled_static_std,
                                    channels=args.subset)
        ])
    dset = ImageFolderDCT(traindir, transform, backend='pil')
    train_loader = torch.utils.data.DataLoader(dset,
                                               batch_size=args.train_batch,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    return train_loader, len(dset), dset.get_clsnum()
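A minimal usage sketch for the loader above, assuming an argparse-style namespace that carries the fields the function reads (data, subset, train_batch, workers); the path and values are placeholders:

import argparse

args = argparse.Namespace(data='/path/to/imagenet', subset='24',
                          train_batch=32, workers=4)
train_loader, num_images, num_classes = trainloader_upscaled_static(args, model='resnet')

for inputs, targets in train_loader:
    # inputs holds the selected DCT channels, targets the class indices
    break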
Example 2
def valloader_upscaled_dct_direct(args, model='mobilenet'):
    if model == 'mobilenet':
        input_size1 = 128
        input_size2 = 112
    elif model == 'resnet':
        input_size1 = 64
        input_size2 = 56
    else:
        raise NotImplementedError

    valdir = os.path.join(args.data, 'val')
    transform = transforms.Compose([
        transforms.UpsampleCbCr(),
        transforms.SubsetDCT2(channels=args.subset, pattern=args.pattern),
        transforms.Aggregate2(),
        transforms.Resize(input_size1),
        transforms.CenterCrop(input_size2),
        transforms.ToTensorDCT2(),
        transforms.NormalizeDCT(
            train_upscaled_static_dct_direct_mean_interp,
            train_upscaled_static_dct_direct_std_interp,
            channels=args.subset,
            pattern=args.pattern
        )
    ])
    val_loader = torch.utils.data.DataLoader(
        ImageFolderDCT(valdir, transform, backend='dct'),
        batch_size=args.test_batch, shuffle=False,
        num_workers=args.workers, pin_memory=True
    )

    return val_loader
Example 3
def trainloader_dct_resized(args):
    traindir = os.path.join(args.data, 'train')
    train_dataset = ImageFolderDCT(traindir, transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.TransformDCT(),  # 28x28x192
        transforms.DCTFlatten2D(),
        transforms.UpsampleDCT(upscale_ratio_h=4, upscale_ratio_w=4, debug=False),
        transforms.ToTensorDCT(),
        transforms.SubsetDCT(channels=args.subset),
        transforms.Aggregate(),
        transforms.NormalizeDCT(
            train_dct_subset_mean,
            train_dct_subset_std,
            channels=args.subset
        )
    ]))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.train_batch, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    train_loader_len = len(train_loader)

    return train_loader, train_sampler, train_loader_len
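trainloader_dct_resized returns the sampler so that distributed runs can reshuffle per epoch. A short sketch of how the returned objects are typically consumed (num_epochs is a placeholder):

train_loader, train_sampler, train_loader_len = trainloader_dct_resized(args)
for epoch in range(num_epochs):
    if train_sampler is not None:
        train_sampler.set_epoch(epoch)  # makes DistributedSampler reshuffle each epoch
    for inputs, targets in train_loader:
        pass  # forward/backward pass goes here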
Example 4
def trainloader_dct_subset(args):
    traindir = os.path.join(args.data, 'train')
    train_dataset = ImageFolderDCT(traindir, transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.TransformDCT(),
        transforms.ToTensorDCT(),
        transforms.SubsetDCT(args.subset_channels),
        transforms.NormalizeDCT(
            train_y_mean, train_y_std,
            train_cb_mean, train_cb_std,
            train_cr_mean, train_cr_std),
    ]))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.train_batch, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    train_loader_len = len(train_loader)

    return train_loader, train_sampler, train_loader_len
Example 5
def valloader_upscaled_static(args, model='mobilenet'):
    valdir = os.path.join(args.data, 'val')

    if model == 'mobilenet':
        input_size1 = 1024
        input_size2 = 896
    elif model == 'resnet':
        input_size1 = 512
        input_size2 = 448
    else:
        raise NotImplementedError

    transform = transforms.Compose([
            transforms.Resize(input_size1),
            transforms.CenterCrop(input_size2),
            transforms.Upscale(upscale_factor=2),
            transforms.TransformUpscaledDCT(),
            transforms.ToTensorDCT(),
            transforms.SubsetDCT(channels=args.subset, pattern=args.pattern),
            transforms.Aggregate(),
            transforms.NormalizeDCT(
                train_upscaled_static_mean,
                train_upscaled_static_std,
                channels=args.subset,
                pattern=args.pattern
            )
        ])

    val_loader = torch.utils.data.DataLoader(
        ImageFolderDCT(valdir, transform),
        batch_size=args.test_batch, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    return val_loader
Example 6
def get_mean_and_std_dct_resized(dataset, model='mobilenet'):
    '''Compute the per-channel mean and std of the dataset.'''

    # Dataset = ImageFolderDCT(dataset, transforms.Compose([
    #     transforms.TransformDCT(),  # 28x28x192
    #     transforms.DCTFlatten2D(),
    #     transforms.UpsampleDCT(upscale_ratio_h=4, upscale_ratio_w=4, debug=False),
    #     transforms.ToTensorDCT(),
    #     transforms.Average()
    # ]), aggregate=True)

    # Dataset = ImageFolderDCT(dataset, transforms.Compose([
    #     transforms.Upscale(upscale_factor=2),
    #     transforms.TransformUpscaledDCT(),
    #     transforms.ToTensorDCT(),
    #     transforms.Aggregate(),
    #     transforms.Average()
    # ]))
    if model == 'mobilenet':
        input_size = 896
        batchsize = 256
    elif model == 'resnet':
        input_size = 448
        batchsize = 128
    else:
        raise NotImplementedError

    Dataset = ImageFolderDCT(dataset, transforms.Compose([
        transforms.DCTFlatten2D(),
        transforms.UpsampleDCT(size_threshold=input_size, T=input_size, debug=False),
        # transforms.UpsampleDCT(size_threshold=112 * 8, T=112 * 8, debug=False),
        transforms.Aggregate2(),
        # transforms.RandomResizedCropDCT(112),
        # transforms.RandomHorizontalFlip(),
        transforms.ToTensorDCT2(),

        transforms.Average()
    ]), backend='dct')

    dataloader = torch.utils.data.DataLoader(Dataset, batch_size=batchsize, pin_memory=True, shuffle=False, num_workers=16)

    mean, std = torch.zeros(192), torch.zeros(192)
    print('==> Computing mean and std..')

    # end = time.time()
    for i, (inputs, targets) in enumerate(dataloader):
        # print('data time: {}'.format(time.time()-end))
        print('{}/{}'.format(i, len(dataloader)))

        mean += inputs.mean(dim=0)
        std += inputs.std(dim=0)
        # end = time.time()

    mean.div_(i+1)
    std.div_(i+1)

    return mean, std
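The loop above averages per-batch statistics and divides by the number of batches, which is only exact when every batch has the same size, and for the std it averages batch-level deviations rather than computing a dataset-level deviation. A hedged alternative that accumulates sums and squared sums instead (assuming, as above, that transforms.Average() yields one 192-dimensional vector per image):

def exact_channel_stats(dataloader, num_channels=192):
    total = torch.zeros(num_channels)
    total_sq = torch.zeros(num_channels)
    count = 0
    for inputs, _ in dataloader:
        # inputs: (batch, 192) after transforms.Average()
        total += inputs.sum(dim=0)
        total_sq += (inputs ** 2).sum(dim=0)
        count += inputs.size(0)
    mean = total / count
    # population std; clamp guards against tiny negative values from rounding
    std = (total_sq / count - mean ** 2).clamp(min=0).sqrt()
    return mean, std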
Example 7
def folder2lmdb(dpath, name="train", write_frequency=1):
    directory = osp.expanduser(osp.join(dpath, name))
    print("Loading dataset from %s" % directory)

    dataset = ImageFolderDCT(directory,
                             transforms.Compose([
                                 transforms.DCTFlatten2D(),
                                 transforms.UpsampleDCT(upscale_ratio_h=4,
                                                        upscale_ratio_w=4,
                                                        debug=False),
                                 transforms.ToTensorDCT(),
                                 transforms.SubsetDCT(channels=32),
                             ]),
                             backend='dct')

    data_loader = torch.utils.data.DataLoader(
        dataset,
        num_workers=0,
    )

    lmdb_path = osp.join(dpath, "%s.lmdb" % name)
    isdir = os.path.isdir(lmdb_path)

    print("Generate LMDB to %s" % lmdb_path)
    db = lmdb.open(
        lmdb_path,
        subdir=isdir,
        map_size=1281167 * 224 * 224 * 32 * 10,
        readonly=False,
        # map_size=1099511627776 * 2, readonly=False,
        meminit=False,
        map_async=True)

    txn = db.begin(write=True)
    for idx, (image, label) in enumerate(data_loader):
        image = image.numpy()
        label = label.numpy()
        txn.put(u'{}'.format(idx).encode('ascii'),
                dumps_pyarrow((bz2.compress(image), label)))
        if idx % write_frequency == 0:
            print("[%d/%d]" % (idx, len(data_loader)))
            txn.commit()
            txn = db.begin(write=True)

    # finish iterating through dataset
    txn.commit()
    keys = [u'{}'.format(k).encode('ascii') for k in range(idx + 1)]
    with db.begin(write=True) as txn:
        txn.put(b'__keys__', dumps_pyarrow(keys))
        txn.put(b'__len__', dumps_pyarrow(len(keys)))

    print("Flushing database ...")
    db.sync()
    db.close()
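Entries written by folder2lmdb can be read back by reversing the serialization and compression steps. A minimal sketch, assuming a loads_pyarrow helper that is the inverse of dumps_pyarrow (not shown in the original) and that the stored arrays were float32:

import bz2
import lmdb
import numpy as np

def read_lmdb_sample(lmdb_path, index=0):
    env = lmdb.open(lmdb_path, subdir=os.path.isdir(lmdb_path),
                    readonly=True, lock=False, readahead=False, meminit=False)
    with env.begin(write=False) as txn:
        payload = txn.get(u'{}'.format(index).encode('ascii'))
    compressed_image, label = loads_pyarrow(payload)  # hypothetical inverse of dumps_pyarrow
    image = np.frombuffer(bz2.decompress(compressed_image), dtype=np.float32)
    return image, label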
Example 8
def valloader_dct(args):
    valdir = os.path.join(args.data, 'val')

    val_loader = torch.utils.data.DataLoader(
        ImageFolderDCT(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.TransformDCT(),
            transforms.ToTensorDCT(),
            transforms.NormalizeDCT(
                train_y_mean, train_y_std,
                train_cb_mean, train_cb_std,
                train_cr_mean, train_cr_std),
        ])),
        batch_size=args.test_batch, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    return val_loader
Example 9
def trainloader_upscaled_static(args, model='mobilenet'):
    traindir = os.path.join(args.data, 'train')

    if model == 'mobilenet':
        input_size = 896
    elif model == 'resnet':
        input_size = 448
    else:
        raise NotImplementedError

    transform = transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.Upscale(upscale_factor=2),
        transforms.TransformUpscaledDCT(),
        transforms.ToTensorDCT(),
        transforms.SubsetDCT(channels=args.subset, pattern=args.pattern),
        transforms.Aggregate(),
        transforms.NormalizeDCT(
            train_upscaled_static_mean,
            train_upscaled_static_std,
            channels=args.subset,
            pattern=args.pattern
        )
    ])

    train_dataset = ImageFolderDCT(traindir, transform)

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.train_batch, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    train_loader_len = len(train_loader)

    return train_loader, train_sampler, train_loader_len
Example 10
def trainloader_upscaled_dct_direct(args, model='mobilenet'):
    if model == 'mobilenet':
        input_size = 112
    elif model == 'resnet':
        input_size = 56
    else:
        raise NotImplementedError

    traindir = os.path.join(args.data, 'train')
    transform = transforms.Compose([
        transforms.UpsampleCbCr(),
        transforms.SubsetDCT2(channels=args.subset, pattern=args.pattern),
        transforms.RandomResizedCropDCT(size=input_size),
        transforms.Aggregate2(),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensorDCT2(),
        transforms.NormalizeDCT(
            train_upscaled_static_dct_direct_mean_interp,
            train_upscaled_static_dct_direct_std_interp,
            channels=args.subset,
            pattern=args.pattern
        )
    ])

    train_dataset = ImageFolderDCT(traindir, transform, backend='dct')

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.train_batch, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    train_loader_len = len(train_loader)

    return train_loader, train_sampler, train_loader_len
Example 11
def valloader_dct_resized(args):
    valdir = os.path.join(args.data, 'val')

    val_loader = torch.utils.data.DataLoader(
        ImageFolderDCT(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.TransformDCT(),  # 28x28x192
            transforms.DCTFlatten2D(),
            transforms.UpsampleDCT(upscale_ratio_h=4, upscale_ratio_w=4, debug=False),
            transforms.ToTensorDCT(),
            transforms.SubsetDCT(channels=args.subset),
            transforms.Aggregate(),
            transforms.NormalizeDCT(
                train_dct_subset_mean,
                train_dct_subset_std,
                channels=args.subset
            )
        ])),
        batch_size=args.test_batch, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    return val_loader
Example 12
def get_mean_and_std_dct(dataset, sublist=None):
    '''Compute the per-channel mean and std of the dataset.'''
    import datasets.cvtransforms as transforms
    # jpeg_encoder = TurboJPEG('/home/kai.x/work/local/lib/libturbojpeg.so')

    # Dataset = ImageFolderDCT(dataset, transforms.Compose([
    #     transforms.RandomResizedCrop(224),
    #     # transforms.RandomHorizontalFlip(),
    #     transforms.TransformDCT(),
    #     transforms.UpsampleDCT(896),
    #     transforms.ToTensorDCT()
    #     # transforms.RandomResizedCrop(256),
    #     # transforms.RandomHorizontalFlip(),
    #     # transforms.TransformDCT(),
    #     # transforms.UpsampleDCT(256, 256),
    #     # transforms.CenterCropDCT(112),
    #     # transforms.ToTensorDCT()
    # ]))

    # Dataset = ImageFolderDCT(dataset, transforms.Compose([
    #     transforms.Upscale(),
    #     transforms.TransformDCT(),
    #     transforms.ToTensorDCT(),
    # ]))

    Dataset = ImageFolderDCT(dataset, transforms.Compose([
        transforms.DCTFlatten2D(),
        transforms.UpsampleDCT(upscale_ratio_h=4, upscale_ratio_w=4, debug=False),
        transforms.ToTensorDCT(),
        transforms.Average()
    ]), backend='dct')

    dataloader = torch.utils.data.DataLoader(Dataset, batch_size=1, pin_memory=True, shuffle=False, num_workers=0)

    mean_y, mean_cb, mean_cr = torch.zeros(64), torch.zeros(64), torch.zeros(64)
    std_y, std_cb, std_cr = torch.zeros(64), torch.zeros(64), torch.zeros(64)
    print('==> Computing mean and std..')

    end = time.time()
    if sublist is None:
        for i, (inputs_y, inputs_cb, inputs_cr, targets) in enumerate(dataloader):
            print('data time: {}'.format(time.time()-end))
            print('{}/{}'.format(i, len(dataloader)))

            mean_y += inputs_y.mean(dim=0)
            std_y += inputs_y.std(dim=0)
            mean_cb += inputs_cb.mean(dim=0)
            std_cb += inputs_cb.std(dim=0)
            mean_cr += inputs_cr.mean(dim=0)
            std_cr += inputs_cr.std(dim=0)
            end = time.time()

        mean_y.div_(i+1)
        std_y.div_(i+1)
        mean_cb.div_(i+1)
        std_cb.div_(i+1)
        mean_cr.div_(i+1)
        std_cr.div_(i+1)
    else:
        dataloader_iterator = iter(dataloader)
        for i in range(sublist):
            try:
                inputs_y, inputs_cb, inputs_cr, targets = next(dataloader_iterator)
            except StopIteration:
                # the loader ran out of batches before reaching `sublist`; stop early
                break

            print('{}/{}'.format(i, sublist))
            for c in range(64):
                mean_y[c] += inputs_y[:, c, :, :].mean()
                std_y[c] += inputs_y[:, c, :, :].std()
                mean_cb[c] += inputs_cb[:, c, :, :].mean()
                std_cb[c] += inputs_cb[:, c, :, :].std()
                mean_cr[c] += inputs_cr[:, c, :, :].mean()
                std_cr[c] += inputs_cr[:, c, :, :].std()
        mean_y.div_(sublist)
        std_y.div_(sublist)
        mean_cb.div_(sublist)
        std_cb.div_(sublist)
        mean_cr.div_(sublist)
        std_cr.div_(sublist)
    return mean_y, std_y, mean_cb, std_cb, mean_cr, std_cr
Example 13
    transform7 = transforms.Compose([
        transforms.UpsampleCbCr(),
        transforms.SubsetDCT2(channels=64),
        transforms.RandomResizedCropDCT(size=448),
        transforms.Aggregate2(),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensorDCT2(),
        transforms.NormalizeDCT(
            train_upscaled_static_dct_direct_mean_interp,
            train_upscaled_static_dct_direct_std_interp,
            channels=64,
        )
    ])
    # train_dataset = ImageFolderDCT('/ILSVRC2012/train', transform1, backend='opencv')
    # train_dataset = ImageFolderDCT('/ILSVRC2012/train', transform2, backend='dct')
    train_dataset = ImageFolderDCT('/ILSVRC2012/train', transform7, backend='dct')

    train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=16, shuffle=(train_sampler is None),
        num_workers=1, pin_memory=True, sampler=train_sampler)

    for i, data in enumerate(train_loader):
        print(data)