Code Example #1
    def get_data_loader(data_dir, batch_size, num_workers):
        normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        jitter_param = 0.4
        lighting_param = 0.1
        input_size = opt.input_size  # `opt` is assumed to hold the script's parsed command-line options

        def batch_fn(batch, ctx):
            data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
            label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
            return data, label

        transform_train = transforms.Compose([
            transforms.RandomResizedCrop(input_size),
            transforms.RandomFlipLeftRight(),
            transforms.RandomColorJitter(brightness=jitter_param, contrast=jitter_param,
                                        saturation=jitter_param),
            transforms.RandomLighting(lighting_param),
            transforms.ToTensor(),
            normalize
        ])
        transform_test = transforms.Compose([
            transforms.Resize(256, keep_ratio=True),
            transforms.CenterCrop(input_size),
            transforms.ToTensor(),
            normalize
        ])

        train_data = gluon.data.DataLoader(
            imagenet.classification.ImageNet(data_dir, train=True).transform_first(transform_train),
            batch_size=batch_size, shuffle=True, last_batch='discard', num_workers=num_workers)
        val_data = gluon.data.DataLoader(
            imagenet.classification.ImageNet(data_dir, train=False).transform_first(transform_test),
            batch_size=batch_size, shuffle=False, num_workers=num_workers)

        return train_data, val_data, batch_fn
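
A minimal consumption sketch for the loaders above (the single-CPU context list and the dummy ArrayDataset are stand-ins for real GPU contexts and the transformed ImageNet dataset; the batch_fn here mirrors the one defined above so the sketch runs on its own):

import mxnet as mx
from mxnet import gluon

ctx = [mx.cpu()]  # typically [mx.gpu(i) for i in range(num_gpus)]

def batch_fn(batch, ctx):
    # same splitting logic as in get_data_loader above
    data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
    label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
    return data, label

# dummy dataset standing in for the transformed ImageNet dataset
dummy = gluon.data.ArrayDataset(mx.nd.zeros((8, 3, 224, 224)), mx.nd.zeros((8,)))
loader = gluon.data.DataLoader(dummy, batch_size=4)

for batch in loader:
    data, label = batch_fn(batch, ctx)
    # data and label are lists with one NDArray slice per context
    print(len(data), data[0].shape)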
Code Example #2
 def __init__(self,
              name=None,
              train_path=None,
              val_path=None,
              input_size=224,
              crop_ratio=0.875,
              jitter_param=0.4,
              **kwargs):
     self.name = name
     self.train_path = train_path
     self.val_path = val_path
     self.input_size = input_size
     resize = int(math.ceil(input_size / crop_ratio))
     self.transform_train = transforms.Compose([
         transforms.Resize(resize),
         transforms.RandomResizedCrop(input_size),
         transforms.RandomFlipLeftRight(),
         transforms.RandomColorJitter(brightness=jitter_param,
                                      contrast=jitter_param,
                                      saturation=jitter_param),
         transforms.RandomLighting(0.1),
         transforms.ToTensor(),
         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
     ])
     self.transform_val = transforms.Compose([
         transforms.Resize(resize),
         transforms.CenterCrop(input_size),
         transforms.ToTensor(),
         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
     ])
     self._read_dataset(**kwargs)
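
As a quick check of the crop-ratio arithmetic used above (this is just the formula from the snippet evaluated at its defaults): with input_size=224 and crop_ratio=0.875 the resize value comes out to 256, the standard resize-then-crop recipe for ImageNet-style models.

import math

input_size, crop_ratio = 224, 0.875
resize = int(math.ceil(input_size / crop_ratio))
print(resize)  # 256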
Code Example #3
def get_train_data(rec_train, batch_size, data_nthreads, input_size,
                   crop_ratio, args):
    def train_batch_fn(batch, ctx):
        data = batch[0].as_in_context(ctx)
        label = batch[1].as_in_context(ctx)
        return data, label

    jitter_param = 0.4
    lighting_param = 0.1
    resize = int(math.ceil(input_size / crop_ratio))

    train_transforms = []
    if args.auto_aug:
        print('Using AutoAugment')
        from autogluon.utils.augment import AugmentationBlock, autoaug_imagenet_policies
        train_transforms.append(AugmentationBlock(autoaug_imagenet_policies()))

    from PIL import Image  # needed for Image.BICUBIC below
    from gluoncv.utils.transforms import EfficientNetRandomCrop
    from autogluon.utils import pil_transforms

    if input_size >= 320:
        train_transforms.extend([
            EfficientNetRandomCrop(input_size),
            pil_transforms.Resize((input_size, input_size),
                                  interpolation=Image.BICUBIC),
            pil_transforms.RandomHorizontalFlip(),
            pil_transforms.ColorJitter(brightness=0.4,
                                       contrast=0.4,
                                       saturation=0.4),
            transforms.RandomLighting(lighting_param),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
    else:
        train_transforms.extend([
            transforms.RandomResizedCrop(input_size),
            transforms.RandomFlipLeftRight(),
            transforms.RandomColorJitter(brightness=jitter_param,
                                         contrast=jitter_param,
                                         saturation=jitter_param),
            transforms.RandomLighting(lighting_param),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

    transform_train = transforms.Compose(train_transforms)

    train_set = mx.gluon.data.vision.ImageRecordDataset(
        rec_train).transform_first(transform_train)
    # `num_workers` and `rank` are assumed to be module-level globals holding the
    # number of distributed workers and this worker's rank.
    train_sampler = SplitSampler(len(train_set),
                                 num_parts=num_workers,
                                 part_index=rank)

    train_data = gluon.data.DataLoader(
        train_set,
        batch_size=batch_size,  # shuffle=True,
        last_batch='discard',
        num_workers=data_nthreads,
        sampler=train_sampler)
    return train_data, train_batch_fn
Code Example #4
File: kaggle_CIFAR10.py Project: wk738126046/ML
def enhanceDataFuc():
    transform_train = vision.transforms.Compose([
        # transforms.CenterCrop(32)
        # transforms.RandomFlipTopBottom(),
        # transforms.RandomColorJitter(brightness=0.0, contrast=0.0, saturation=0.0, hue=0.0),
        # transforms.RandomLighting(0.0),
        # transforms.Cast('float32'),
        # transforms.Resize(32),
        # random crop with the given scale and aspect-ratio ranges
        transforms.RandomResizedCrop(32,
                                     scale=(0.08, 1.0),
                                     ratio=(3.0 / 4.0, 4.0 / 3.0)),
        # random horizontal (left-right) flip
        transforms.RandomFlipLeftRight(),
        # convert an image NDArray (H x W x C, 0-255) to a float tensor (C x H x W) in [0, 1]
        transforms.ToTensor(),
        # normalize with CIFAR-10 per-channel means and standard deviations
        transforms.Normalize([0.4914, 0.4822, 0.4465],
                             [0.2023, 0.1994, 0.2010]),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.4914, 0.4822, 0.4465],
                             [0.2023, 0.1994, 0.2010])
    ])
    return transform_train, transform_test
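
The function above only builds the transform objects. A hedged sketch of attaching them to Gluon's built-in CIFAR-10 dataset follows; the dataset choice and loader settings are illustrative assumptions, not part of the original snippet:

from mxnet import gluon

transform_train, transform_test = enhanceDataFuc()

# apply the transforms to the image (first element) of each (image, label) pair
train_set = gluon.data.vision.CIFAR10(train=True).transform_first(transform_train)
test_set = gluon.data.vision.CIFAR10(train=False).transform_first(transform_test)

train_loader = gluon.data.DataLoader(train_set, batch_size=128, shuffle=True)
test_loader = gluon.data.DataLoader(test_set, batch_size=128, shuffle=False)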
Code Example #5
def load_img_batch(dataset, batch_size, type):
    if type == 'train':
        transform = img_transforms.Compose([
            # randomly crop a region covering 0.08x to 1x of the original area,
            # with aspect ratio between 3/4 and 4/3, then resize it to 224 x 224 pixels
            img_transforms.RandomResizedCrop(224, scale=(0.08, 1.0), ratio=(3.0/4.0, 4.0/3.0)),
            # random horizontal (left-right) flip
            img_transforms.RandomFlipLeftRight(),
            # randomly jitter brightness, contrast and saturation
            img_transforms.RandomColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
            # random lighting (PCA) noise
            img_transforms.RandomLighting(0.1),
            img_transforms.ToTensor(),
            # standardize each image channel
            img_transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
        return gdata.DataLoader(dataset.transform_first(transform),
                                batch_size=batch_size,
                                shuffle=True,
                                last_batch='keep')
    elif type == 'test':
        transform = img_transforms.Compose([
            img_transforms.Resize(256),
            img_transforms.CenterCrop(224),
            img_transforms.ToTensor(),
            img_transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
        return gdata.DataLoader(dataset.transform_first(transform),
                                batch_size=batch_size,
                                shuffle=False,
                                last_batch='keep')
Code Example #6
def imagenet_train_transform(ds_metainfo,
                             mean_rgb=(0.485, 0.456, 0.406),
                             std_rgb=(0.229, 0.224, 0.225),
                             jitter_param=0.4,
                             lighting_param=0.1):
    input_image_size = ds_metainfo.input_image_size
    if ds_metainfo.aug_type == "default":
        interpolation = 1  # bilinear (MXNet/OpenCV interpolation code)
    elif ds_metainfo.aug_type == "ext1":
        interpolation = 10  # 10 = pick an interpolation method at random
    elif ds_metainfo.aug_type == "ext2":
        interpolation = 10
    else:
        raise RuntimeError("Unknown augmentation type: {}\n".format(
            ds_metainfo.aug_type))

    return transforms.Compose([
        transforms.RandomResizedCrop(size=input_image_size,
                                     interpolation=interpolation),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(brightness=jitter_param,
                                     contrast=jitter_param,
                                     saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean_rgb, std=std_rgb)
    ])
Code Example #7
File: utils.py Project: djndl1/CSNotes
def get_folder_data(train_path, val_path, data_shape, batch_size, num_workers=os.cpu_count()):
    train_dataset = ImageFolderDataset(train_path)
    val_dataset = ImageFolderDataset(val_path)

    train_transformer = gluon.data.vision.transforms.Compose([
        transforms.RandomFlipLeftRight(),
        transforms.RandomResizedCrop(data_shape, scale=(0.5, 1.0)),
        transforms.RandomBrightness(0.5),
        transforms.RandomHue(0.1),
        transforms.Resize(data_shape),
        transforms.ToTensor()
    ])
    val_transformer = gluon.data.vision.transforms.Compose([
        transforms.Resize(data_shape),
        transforms.ToTensor()
    ])

    train_dataloader = data.DataLoader(train_dataset.transform_first(train_transformer),
                                       batch_size=batch_size, shuffle=True, last_batch='rollover',
                                       num_workers=num_workers)
    val_dataloader = data.DataLoader(val_dataset.transform_first(val_transformer),
                                     batch_size=batch_size, shuffle=True, last_batch='rollover',
                                     num_workers=num_workers)

    return train_dataloader, val_dataloader
Code Example #8
def test_of_trans():
    transformer = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
        transforms.RandomLighting(0.1),
        # transforms.ToTensor(),
        # transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    ])

    up_folder = os.path.abspath(os.path.join(ROOT_DIR, '..'))
    img_folder = os.path.join(up_folder, 'data_set', 'XX-ImageLabel', 'train_data_416')
    img_file = os.path.join(DATA_DIR, "t_img_tags_train.txt")  # data categories
    img_saved = os.path.join(img_file + ".tp.npz")

    td = TripletDataset(data_folder=img_folder, data_file=img_file,
                        saved_path=img_saved, transform=transformer)
    # td = TripletDataset(data_folder=img_folder, data_file=img_file, saved_path=img_saved)

    train_data = DataLoader(td, batch_size=4, shuffle=True)

    for count, data in enumerate(train_data):
        print('OK')
        imgs, labels = data[0], data[1]
        print(imgs.shape, labels.shape)
        if count == 0:
            plt.subplot(131)
            plt.imshow(imgs[0][0].asnumpy())
            plt.subplot(132)
            plt.imshow(imgs[0][1].asnumpy())
            plt.subplot(133)
            plt.imshow(imgs[0][2].asnumpy())
            plt.show()
            break
Code Example #9
def get_train_data_source(dataset_dir,
                          batch_size,
                          num_workers,
                          input_image_size=(224, 224)):
    jitter_param = 0.4
    lighting_param = 0.1

    mean_rgb = (0.485, 0.456, 0.406)
    std_rgb = (0.229, 0.224, 0.225)

    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(input_image_size),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(brightness=jitter_param,
                                     contrast=jitter_param,
                                     saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean_rgb, std=std_rgb)
    ])

    dataset = CUB200_2011(root=dataset_dir,
                          train=True).transform_first(fn=transform_train)

    # num_training_samples = len(dataset)
    return gluon.data.DataLoader(dataset=dataset,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 last_batch='discard',
                                 num_workers=num_workers)
Code Example #10
File: training_sda.py Project: xzhou33/d-SNE
    def create_transformer(self):
        train_tforms, eval_tforms = [transforms.Resize(self.args.resize)
                                     ], [transforms.Resize(self.args.resize)]

        if self.args.random_crop:
            train_tforms.append(
                transforms.RandomResizedCrop(self.args.size, scale=(0.8, 1.2)))
        else:
            train_tforms.append(transforms.CenterCrop(self.args.size))

        eval_tforms.append(transforms.CenterCrop(self.args.size))

        if self.args.flip:
            train_tforms.append(transforms.RandomFlipLeftRight())

        if self.args.random_color:
            train_tforms.append(
                transforms.RandomColorJitter(self.args.color_jitter,
                                             self.args.color_jitter,
                                             self.args.color_jitter, 0.1))

        train_tforms.extend([
            transforms.ToTensor(),
            transforms.Normalize(self.args.mean, self.args.std)
        ])
        eval_tforms.extend([
            transforms.ToTensor(),
            transforms.Normalize(self.args.mean, self.args.std)
        ])

        train_tforms = transforms.Compose(train_tforms)
        eval_tforms = transforms.Compose(eval_tforms)

        return train_tforms, eval_tforms
Code Example #11
def get_data_loader(data_dir, batch_size, num_workers):
    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
    jitter_param = 0.4
    lighting_param = 0.1

    def batch_fn(batch, ctx):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch[1],
                                           ctx_list=ctx,
                                           batch_axis=0)
        return data, label

    if opt.mode == 'symbolic':
        train_data = mx.io.NDArrayIter(
            mx.nd.random.normal(shape=(opt.dataset_size, 3, 224, 224)),
            label=mx.nd.array(range(opt.dataset_size)),
            batch_size=batch_size,
        )

        # val_data = mx.io.NDArrayIter(
        #     mx.nd.random.normal(shape=(opt.dataset_size, 3, 224, 224)),
        #     label=mx.nd.array(range(opt.dataset_size)),
        #     batch_size=batch_size,
        # )
    else:
        transform_train = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomFlipLeftRight(),
            transforms.RandomColorJitter(brightness=jitter_param,
                                         contrast=jitter_param,
                                         saturation=jitter_param),
            transforms.RandomLighting(lighting_param),
            transforms.ToTensor(), normalize
        ])
        # transform_test = transforms.Compose([
        #     transforms.Resize(256, keep_ratio=True),
        #     transforms.CenterCrop(224),
        #     transforms.ToTensor(),
        #     normalize
        # ])

        train_data = gluon.data.DataLoader(imagenet.classification.ImageNet(
            data_dir, train=True).transform_first(transform_train),
                                           batch_size=batch_size,
                                           shuffle=True,
                                           last_batch='discard',
                                           num_workers=num_workers)
        # val_data = gluon.data.DataLoader(
        #     imagenet.classification.ImageNet(data_dir, train=False).transform_first(transform_test),
        #     batch_size=batch_size, shuffle=False, num_workers=num_workers)

    if 'sync' in opt.kvstore:
        raise ValueError(
            "Need to resize iterator for distributed training to not hang at the end"
        )

    # return train_data, val_data, batch_fn
    return train_data, batch_fn
Code Example #12
def imagenet_train_transform(ds_metainfo,
                             mean_rgb=(0.485, 0.456, 0.406),
                             std_rgb=(0.229, 0.224, 0.225),
                             jitter_param=0.4,
                             lighting_param=0.1):
    """
    Create image transform sequence for training subset.

    Parameters
    ----------
    ds_metainfo : DatasetMetaInfo
        ImageNet-1K dataset metainfo.
    mean_rgb : tuple of 3 float
        Mean of RGB channels in the dataset.
    std_rgb : tuple of 3 float
        STD of RGB channels in the dataset.
    jitter_param : float
        How much to jitter the color values.
    lighting_param : float
        Strength of the lighting (PCA) noise added to the image.

    Returns
    -------
    Sequential
        Image transform sequence.
    """
    input_image_size = ds_metainfo.input_image_size
    if ds_metainfo.aug_type == "aug0":
        interpolation = 1  # bilinear (MXNet/OpenCV interpolation code)
        transform_list = []
    elif ds_metainfo.aug_type == "aug1":
        interpolation = 10  # 10 = pick an interpolation method at random
        transform_list = []
    elif ds_metainfo.aug_type == "aug2":
        interpolation = 10
        transform_list = [
            ImgAugTransform()
        ]
    else:
        raise RuntimeError("Unknown augmentation type: {}\n".format(ds_metainfo.aug_type))

    transform_list += [
        transforms.RandomResizedCrop(
            size=input_image_size,
            interpolation=interpolation),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(
            brightness=jitter_param,
            contrast=jitter_param,
            saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=mean_rgb,
            std=std_rgb)
    ]

    return transforms.Compose(transform_list)
Code Example #13
def get_transforms(name):
    if name == 'drml':
        train_transform = T.Compose([
            T.RandomResizedCrop(170, (0.9, 1), (1, 1)),
            T.RandomFlipLeftRight(),
            T.ToTensor(),
            T.Normalize(0.5, 0.5)
        ])
        eval_transform = T.Compose(
            [T.Resize(170), T.ToTensor(),
             T.Normalize(0.5, 0.5)])
    elif name in ['r50', 'mobileface', 'dpn68', 'd121']:
        train_transform = T.Compose([T.RandomFlipLeftRight(), Transpose()])
        eval_transform = T.Compose([Transpose()])
    elif name == 'vggface2':
        train_transform = T.Compose([
            T.RandomResizedCrop(224, (0.9, 1), (1, 1)),
            T.RandomFlipLeftRight(),
            Transpose(),
            TransposeChannels(),
            T.Normalize((91.4953, 103.8827, 131.0912), (1., 1., 1.))
        ])
        eval_transform = T.Compose([
            T.Resize(224),
            Transpose(),
            TransposeChannels(),
            T.Normalize((91.4953, 103.8827, 131.0912), (1., 1., 1.))
        ])
    elif name == 'inceptionv3':
        train_transform = T.Compose([
            T.RandomResizedCrop(299, (0.9, 1), (1, 1)),
            T.RandomFlipLeftRight(),
            T.ToTensor(),
            T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])
        eval_transform = T.Compose([
            T.Resize(299),
            T.ToTensor(),
            T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])
    else:
        raise ValueError("Invalid Network Input")
    return train_transform, eval_transform
Code Example #14
    def transform(self):
        transform_list = []
        transform_list.append(transforms.Resize(286, Image.BICUBIC))
        transform_list.append(transforms.RandomResizedCrop(256))

        transform_list += [
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]
        return transforms.Compose(transform_list)
Code Example #15
def preprocess_train_data(normalize, jitter_param, lighting_param):
    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(brightness=jitter_param,
                                     contrast=jitter_param,
                                     saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(), normalize
    ])
    return transform_train
Code Example #16
def transform_cifar10_dataset_train():
    """
    Should create a transformation that performs both random horizontal flip and random crop
    
    :return: A gluon transform object
    :rtype: gluon.Block
    """
    transforms_train = transforms.Compose([
        transforms.RandomFlipLeftRight(),
        transforms.RandomResizedCrop(16),
    ])
    return transforms_train
Code Example #17
File: transforms.py Project: kbrodt/imet-2019-fgvc6
def get_train_transform(resize, crop, scale, mean, std):
    return transforms.Compose([
        #  transforms.Resize(resize),
        #  transforms.CenterCrop(crop),
        transforms.RandomResizedCrop(crop, scale=scale),  # no ratio
        transforms.RandomFlipLeftRight(),
        #  transforms.RandomColorJitter(brightness=lighting_param,
        #                               contrast=lighting_param,
        #                               saturation=lighting_param),
        #  transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
Code Example #18
File: utils.py Project: guanlongtianzi/imgclsmob
def get_data_loader(data_dir,
                    batch_size,
                    num_workers,
                    input_image_size=(224, 224),
                    resize_inv_factor=0.875):
    assert (resize_inv_factor > 0.0)
    if isinstance(input_image_size, int):
        input_image_size = (input_image_size, input_image_size)
    normalize = transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                     std=(0.229, 0.224, 0.225))
    jitter_param = 0.4
    lighting_param = 0.1
    resize_value = int(
        math.ceil(float(input_image_size[0]) / resize_inv_factor))

    def batch_fn(batch, ctx):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
        label = gluon.utils.split_and_load(batch[1],
                                           ctx_list=ctx,
                                           batch_axis=0)
        return data, label

    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(input_image_size),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(brightness=jitter_param,
                                     contrast=jitter_param,
                                     saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(), normalize
    ])
    transform_test = transforms.Compose([
        transforms.Resize(resize_value, keep_ratio=True),
        transforms.CenterCrop(input_image_size),
        transforms.ToTensor(), normalize
    ])

    train_data = gluon.data.DataLoader(ImageNet(
        data_dir, train=True).transform_first(transform_train),
                                       batch_size=batch_size,
                                       shuffle=True,
                                       last_batch='discard',
                                       num_workers=num_workers)
    val_data = gluon.data.DataLoader(ImageNet(
        data_dir, train=False).transform_first(transform_test),
                                     batch_size=batch_size,
                                     shuffle=False,
                                     num_workers=num_workers)

    return train_data, val_data, batch_fn
Code Example #19
def test_random_transforms():
    from mxnet.gluon.data.vision import transforms

    tmp_t = transforms.Compose([transforms.Resize(300), transforms.RandomResizedCrop(224)])
    transform = transforms.Compose([transforms.RandomApply(tmp_t, 0.5)])

    img = mx.nd.ones((10, 10, 3), dtype='uint8')
    iteration = 1000
    num_apply = 0
    for _ in range(iteration):
        out = transform(img)
        if out.shape[0] == 224:
            num_apply += 1
    assert_almost_equal(num_apply/float(iteration), 0.5, 0.1)
Code Example #20
def imagenet_train_transform(input_image_size=(224, 224),
                             mean_rgb=(0.485, 0.456, 0.406),
                             std_rgb=(0.229, 0.224, 0.225),
                             jitter_param=0.4,
                             lighting_param=0.1):
    return transforms.Compose([
        transforms.RandomResizedCrop(input_image_size),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(brightness=jitter_param,
                                     contrast=jitter_param,
                                     saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean_rgb, std=std_rgb)
    ])
Code Example #21
File: utils.py Project: kaixinbaba/gluon-cv
def get_data_loader(data_dir,
                    batch_size,
                    num_workers,
                    input_size,
                    crop_ratio,
                    train_dataset=None,
                    val_dataset=None):
    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
    jitter_param = 0.4
    lighting_param = 0.1
    input_size = input_size
    crop_ratio = crop_ratio if crop_ratio > 0 else 0.875
    resize = int(math.ceil(input_size / crop_ratio))

    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(brightness=jitter_param,
                                     contrast=jitter_param,
                                     saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(), normalize
    ])
    transform_test = transforms.Compose([
        transforms.Resize(resize, keep_ratio=True),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(), normalize
    ])

    if not train_dataset:
        train_dataset = imagenet.classification.ImageNet(data_dir, train=True)
    if not val_dataset:
        val_dataset = imagenet.classification.ImageNet(data_dir, train=False)

    train_data = gluon.data.DataLoader(
        train_dataset.transform_first(transform_train),
        batch_size=batch_size,
        shuffle=True,
        last_batch='discard',
        num_workers=num_workers)
    val_data = gluon.data.DataLoader(
        val_dataset.transform_first(transform_test),
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers)

    # `loader_batch_fn` is assumed to be defined at module level; it splits a batch
    # across contexts like `batch_fn` in Code Example #1.
    return train_data, val_data, loader_batch_fn
Code Example #22
def generate_transform(train, resize, _is_osx, input_size, jitter_param):
    if _is_osx:
        # using PIL to load image (slow)
        if train:
            transform = Compose(
                [
                    RandomResizedCrop(input_size),
                    RandomHorizontalFlip(),
                    ColorJitter(0.4, 0.4, 0.4),
                    ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ]
            )
        else:
            transform = Compose(
                [
                    Resize(resize),
                    CenterCrop(input_size),
                    ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ]
            )
    else:
        if train:
            transform = transforms.Compose(
                [
                    transforms.RandomResizedCrop(input_size),
                    transforms.RandomFlipLeftRight(),
                    transforms.RandomColorJitter(
                        brightness=jitter_param,
                        contrast=jitter_param,
                        saturation=jitter_param
                    ),
                    transforms.RandomLighting(0.1),
                    transforms.ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ]
            )
        else:
            transform = transforms.Compose(
                [
                    transforms.Resize(resize),
                    transforms.CenterCrop(input_size),
                    transforms.ToTensor(),
                    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
                ]
            )
    return transform
Code Example #23
def train_and_test_model(train_path, valid_path, test_path, n_frame,
                         batch_size, lr, save_path, n_epoch):
    # dataset
    train_dataset = DVPickleDataset(train_path, n_frame, 11)
    valid_dataset = DVPickleDataset(valid_path, n_frame, 11, False)
    transform = transforms.RandomResizedCrop([128, 128],
                                             scale=(0.7, 1.0),
                                             ratio=(0.7, 1.4))
    # test_dataset = DVPickleDataset(test_path, n_frame, False)
    train_data = gluon.data.DataLoader(
        train_dataset.transform_first(transform),
        batch_size=batch_size,
        shuffle=True,
        num_workers=4,
        last_batch="keep")
    valid_data = gluon.data.DataLoader(valid_dataset,
                                       batch_size=batch_size,
                                       shuffle=False,
                                       num_workers=4,
                                       last_batch="keep")
    # test_data = gluon.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, last_batch="keep")

    # model
    net = plain_network()
    net.initialize(init=init.Xavier(), ctx=mx.gpu())
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(), "adam", {
        'learning_rate': lr,
        "wd": 0.001
    })
    model = Model(net,
                  load_path=None,
                  save_path=save_path,
                  loss=loss,
                  trainer=trainer,
                  ctx=mx.gpu())

    # training
    print("Start training...")
    best_train, best_loss = model.train(train_data, valid_data, batch_size,
                                        n_epoch, acc)
    print("Training finished, best loss=%.3f" % (best_loss))

    # testing
    #print("Start testing...")
    #test_loss, test_acc = model.test(test_data, acc)
    #print("Testing finished, loss=%.3f, acc=%.3f" % (test_loss, test_acc))
    return
Code Example #24
File: tools.py Project: ilpech/gryaz
def get_data_raw(dataset_path, batch_size, num_workers):
    train_path = os.path.join(dataset_path, 'train')
    val_path = os.path.join(dataset_path, 'val')
    test_path = os.path.join(dataset_path, 'test')
    normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                     [0.229, 0.224, 0.225])
    jitter_param = 0.4
    lighting_param = 0.1
    input_size = 224
    crop_ratio = 0.875
    resize = int(math.ceil(input_size / crop_ratio))

    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(brightness=jitter_param,
                                     contrast=jitter_param,
                                     saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(), normalize
    ])

    transform_test = transforms.Compose([
        transforms.Resize(resize, keep_ratio=True),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(), normalize
    ])

    train_data = gluon.data.DataLoader(gluon.data.vision.ImageFolderDataset(
        train_path).transform_first(transform_train),
                                       batch_size=batch_size,
                                       shuffle=True,
                                       num_workers=num_workers)

    val_data = gluon.data.DataLoader(gluon.data.vision.ImageFolderDataset(
        val_path).transform_first(transform_test),
                                     batch_size=batch_size,
                                     shuffle=False,
                                     num_workers=num_workers)

    test_data = gluon.data.DataLoader(gluon.data.vision.ImageFolderDataset(
        test_path).transform_first(transform_test),
                                      batch_size=batch_size,
                                      shuffle=False,
                                      num_workers=num_workers)

    return train_data, val_data, test_data
Code Example #25
    def create_loader(self):
        """
        Create the data loader
        :return:
        """
        if self.args.mode.upper() == 'TRAIN':
            tforms = []
            tforms.append(transforms.Resize(self.args.resize))

            if self.args.flip:
                tforms.append(transforms.RandomFlipLeftRight())

            if self.args.random_crop:
                tforms.append(
                    transforms.RandomResizedCrop(self.args.im_size,
                                                 scale=(0.8, 1)))
            else:
                tforms.append(transforms.CenterCrop(self.args.im_size))

            if self.args.random_jitter:
                tforms.append(transforms.RandomColorJitter(0.4, 0.4, 0.4, 0.4))

            tforms.append(transforms.ToTensor())
            tforms.append(
                transforms.Normalize((0.485, 0.456, 0.406),
                                     (0.229, 0.224, 0.225)))

            tforms = transforms.Compose(tforms)

            tr_db = list(self.cfg['train'].values())[0]

            dataset = ImageRecordDataset(tr_db['rec'], transform=tforms)

            self.tr_loader = DataLoader(dataset,
                                        batch_size=self.args.bs,
                                        num_workers=8,
                                        pin_memory=True)

        else:
            tforms = transforms.Compose([
                transforms.Resize(self.args.resize),
                transforms.CenterCrop(self.args.im_size),
                transforms.ToTensor(),
                transforms.Normalize((0.485, 0.456, 0.406),
                                     (0.229, 0.224, 0.225))
            ])
            self.eval_tforms = tforms
Code Example #26
def load_images(images_file_path,
                batch_size,
                resize_size=256,
                is_train=True,
                crop_size=224,
                is_cen=False,
                sampler=None,
                pseudo_labels=None):
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    if not is_train:
        transformer = transforms.Compose([
            transforms.Resize(resize_size),
            transforms.CenterCrop(crop_size),
            transforms.ToTensor(), normalize
        ])
        shuffle = False
        last_batch = 'keep'
    else:
        if is_cen:
            transformer = transforms.Compose([
                transforms.Resize(resize_size),
                transforms.CenterCrop(crop_size),
                transforms.RandomFlipLeftRight(),
                transforms.ToTensor(), normalize
            ])
        else:
            transformer = transforms.Compose([
                transforms.Resize(resize_size),
                transforms.RandomResizedCrop(crop_size, scale=(0.8, 1.0)),
                transforms.RandomFlipLeftRight(),
                transforms.ToTensor(), normalize
            ])
        shuffle = False if sampler is not None else True
        last_batch = 'keep'

    imageset = ImageFolderDataset(images_file_path,
                                  pseudo_labels=pseudo_labels)
    data_loader = DataLoader(dataset=imageset.transform_first(transformer),
                             shuffle=shuffle,
                             batch_size=batch_size,
                             last_batch=last_batch,
                             sampler=sampler,
                             num_workers=0)

    return data_loader
Code Example #27
File: ml_trainer.py Project: SpikeKing/XX-ImageLabel
    def get_val_data(self, batch_size):
        """
        获取验证数据,数据扩充
        """
        transform_val = transforms.Compose([
            transforms.Resize(256),
            transforms.RandomResizedCrop(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])

        td = MultilabelDataset(data_folder=self.val_folder,
                               data_file=self.val_file,
                               transform=transform_val)
        val_data = DataLoader(td, batch_size=batch_size, shuffle=True)

        return val_data, len(td)
Code Example #28
File: ml_trainer.py Project: SpikeKing/XX-ImageLabel
    def get_tl_val_data(self, batch_size):
        """
        获取TripletLoss验证数据, 一组3个, 数据扩充
        """
        transform_val = transforms.Compose([
            transforms.Resize(256),
            transforms.RandomResizedCrop(224),
            transforms.ToTensor(),
            transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
        ])

        td = TripletDataset(data_folder=self.val_folder,
                            data_file=self.val_file,
                            transform=transform_val,
                            saved_path=True)
        val_data = DataLoader(dataset=td, batch_size=batch_size, shuffle=True)

        return val_data, len(td)
Code Example #29
def test_transformer():
    from mxnet.gluon.data.vision import transforms

    transform = transforms.Compose([
        transforms.Resize(300),
        transforms.CenterCrop(256),
        transforms.RandomResizedCrop(224),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(0.1, 0.1, 0.1, 0.1),
        transforms.RandomBrightness(0.1),
        transforms.RandomContrast(0.1),
        transforms.RandomSaturation(0.1),
        transforms.RandomHue(0.1),
        transforms.RandomLighting(0.1),
        transforms.ToTensor(),
        transforms.Normalize([0, 0, 0], [1, 1, 1])])

    transform(mx.nd.ones((245, 480, 3), dtype='uint8')).wait_to_read()
Code Example #30
File: imagenet1k.py Project: yangkang779/imgclsmob
def get_train_data_loader(data_dir, batch_size, num_workers, input_image_size,
                          mean_rgb, std_rgb, jitter_param, lighting_param):
    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(input_image_size),
        transforms.RandomFlipLeftRight(),
        transforms.RandomColorJitter(brightness=jitter_param,
                                     contrast=jitter_param,
                                     saturation=jitter_param),
        transforms.RandomLighting(lighting_param),
        transforms.ToTensor(),
        transforms.Normalize(mean=mean_rgb, std=std_rgb)
    ])
    return gluon.data.DataLoader(dataset=ImageNet(
        root=data_dir, train=True).transform_first(fn=transform_train),
                                 batch_size=batch_size,
                                 shuffle=True,
                                 last_batch='discard',
                                 num_workers=num_workers)