Example No. 1
def get_data(train_or_test):
    isTrain = train_or_test == 'train'
    pp_mean = dataset.SVHNDigit.get_per_pixel_mean()
    if isTrain:
        d1 = dataset.SVHNDigit('train')
        d2 = dataset.SVHNDigit('extra')
        ds = RandomMixData([d1, d2])
    else:
        ds = dataset.SVHNDigit('test')

    if isTrain:
        augmentors = [
            imgaug.CenterPaste((40, 40)),
            imgaug.RandomCrop((32, 32)),
            # imgaug.Flip(horiz=True),  # disabled: mirroring digits would change their meaning
            imgaug.Brightness(10),
            imgaug.Contrast((0.8, 1.2)),
            imgaug.GaussianDeform(  # this is slow
                [(0.2, 0.2), (0.2, 0.8), (0.8, 0.8), (0.8, 0.2)], (32, 32),
                0.2, 3),
            imgaug.MapImage(lambda x: x - pp_mean),
        ]
    else:
        augmentors = [imgaug.MapImage(lambda x: x - pp_mean)]
    ds = AugmentImageComponent(ds, augmentors)
    ds = BatchData(ds, 128, remainder=not isTrain)
    if isTrain:
        ds = PrefetchData(ds, 5, 5)
    return ds
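
The returned object follows tensorpack's DataFlow protocol, so it can be iterated directly; below is a minimal sanity-check sketch, assuming this tensorpack version's reset_state()/get_data() interface and the get_data function above.

# Hypothetical check: pull one batch from the training dataflow.
ds = get_data('train')
ds.reset_state()                        # must be called once before iterating
for images, labels in ds.get_data():    # BatchData yields [images, labels]
    print(images.shape, labels.shape)   # expected: (128, 32, 32, 3) (128,)
    break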
Example No. 2
def get_config():
    #anchors = np.mgrid[0:4,0:4][:,1:,1:].transpose(1,2,0).reshape((-1,2)) / 4.0
    # prepare dataset
    d1 = dataset.SVHNDigit('train')
    d2 = dataset.SVHNDigit('extra')
    train = RandomMixData([d1, d2])
    test = dataset.SVHNDigit('test')

    augmentors = [
        imgaug.Resize((40, 40)),
        imgaug.Brightness(30),
        imgaug.Contrast((0.5, 1.5)),
        imgaug.GaussianDeform(  # this is slow
            [(0.2, 0.2), (0.2, 0.8), (0.8, 0.8), (0.8, 0.2)], (40, 40), 0.2,
            3),
    ]
    train = AugmentImageComponent(train, augmentors)
    train = BatchData(train, 128)
    nr_proc = 5
    train = PrefetchData(train, 5, nr_proc)
    step_per_epoch = train.size()

    augmentors = [
        imgaug.Resize((40, 40)),
    ]
    test = AugmentImageComponent(test, augmentors)
    test = BatchData(test, 128, remainder=True)

    sess_config = get_default_sess_config(0.8)

    lr = tf.train.exponential_decay(learning_rate=1e-3,
                                    global_step=get_global_step_var(),
                                    decay_steps=train.size() * 60,
                                    decay_rate=0.2,
                                    staircase=True,
                                    name='learning_rate')
    tf.scalar_summary('learning_rate', lr)

    return TrainConfig(
        dataset=train,
        optimizer=tf.train.AdamOptimizer(lr),
        callbacks=Callbacks([
            StatPrinter(),
            ModelSaver(),
            InferenceRunner(test,
                            [ScalarStats('cost'),
                             ClassificationError()])
        ]),
        session_config=sess_config,
        model=Model(),
        step_per_epoch=step_per_epoch,
        max_epoch=350,
    )
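
In the old tensorpack API this config is handed straight to a trainer; a minimal launch sketch, assuming QueueInputTrainer from the same tensorpack release (it is not part of the snippet above).

# Hypothetical launch script for the config above.
if __name__ == '__main__':
    config = get_config()
    # config.session_init = SaverRestore('/path/to/checkpoint')  # optional: resume from a checkpoint
    QueueInputTrainer(config).train()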
Example No. 3
def get_inat_augmented_data(subset,
                            options,
                            lmdb_dir=None,
                            year='2018',
                            do_multiprocess=True,
                            do_validation=False,
                            is_train=None,
                            shuffle=None,
                            n_allow=None):
    input_size = options.input_size if options.input_size else 224
    isTrain = is_train if is_train is not None else (subset == 'train'
                                                     and do_multiprocess)
    shuffle = shuffle if shuffle is not None else isTrain
    postfix = "" if n_allow is None else "_allow_{}".format(n_allow)

    # TODO: parameterize which cv split is considered
    # Currently hardcoded to 1
    cv = 1

    # When do_validation is True it will expect *cv_train and *cv_val lmdbs
    # Currently the cv_train split is always used
    if isTrain:
        postfix += '_cv_train_{}'.format(cv)
    elif do_validation:
        subset = 'train'
        postfix += '_cv_val_{}'.format(cv)

    if lmdb_dir is None:
        lmdb_path = os.path.join(options.data_dir, 'inat_lmdb',
                                 'inat2018_{}{}.lmdb'.format(subset, postfix))
    else:
        lmdb_path = os.path.join(
            options.data_dir, lmdb_dir,
            'inat{}_{}{}.lmdb'.format(year, subset, postfix))

    ds = LMDBData(lmdb_path, shuffle=False)
    if shuffle:
        ds = LocallyShuffleData(ds, 1024 * 80)  # shuffle buffer holds roughly 64G~80G of images in memory
    ds = PrefetchData(ds, 1024 * 8, 1)  # prefetch around 8 G
    ds = LMDBDataPoint(ds)
    ds = MapDataComponent(ds, lambda x: cv2.imdecode(x, cv2.IMREAD_COLOR),
                          0)  # BGR uint8 data
    if isTrain:

        class Resize(imgaug.ImageAugmentor):
            """
            crop 8%~100% of the original image
            See `Going Deeper with Convolutions` by Google.
            """
            def _augment(self, img, _):
                h, w = img.shape[:2]
                area = h * w
                for _ in range(10):
                    targetArea = self.rng.uniform(0.08, 1.0) * area
                    aspectR = self.rng.uniform(0.75, 1.333)
                    ww = int(np.sqrt(targetArea * aspectR))
                    hh = int(np.sqrt(targetArea / aspectR))
                    if self.rng.uniform() < 0.5:
                        ww, hh = hh, ww
                    if hh <= h and ww <= w:
                        x1 = 0 if w == ww else self.rng.randint(0, w - ww)
                        y1 = 0 if h == hh else self.rng.randint(0, h - hh)
                        out = img[y1:y1 + hh, x1:x1 + ww]
                        out = cv2.resize(out, (input_size, input_size),
                                         interpolation=cv2.INTER_CUBIC)
                        return out
                out = cv2.resize(img, (input_size, input_size),
                                 interpolation=cv2.INTER_CUBIC)
                return out

        augmentors = [
            Resize(),
            imgaug.RandomOrderAug([
                imgaug.Brightness(30, clip=False),
                imgaug.Contrast((0.8, 1.2), clip=False),
                imgaug.Saturation(0.4),
                # rgb-bgr conversion
                imgaug.Lighting(0.1,
                                eigval=[0.2175, 0.0188, 0.0045][::-1],
                                eigvec=np.array([[-0.5675, 0.7192, 0.4009],
                                                 [-0.5808, -0.0045, -0.8140],
                                                 [-0.5836, -0.6948, 0.4203]],
                                                dtype='float32')[::-1, ::-1])
            ]),
            imgaug.Clip(),
            imgaug.Flip(horiz=True),
            imgaug.ToUint8()
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256),
            imgaug.CenterCrop((input_size, input_size)),
            imgaug.ToUint8()
        ]
    ds = AugmentImageComponent(ds, augmentors, copy=False)
    if do_multiprocess:
        ds = PrefetchDataZMQ(ds, min(24, multiprocessing.cpu_count()))
    ds = BatchData(ds,
                   options.batch_size // options.nr_gpu,
                   remainder=not isTrain)
    return ds
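
The options argument is only assumed to expose the attributes read above (input_size, data_dir, batch_size, nr_gpu); a minimal sketch of building both splits, with illustrative values and a hypothetical 'val' subset name.

from argparse import Namespace

# Hypothetical options container; any object with these attributes works.
options = Namespace(input_size=224, data_dir='/data', batch_size=256, nr_gpu=8)
train_ds = get_inat_augmented_data('train', options, do_multiprocess=True)
val_ds = get_inat_augmented_data('val', options, do_multiprocess=False, is_train=False)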
Example No. 4
def get_tiny_imagenet_augmented_data(subset, options,
        do_multiprocess=True, is_train=None, shuffle=None):
    isTrain = is_train if is_train is not None else (subset == 'train' and do_multiprocess)
    shuffle = shuffle if shuffle is not None else isTrain

    lmdb_path = os.path.join(options.data_dir,
        'tiny_imagenet_lmdb', 'tiny_imagenet_{}.lmdb'.format(subset))
    # Since Tiny ImageNet is small (200MB zipped), we can shuffle it all directly
    # and skip the LocallyShuffleData and early PrefetchData steps.
    ds = LMDBData(lmdb_path, shuffle=shuffle)
    ds = LMDBDataPoint(ds)
    ds = MapDataComponent(ds, lambda x: cv2.imdecode(x, cv2.IMREAD_COLOR), 0)
    img_size = 64
    if isTrain:
        class Resize(imgaug.ImageAugmentor):
            """
            crop 30%~100% of the original image
            See `Going Deeper with Convolutions` by Google.
            """
            def _augment(self, img, _):
                h, w = img.shape[:2]
                area = h * w
                for _ in range(10):
                    targetArea = self.rng.uniform(0.3, 1.0) * area
                    aspectR = self.rng.uniform(0.75, 1.333)
                    ww = int(np.sqrt(targetArea * aspectR))
                    hh = int(np.sqrt(targetArea / aspectR))
                    if self.rng.uniform() < 0.5:
                        ww, hh = hh, ww
                    if hh <= h and ww <= w:
                        x1 = 0 if w == ww else self.rng.randint(0, w - ww)
                        y1 = 0 if h == hh else self.rng.randint(0, h - hh)
                        out = img[y1:y1 + hh, x1:x1 + ww]
                        out = cv2.resize(out, (img_size, img_size), interpolation=cv2.INTER_CUBIC)
                        return out
                out = cv2.resize(img, (img_size, img_size), interpolation=cv2.INTER_CUBIC)
                return out

        augmentors = [
            Resize(),
            imgaug.RandomOrderAug(
                [imgaug.Brightness(30, clip=False),
                 imgaug.Contrast((0.8, 1.2), clip=False),
                 imgaug.Saturation(0.4),
                 # rgb-bgr conversion
                 imgaug.Lighting(0.1,
                                 eigval=[0.2175, 0.0188, 0.0045][::-1],
                                 eigvec=np.array(
                                     [[-0.5675, 0.7192, 0.4009],
                                      [-0.5808, -0.0045, -0.8140],
                                      [-0.5836, -0.6948, 0.4203]],
                                     dtype='float32')[::-1, ::-1]
                                 )]),
            imgaug.Clip(),
            imgaug.Flip(horiz=True),
            imgaug.ToUint8()
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(72),
            imgaug.CenterCrop((img_size, img_size)),
            imgaug.ToUint8()
        ]
    ds = AugmentImageComponent(ds, augmentors, copy=False)
    ds = BatchData(ds, options.batch_size // options.nr_gpu, remainder=not isTrain)
    if do_multiprocess:
        ds = PrefetchData(ds, nr_prefetch=4, nr_proc=4)
    return ds
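
As above, options only needs data_dir, batch_size and nr_gpu here; a minimal sketch that builds the training dataflow and inspects one batch (the shape is what the augmentors above should produce, assuming two-component [image, label] datapoints in the LMDB).

from argparse import Namespace

options = Namespace(data_dir='/data', batch_size=128, nr_gpu=1)
ds = get_tiny_imagenet_augmented_data('train', options)
ds.reset_state()
for images, labels in ds.get_data():
    print(images.shape)   # expected: (128, 64, 64, 3), BGR uint8
    break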