Example #1
0
def get_data(name, data_dir, meta_dir, gpu_nums):
    """Build the CamVid (image, label) DataFlow for training or evaluation.

    Args:
        name: split name; any name containing 'train' enables shuffle-time
            augmentation, batching by GPU count, and ZMQ prefetching.
        data_dir: root directory of the CamVid dataset.
        meta_dir: directory holding the split metadata files.
        gpu_nums: number of GPUs; training batch size is
            args.batch_size * gpu_nums.

    Returns:
        A DataFlow yielding (image, label) batches (batch size 1 for eval).
    """
    isTrain = 'train' in name  # the original ternary was redundant
    ds = Camvid(data_dir, meta_dir, name, shuffle=True)

    if isTrain:
        # Scale jitter first, then crop/flip (the two original
        # `if isTrain:` blocks merged into one).
        ds = MapData(ds, RandomResize)
        shape_aug = [
            RandomCropWithPadding(args.crop_size, IGNORE_LABEL),
            Flip(horiz=True),
        ]
    else:
        shape_aug = []

    ds = AugmentImageComponents(ds, shape_aug, (0, 1), copy=False)

    def f(ds):
        # Subtract the BGR ImageNet mean. Shape (1, 1, 3) broadcasts over
        # an HWC image — the original "# NCHW" comment was incorrect.
        image, label = ds
        mean_bgr = np.array([104, 116, 122]).reshape(1, 1, 3)
        image = image - mean_bgr
        return image, label

    ds = MapData(ds, f)
    if isTrain:
        ds = BatchData(ds, args.batch_size * gpu_nums)
        ds = PrefetchDataZMQ(ds, 1)
    else:
        ds = BatchData(ds, 1)
    return ds
Example #2
0
def get_data(name, data_dir, meta_dir, gpu_nums):
    """Build the Pascal VOC 2012 (image, label) DataFlow.

    Args:
        name: split name; exactly 'train' enables training augmentation,
            batching by GPU count, and ZMQ prefetching.
        data_dir: root directory of the VOC12 dataset.
        meta_dir: directory holding the split metadata files.
        gpu_nums: number of GPUs; training batch size is
            args.batch_size * gpu_nums.

    Returns:
        A DataFlow yielding (image, label) batches (batch size 1 for eval).
    """
    isTrain = name == 'train'
    ds = PascalVOC12(data_dir, meta_dir, name, shuffle=True)

    if isTrain:  # special augmentation: jitter scale, crop with padding, flip
        shape_aug = [
            RandomResize(xrange=(0.7, 1.5), yrange=(0.7, 1.5),
                         aspect_ratio_thres=0.15),
            RandomCropWithPadding(args.crop_size, IGNORE_LABEL),
            Flip(horiz=True),
        ]
    else:
        shape_aug = []

    ds = AugmentImageComponents(ds, shape_aug, (0, 1), copy=False)

    def f(ds):
        # Subtract the BGR ImageNet mean. Shape (1, 1, 3) broadcasts over
        # an HWC image — the original "# NCHW" comment was incorrect.
        image, label = ds
        mean_bgr = np.array([104, 116, 122]).reshape(1, 1, 3)
        image = image - mean_bgr
        return image, label

    ds = MapData(ds, f)
    if isTrain:
        ds = BatchData(ds, args.batch_size * gpu_nums)
        ds = PrefetchDataZMQ(ds, 1)
    else:
        ds = BatchData(ds, 1)
    return ds
Example #3
0
def get_data(name, data_dir, meta_dir, gpu_nums):
    """Build the file-based Pascal VOC 2012 DataFlow for MXNet training.

    Unlike the in-memory variants, this reads (image-path, label-path)
    pairs and decodes them on worker threads, and for training it
    additionally splits each batch per-GPU into mx.nd arrays.

    Args:
        name: split name; any name containing 'train' enables shuffling,
            augmentation, and per-GPU batch splitting.
        data_dir: root directory of the VOC12 dataset.
        meta_dir: directory holding the split metadata files.
        gpu_nums: number of GPUs; training batch size is
            args.batch_size * gpu_nums.

    Returns:
        A DataFlow; training points yield ([mx.nd data chunks],
        [mx.nd label chunks]), eval points yield (image, label) with
        batch size 1.
    """
    isTrain = 'train' in name  # the original ternary was redundant

    def imgread(ds):
        # Decode the image/label file paths (runs on map-worker threads).
        img, label = ds
        img = cv2.imread(img, cv2.IMREAD_COLOR)
        label = cv2.imread(label, cv2.IMREAD_GRAYSCALE)
        return img, label

    # Both branches built the same source/decoder; only `shuffle` differed.
    ds = PascalVOC12Files(data_dir, meta_dir, name, shuffle=isTrain)
    ds = MultiThreadMapData(ds, 4, imgread, buffer_size=2)

    if isTrain:
        ds = MapData(ds, RandomResize)
        shape_aug = [
            RandomCropWithPadding(args.crop_size, IGNORE_LABEL),
            Flip(horiz=True),
        ]
        ds = AugmentImageComponents(ds, shape_aug, (0, 1), copy=False)

    def reduce_mean_rgb(ds):
        # Subtract the BGR ImageNet mean. Shape (1, 1, 3) broadcasts over
        # an HWC image — the original "# NCHW" comment was incorrect.
        image, label = ds
        mean_bgr = np.array([104, 116, 122]).reshape(1, 1, 3)
        image = image - mean_bgr
        return image, label

    def MxnetPrepare(ds):
        # NHWC -> NCHW, then slice the big batch into per-GPU mx.nd chunks.
        data, label = ds
        data = np.transpose(data, (0, 3, 1, 2))  # NCHW
        label = label[:, :, :, None]
        label = np.transpose(label, (0, 3, 1, 2))  # NCHW
        dl = [[mx.nd.array(data[args.batch_size * i:args.batch_size * (i + 1)])]
              for i in range(gpu_nums)]  # multi-gpu distribute data, time-consuming!!!
        ll = [[mx.nd.array(label[args.batch_size * i:args.batch_size * (i + 1)])]
              for i in range(gpu_nums)]
        return dl, ll

    ds = MultiThreadMapData(ds, 4, reduce_mean_rgb, buffer_size=2)

    if isTrain:
        ds = FastBatchData(ds, args.batch_size * gpu_nums)
        ds = MapData(ds, MxnetPrepare)
    else:
        ds = BatchData(ds, 1)
    return ds
Example #4
0
def get_data(name, meta_dir, gpu_nums):
    """Build the file-based Mapillary DataFlow for MXNet training.

    NOTE(review): the dataset root comes from a module-level `base_dir`
    global rather than a parameter — presumably set elsewhere in this
    file; confirm before reuse.

    Args:
        name: split name; exactly 'train' enables shuffling, augmentation,
            and per-GPU batch splitting.
        meta_dir: directory holding the split metadata files.
        gpu_nums: number of GPUs; training batch size is
            args.batch_size * gpu_nums.

    Returns:
        A DataFlow; training points yield ([mx.nd data chunks],
        [mx.nd label chunks]), eval points yield (image, label) with
        batch size 1.
    """
    isTrain = name == 'train'

    def imgread(ds):
        # Decode the image/label file paths (runs on map-worker threads).
        img, label = ds
        img = cv2.imread(img, cv2.IMREAD_COLOR)
        label = cv2.imread(label, cv2.IMREAD_GRAYSCALE)
        return img, label

    # Both branches built the same source/decoder; only `shuffle` differed.
    ds = MapillaryFiles(base_dir, meta_dir, name, shuffle=isTrain)
    ds = MultiThreadMapData(ds, 4, imgread)

    if isTrain:  # special augmentation
        # Mapillary images are large: clamp the shortest edge before jitter.
        ds = MapData(ds, ResizeShortestEdge)
        ds = MapData(ds, RandomResize)
        shape_aug = [
            RandomCrop(args.crop_size),
            Flip(horiz=True),
        ]
        ds = AugmentImageComponents(ds, shape_aug, (0, 1), copy=False)

    def reduce_mean_rgb(ds):
        # Subtract the BGR ImageNet mean. Shape (1, 1, 3) broadcasts over
        # an HWC image — the original "# NCHW" comment was incorrect.
        image, label = ds
        mean_bgr = np.array([104, 116, 122]).reshape(1, 1, 3)
        image = image - mean_bgr
        return image, label

    def MxnetPrepare(ds):
        # NHWC -> NCHW, then slice the big batch into per-GPU mx.nd chunks.
        data, label = ds
        data = np.transpose(data, (0, 3, 1, 2))  # NCHW
        label = label[:, :, :, None]
        label = np.transpose(label, (0, 3, 1, 2))  # NCHW
        dl = [
            [mx.nd.array(data[args.batch_size * i:args.batch_size * (i + 1)])]
            for i in range(gpu_nums)
        ]  # multi-gpu distribute data, time-consuming!!!
        ll = [[
            mx.nd.array(label[args.batch_size * i:args.batch_size * (i + 1)])
        ] for i in range(gpu_nums)]
        return dl, ll

    ds = MultiThreadMapData(ds, 4, reduce_mean_rgb)

    if isTrain:
        ds = FastBatchData(ds, args.batch_size * gpu_nums)
        ds = MapData(ds, MxnetPrepare)
    else:
        ds = BatchData(ds, 1)
    return ds