예제 #1
0
def get_dataflow(path, is_train, img_path=None):
    """Build a tensorpack dataflow over the pose LMDB at *path*.

    :param path: path to the lmdb dataset
    :param is_train: True -> augmented training pipeline; False -> fixed eval pipeline
    :param img_path: optional image directory handed through to CocoPose
    :return: dataflow yielding network-ready samples
    """
    ds = CocoPose(path, img_path, is_train)  # read data from lmdb
    if is_train:
        ds = MapData(ds, read_image_url)
        # random geometric augmentations, applied per component in this order
        for component_fn in (pose_random_scale, pose_rotation, pose_flip,
                             pose_resize_shortestedge_random, pose_crop_random):
            ds = MapDataComponent(ds, component_fn)
        ds = MapData(ds, pose_to_img)
        ds = PrefetchData(ds, 1000, multiprocessing.cpu_count())
    else:
        # evaluation: image decoding dominates, so read with a thread pool
        ds = MultiThreadMapData(ds, nr_thread=16, map_func=read_image_url,
                                buffer_size=1000)
        ds = MapDataComponent(ds, pose_resize_shortestedge_fixed)
        ds = MapDataComponent(ds, pose_crop_center)
        ds = MapData(ds, pose_to_img)
        ds = PrefetchData(ds, 100, multiprocessing.cpu_count() // 4)

    return ds
예제 #2
0
def get_dataflow(path, is_train):
    """CocoPoseLMDB dataflow; training adds geometric and color augmentation."""
    ds = CocoPoseLMDB(path, is_train)  # read data from lmdb
    if is_train:
        for geometric_aug in (pose_random_scale, pose_rotation, pose_flip,
                              pose_resize_shortestedge_random, pose_crop_random):
            ds = MapDataComponent(ds, geometric_aug)
        ds = MapData(ds, pose_to_img)
        # apply one random color perturbation 70% of the time
        color_aug = imgaug.RandomApplyAug(
            imgaug.RandomChooseAug([
                imgaug.SaltPepperNoise(white_prob=0.01, black_prob=0.01),
                imgaug.RandomOrderAug([
                    imgaug.BrightnessScale((0.8, 1.2), clip=False),
                    imgaug.Contrast((0.8, 1.2), clip=False),
                ]),
            ]),
            0.7)
        ds = AugmentImageComponent(ds, [color_aug])
    else:
        ds = MapDataComponent(ds, pose_resize_shortestedge_fixed)
        ds = MapDataComponent(ds, pose_crop_center)
        ds = MapData(ds, pose_to_img)

    ds = PrefetchData(ds, 1000, multiprocessing.cpu_count())

    return ds
예제 #3
0
파일: pose_dataset.py 프로젝트: ybCliff/gp
def get_dataflow(path, is_train):
    """CocoPoseLMDB dataflow; training adds geometric plus photometric augmentation."""
    ds = CocoPoseLMDB(path, is_train)  # lmdb-backed source
    if is_train:
        for geometric_aug in (pose_random_scale, pose_rotation, pose_flip,
                              pose_resize_shortestedge_random, pose_crop_random):
            ds = MapDataComponent(ds, geometric_aug)
        ds = MapData(ds, pose_to_img)
        # one randomly chosen photometric perturbation, 70% of the time
        photometric = imgaug.RandomApplyAug(
            imgaug.RandomChooseAug([
                imgaug.BrightnessScale((0.6, 1.4), clip=False),
                imgaug.Contrast((0.7, 1.4), clip=False),
                imgaug.GaussianBlur(max_size=3)
            ]), 0.7)
        ds = AugmentImageComponent(ds, [photometric])
    else:
        ds = MapDataComponent(ds, pose_resize_shortestedge_fixed)
        ds = MapDataComponent(ds, pose_crop_center)
        ds = MapData(ds, pose_to_img)

    ds = PrefetchData(ds, 1000, multiprocessing.cpu_count())

    return ds
예제 #4
0
def get_dataflow(is_train):
    """Dataflow over the fixed-path coco pose estimation lmdb (no prefetching)."""
    ds = CocoPoseLMDB('/data/public/rw/coco-pose-estimation-lmdb/', is_train)
    if is_train:
        for geometric_aug in (pose_rotation, pose_flip,
                              pose_resize_shortestedge_random, pose_crop_random):
            ds = MapDataComponent(ds, geometric_aug)
        ds = MapData(ds, pose_to_img)
        # random color perturbation, applied 70% of the time
        color_aug = imgaug.RandomApplyAug(imgaug.RandomChooseAug([
            imgaug.SaltPepperNoise(white_prob=0.01, black_prob=0.01),
            imgaug.RandomOrderAug([
                imgaug.BrightnessScale((0.8, 1.2), clip=False),
                imgaug.Contrast((0.8, 1.2), clip=False),
            ]),
        ]), 0.7)
        ds = AugmentImageComponent(ds, [color_aug])
    else:
        ds = MapDataComponent(ds, pose_resize_shortestedge_fixed)
        ds = MapDataComponent(ds, pose_crop_center)
        ds = MapData(ds, pose_to_img)

    return ds
예제 #5
0
def get_data(name, data_dir, meta_dir, gpu_nums):
    """Build the Camvid dataflow for split *name*.

    Any name containing 'train' selects the augmented training pipeline
    (random resize, padded crop, flip, per-GPU batching); otherwise a
    batch-size-1 evaluation pipeline.

    :param name: split name ('train' substring enables training mode)
    :param data_dir: dataset root directory
    :param meta_dir: metadata directory
    :param gpu_nums: number of GPUs; training batch is batch_size * gpu_nums
    :return: dataflow object
    """
    # idiom fix: `True if ... else False` is redundant — `in` already yields a bool
    isTrain = 'train' in name
    ds = Camvid(data_dir, meta_dir, name, shuffle=True)

    # merged the two consecutive `if isTrain:` blocks of the original
    if isTrain:
        ds = MapData(ds, RandomResize)
        shape_aug = [
            RandomCropWithPadding(args.crop_size, IGNORE_LABEL),
            Flip(horiz=True),
        ]
    else:
        shape_aug = []

    ds = AugmentImageComponents(ds, shape_aug, (0, 1), copy=False)

    def f(dp):
        """Subtract the per-channel BGR mean from the image."""
        image, label = dp
        m = np.array([104, 116, 122])
        const_arr = np.resize(m, (1, 1, 3))  # 1x1x3, broadcasts over an HWC image
        image = image - const_arr
        return image, label

    ds = MapData(ds, f)
    if isTrain:
        ds = BatchData(ds, args.batch_size * gpu_nums)
        ds = PrefetchDataZMQ(ds, 1)
    else:
        ds = BatchData(ds, 1)
    return ds
def get_dataflow_vgg(annot_path, img_dir, strict, x_size, y_size, include_outputs_masks=False):
    """
    This function initializes the tensorpack dataflow and serves generator
    for training operation.

    :param annot_path: path to the annotation file
    :param img_dir: path to the images
    :param strict: forwarded to MultiProcessMapDataZMQ
    :param x_size: network input resolution (square)
    :param y_size: network output resolution (square)
    :param include_outputs_masks: also generate masks for the outputs
    :return: (dataflow object, dataset size)
    """
    coco_crop_size = 368

    # geometric augmentation chain, applied in this order
    scale_aug = ScaleAug(scale_min=0.5,
                         scale_max=1.1,
                         target_dist=0.6,
                         interp=cv2.INTER_CUBIC)
    rotate_aug = RotateAug(rotate_max_deg=40,
                           interp=cv2.INTER_CUBIC,
                           border=cv2.BORDER_CONSTANT,
                           border_value=(128, 128, 128), mask_border_val=1)
    crop_aug = CropAug(coco_crop_size, coco_crop_size, center_perterb_max=40,
                       border_value=128, mask_border_val=1)
    flip_aug = FlipAug(num_parts=18, prob=0.5)

    augmentors = [scale_aug, rotate_aug, crop_aug, flip_aug]
    if x_size != coco_crop_size:
        # network input differs from the crop size -> append a final resize
        augmentors.append(ResizeAug(x_size, x_size))

    augment_func = functools.partial(augment, augmentors=augmentors)

    # build the dataflow
    df = CocoDataFlow((coco_crop_size, coco_crop_size), annot_path, img_dir)
    df.prepare()
    size = df.size()
    df = MapData(df, read_img)

    if include_outputs_masks:
        df = MapData(df, gen_mask)
        build_sample_func = functools.partial(build_sample_with_masks,
                                              y_size=y_size)
    else:
        build_sample_func = functools.partial(build_sample, y_size=y_size)

    df = MapData(df, augment_func)
    # heavy per-sample work runs in worker processes
    df = MultiProcessMapDataZMQ(df, num_proc=4, map_func=build_sample_func,
                                buffer_size=200, strict=strict)
    return df, size
예제 #7
0
    def get_input_flow(self):
        """Build the four dataflows used for training and evaluation.

        Returns a tuple (ds_train, ds_valid, ds_valid2, ds_test):
        - ds_train: augmented, prefetched, batched training flow
        - ds_valid: crop-based validation flow (batched, keeps the remainder)
        - ds_valid2: full-image validation flow (un-batched)
        - ds_test: test flow producing images only (no segmentation targets)

        Crop size comes from self.img_size; batch size from self.batchsize.
        """
        ds_train = CellImageDataManagerTrain()
        # ds_train = MapDataComponent(ds_train, random_affine)  # TODO : no improvement?
        ds_train = MapDataComponent(ds_train, random_color)
        # ds_train = MapDataComponent(ds_train, random_scaling)
        ds_train = MapDataComponent(
            ds_train,
            mask_size_normalize)  # Resize by instance size - normalization
        # guarantee a minimum short edge before taking a random crop
        ds_train = MapDataComponent(
            ds_train, lambda x: resize_shortedge_if_small(x, self.img_size))
        ds_train = MapDataComponent(
            ds_train, lambda x: random_crop(x, self.img_size, self.img_size))
        ds_train = MapDataComponent(ds_train, random_flip_lr)
        ds_train = MapDataComponent(ds_train, random_flip_ud)
        # ds_train = MapDataComponent(ds_train, data_to_elastic_transform_wrapper)
        ds_train = MapDataComponent(ds_train, erosion_mask)
        # unet_weight=True also emits the U-Net pixel-weight map
        ds_train = MapData(
            ds_train, lambda x: data_to_segment_input(
                x, is_gray=False, unet_weight=True))
        ds_train = PrefetchData(ds_train, 256, 24)
        ds_train = BatchData(ds_train, self.batchsize)
        ds_train = MapDataComponent(ds_train, data_to_normalize1)

        ds_valid = CellImageDataManagerValid()
        ds_valid = MapDataComponent(
            ds_valid, lambda x: resize_shortedge_if_small(x, self.img_size))
        ds_valid = MapDataComponent(
            ds_valid, lambda x: random_crop(x, self.img_size, self.img_size))
        ds_valid = MapDataComponent(ds_valid, erosion_mask)
        ds_valid = MapData(
            ds_valid, lambda x: data_to_segment_input(
                x, is_gray=False, unet_weight=True))
        ds_valid = PrefetchData(ds_valid, 20, 12)
        # remainder=True: keep the final partial batch for full coverage
        ds_valid = BatchData(ds_valid, self.batchsize, remainder=True)
        ds_valid = MapDataComponent(ds_valid, data_to_normalize1)

        # second validation flow: near-full-size images, TCGA-specific center crop
        ds_valid2 = CellImageDataManagerValid()
        ds_valid2 = MapDataComponent(
            ds_valid2, lambda x: resize_shortedge_if_small(x, self.img_size))
        ds_valid2 = MapDataComponent(
            ds_valid2,
            lambda x: center_crop_if_tcga(x, self.img_size, self.img_size))
        # ds_valid2 = MapDataComponent(ds_valid2, lambda x: resize_shortedge(x, self.img_size))
        ds_valid2 = MapData(ds_valid2,
                            lambda x: data_to_segment_input(x, is_gray=False))
        ds_valid2 = MapDataComponent(ds_valid2, data_to_normalize1)

        ds_test = CellImageDataManagerTest()
        ds_test = MapDataComponent(
            ds_test, lambda x: resize_shortedge_if_small(x, self.img_size))
        # ds_test = MapDataComponent(ds_test, lambda x: resize_shortedge(x, self.img_size))
        ds_test = MapData(ds_test, lambda x: data_to_image(x, is_gray=False))
        ds_test = MapDataComponent(ds_test, data_to_normalize1)

        return ds_train, ds_valid, ds_valid2, ds_test
예제 #8
0
def get_data(name, data_dir, meta_dir, gpu_nums):
    """Build the PascalVOC12 dataflow for split *name*.

    Any name containing 'train' selects the augmented training pipeline
    (random resize, padded crop, flip, per-GPU batching for MXNet);
    otherwise a batch-size-1 evaluation pipeline.

    :param name: split name ('train' substring enables training mode)
    :param data_dir: dataset root directory
    :param meta_dir: metadata directory
    :param gpu_nums: number of GPUs; training batch is batch_size * gpu_nums
    :return: dataflow object
    """
    # idiom fix: `True if ... else False` is redundant — `in` already yields a bool
    isTrain = 'train' in name

    def imgread(dp):
        """Decode (image path, label path) into (BGR image, grayscale label)."""
        img, label = dp
        img = cv2.imread(img, cv2.IMREAD_COLOR)
        label = cv2.imread(label, cv2.IMREAD_GRAYSCALE)
        return img, label

    if isTrain:
        ds = PascalVOC12Files(data_dir, meta_dir, name, shuffle=True)
        # decoding is I/O bound -> small thread pool
        ds = MultiThreadMapData(ds, 4, imgread, buffer_size=2)
        ds = MapData(ds, RandomResize)
    else:
        ds = PascalVOC12Files(data_dir, meta_dir, name, shuffle=False)
        ds = MultiThreadMapData(ds, 4, imgread, buffer_size=2)

    if isTrain:
        shape_aug = [
            RandomCropWithPadding(args.crop_size, IGNORE_LABEL),
            Flip(horiz=True),
        ]
        ds = AugmentImageComponents(ds, shape_aug, (0, 1), copy=False)

    def reduce_mean_rgb(dp):
        """Subtract the per-channel BGR mean from the image."""
        image, label = dp
        m = np.array([104, 116, 122])
        const_arr = np.resize(m, (1, 1, 3))  # 1x1x3, broadcasts over an HWC image
        image = image - const_arr
        return image, label

    def MxnetPrepare(dp):
        """Transpose a batch to NCHW and split it into per-GPU mx.nd arrays."""
        data, label = dp
        data = np.transpose(data, (0, 3, 1, 2))  # NHWC -> NCHW
        label = label[:, :, :, None]
        label = np.transpose(label, (0, 3, 1, 2))  # NHWC -> NCHW
        dl = [[mx.nd.array(data[args.batch_size * i:args.batch_size * (i + 1)])] for i in
              range(gpu_nums)]  # multi-gpu distribute data, time-consuming!!!
        ll = [[mx.nd.array(label[args.batch_size * i:args.batch_size * (i + 1)])] for i in
              range(gpu_nums)]
        return dl, ll

    ds = MultiThreadMapData(ds, 4, reduce_mean_rgb, buffer_size=2)

    if isTrain:
        ds = FastBatchData(ds, args.batch_size * gpu_nums)
        ds = MapData(ds, MxnetPrepare)
    else:
        ds = BatchData(ds, 1)
    return ds
def get_dataflow(path, is_train):
    """Dataflow over the SynthHands lmdb at *path*."""
    ds = SynthHands(path, is_train)  # lmdb-backed source
    if is_train:
        ds = MapData(ds, read_image_url)
        ds = MapData(ds, pose_to_img)
        queue_len, workers = 1000, multiprocessing.cpu_count()
    else:
        # evaluation: threaded reads, smaller prefetch pool
        ds = MultiThreadMapData(ds, num_thread=16, map_func=read_image_url,
                                buffer_size=1000)
        ds = MapData(ds, pose_to_img)
        queue_len, workers = 100, multiprocessing.cpu_count() // 4
    return PrefetchData(ds, queue_len, workers)
예제 #10
0
def _get_dataflow_onlyread(path, is_train, img_path=None):
    """Hand-pose dataflow with minimal prefetching; mainly for debugging reads."""
    # NOTE(review): 'OpenOoseHand' looks like a typo for 'OpenPoseHand' —
    # confirm the actual class name upstream before renaming.
    ds = OpenOoseHand(path, is_train)  # read data from lmdb
    ds = MapData(ds, read_image_url)
    for component_fn in (crop_hand_roi_big, hand_random_scale, pose_rotation,
                         pose_flip, crop_hand_roi):
        ds = MapDataComponent(ds, component_fn)
    # ds = MapDataComponent(ds, pose_resize_shortestedge_fixed)
    # ds = MapDataComponent(ds, pose_crop_random)
    ds = MapData(ds, pose_to_img)
    return PrefetchData(ds, 10, 2)
    def get_input_flow(self):
        """Build the four dataflows used for training and evaluation.

        Returns (ds_train, ds_valid, ds_valid2, ds_test). Crop size is a
        fixed 224 here; grayscale conversion and the U-Net weight map are
        controlled by self.is_color / self.unet_weight.
        """
        ds_train = CellImageDataManagerTrain()
        # Augmentation :
        ds_train = MapDataComponent(ds_train, random_affine)
        ds_train = MapDataComponent(ds_train, random_color)
        # ds_train = MapDataComponent(ds_train, random_color2)  # not good
        ds_train = MapDataComponent(ds_train, random_scaling)
        # guarantee a minimum short edge before taking a random crop
        ds_train = MapDataComponent(
            ds_train, lambda x: resize_shortedge_if_small(x, 224))
        ds_train = MapDataComponent(ds_train,
                                    lambda x: random_crop(x, 224, 224))
        ds_train = MapDataComponent(ds_train, random_flip_lr)
        # ds_train = MapDataComponent(ds_train, data_to_elastic_transform_wrapper)
        ds_train = MapDataComponent(ds_train, random_flip_ud)
        if self.unet_weight:
            # erode the masks only when U-Net border weights are in use
            ds_train = MapDataComponent(ds_train, erosion_mask)
        ds_train = PrefetchData(ds_train, 1000, 24)
        ds_train = MapData(
            ds_train, lambda x: data_to_segment_input(x, not self.is_color,
                                                      self.unet_weight))
        ds_train = BatchData(ds_train, self.batchsize)
        ds_train = MapDataComponent(ds_train, data_to_normalize1)
        ds_train = PrefetchData(ds_train, 10, 2)

        ds_valid = CellImageDataManagerValid()
        ds_valid = MapDataComponent(ds_valid,
                                    lambda x: center_crop(x, 224, 224))
        if self.unet_weight:
            ds_valid = MapDataComponent(ds_valid, erosion_mask)
        ds_valid = MapData(
            ds_valid, lambda x: data_to_segment_input(x, not self.is_color,
                                                      self.unet_weight))
        # remainder=True: keep the final partial batch for full coverage
        ds_valid = BatchData(ds_valid, self.batchsize, remainder=True)
        ds_valid = MapDataComponent(ds_valid, data_to_normalize1)
        ds_valid = PrefetchData(ds_valid, 20, 24)

        # second validation flow: full images, only a minimum-size resize
        ds_valid2 = CellImageDataManagerValid()
        ds_valid2 = MapDataComponent(
            ds_valid2, lambda x: resize_shortedge_if_small(x, 224))
        ds_valid2 = MapData(
            ds_valid2, lambda x: data_to_segment_input(x, not self.is_color))
        ds_valid2 = MapDataComponent(ds_valid2, data_to_normalize1)

        # test flow: images only, no segmentation targets
        ds_test = CellImageDataManagerTest()
        ds_test = MapDataComponent(ds_test,
                                   lambda x: resize_shortedge_if_small(x, 224))
        ds_test = MapData(ds_test,
                          lambda x: data_to_image(x, not self.is_color))
        ds_test = MapDataComponent(ds_test, data_to_normalize1)

        return ds_train, ds_valid, ds_valid2, ds_test
예제 #12
0
def batch_dataflow(df,
                   batch_size,
                   time_steps=4,
                   num_stages=6,
                   format=['heatpaf', 'last']):
    """Batch *df* and map each batch to (inputs, outputs) for the TD/LSTM model.

    :param df: dataflow of samples; per the helpers below, x[0]=image,
        x[1]=paf mask, x[2]=heat mask, x[3]=paf target, x[4]=heat target
    :param batch_size: batch size
    :param time_steps: replication count along the time axis (axis=1)
    :param num_stages: number of stage outputs on the heat-only path
    :param format: [input format, output format]; supported combinations are
        ('heat', 'last') and ('heatpaf', 'last').
        NOTE(review): mutable default list — never mutated here, but a tuple
        default would be safer; kept for interface compatibility.
    :return: dataflow with reset_state() already called
    :raises Exception: for any unsupported format combination
    """
    informat, outformat = format

    df = BatchData(df, batch_size, use_list=False)

    def in_heat(x):
        # image + heat mask, each replicated across the time axis
        return [
            np.stack([x[0]] * time_steps, axis=1),
            np.stack([x[2]] * time_steps, axis=1)
        ]

    def in_heatpaf(x):
        # image + paf mask + heat mask, each replicated across the time axis
        return [
            np.stack([x[0]] * time_steps, axis=1),
            np.stack([x[1]] * time_steps, axis=1),
            np.stack([x[2]] * time_steps, axis=1)
        ]

    def out_heat_last(x):
        # the same time-replicated heat target for every stage
        return [np.stack([x[4]] * time_steps, axis=1)] * num_stages

    def out_heatpaf_last(x):
        return [
            np.stack([x[3]] * time_steps, axis=1),
            np.stack([x[4]] * time_steps, axis=1),
            np.stack([x[3]] * time_steps, axis=1),
            np.stack([x[4]] * time_steps, axis=1),  # TD layers end here
            x[3],  # TD layers are joined here by LSTM
            x[4],
            x[3],  # these last outputs collapse to one timestep output
            x[4],
            x[3],
            x[4],
            x[3],
            x[4],
        ]

    if informat == 'heat' and outformat == 'last':
        # BUG FIX: the original called the undefined name `heat_only` here,
        # while the local helper `in_heat` above was defined but never used.
        df = MapData(df, lambda x: (in_heat(x), out_heat_last(x)))
    elif informat == 'heatpaf' and outformat == 'last':
        df = MapData(df, lambda x: (in_heatpaf(x), out_heatpaf_last(x)))
    else:
        raise Exception('Unknown format requested: %s' % format)

    df.reset_state()
    return df
    def get_dataflow(self, cfg):
        """Assemble the augmented training dataflow, parallelised across processes."""
        pipeline = Pose(cfg)
        pipeline = MapData(pipeline, self.augment)
        pipeline = MapData(pipeline, self.compute_target_part_scoremap)

        num_processes = multiprocessing.cpu_count() * int(self.cfg['processratio'])
        if num_processes <= 1:
            # recommended to use more than one process for training
            num_processes = 2
        prefetch = self.cfg['num_prefetch']
        if os.name == 'nt':
            # Windows has no fork() — use the plain multi-process runner
            return MultiProcessRunner(pipeline, num_proc=num_processes,
                                      num_prefetch=prefetch)
        return MultiProcessRunnerZMQ(pipeline, num_proc=num_processes, hwm=prefetch)
예제 #14
0
def _get_dataflow_onlyread(path, is_train, img_path=None):
    """Un-prefetched CocoPose dataflow; the prints trace construction for debugging."""
    print('CocoPose-------------')
    ds = CocoPose(path, img_path, is_train)  # read data from lmdb
    print('CocoPose======')
    ds = MapData(ds, read_image_url)
    for component_fn in (pose_random_scale, pose_rotation, pose_flip,
                         pose_resize_shortestedge_random, pose_crop_random):
        ds = MapDataComponent(ds, component_fn)
    print('MapData-------------')
    ds = MapData(ds, pose_to_img)
    print('MapData======')
    # ds = PrefetchData(ds, 1000, multiprocessing.cpu_count() * 4)
    return ds
예제 #15
0
def sample_augmentations():
    """Visualize the augmentation pipeline on sample #0 of the fixed lmdb path."""
    ds = CocoPoseLMDB('/data/public/rw/coco-pose-estimation-lmdb/',
                      is_train=False,
                      only_idx=0)
    for component_fn in (pose_random_scale, pose_rotation, pose_flip,
                         pose_resize_shortestedge_random, pose_crop_random):
        ds = MapDataComponent(ds, component_fn)
    ds = MapData(ds, pose_to_img)
    # one randomly chosen photometric perturbation, 70% of the time
    color_aug = imgaug.RandomApplyAug(
        imgaug.RandomChooseAug([
            imgaug.GaussianBlur(3),
            imgaug.SaltPepperNoise(white_prob=0.01, black_prob=0.01),
            imgaug.RandomOrderAug([
                imgaug.BrightnessScale((0.8, 1.2), clip=False),
                imgaug.Contrast((0.8, 1.2), clip=False),
            ]),
        ]),
        0.7)
    ds = AugmentImageComponent(ds, [color_aug])

    ds.reset_state()
    for l1, l2, l3 in ds.get_data():
        CocoPoseLMDB.display_image(l1, l2, l3)
예제 #16
0
def get_data(name, data_dir, meta_dir, gpu_nums):
    """PascalVOC12 dataflow; the 'train' split gets resize/crop/flip augmentation."""
    isTrain = name == 'train'
    ds = PascalVOC12(data_dir, meta_dir, name, shuffle=True)

    if isTrain:
        # special augmentation for training only
        shape_aug = [
            RandomResize(xrange=(0.7, 1.5), yrange=(0.7, 1.5),
                         aspect_ratio_thres=0.15),
            RandomCropWithPadding(args.crop_size, IGNORE_LABEL),
            Flip(horiz=True),
        ]
    else:
        shape_aug = []

    ds = AugmentImageComponents(ds, shape_aug, (0, 1), copy=False)

    def subtract_mean(dp):
        # subtract the per-channel BGR mean; 1x1x3 broadcasts over an HWC image
        image, label = dp
        mean = np.resize(np.array([104, 116, 122]), (1, 1, 3))
        return image - mean, label

    ds = MapData(ds, subtract_mean)
    if isTrain:
        ds = BatchData(ds, args.batch_size * gpu_nums)
        ds = PrefetchDataZMQ(ds, 1)
    else:
        ds = BatchData(ds, 1)
    return ds
def get_default_dataflow_batch(batchsize=32):
    """Default dataflow, mapped to segment inputs, batched and normalized to [0, 1]."""
    ds = MapData(get_default_dataflow(), data_to_segment_input)
    ds = BatchData(ds, batchsize)
    ds = MapDataComponent(ds, data_to_normalize01)
    return PrefetchData(ds, 10, 2)
예제 #18
0
def get_infer_iterator(hparams, dataset, num_gpu, batch_size):
    """Inference iterator: map, batch, split each batch across GPUs, prefetch.

    :param hparams: hyper-parameters handed to map_func
    :param dataset: list of samples
    :param num_gpu: number of GPUs each batch is split across
    :param batch_size: batch size (the final partial batch is kept)
    :return: prefetched dataflow of per-GPU shards
    :raises ValueError: when the final partial batch holds fewer samples than
        GPUs, since np.array_split would hand some GPU an empty shard
    """
    df = DataFromList(dataset, shuffle=False)
    # hoist the modulo: the original computed `num_samples % batch_size` twice
    remainder = len(df) % batch_size
    if remainder != 0 and remainder < num_gpu:
        # message fix: the original wrote '%%' in a plain (non-format) string,
        # which prints a literal double percent sign
        raise ValueError("num_samples % batch_size < num_gpu")

    df = MapData(df, lambda data: map_func(hparams, data))
    batched_df = BatchData(df, batch_size=batch_size, remainder=True)
    # split every batch component into num_gpu shards
    splitted_df = MapData(
        batched_df,
        lambda x: [np.array_split(x[idx], num_gpu) for idx in range(len(x))])
    prefetched_df = PrefetchDataZMQ(splitted_df,
                                    nr_proc=1,
                                    hwm=batch_size * 10)

    return prefetched_df
def serialize_to_lmdb(dataset, hparams, lmdb_path):
    """Cache *dataset*, mapped through map_func, into an lmdb file (skip if present)."""
    if os.path.isfile(lmdb_path):
        print("lmdb file ({}) exists!".format(lmdb_path))
        return
    df = DataFromList(dataset, shuffle=False)
    df = MapData(df, lambda data: map_func(data, hparams))
    print("Creating lmdb cache...")
    LMDBSerializer.save(df, lmdb_path)
예제 #20
0
def get_dataflow(coco_data_paths):
    """
    This function initializes the tensorpack dataflow and serves generator
    for training operation.

    :param coco_data_paths: paths to the coco files: annotation file and folder with images
    :return: dataflow object
    """
    df = CocoDataFlow((368, 368), coco_data_paths)
    df.prepare()
    # per-sample chain: decode -> mask -> augment -> mask-apply -> build sample
    for step in (read_img, gen_mask, augment, apply_mask, build_sample):
        df = MapData(df, step)
    return PrefetchDataZMQ(df, nr_proc=4)
예제 #21
0
 def __init__(self, path, img_path, image, label, output_shape=(256, 256),
              batch_size=32, translation=True, scale=True, rotation=True,
              mins=0.25, maxs=1.2, mina=-np.pi, maxa=np.pi, ilumination=0.0,
              is_train=True):
     """Store the augmentation configuration and open the CocoPose data stream.

     Bug fixes vs. the original:
     - `is_train` was referenced but never defined (NameError); it is now a
       trailing keyword parameter whose default keeps old call sites working.
     - `MapData(ds, ...)` referenced the undefined local `ds`; use `self.ds`.
     - the `mina`/`maxa` arguments were silently overwritten with -pi/+pi;
       the passed values are now stored (defaults are unchanged).
     """
     self.path = path
     self.img_path = img_path
     self.image = image
     self.label = (label == 1)  # binarize the label mask
     self.output_shape = output_shape
     self.batch_size = batch_size
     self.translation = translation
     self.scale = scale
     self.rotation = rotation
     self.mins = mins
     self.maxs = maxs
     self.mina = mina
     self.maxa = maxa
     self.ilumination = ilumination
     self.ds = CocoPose(path, img_path, is_train)
     self.ds_img = MapData(self.ds, read_image_url)
     self.gen_ds = self.ds_img.get_data()
def get_dataflow(annot_path, img_dir):
    """
    This function initializes the tensorpack dataflow and serves generator
    for training operation.

    :param annot_path: path to the annotation file
    :param img_dir: path to the images
    :return: dataflow object
    """
    df = CocoDataFlow((368, 368), annot_path, img_dir)
    df.prepare()
    # per-sample chain: decode -> mask -> augment -> mask-apply -> build sample
    for step in (read_img, gen_mask, augment, apply_mask, build_sample):
        df = MapData(df, step)
    return PrefetchDataZMQ(df, nr_proc=4)
예제 #23
0
    def test_with_id(self):
        """Each test sample carries a string id (dp[1][0]) and a positive size (dp[2])."""
        ds_test = MapData(CellImageDataManagerTest(), data_to_image)

        # only inspect the first dozen samples to keep the test quick
        for index, sample in enumerate(ds_test.get_data()):
            self.assertTrue(isinstance(sample[1][0], str))
            self.assertGreater(sample[2][0], 0)
            self.assertGreater(sample[2][1], 0)
            if index > 10:
                break
예제 #24
0
def batch_dataflow(df, batch_size):
    """
    The function builds batch dataflow from the input dataflow of samples

    :param df: dataflow of samples
    :param batch_size: batch size
    :return: dataflow of batches
    """
    batched = BatchData(df, batch_size, use_list=False)
    # keep only the image (x[0]) as input and the heatmap (x[2]) as target
    batched = MapData(batched, lambda x: ([x[0]], [x[2]]))
    batched.reset_state()
    return batched
def get_dataflow(annot_path, img_dir, batch_size):
    """
    This function initializes the tensorpack dataflow and serves generator
    for training operation.

    :param annot_path: path to the annotation file
    :param img_dir: path to the images
    :param batch_size: batch size
    :return: dataflow object
    """
    df = CocoDataFlow((368, 368), annot_path, img_dir)
    df.prepare()
    # per-sample chain: decode -> mask -> augment -> mask-apply -> build sample
    for step in (read_img, gen_mask, augment, apply_mask, build_sample):
        df = MapData(df, step)
    df = PrefetchDataZMQ(df, nr_proc=4)
    df = BatchData(df, batch_size, use_list=False)

    def to_model_io(x):
        # inputs: image + paf mask + heat mask;
        # outputs: the (paf, heat) target pair repeated for all six stages
        inputs = [x[0], x[1], x[2]]
        outputs = [x[3], x[4]] * 6
        return inputs, outputs

    df = MapData(df, to_model_io)
    df.reset_state()

    return df
예제 #26
0
def get_data(name, meta_dir, gpu_nums):
    """Build the Cityscapes dataflow for split *name*.

    Any name containing 'train' selects the augmented training pipeline
    (padded crop + flip + mean subtraction, batched per GPU); otherwise a
    batch-size-1 evaluation pipeline that only decodes images.

    NOTE(review): depends on module-level `base_dir` and `args` — confirm
    both are defined where this function is used.
    """
    isTrain = True if 'train' in name else False

    # per-channel BGR mean, expanded to a full crop-sized image so the
    # subtraction below is a single array op
    m = np.array([104, 116, 122])
    const_arr = np.resize(m, (1, 1, 3))  # 1x1x3, broadcasts over an HWC image
    const_arr = np.zeros(
        (args.crop_size[0], args.crop_size[1], 3)) + const_arr  #broadcast

    if isTrain:
        #ds = FakeData([[1024, 2048, 3], [ 1024, 2048]], 5000, random=False, dtype='uint8')
        #ds = FakeData([[CROP_HEIGHT, CROP_HEIGHT, 3], [CROP_HEIGHT, CROP_HEIGHT]], 5000,random=False, dtype='uint8')
        ds = CityscapesFiles(base_dir, meta_dir, name, shuffle=True)
        parallel = min(3, multiprocessing.cpu_count())
        augmentors = [
            RandomCropWithPadding(args.crop_size),
            Flip(horiz=True),
        ]
        aug = imgaug.AugmentorList(augmentors)

        def mapf(ds):
            # decode, apply the same geometric params to image and label,
            # then subtract the precomputed channel mean
            img, label = ds
            img = cv2.imread(img, cv2.IMREAD_COLOR)
            label = cv2.imread(label, cv2.IMREAD_GRAYSCALE)
            img, params = aug.augment_return_params(img)
            label = aug._augment(label, params)
            img = img - const_arr  # very time-consuming
            return img, label

        #ds = MapData(ds, mapf)
        ds = MultiThreadMapData(ds,
                                parallel,
                                mapf,
                                buffer_size=500,
                                strict=True)
        #ds = MapData(ds, reduce_mean_rgb)

        ds = BatchData(ds, args.batch_size * gpu_nums)
        #ds = PrefetchDataZMQ(ds, 1)
    else:

        def imgread(ds):
            # evaluation path: decode only, no augmentation or mean subtraction
            img, label = ds
            img = cv2.imread(img, cv2.IMREAD_COLOR)
            label = cv2.imread(label, cv2.IMREAD_GRAYSCALE)
            return [img, label]

        ds = CityscapesFiles(base_dir, meta_dir, name, shuffle=False)
        ds = MapData(ds, imgread)
        ds = BatchData(ds, 1)

    return ds
예제 #27
0
def get_dataflow(path, is_train=True, img_path=None, sigma=8.0, output_shape=(1440, 2560),
                 numparts=5, translation=False, scale=False, rotation=True,
                 mins=0.25, maxs=1.2, mina=-np.pi, maxa=np.pi, ilumination=0.0, image_type='RGB'):
    """Build an augmented CocoPose dataflow from a json skeleton description.

    NOTE(review): the `numparts` argument is immediately overwritten by the
    value parsed from the json file — confirm whether the parameter is still
    needed by callers.
    """
    print('Creating images from', path)
    numparts, skeleton = get_skeleton_from_json(path)
    ds = CocoPose(path, img_path, is_train,
                  # numparts + 1: one extra channel for the background
                  numparts=numparts + 1, sigma=sigma, skeleton=skeleton,
                  output_shape=output_shape, translation=translation,
                  scale=scale, rotation=rotation, mins=mins, maxs=maxs,
                  mina=mina, maxa=maxa, ilumination=ilumination,
                  image_type=image_type)  # read data from lmdb
    if is_train:
        # threaded image reads, then a single combined augmentation step
        ds = MultiThreadMapData(ds, nr_thread=8, map_func=read_image_url,
                                buffer_size=10)
        ds = MapDataComponent(ds, get_augmented_image)
        ds = MapData(ds, pose_to_img)
        ds = PrefetchData(ds, 10, multiprocessing.cpu_count())
    else:
        ds = MapData(ds, read_image_url)
        ds = MapData(ds, pose_to_img)
        ds = PrefetchData(ds, 10, multiprocessing.cpu_count() // 4)

    return ds
def get_infer_iterator(dataset, hparams, lmdb_path):
    """Inference iterator backed by an lmdb cache: batch, split per GPU, prefetch."""
    serialize_to_lmdb(dataset, hparams, lmdb_path)

    num_gpu = hparams.num_gpu
    batch_size = hparams.infer_batch_size

    df = LMDBSerializer.load(lmdb_path, shuffle=False)
    # remainder=False: drop the final partial batch
    df = BatchData(df, batch_size=batch_size, remainder=False)
    # split every batch component into num_gpu shards
    df = MapData(df,
                 lambda x: [np.array_split(x[idx], num_gpu)
                            for idx in range(len(x))])
    return PrefetchDataZMQ(df, nr_proc=1, hwm=batch_size * 10)
예제 #29
0
class ImageGenerator():
    """Iterator yielding augmented (image, label) samples.

    Bug fixes vs. the original:
    - `is_train` was referenced in __init__ but never defined (NameError);
      it is now a trailing keyword parameter whose default keeps old call
      sites working.
    - `MapData(ds, ...)` referenced the undefined local `ds`; use `self.ds`.
    - `next()` read the undefined global `gen_ds`; use `self.gen_ds`.
    - the `mina`/`maxa` arguments were ignored and overwritten with -pi/+pi;
      the passed values are now stored (defaults are unchanged).
    """

    def __init__(self, path, img_path, image, label, output_shape=(256, 256),
                 batch_size=32, translation=True, scale=True, rotation=True,
                 mins=0.25, maxs=1.2, mina=-np.pi, maxa=np.pi, ilumination=0.0,
                 is_train=True):
        self.path = path
        self.img_path = img_path
        self.image = image
        self.label = (label == 1)  # binarize the label mask
        self.output_shape = output_shape
        self.batch_size = batch_size
        self.translation = translation
        self.scale = scale
        self.rotation = rotation
        self.mins = mins
        self.maxs = maxs
        self.mina = mina
        self.maxa = maxa
        self.ilumination = ilumination
        self.ds = CocoPose(path, img_path, is_train)
        self.ds_img = MapData(self.ds, read_image_url)
        self.gen_ds = self.ds_img.get_data()

    def next(self):
        """Return one augmented sample."""
        meta = next(self.gen_ds)[0]
        # NOTE(review): the result of get_augmented_image(meta) is unused;
        # kept in case the call has side effects — confirm and drop if not.
        new_meta = get_augmented_image(meta)

        return get_augemented_image_and_label(
            self.image, self.label, output_shape=self.output_shape,
            translation=self.translation, scale=self.scale,
            rotation=self.rotation, mins=self.mins, maxs=self.maxs,
            ilumination=self.ilumination)

    def __next__(self):
        return self.next()

    def __call__(self):
        return self

    def __iter__(self):
        # yields batch_size samples per pass
        for _ in range(self.batch_size):
            yield self.next()
예제 #30
0
def get_iterator(hparams,
                 dataset,
                 lmdb_path,
                 shuffle=True,
                 drop_remainder=True,
                 nr_proc=4):
    """Training iterator backed by an lmdb cache: batch, split per GPU, prefetch."""
    serialize_to_lmdb(hparams, dataset, lmdb_path)

    num_gpu = hparams.num_gpu
    batch_size = hparams.batch_size

    df = LMDBSerializer.load(lmdb_path, shuffle=shuffle)
    df = BatchData(df, batch_size=batch_size, remainder=not drop_remainder)
    # split every batch component into num_gpu shards
    df = MapData(df,
                 lambda x: [np.array_split(x[idx], num_gpu)
                            for idx in range(len(x))])
    return PrefetchDataZMQ(df, nr_proc=nr_proc, hwm=batch_size * 10)