import os

from tensorpack.dataflow import (BatchData, MapData, PrefetchData,
                                 PrefetchDataZMQ, TestDataSpeed)

# Project-specific helpers; the exact module name is an assumption here.
from dataflow import (CocoDataFlow, COCODataPaths, read_img, gen_mask,
                      augment, apply_mask, build_sample)


def get_dataflow(coco_data_paths):
    """
    Initializes the tensorpack dataflow and serves a generator
    for the training operation.

    :param coco_data_paths: paths to the COCO files: annotation file and folder with images
    :return: dataflow object
    """
    df = CocoDataFlow((368, 368), coco_data_paths)
    df.prepare()
    df = MapData(df, read_img)
    df = MapData(df, gen_mask)
    df = MapData(df, augment)
    df = MapData(df, apply_mask)
    df = MapData(df, build_sample)
    df = PrefetchDataZMQ(df, nr_proc=4)
    # df = PrefetchData(df, 2, 1)

    return df
def get_dataflow(annot_path, img_dir):
    """
    Initializes the tensorpack dataflow and serves a generator
    for the training operation.

    :param annot_path: path to the annotation file
    :param img_dir: path to the images
    :return: dataflow object
    """
    df = CocoDataFlow((368, 368), annot_path, img_dir)
    df.prepare()
    df = MapData(df, read_img)
    df = MapData(df, gen_mask)
    df = MapData(df, augment)
    df = MapData(df, apply_mask)
    df = MapData(df, build_sample)
    df = PrefetchDataZMQ(df, nr_proc=4)
    # df = PrefetchData(df, 2, 1)

    return df
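# A minimal consumption sketch for the variants above (the paths are
# placeholders). A tensorpack dataflow must have reset_state() called before
# iteration; each datapoint is assumed to carry the same five fields as
# build_debug_sample below, with the image in place of meta:
df = get_dataflow('annotations/person_keypoints_val2017.json', 'val2017/')
df.reset_state()
for img, mask_paf, mask_heatmap, pafmap, heatmap in df.get_data():
    print(img.shape, pafmap.shape, heatmap.shape)
    break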
        46, meta.aug_joints, 1, stride=8)

    return [meta, mask_paf, mask_heatmap, pafmap, heatmap]


if __name__ == '__main__':
    batch_size = 10
    curr_dir = os.path.dirname(__file__)
    annot_path = os.path.join(
        curr_dir, '../dataset/annotations/pen_keypoints_validation.json')
    img_dir = os.path.abspath(os.path.join(curr_dir, '../dataset/validation/'))
    df = CocoDataFlow(
        (368, 368), COCODataPaths(annot_path, img_dir))  # , select_ids=[1000]
    df.prepare()
    df = MapData(df, read_img)
    df = MapData(df, gen_mask)
    df = MapData(df, augment)
    df = MapData(df, apply_mask)
    df = MapData(df, build_debug_sample)
    df = PrefetchData(df, nr_prefetch=2, nr_proc=1)

    df.reset_state()
    gen = df.get_data()

    for g in gen:
        show_image_mask_center_of_main_person(g)
        # show_image_heatmap_paf(g)
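# Aside: the 46 in the truncated call above is the label-map side length.
# With a network stride of 8, a 368x368 input maps to 368 // 8 == 46 cells
# per side, which is why 46 and stride=8 appear together.
assert 368 // 8 == 46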
    df.reset_state()

    return df


if __name__ == '__main__':
    """
    Run this script to check the speed of generating samples. Tweak the
    nr_proc parameter of PrefetchDataZMQ; ideally it should reflect the
    number of cores in your hardware.
    """
    batch_size = 10
    curr_dir = os.path.dirname(__file__)
    annot_path = os.path.join(
        curr_dir, '../dataset/annotations/person_keypoints_val2017.json')
    img_dir = os.path.abspath(os.path.join(curr_dir, '../dataset/val2017/'))
    df = CocoDataFlow(
        (368, 368), COCODataPaths(annot_path, img_dir))  # , select_ids=[1000]
    df.prepare()
    df = MapData(df, read_img)
    df = MapData(df, gen_mask)
    df = MapData(df, augment)
    df = MapData(df, apply_mask)
    df = MapData(df, build_sample)
    df = PrefetchDataZMQ(df, nr_proc=4)
    df = BatchData(df, batch_size, use_list=False)
    df = MapData(df, lambda x: (
        [x[0], x[1], x[2]],
        [x[3], x[4], x[3], x[4], x[3], x[4],
         x[3], x[4], x[3], x[4], x[3], x[4]]))

    TestDataSpeed(df, size=100).start()
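# The inline lambda above splits each batched datapoint into model inputs
# (x[0:3]: the image plus the two masks) and twelve targets: the
# (pafmap, heatmap) pair repeated six times, presumably one pair per
# refinement stage of the network. An equivalent, less repetitive
# formulation (num_stages inferred from the twelve outputs):
def split_inputs_and_targets(x, num_stages=6):
    return [x[0], x[1], x[2]], [x[3], x[4]] * num_stages
# df = MapData(df, split_inputs_and_targets)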
def get_dataflow(annot_path, img_dir, batch_size):
    """
    Initializes the tensorpack dataflow and serves a generator
    for the training operation.

    :param annot_path: path to the annotation file
    :param img_dir: path to the images
    :param batch_size: batch size
    :return: dataflow object
    """
    df = CocoDataFlow((368, 368), annot_path, img_dir)
    df.prepare()
    df = MapData(df, read_img)
    df = MapData(df, gen_mask)
    df = MapData(df, augment)
    df = MapData(df, apply_mask)
    df = MapData(df, build_sample)
    df = PrefetchDataZMQ(df, nr_proc=4)
    # df = PrefetchData(df, 2, 1)
    df = BatchData(df, batch_size, use_list=False)
    df = MapData(df, lambda x: (
        [x[0], x[1], x[2]],
        [x[3], x[4], x[3], x[4], x[3], x[4],
         x[3], x[4], x[3], x[4], x[3], x[4]]))
    df.reset_state()

    return df
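# Sketch of how a training script might consume the batched dataflow above.
# The model itself is out of scope here, so the fit call is left as a
# comment; fit_generator expects exactly the ([inputs], [targets]) tuples
# this dataflow yields (the paths below are placeholders):
train_df = get_dataflow('annotations/person_keypoints_val2017.json',
                        'val2017/', batch_size=10)
# model.fit_generator(train_df.get_data(),
#                     steps_per_epoch=train_df.size(), epochs=100)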
        x[4], x[3], x[4],
    ]))
    df.reset_state()

    return df


if __name__ == '__main__':
    """
    Run this script to check the speed of generating samples. Tweak the
    nr_proc parameter of PrefetchData; ideally it should reflect the
    number of cores in your hardware.
    """
    batch_size = 10
    curr_dir = os.path.dirname(__file__)
    annot_path = os.path.join(curr_dir, '../dataset/my_person_keypoints.json')
    img_dir = os.path.abspath(os.path.join(curr_dir, '../dataset/train_data/'))
    df = CocoDataFlow((320, 320), annot_path, img_dir)  # , select_ids=[1000]
    df.prepare()
    df = MapData(df, read_img)
    df = MapData(df, gen_mask)
    df = MapData(df, augment)
    df = MapData(df, apply_mask)
    df = MapData(df, build_sample)
    # PrefetchData also requires nr_prefetch; nr_proc alone is not enough.
    df = PrefetchData(df, nr_prefetch=2, nr_proc=4)
    df = BatchData(df, batch_size, use_list=False)
    df = MapData(df, lambda x: ([x[0], x[1], x[2]], [x[3], x[4], x[3], x[4]]))

    TestDataSpeed(df, size=100).start()
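# Each PrefetchDataZMQ / PrefetchData worker runs its own copy of the whole
# pipeline, so the docstring's advice to match nr_proc to the core count can
# be automated with the standard library:
import multiprocessing
print('suggested nr_proc:', multiprocessing.cpu_count())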