class ImageGenerator:
    """Iterable generator yielding augmented image/label pairs.

    Wraps a CocoPose dataflow, applies image reading and augmentation, and
    yields ``batch_size`` augmented samples per iteration pass.

    Args:
        path: Dataset root path passed to ``CocoPose``.
        img_path: Image directory passed to ``CocoPose``.
        image: Base image used by ``get_augemented_image_and_label``.
        label: Label array; stored as a boolean mask (``label == 1``).
        output_shape: Target (height, width) of augmented outputs.
        batch_size: Number of samples yielded per ``__iter__`` pass.
        translation, scale, rotation: Enable the respective augmentations.
        mins, maxs: Min/max scale factors for the scale augmentation.
        mina, maxa: Min/max rotation angle in radians.
        ilumination: Illumination jitter amount (0.0 disables it).
        is_train: Forwarded to ``CocoPose`` to select the train/val split.
            (New keyword; defaults to True — previously this name was an
            undefined global, a NameError at construction time.)
    """

    def __init__(self, path, img_path, image, label, output_shape=(256, 256),
                 batch_size=32, translation=True, scale=True, rotation=True,
                 mins=0.25, maxs=1.2, mina=-np.pi, maxa=np.pi,
                 ilumination=0.0, is_train=True):
        self.path = path
        self.img_path = img_path
        self.image = image
        # Store label as a boolean mask of the positive class.
        self.label = (label == 1)
        self.output_shape = output_shape
        self.batch_size = batch_size
        self.translation = translation
        self.scale = scale
        self.rotation = rotation
        self.mins = mins
        self.maxs = maxs
        # BUGFIX: previously hard-coded to +/- np.pi, silently ignoring the
        # mina/maxa constructor arguments.
        self.mina = mina
        self.maxa = maxa
        self.ilumination = ilumination
        self.ds = CocoPose(path, img_path, is_train)
        # BUGFIX: was MapData(ds, ...) — `ds` is undefined here; the dataflow
        # to wrap is self.ds.
        self.ds_img = MapData(self.ds, read_image_url)
        self.gen_ds = self.ds_img.get_data()

    def next(self):
        """Pull the next sample from the dataflow and return an augmented pair."""
        # BUGFIX: was next(gen_ds) — `gen_ds` is undefined; the generator is
        # stored on self. The pulled meta advances the stream.
        meta = next(self.gen_ds)[0]
        # NOTE(review): new_meta is computed but never used — the return below
        # augments self.image/self.label instead. Looks like unfinished code;
        # behavior preserved pending clarification.
        new_meta = get_augmented_image(meta)
        # NOTE(review): "augemented" typo matches the project helper's name.
        return get_augemented_image_and_label(
            self.image, self.label,
            output_shape=self.output_shape,
            translation=self.translation,
            scale=self.scale,
            rotation=self.rotation,
            mins=self.mins,
            maxs=self.maxs,
            ilumination=self.ilumination)

    def __next__(self):
        return self.next()

    def __call__(self):
        return self

    def __iter__(self):
        # Yield one batch worth of augmented samples per iteration pass.
        for _ in range(self.batch_size):
            yield self.next()
def proceed_test():
    """Run single-image multi-scale inference on the PascalVOC12 test split.

    Builds a tensorpack dataflow over the test list, mean-subtracts each
    image, runs the DeepLab-v2 ResNet-101 predictor at several scales, and
    writes per-image prediction PNGs plus side-by-side check images.

    Relies on module-level globals: ``args`` (gpu list, checkpoint path),
    ``TEST_DATA_DIR``, ``LIST_DIR``, ``tile_height``, ``tile_width``,
    ``NUM_CLASSES``, and helpers ``resnet101_deeplab_new``,
    ``load_init_param``, ``Predictor``, ``predict_scaler``,
    ``visualize_label``.
    """
    import shutil
    from mxnetgo.myutils.fs import mkdir_p

    ds = PascalVOC12(TEST_DATA_DIR, LIST_DIR, "test", shuffle=False)
    imagelist = ds.imglist

    def f(image):
        """Subtract the per-channel BGR mean from an image."""
        m = np.array([104, 116, 122])
        # Broadcastable (1, 1, 3) mean — assumes HWC image layout (the
        # original "# NCHW" comment looked wrong for a (1,1,3) subtrahend).
        const_arr = np.resize(m, (1, 1, 3))
        return image - const_arr

    ds = MapData(ds, f)
    ds = BatchData(ds, 1)

    ctx = [mx.gpu(int(i)) for i in args.gpu.split(',')]
    sym_instance = resnet101_deeplab_new()

    # Infer shapes for a single 3-channel tile and its dense label map.
    val_provide_data = [[("data", (1, 3, tile_height, tile_width))]]
    val_provide_label = [[("softmax_label", (1, 1, tile_height, tile_width))]]
    data_shape_dict = {
        'data': (1, 3, tile_height, tile_width),
        'softmax_label': (1, 1, tile_height, tile_width)
    }
    eval_sym = sym_instance.get_symbol(NUM_CLASSES, is_train=False,
                                       use_global_stats=True)
    sym_instance.infer_shape(data_shape_dict)

    arg_params, aux_params = load_init_param(args.load, process=True)
    sym_instance.check_parameter_shapes(arg_params, aux_params,
                                        data_shape_dict, is_train=False)

    data_names = ['data']
    label_names = ['softmax_label']

    # Create the predictor bound to the requested GPUs.
    predictor = Predictor(eval_sym, data_names, label_names, context=ctx,
                          provide_data=val_provide_data,
                          provide_label=val_provide_label,
                          arg_params=arg_params, aux_params=aux_params)

    vis_dir = "deeplabv2_4gpu_test_result"
    check_dir = os.path.join(vis_dir, "check")
    # Start from a clean output directory on every run.
    shutil.rmtree(vis_dir, ignore_errors=True)
    mkdir_p(check_dir)

    _itr = ds.get_data()
    nbatch = 0
    for i in tqdm(range(len(imagelist))):
        data = next(_itr)
        l = imagelist[i]
        filename = os.path.basename(l).rsplit(".", 1)[0]
        # BUGFIX: was the Python-2-only statement `print filename`; the call
        # form works under both Python 2 and 3.
        print(filename)
        output_all = predict_scaler(data, predictor,
                                    scales=[0.5, 0.75, 1.0, 1.25, 1.5],
                                    classes=NUM_CLASSES,
                                    tile_size=(tile_height, tile_width),
                                    is_densecrf=False, nbatch=nbatch,
                                    val_provide_data=val_provide_data,
                                    val_provide_label=val_provide_label)
        # Per-pixel argmax over classes -> uint8 label map.
        output_all = np.argmax(output_all, axis=0).astype(np.uint8)
        result = output_all[:, :, None]
        cv2.imwrite(os.path.join(vis_dir, "{}.png".format(filename)), result)
        # Side-by-side check image: input next to colorized prediction.
        cv2.imwrite(
            os.path.join(check_dir, "{}.png".format(filename)),
            np.concatenate((data[0][0], visualize_label(output_all)), axis=1))
        nbatch += 1