def test_deeplab(ctx):
    test_data = get_data("val", DATA_DIR, LIST_DIR, len(ctx))
    # rebuild the context list from the --gpu argument; the passed-in ctx is only
    # used above (via len) to shard the validation feed
    ctx = [mx.gpu(int(i)) for i in args.gpu.split(',')]
    sym_instance = eval(symbol_str)()

    # infer shape
    val_provide_data = [[("data", (1, 3, tile_height, tile_width))]]
    val_provide_label = [[("softmax_label", (1, 1, tile_height, tile_width))]]
    data_shape_dict = {'data': (1, 3, tile_height, tile_width),
                       'softmax_label': (1, 1, tile_height, tile_width)}

    eval_sym = sym_instance.get_symbol(NUM_CLASSES, is_train=False)
    sym_instance.infer_shape(data_shape_dict)

    arg_params, aux_params = load_init_param(args.load, process=True)
    sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict, is_train=False)

    data_names = ['data']
    label_names = ['softmax_label']

    # create predictor
    predictor = Predictor(eval_sym, data_names, label_names,
                          context=ctx,
                          provide_data=val_provide_data,
                          provide_label=val_provide_label,
                          arg_params=arg_params, aux_params=aux_params)

    if args.vis:
        from mxnetgo.myutils.fs import mkdir_p
        vis_dir = os.path.join(logger.get_logger_dir(), "vis")
        mkdir_p(vis_dir)

    stats = MIoUStatistics(NUM_CLASSES)
    test_data.reset_state()
    nbatch = 0
    for data, label in tqdm(test_data.get_data()):
        # multi-scale sliding-window inference; returns per-class score maps
        output_all = predict_scaler(data, predictor,
                                    scales=[0.9, 1.0, 1.1],
                                    classes=NUM_CLASSES,
                                    tile_size=(tile_height, tile_width),
                                    is_densecrf=False, nbatch=nbatch,
                                    val_provide_data=val_provide_data,
                                    val_provide_label=val_provide_label)
        output_all = np.argmax(output_all, axis=0)
        label = np.squeeze(label)
        if args.vis:
            cv2.imwrite(os.path.join(vis_dir, "{}.jpg".format(nbatch)),
                        visualize_label(output_all))
        stats.feed(output_all, label)  # very time-consuming
        nbatch += 1

    logger.info("mIoU: {}, meanAcc: {}, acc: {}".format(
        stats.mIoU, stats.mean_accuracy, stats.accuracy))
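# For reference, the multi-scale fusion performed by predict_scaler can be pictured
# with the minimal sketch below. This is NOT the mxnetgo implementation -- it is an
# illustrative stand-in that assumes a hypothetical `predict_fn` returning per-class
# score maps of shape (classes, h, w) for a single HWC image, and it reuses the
# module's existing numpy/cv2 imports.
def multi_scale_scores_sketch(image, predict_fn, scales, classes):
    """Average per-class score maps over several input scales (illustrative only)."""
    h, w = image.shape[:2]
    fused = np.zeros((classes, h, w), dtype=np.float32)
    for s in scales:
        scaled = cv2.resize(image, None, fx=s, fy=s, interpolation=cv2.INTER_LINEAR)
        scores = predict_fn(scaled)  # hypothetical: (classes, s*h, s*w) score maps
        for c in range(classes):
            # bring each class map back to the original resolution and accumulate
            fused[c] += cv2.resize(scores[c], (w, h), interpolation=cv2.INTER_LINEAR)
    return fused / len(scales)  # argmax over axis 0 then yields the label map, as above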
def proceed_test():
    ds = PascalVOC12(TEST_DATA_DIR, LIST_DIR, "test", shuffle=False)
    imagelist = ds.imglist

    def f(image):
        # subtract the per-channel BGR mean; (1, 1, 3) broadcasts over the HWC image
        m = np.array([104, 116, 122])
        const_arr = np.resize(m, (1, 1, 3))
        return image - const_arr

    ds = MapData(ds, f)
    ds = BatchData(ds, 1)

    ctx = [mx.gpu(int(i)) for i in args.gpu.split(',')]
    sym_instance = resnet101_deeplab_new()

    # infer shape
    val_provide_data = [[("data", (1, 3, tile_height, tile_width))]]
    val_provide_label = [[("softmax_label", (1, 1, tile_height, tile_width))]]
    data_shape_dict = {'data': (1, 3, tile_height, tile_width),
                       'softmax_label': (1, 1, tile_height, tile_width)}

    eval_sym = sym_instance.get_symbol(NUM_CLASSES, is_train=False, use_global_stats=True)
    sym_instance.infer_shape(data_shape_dict)

    arg_params, aux_params = load_init_param(args.load, process=True)
    sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict, is_train=False)

    data_names = ['data']
    label_names = ['softmax_label']

    # create predictor
    predictor = Predictor(eval_sym, data_names, label_names,
                          context=ctx,
                          provide_data=val_provide_data,
                          provide_label=val_provide_label,
                          arg_params=arg_params, aux_params=aux_params)

    from mxnetgo.myutils.fs import mkdir_p
    import shutil
    vis_dir = "deeplabv2_4gpu_test_result"
    check_dir = os.path.join(vis_dir, "check")
    shutil.rmtree(vis_dir, ignore_errors=True)  # start from an empty result directory
    mkdir_p(check_dir)

    _itr = ds.get_data()
    nbatch = 0
    for i in tqdm(range(len(imagelist))):
        data = next(_itr)
        filename = os.path.basename(imagelist[i]).rsplit(".", 1)[0]
        print(filename)
        output_all = predict_scaler(data, predictor,
                                    scales=[0.5, 0.75, 1.0, 1.25, 1.5],
                                    classes=NUM_CLASSES,
                                    tile_size=(tile_height, tile_width),
                                    is_densecrf=False, nbatch=nbatch,
                                    val_provide_data=val_provide_data,
                                    val_provide_label=val_provide_label)
        output_all = np.argmax(output_all, axis=0).astype(np.uint8)
        # raw class-id map for the test result, plus a side-by-side check image
        result = output_all[:, :, None]
        cv2.imwrite(os.path.join(vis_dir, "{}.png".format(filename)), result)
        cv2.imwrite(os.path.join(check_dir, "{}.png".format(filename)),
                    np.concatenate((data[0][0], visualize_label(output_all)), axis=1))
        nbatch += 1
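# proceed_test writes raw class-id PNGs (one channel, values 0..NUM_CLASSES-1). If an
# indexed-color PNG in the familiar PASCAL VOC palette is wanted for inspection, the
# maps can be re-saved with Pillow as sketched below. Both helpers are hypothetical
# additions (they assume Pillow is installed), not part of the original script.
def voc_colormap_sketch(num_entries=256):
    """Standard PASCAL VOC palette as a flat [R0, G0, B0, R1, ...] list."""
    palette = [0] * (num_entries * 3)
    for j in range(num_entries):
        lab, i = j, 0
        while lab:
            palette[j * 3 + 0] |= (((lab >> 0) & 1) << (7 - i))
            palette[j * 3 + 1] |= (((lab >> 1) & 1) << (7 - i))
            palette[j * 3 + 2] |= (((lab >> 2) & 1) << (7 - i))
            i += 1
            lab >>= 3
    return palette


def save_indexed_png_sketch(label_map, out_path):
    """Save a 2-D array of class ids as a palettized PNG (illustrative only)."""
    from PIL import Image
    img = Image.fromarray(label_map.astype(np.uint8), mode="P")
    img.putpalette(voc_colormap_sketch())
    img.save(out_path)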
def proceed_validation(ctx):
    # logger.auto_set_dir()
    test_data = get_data("val", DATA_DIR, LIST_DIR, len(ctx))
    # rebuild the context list from the --gpu argument
    ctx = [mx.gpu(int(i)) for i in args.gpu.split(',')]
    sym_instance = resnet101_deeplab_new()

    # infer shape
    val_provide_data = [[("data", (1, 3, tile_height, tile_width))]]
    val_provide_label = [[("softmax_label", (1, 1, tile_height, tile_width))]]
    data_shape_dict = {'data': (1, 3, tile_height, tile_width),
                       'softmax_label': (1, 1, tile_height, tile_width)}

    eval_sym = sym_instance.get_symbol(NUM_CLASSES, is_train=False, use_global_stats=True)
    sym_instance.infer_shape(data_shape_dict)

    arg_params, aux_params = load_init_param(args.load, process=True)
    sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict, is_train=False)

    data_names = ['data']
    label_names = ['softmax_label']

    # create predictor
    predictor = Predictor(eval_sym, data_names, label_names,
                          context=ctx,
                          provide_data=val_provide_data,
                          provide_label=val_provide_label,
                          arg_params=arg_params, aux_params=aux_params)

    if args.vis:
        from mxnetgo.myutils.fs import mkdir_p
        vis_dir = "fuck_vis"
        mkdir_p(vis_dir)

    stats = MIoUStatistics(NUM_CLASSES)
    test_data.reset_state()
    nbatch = 0
    for data, label in tqdm(test_data.get_data()):
        output_all = predict_scaler(data, predictor,
                                    scales=[0.5, 0.75, 1.0, 1.25, 1.5],
                                    classes=NUM_CLASSES,
                                    tile_size=(tile_height, tile_width),
                                    is_densecrf=False, nbatch=nbatch,
                                    val_provide_data=val_provide_data,
                                    val_provide_label=val_provide_label)
        output_all = np.argmax(output_all, axis=0)
        label = np.squeeze(label)
        if args.vis:
            # add the per-channel BGR mean back so the dumped image looks natural;
            # (1, 1, 3) broadcasts over the HWC image
            m = np.array([104, 116, 122])
            const_arr = np.resize(m, (1, 1, 3))
            origin_img = data[0] + const_arr
            cv2.imwrite(os.path.join(vis_dir, "{}.jpg".format(nbatch)),
                        np.concatenate((origin_img,
                                        visualize_label(label),
                                        np.dstack((label, label, label)),
                                        visualize_label(output_all)), axis=1))
        stats.feed(output_all, label)  # very time-consuming
        nbatch += 1

    logger.info("mIoU: {}, meanAcc: {}, acc: {}".format(
        stats.mIoU, stats.mean_accuracy, stats.accuracy))
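# MIoUStatistics above accumulates a confusion matrix over the whole split before
# reporting mIoU / mean accuracy / pixel accuracy. The sketch below shows the standard
# confusion-matrix formulation of those metrics for a single prediction/label pair; it
# is not the mxnetgo code, and it assumes both arrays hold class ids in
# [0, num_classes) with ignored pixels already masked out.
def miou_sketch(pred, label, num_classes):
    """Return (mIoU, mean_accuracy, pixel_accuracy) for one image pair (illustrative only)."""
    pred = pred.flatten().astype(np.int64)
    label = label.flatten().astype(np.int64)
    # confusion[i, j] = number of pixels with ground-truth class i predicted as class j
    confusion = np.bincount(num_classes * label + pred,
                            minlength=num_classes ** 2).reshape(num_classes, num_classes)
    tp = np.diag(confusion).astype(np.float64)
    gt = confusion.sum(axis=1).astype(np.float64)
    union = gt + confusion.sum(axis=0) - tp
    present = union > 0  # only score classes that occur in the ground truth or prediction
    miou = (tp[present] / union[present]).mean()
    mean_acc = (tp[gt > 0] / gt[gt > 0]).mean()
    pixel_acc = tp.sum() / confusion.sum()
    return miou, mean_acc, pixel_acc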
# create predictor
predictor = Predictor(eval_sym, data_names, label_names,
                      context=ctx,
                      provide_data=val_provide_data,
                      provide_label=val_provide_label,
                      arg_params=arg_params, aux_params=aux_params)

if args.vis:
    from mxnetgo.myutils.fs import mkdir_p
    vis_dir = os.path.join(logger.get_logger_dir(), "vis")
    logger.info("vis_dir: {}".format(vis_dir))
    mkdir_p(vis_dir)

# load demo data
image_names = ['frankfurt_000001_073088_leftImg8bit.png',
               'lindau_000024_000019_leftImg8bit.png']
im = cv2.imread('demo/' + image_names[0],
                cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
im = im[None, :, :, :].astype('float32')  # prepend a batch dimension

output_all = predict_scaler(im, predictor,
                            scales=[1.0],
                            classes=config.dataset.NUM_CLASSES,
                            tile_size=(config.TEST.tile_height, config.TEST.tile_width),
                            # the call was truncated in the source; the remaining
                            # keyword arguments below mirror the evaluation calls above
                            is_densecrf=False, nbatch=0,
                            val_provide_data=val_provide_data,
                            val_provide_label=val_provide_label)
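# The demo fragment ends with the raw per-class scores. Presumably it then collapses
# them to a label map and writes a color visualization, in the same way as the
# evaluation loops above; a sketch of that last step (the output file name is
# illustrative, not taken from the original script):
pred = np.argmax(output_all, axis=0).astype(np.uint8)
if args.vis:
    cv2.imwrite(os.path.join(vis_dir, image_names[0].replace(".png", "_pred.png")),
                visualize_label(pred))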