def main(args):
    """Run prediction over a list of images with the model from a config file.

    Args:
        args: Parsed CLI arguments; uses ``cfg``, ``image_path``,
            ``model_path`` and ``save_dir``.

    Raises:
        RuntimeError: If no configuration file is given via ``args.cfg``.
    """
    env_info = get_sys_env()
    # Use the GPU only when Paddle was built with CUDA and a GPU is visible.
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'
    paddle.set_device(place)

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(args.cfg)

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    transforms = Compose(cfg.val_transforms)
    # Removed a leftover debug `print(transforms)` statement.
    image_list, image_dir = get_image_list(args.image_path)
    logger.info('Number of predict images = {}'.format(len(image_list)))

    test_config = get_test_config(cfg, args)

    predict(
        model,
        model_path=args.model_path,
        transforms=transforms,
        image_list=image_list,
        image_dir=image_dir,
        save_dir=args.save_dir,
        **test_config)
def main(args):
    """Build a Predictor from CLI args and run it over the input image(s).

    GPU inference is enabled automatically when Paddle was compiled with
    CUDA and at least one GPU is available.
    """
    env_info = get_sys_env()
    # `bool(...)` replaces the redundant `True if ... else False` ternary;
    # the flag must be a plain bool, not the truthy value `and` returns.
    args.use_gpu = bool(env_info['Paddle compiled with cuda']
                        and env_info['GPUs used'])
    predictor = Predictor(args)
    predictor.run(get_images(args.image_path))
def main(args):
    """Evaluate a trained model on the validation dataset from the config."""
    env_info = get_sys_env()
    gpu_ready = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if gpu_ready else 'cpu')

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(args.cfg)

    val_dataset = cfg.val_dataset
    if not val_dataset:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )

    logger.info(''.join([
        '\n---------------Config Information---------------\n',
        str(cfg),
        '------------------------------------------------',
    ]))

    model = cfg.model
    if args.model_path:
        # Restore the trained weights before running evaluation.
        state_dict = paddle.load(args.model_path)
        model.set_dict(state_dict)
        logger.info('Loaded trained params of model successfully')

    evaluate(model, val_dataset)
def main(args):
    """Predict results for images using the validation transforms of the config."""
    env_info = get_sys_env()
    gpu_ready = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if gpu_ready else 'cpu')

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(args.cfg)

    val_dataset = cfg.val_dataset
    if not val_dataset:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )

    logger.info(''.join([
        '\n---------------Config Information---------------\n',
        str(cfg),
        '------------------------------------------------',
    ]))

    image_list, image_dir = get_image_list(args.image_path)

    predict(
        cfg.model,
        model_path=args.model_path,
        transforms=val_dataset.transforms,
        image_list=image_list,
        image_dir=image_dir,
        save_dir=args.save_dir)
def main(args):
    """Run prediction driven by the validation dataset defined in the config."""
    env_info = get_sys_env()
    gpu_ready = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if gpu_ready else 'cpu')

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(args.cfg)

    val_dataset = cfg.val_dataset
    if not val_dataset:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )

    logger.info(''.join([
        '\n---------------Config Information---------------\n',
        str(cfg),
        '------------------------------------------------',
    ]))

    model = cfg.model
    image_list, image_dir = get_image_list(args.image_path)
    logger.info('Number of predict images = {}'.format(len(image_list)))

    test_kwargs = get_test_config(cfg, args)
    config_check(cfg, val_dataset=val_dataset)

    predict(
        model,
        model_path=args.model_path,
        val_dataset=val_dataset,
        image_list=image_list,
        image_dir=image_dir,
        save_dir=args.save_dir,
        **test_kwargs)
def main(args):
    """Train a segmentation model according to the config and CLI options."""
    if args.seed is not None:
        # Seed every RNG in play so runs are reproducible.
        paddle.seed(args.seed)
        np.random.seed(args.seed)
        random.seed(args.seed)

    env_info = get_sys_env()
    env_lines = ['{}: {}'.format(k, v) for k, v in env_info.items()]
    header = format('Environment Information', '-^48s')
    logger.info('\n'.join(['', header] + env_lines + ['-' * 48]))

    gpu_ready = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if gpu_ready else 'cpu')

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(
        args.cfg,
        learning_rate=args.learning_rate,
        iters=args.iters,
        batch_size=args.batch_size)

    train_dataset = cfg.train_dataset
    if train_dataset is None:
        raise RuntimeError(
            'The training dataset is not specified in the configuration file.')
    elif len(train_dataset) == 0:
        raise ValueError(
            'The length of train_dataset is 0. Please check if your dataset is valid'
        )
    val_dataset = cfg.val_dataset if args.do_eval else None
    losses = cfg.loss

    logger.info(''.join([
        '\n---------------Config Information---------------\n',
        str(cfg),
        '------------------------------------------------',
    ]))

    config_check(cfg, train_dataset=train_dataset, val_dataset=val_dataset)

    train(
        cfg.model,
        train_dataset,
        val_dataset=val_dataset,
        optimizer=cfg.optimizer,
        save_dir=args.save_dir,
        iters=cfg.iters,
        batch_size=cfg.batch_size,
        resume_model=args.resume_model,
        save_interval=args.save_interval,
        log_iters=args.log_iters,
        num_workers=args.num_workers,
        use_vdl=args.use_vdl,
        losses=losses,
        keep_checkpoint_max=args.keep_checkpoint_max)
def main(args):
    """Evaluate a QAT-quantized model on the validation dataset."""
    env_info = get_sys_env()
    gpu_ready = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if gpu_ready else 'cpu')

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(args.cfg)

    # Only support for the DeepLabv3+ model
    if args.data_format == 'NHWC':
        if cfg.dic['model']['type'] != 'DeepLabV3P':
            raise ValueError(
                'The "NHWC" data format only support the DeepLabV3P model!')
        cfg.dic['model']['data_format'] = args.data_format
        cfg.dic['model']['backbone']['data_format'] = args.data_format
        # Propagate the layout to every configured loss as well.
        for loss_cfg in cfg.dic['loss']['types']:
            loss_cfg['data_format'] = args.data_format

    val_dataset = cfg.val_dataset
    if val_dataset is None:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )
    elif len(val_dataset) == 0:
        raise ValueError(
            'The length of val_dataset is 0. Please check if your dataset is valid'
        )

    logger.info(''.join([
        '\n---------------Config Information---------------\n',
        str(cfg),
        '------------------------------------------------',
    ]))

    model = cfg.model
    skip_quant(model)
    quantizer = QAT(config=quant_config)
    quant_model = quantizer.quantize(model)
    logger.info('Quantize the model successfully')

    if args.model_path:
        utils.load_entire_model(quant_model, args.model_path)
        logger.info('Loaded trained params of model successfully')

    test_kwargs = get_test_config(cfg, args)
    config_check(cfg, val_dataset=val_dataset)

    evaluate(
        quant_model, val_dataset, num_workers=args.num_workers, **test_kwargs)
def main(args):
    """Run three-model ensemble prediction (base, secondary and crop models)."""
    env_info = get_sys_env()
    gpu_ready = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if gpu_ready else 'cpu')

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')

    cfg = Config(args.cfg)
    val_dataset = cfg.val_dataset
    cfg_1 = Config(args.cfg_1)
    cfg_crop = Config(args.cfg_crop)
    val_dataset_crop = cfg_crop.val_dataset
    if not val_dataset:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )

    logger.info(''.join([
        '\n---------------Config Information---------------\n',
        str(cfg),
        '------------------------------------------------',
    ]))

    image_list, image_dir = get_image_list(args.image_path)
    logger.info('Number of predict images = {}'.format(len(image_list)))

    predictEnsembleThree(
        cfg.model,
        cfg_1.model,
        cfg_crop.model,
        model_path=args.model_path,
        model_path_1=args.model_path_1,
        model_path_crop=args.model_path_crop,
        transforms=val_dataset.transforms,
        transforms_crop=val_dataset_crop.transforms,
        image_list=image_list,
        image_dir=image_dir,
        save_dir=args.save_dir,
        aug_pred=args.aug_pred,
        scales=args.scales,
        flip_horizontal=args.flip_horizontal,
        flip_vertical=args.flip_vertical,
        is_slide=args.is_slide,
        crop_size=args.crop_size,
        stride=args.stride,
    )
def analyze(args):
    """Report model FLOPs for a given input size (runs on CPU)."""
    env_info = get_sys_env()
    env_lines = ['{}: {}'.format(k, v) for k, v in env_info.items()]
    header = format('Environment Information', '-^48s')
    logger.info('\n'.join(['', header] + env_lines + ['-' * 48]))

    paddle.set_device('cpu')
    cfg = Config(args.config)

    # SyncBatchNorm has no built-in FLOPs counter; register a custom one.
    custom_ops = {paddle.nn.SyncBatchNorm: op_flops_funs.count_syncbn}
    inputs = paddle.randn(args.input_size)
    _dynamic_flops(cfg.model, inputs, custom_ops=custom_ops, print_detail=True)
def main(args):
    """Evaluate a trained model on the Cityscapes validation split.

    Args:
        args: Parsed CLI arguments; uses ``cfg``, ``model_path`` and
            ``num_workers``.

    Raises:
        RuntimeError: If no config file is given or no val dataset is defined.
        ValueError: If the validation dataset is empty.
        NotImplementedError: If the target dataset is not 'cityscapes'.
    """
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'
    paddle.set_device(place)

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(args.cfg)

    if cfg.dic["data"]["target"]["dataset"] == 'cityscapes':
        val_dataset = CityDataset(
            split='val', **cfg.dic["data"]["target"]["kwargs"])
    else:
        raise NotImplementedError()

    # Removed leftover debug code that printed every sample whenever the
    # dataset had fewer than 500 items.
    if val_dataset is None:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )
    elif len(val_dataset) == 0:
        raise ValueError(
            'The length of val_dataset is 0. Please check if your dataset is valid'
        )

    msg = '\n---------------Config Information---------------\n'
    msg += str(cfg)
    msg += '------------------------------------------------'
    logger.info(msg)

    model = cfg.model
    if args.model_path:
        utils.load_entire_model(model, args.model_path)
        logger.info('Loaded trained params of model successfully')

    test_config = get_test_config(cfg, args)
    val.evaluate(model, val_dataset, num_workers=args.num_workers, **test_config)
def main(args):
    """Evaluate a model with optional augmented / sliding-window inference."""
    env_info = get_sys_env()
    gpu_ready = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if gpu_ready else 'cpu')

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(args.cfg)

    val_dataset = cfg.val_dataset
    if val_dataset is None:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )
    elif len(val_dataset) == 0:
        raise ValueError(
            'The length of val_dataset is 0. Please check if your dataset is valid'
        )

    logger.info(''.join([
        '\n---------------Config Information---------------\n',
        str(cfg),
        '------------------------------------------------',
    ]))

    model = cfg.model
    if args.model_path:
        utils.load_entire_model(model, args.model_path)
        logger.info('Loaded trained params of model successfully')

    config_check(cfg, val_dataset=val_dataset)

    evaluate(
        model,
        val_dataset,
        aug_eval=args.aug_eval,
        scales=args.scales,
        flip_horizontal=args.flip_horizontal,
        flip_vertical=args.flip_vertical,
        is_slide=args.is_slide,
        crop_size=args.crop_size,
        stride=args.stride,
        num_workers=args.num_workers,
    )
def prepare_envs(args):
    """
    Set random seed and the device.
    """
    if args.seed is not None:
        paddle.seed(args.seed)
        np.random.seed(args.seed)
        random.seed(args.seed)

    env_info = get_sys_env()
    env_lines = ['{}: {}'.format(k, v) for k, v in env_info.items()]
    header = format('Environment Information', '-^48s')
    logger.info('\n'.join(['', header] + env_lines + ['-' * 48]))

    gpu_ready = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if gpu_ready else 'cpu')
def main(args):
    """Predict an alpha matte and composite the image onto a new background."""
    env_info = get_sys_env()
    gpu_ready = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if gpu_ready else 'cpu')

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(args.cfg)

    val_dataset = cfg.val_dataset
    if val_dataset is None:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )
    elif len(val_dataset) == 0:
        raise ValueError(
            'The length of val_dataset is 0. Please check if your dataset is valid'
        )

    logger.info(''.join([
        '\n---------------Config Information---------------\n',
        str(cfg),
        '------------------------------------------------',
    ]))

    alpha = predict(
        cfg.model,
        model_path=args.model_path,
        transforms=val_dataset.transforms,
        image_list=[args.image_path],
        trimap_list=[args.trimap_path],
        save_dir=args.save_dir)

    # Alpha-blend the original image with the chosen background.
    img_ori = cv2.imread(args.image_path)
    bg = get_bg(args.bg_path, img_ori.shape)
    weights = (alpha / 255)[:, :, np.newaxis]
    composed = (weights * img_ori + (1 - weights) * bg).astype('uint8')

    out_path = os.path.join(args.save_dir, os.path.basename(args.image_path))
    cv2.imwrite(out_path, composed)
def main(args):
    """Predict alpha mattes for images, optionally guided by trimaps."""
    env_info = get_sys_env()
    gpu_ready = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if gpu_ready else 'cpu')

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(args.cfg)

    val_dataset = cfg.val_dataset
    if val_dataset is None:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )
    elif len(val_dataset) == 0:
        raise ValueError(
            'The length of val_dataset is 0. Please check if your dataset is valid'
        )

    logger.info(''.join([
        '\n---------------Config Information---------------\n',
        str(cfg),
        '------------------------------------------------',
    ]))

    model = cfg.model
    image_list, image_dir = get_image_list(args.image_path)
    # Trimaps are optional; when omitted, prediction runs without them.
    if args.trimap_path is None:
        trimap_list = None
    else:
        trimap_list, _ = get_image_list(args.trimap_path)
    logger.info('Number of predict images = {}'.format(len(image_list)))

    predict(
        model,
        model_path=args.model_path,
        transforms=val_dataset.transforms,
        image_list=image_list,
        image_dir=image_dir,
        trimap_list=trimap_list,
        save_dir=args.save_dir)
def main(args):
    """Predict panoptic segmentation results for the given images."""
    env_info = get_sys_env()
    gpu_ready = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if gpu_ready else 'cpu')

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(args.cfg)

    val_dataset = cfg.val_dataset
    if not val_dataset:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )

    logger.info(''.join([
        '\n---------------Config Information---------------\n',
        str(cfg),
        '------------------------------------------------',
    ]))

    model = cfg.model
    image_list, image_dir = get_image_list(args.image_path)
    logger.info('Number of predict images = {}'.format(len(image_list)))

    config_check(cfg, val_dataset=val_dataset)

    # Panoptic post-processing settings come from the dataset definition.
    predict(
        model,
        model_path=args.model_path,
        transforms=val_dataset.transforms,
        thing_list=val_dataset.thing_list,
        label_divisor=val_dataset.label_divisor,
        stuff_area=val_dataset.stuff_area,
        ignore_index=val_dataset.ignore_index,
        image_list=image_list,
        image_dir=image_dir,
        save_dir=args.save_dir,
        threshold=args.threshold,
        nms_kernel=args.nms_kernel,
        top_k=args.top_k)
def main(args):
    """Evaluate a panoptic segmentation model on the validation dataset."""
    env_info = get_sys_env()
    gpu_ready = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if gpu_ready else 'cpu')

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(args.cfg)

    val_dataset = cfg.val_dataset
    if val_dataset is None:
        raise RuntimeError(
            'The verification dataset is not specified in the configuration file.'
        )
    elif len(val_dataset) == 0:
        raise ValueError(
            'The length of val_dataset is 0. Please check if your dataset is valid'
        )

    logger.info(''.join([
        '\n---------------Config Information---------------\n',
        str(cfg),
        '------------------------------------------------',
    ]))

    model = cfg.model
    if args.model_path:
        paddleseg.utils.utils.load_entire_model(model, args.model_path)
        logger.info('Loaded trained params of model successfully')

    config_check(cfg, val_dataset=val_dataset)

    evaluate(
        model,
        val_dataset,
        threshold=args.threshold,
        nms_kernel=args.nms_kernel,
        top_k=args.top_k,
        num_workers=args.num_workers,
    )
import tqdm import cv2 import paddle import paddle.nn as nn import numpy as np import paddle.nn.functional as F from visualdl import LogWriter from util.vis import draw_probmap, draw_points from util.misc import save_checkpoint from util.distributed import reduce_loss_dict from paddleseg.utils import get_sys_env, logger from .optimizer import get_optimizer_lr env_info = get_sys_env() info = ['{}: {}'.format(k, v) for k, v in env_info.items()] info = '\n'.join(['', format('Environment Information', '-^48s')] + info + ['-' * 48]) logger.info(info) place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[ 'GPUs used'] else 'cpu' paddle.set_device(place) class ISTrainer(object): def __init__(self, model, cfg, model_cfg, loss_cfg,
def main(args):
    """Train a domain-adaptation model on source and target datasets."""
    if args.seed is not None:
        paddle.seed(args.seed)
        np.random.seed(args.seed)
        random.seed(args.seed)
        logger.info('Set seed to {}'.format(args.seed))

    env_info = get_sys_env()
    env_lines = ['{}: {}'.format(k, v) for k, v in env_info.items()]
    header = format('Environment Information', '-^48s')
    logger.info('\n'.join(['', header] + env_lines + ['-' * 48]))

    gpu_ready = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if gpu_ready else 'cpu')

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(
        args.cfg,
        learning_rate=args.learning_rate,
        iters=args.iters,
        batch_size=args.batch_size)

    source_cfg = cfg.dic["data"]["source"]
    target_cfg = cfg.dic["data"]["target"]

    # Source-domain datasets (synthetic).
    if source_cfg["dataset"] == 'synthia':
        train_dataset_src = SYNTHIADataset(
            split='train', **source_cfg["kwargs"])
        val_dataset_src = SYNTHIADataset(split='val', **source_cfg["kwargs"])
    elif source_cfg["dataset"] == 'gta5':
        train_dataset_src = GTA5Dataset(split='train', **source_cfg["kwargs"])
        val_dataset_src = GTA5Dataset(split='val', **source_cfg["kwargs"])
    else:
        raise NotImplementedError()

    # Target-domain datasets (real).
    if target_cfg["dataset"] == 'cityscapes':
        train_dataset_tgt = CityDataset(split='train', **target_cfg["kwargs"])
        val_dataset_tgt = CityDataset(split='val', **target_cfg["kwargs"])
    else:
        raise NotImplementedError()

    # Validation sets are only kept when --do_eval is requested.
    if not args.do_eval:
        val_dataset_tgt = None
        val_dataset_src = None

    if train_dataset_src is None:
        raise RuntimeError(
            'The training dataset is not specified in the configuration file.')
    elif len(train_dataset_src) == 0:
        raise ValueError(
            'The length of train_dataset is 0. Please check if your dataset is valid'
        )

    logger.info(''.join([
        '\n---------------Config Information---------------\n',
        str(cfg),
        '------------------------------------------------',
    ]))

    trainer = Trainer(model=cfg.model, cfg=cfg.dic)
    trainer.train(
        train_dataset_src,
        train_dataset_tgt,
        val_dataset_tgt=val_dataset_tgt,
        val_dataset_src=val_dataset_src,
        optimizer=cfg.optimizer,
        save_dir=args.save_dir,
        iters=cfg.iters,
        batch_size=cfg.batch_size,
        resume_model=args.resume_model,
        save_interval=args.save_interval,
        log_iters=args.log_iters,
        num_workers=args.num_workers,
        use_vdl=args.use_vdl,
        keep_checkpoint_max=args.keep_checkpoint_max,
        test_config=cfg.test_config)
def background_replace(args):
    """Replace the background of an image, a video file, or a camera stream.

    Exactly one of three modes runs:
      * ``--img_path``  : single-image background replacement.
      * ``--video_path``: frame-by-frame replacement, written to an AVI file.
      * neither         : read the default camera and show a live preview
        (press 'q' to quit).

    The background is either a static image (``--bg_img_path``) or a looping
    background video (``--bg_video_path``).
    """
    env_info = get_sys_env()
    # `bool(...)` replaces the redundant `True if ... else False` ternary.
    args.use_gpu = bool(env_info['Paddle compiled with cuda']
                        and env_info['GPUs used'])
    predictor = Predictor(args)

    if not osp.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # Image background replacement.
    if args.img_path is not None:
        if not osp.exists(args.img_path):
            raise Exception('The --img_path is not existed: {}'.format(
                args.img_path))
        img = cv2.imread(args.img_path)
        bg = get_bg_img(args.bg_img_path, img.shape)
        comb = predictor.run(img, bg)
        save_name = osp.basename(args.img_path)
        save_path = osp.join(args.save_dir, save_name)
        cv2.imwrite(save_path, comb)
    # Video background replacement.
    else:
        # Background source: use the background video when given, otherwise
        # fall back to the background image.
        if args.bg_video_path is not None:
            if not osp.exists(args.bg_video_path):
                raise Exception(
                    'The --bg_video_path is not existed: {}'.format(
                        args.bg_video_path))
            is_video_bg = True
        else:
            bg = get_bg_img(args.bg_img_path, args.input_shape)
            is_video_bg = False

        # Offline video prediction.
        if args.video_path is not None:
            logger.info('Please wait. It is computing......')
            if not osp.exists(args.video_path):
                raise Exception('The --video_path is not existed: {}'.format(
                    args.video_path))

            cap_video = cv2.VideoCapture(args.video_path)
            fps = cap_video.get(cv2.CAP_PROP_FPS)
            width = int(cap_video.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
            save_name = osp.basename(args.video_path)
            save_name = save_name.split('.')[0]
            save_path = osp.join(args.save_dir, save_name + '.avi')
            cap_out = cv2.VideoWriter(
                save_path, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps,
                (width, height))

            if is_video_bg:
                cap_bg = cv2.VideoCapture(args.bg_video_path)
                frames_bg = cap_bg.get(cv2.CAP_PROP_FRAME_COUNT)
                current_bg = 1
            frame_num = 0
            while cap_video.isOpened():
                ret, frame = cap_video.read()
                if ret:
                    # Read the next background frame, looping when exhausted.
                    if is_video_bg:
                        ret_bg, bg = cap_bg.read()
                        if ret_bg:
                            if current_bg == frames_bg:
                                current_bg = 1
                                cap_bg.set(cv2.CAP_PROP_POS_FRAMES, 0)
                        else:
                            break
                        current_bg += 1
                    comb = predictor.run(frame, bg)
                    cap_out.write(comb)
                    frame_num += 1
                    logger.info('Processing frame {}'.format(frame_num))
                else:
                    break

            if is_video_bg:
                cap_bg.release()
            cap_video.release()
            cap_out.release()
        # Neither an image nor a video was given: open the camera.
        else:
            cap_video = cv2.VideoCapture(0)
            if not cap_video.isOpened():
                # Bug fix: the original had an unreachable `return` after
                # this raise; it has been removed.
                raise IOError("Error opening video stream or file, "
                              "--video_path whether existing: {}"
                              " or camera whether working".format(
                                  args.video_path))

            if is_video_bg:
                cap_bg = cv2.VideoCapture(args.bg_video_path)
                frames_bg = cap_bg.get(cv2.CAP_PROP_FRAME_COUNT)
                current_bg = 1
            while cap_video.isOpened():
                ret, frame = cap_video.read()
                if ret:
                    # Read the next background frame, looping when exhausted.
                    if is_video_bg:
                        ret_bg, bg = cap_bg.read()
                        if ret_bg:
                            if current_bg == frames_bg:
                                current_bg = 1
                                cap_bg.set(cv2.CAP_PROP_POS_FRAMES, 0)
                        else:
                            break
                        current_bg += 1
                    comb = predictor.run(frame, bg)
                    cv2.imshow('HumanSegmentation', comb)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
                else:
                    break
            if is_video_bg:
                cap_bg.release()
            cap_video.release()

    if args.test_speed:
        timer = predictor.cost_averager
        logger.info(
            'Model inference time per image: {}\nFPS: {}\nNum of images: {}'.
            format(timer.get_average(), 1 / timer.get_average(), timer._cnt))
def main(args):
    """Prune a trained model by L1-norm filter sensitivity, retrain and export.

    Steps:
        1. Compute per-parameter sensitivity on the validation set.
        2. Prune filters to reach ``args.pruning_ratio``.
        3. Retrain the pruned network, evaluate it and export the result.

    Raises:
        RuntimeError: If the pruning ratio is outside (0, 1) or a dataset is
            missing from the configuration file.
    """
    env_info = get_sys_env()
    place = 'gpu' if env_info['Paddle compiled with cuda'] and env_info[
        'GPUs used'] else 'cpu'
    paddle.set_device(place)

    if not (0.0 < args.pruning_ratio < 1.0):
        raise RuntimeError(
            'The model pruning rate must be in the range of (0, 1).')
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    cfg = Config(
        args.cfg,
        iters=args.retraining_iters,
        batch_size=args.batch_size,
        learning_rate=args.learning_rate)

    train_dataset = cfg.train_dataset
    if not train_dataset:
        raise RuntimeError(
            'The training dataset is not specified in the configuration file.')
    val_dataset = cfg.val_dataset
    if not val_dataset:
        # Bug fix: the original message contained a typo ('c;onfiguration').
        raise RuntimeError(
            'The validation dataset is not specified in the configuration file.'
        )
    os.environ['PADDLESEG_EXPORT_STAGE'] = 'True'

    net = cfg.model
    if args.model_path:
        para_state_dict = paddle.load(args.model_path)
        net.set_dict(para_state_dict)
        logger.info('Loaded trained params of model successfully')

    logger.info(
        'Step 1/3: Start calculating the sensitivity of model parameters...')
    sample_shape = [1] + list(train_dataset[0][0].shape)
    sen_file = os.path.join(args.save_dir, 'sen.pickle')
    pruner = L1NormFilterPruner(net, sample_shape)
    pruner.sensitive(
        eval_func=partial(eval_fn, net, val_dataset, args.num_workers),
        sen_file=sen_file)
    logger.info(
        f'The sensitivity calculation of model parameters is complete. The result is saved in {sen_file}.'
    )

    flops = dygraph_flops(net, sample_shape)
    logger.info(
        f'Step 2/3: Start to prune the model, the ratio of pruning is {args.pruning_ratio}. FLOPs before pruning: {flops}.'
    )
    # Avoid the bug when pruning conv2d with small channel number.
    # Remove this code after PaddleSlim 2.1 is available.
    # Related issue: https://github.com/PaddlePaddle/PaddleSlim/issues/674.
    skips = [param.name for param in net.parameters() if param.shape[0] <= 8]
    pruner.sensitive_prune(args.pruning_ratio, skip_vars=skips)
    flops = dygraph_flops(net, sample_shape)
    logger.info(f'Model pruning completed. FLOPs after pruning: {flops}.')

    logger.info('Step 3/3: Start retraining the model.')
    train(
        net,
        train_dataset,
        optimizer=cfg.optimizer,
        save_dir=args.save_dir,
        num_workers=args.num_workers,
        iters=cfg.iters,
        batch_size=cfg.batch_size,
        losses=cfg.loss)

    evaluate(net, val_dataset)

    # Export and clean up from the main process only in distributed runs.
    if paddle.distributed.get_rank() == 0:
        export_model(net, cfg, args.save_dir)
        ckpt = os.path.join(args.save_dir, f'iter_{args.retraining_iters}')
        if os.path.exists(ckpt):
            shutil.rmtree(ckpt)
    logger.info(f'Model retraining finish. Model is saved in {args.save_dir}')
def main(args):
    """Quantize a trained model with QAT, retrain it and export the result."""
    env_info = get_sys_env()
    gpu_ready = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if gpu_ready else 'cpu')

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    cfg = Config(
        args.cfg,
        iters=args.retraining_iters,
        batch_size=args.batch_size,
        learning_rate=args.learning_rate)

    train_dataset = cfg.train_dataset
    if not train_dataset:
        raise RuntimeError(
            'The training dataset is not specified in the configuration file.')
    val_dataset = cfg.val_dataset
    if not val_dataset:
        raise RuntimeError(
            'The validation dataset is not specified in the configuration file.'
        )
    os.environ['PADDLESEG_EXPORT_STAGE'] = 'True'

    net = cfg.model
    if args.model_path:
        para_state_dict = paddle.load(args.model_path)
        net.set_dict(para_state_dict)
        logger.info('Loaded trained params of model successfully')

    logger.info('Step 1/2: Start to quantify the model...')
    quantizer = QAT(config=get_quant_config())
    quantizer.quantize(net)
    logger.info('Model quantification completed.')

    logger.info('Step 2/2: Start retraining the quantized model.')
    train(
        net,
        train_dataset,
        optimizer=cfg.optimizer,
        save_dir=args.save_dir,
        num_workers=args.num_workers,
        iters=cfg.iters,
        batch_size=cfg.batch_size,
        losses=cfg.loss)

    evaluate(net, val_dataset)

    # Export artifacts from the main process only in distributed runs.
    if paddle.distributed.get_rank() == 0:
        save_path = os.path.join(args.save_dir, 'model')
        input_var = paddle.ones([1] + list(val_dataset[0][0].shape))
        quantizer.save_quantized_model(net, save_path, input_spec=[input_var])

        # Write the deployment descriptor next to the exported model.
        yml_file = os.path.join(args.save_dir, 'deploy.yaml')
        with open(yml_file, 'w') as file:
            transforms = cfg.dic['val_dataset']['transforms']
            data = {
                'Deploy': {
                    'transforms': transforms,
                    'model': 'model.pdmodel',
                    'params': 'model.pdiparams'
                }
            }
            yaml.dump(data, file)

        ckpt = os.path.join(args.save_dir, f'iter_{args.retraining_iters}')
        if os.path.exists(ckpt):
            shutil.rmtree(ckpt)

    logger.info(
        f'Model retraining complete. The quantized model is saved in {args.save_dir}.'
    )
def main(args):
    """Train a model, optionally in NHWC data format (DeepLabV3P only)."""
    if args.seed is not None:
        # Seed every RNG in play so runs are reproducible.
        paddle.seed(args.seed)
        np.random.seed(args.seed)
        random.seed(args.seed)

    env_info = get_sys_env()
    env_lines = ['{}: {}'.format(k, v) for k, v in env_info.items()]
    header = format('Environment Information', '-^48s')
    logger.info('\n'.join(['', header] + env_lines + ['-' * 48]))

    gpu_ready = env_info['Paddle compiled with cuda'] and env_info['GPUs used']
    paddle.set_device('gpu' if gpu_ready else 'cpu')

    if not args.cfg:
        raise RuntimeError('No configuration file specified.')
    cfg = Config(
        args.cfg,
        learning_rate=args.learning_rate,
        iters=args.iters,
        batch_size=args.batch_size)

    # Only support for the DeepLabv3+ model
    if args.data_format == 'NHWC':
        if cfg.dic['model']['type'] != 'DeepLabV3P':
            raise ValueError(
                'The "NHWC" data format only support the DeepLabV3P model!')
        cfg.dic['model']['data_format'] = args.data_format
        cfg.dic['model']['backbone']['data_format'] = args.data_format
        # Propagate the layout to every configured loss as well.
        for loss_cfg in cfg.dic['loss']['types']:
            loss_cfg['data_format'] = args.data_format

    train_dataset = cfg.train_dataset
    if train_dataset is None:
        raise RuntimeError(
            'The training dataset is not specified in the configuration file.')
    elif len(train_dataset) == 0:
        raise ValueError(
            'The length of train_dataset is 0. Please check if your dataset is valid'
        )
    val_dataset = cfg.val_dataset if args.do_eval else None
    losses = cfg.loss

    logger.info(''.join([
        '\n---------------Config Information---------------\n',
        str(cfg),
        '------------------------------------------------',
    ]))

    config_check(cfg, train_dataset=train_dataset, val_dataset=val_dataset)

    train(
        cfg.model,
        train_dataset,
        val_dataset=val_dataset,
        optimizer=cfg.optimizer,
        save_dir=args.save_dir,
        iters=cfg.iters,
        batch_size=cfg.batch_size,
        resume_model=args.resume_model,
        save_interval=args.save_interval,
        log_iters=args.log_iters,
        num_workers=args.num_workers,
        use_vdl=args.use_vdl,
        losses=losses,
        keep_checkpoint_max=args.keep_checkpoint_max,
        test_config=cfg.test_config,
        fp16=args.fp16,
        profiler_options=args.profiler_options)