def test_bmn(args):
    """Evaluate a BMN model on the test split (legacy fluid/hapi API).

    Args:
        args: parsed command-line arguments; reads ``device``, ``dynamic``,
            ``config_file``, ``weights`` and ``log_interval``.
    """
    device = set_device(args.device)
    # Enable imperative (dygraph) mode only when requested; the conditional
    # expression form was used purely for its side effect.
    if args.dynamic:
        fluid.enable_dygraph(device)

    # config setting
    config = parse_config(args.config_file)
    eval_cfg = merge_configs(config, 'test', vars(args))

    feat_dim = config.MODEL.feat_dim
    tscale = config.MODEL.tscale
    dscale = config.MODEL.dscale
    prop_boundary_ratio = config.MODEL.prop_boundary_ratio
    num_sample = config.MODEL.num_sample
    num_sample_perbin = config.MODEL.num_sample_perbin

    # input and video index — use the locals bound above instead of
    # re-reading config.MODEL.*, matching the style of train_bmn
    inputs = [Input([None, feat_dim, tscale], 'float32', name='feat_input')]
    gt_iou_map = Input([None, dscale, tscale], 'float32', name='gt_iou_map')
    gt_start = Input([None, tscale], 'float32', name='gt_start')
    gt_end = Input([None, tscale], 'float32', name='gt_end')
    video_idx = Input([None, 1], 'int64', name='video_idx')
    labels = [gt_iou_map, gt_start, gt_end, video_idx]

    # data
    eval_dataset = BmnDataset(eval_cfg, 'test')

    # model: fetch pretrained weights only when no checkpoint is supplied
    model = bmn(tscale,
                dscale,
                prop_boundary_ratio,
                num_sample,
                num_sample_perbin,
                pretrained=args.weights is None)
    model.prepare(loss_function=BmnLoss(tscale, dscale),
                  metrics=BmnMetric(config, mode='test'),
                  inputs=inputs,
                  labels=labels,
                  device=device)

    # load checkpoint
    if args.weights is not None:
        assert os.path.exists(args.weights + '.pdparams'), \
            "Given weight dir {} not exist.".format(args.weights)
        logger.info('load test weights from {}'.format(args.weights))
        model.load(args.weights)

    model.evaluate(eval_data=eval_dataset,
                   batch_size=eval_cfg.TEST.batch_size,
                   num_workers=eval_cfg.TEST.num_workers,
                   log_freq=args.log_interval)
    logger.info("[EVAL] eval finished")
def train_bmn(args):
    """Train a BMN model (legacy fluid/hapi API).

    Args:
        args: parsed command-line arguments; reads ``device``, ``dynamic``,
            ``save_dir``, ``config_file``, ``resume``, ``valid_interval``
            and ``log_interval``.
    """
    device = set_device(args.device)
    # Enable imperative (dygraph) mode only when requested; the conditional
    # expression form was used purely for its side effect.
    if args.dynamic:
        fluid.enable_dygraph(device)

    # exist_ok avoids the racy isdir-then-makedirs check
    os.makedirs(args.save_dir, exist_ok=True)

    # config setting
    config = parse_config(args.config_file)
    train_cfg = merge_configs(config, 'train', vars(args))
    val_cfg = merge_configs(config, 'valid', vars(args))

    feat_dim = config.MODEL.feat_dim
    tscale = config.MODEL.tscale
    dscale = config.MODEL.dscale
    prop_boundary_ratio = config.MODEL.prop_boundary_ratio
    num_sample = config.MODEL.num_sample
    num_sample_perbin = config.MODEL.num_sample_perbin

    # input and label list
    inputs = [Input([None, feat_dim, tscale], 'float32', name='feat_input')]
    gt_iou_map = Input([None, dscale, tscale], 'float32', name='gt_iou_map')
    gt_start = Input([None, tscale], 'float32', name='gt_start')
    gt_end = Input([None, tscale], 'float32', name='gt_end')
    labels = [gt_iou_map, gt_start, gt_end]

    # data
    train_dataset = BmnDataset(train_cfg, 'train')
    val_dataset = BmnDataset(val_cfg, 'valid')

    # model: train from scratch unless a resume checkpoint is given below
    model = bmn(tscale,
                dscale,
                prop_boundary_ratio,
                num_sample,
                num_sample_perbin,
                pretrained=False)
    optim = optimizer(config, parameter_list=model.parameters())
    model.prepare(optimizer=optim,
                  loss_function=BmnLoss(tscale, dscale),
                  inputs=inputs,
                  labels=labels,
                  device=device)

    # if resume weights is given, load resume weights directly
    if args.resume is not None:
        model.load(args.resume)

    model.fit(train_data=train_dataset,
              eval_data=val_dataset,
              batch_size=train_cfg.TRAIN.batch_size,
              epochs=train_cfg.TRAIN.epoch,
              eval_freq=args.valid_interval,
              log_freq=args.log_interval,
              save_dir=args.save_dir,
              shuffle=train_cfg.TRAIN.use_shuffle,
              num_workers=train_cfg.TRAIN.num_workers,
              drop_last=True)
def infer_bmn(args):
    """Run BMN inference and post-processing (legacy fluid/hapi API).

    Args:
        args: parsed command-line arguments; reads ``device``, ``dynamic``,
            ``config_file``, ``weights`` and ``log_interval``.
    """
    device = set_device(args.device)
    # Enable imperative (dygraph) mode only when requested; the conditional
    # expression form was used purely for its side effect.
    if args.dynamic:
        fluid.enable_dygraph(device)

    # config setting
    config = parse_config(args.config_file)
    infer_cfg = merge_configs(config, 'infer', vars(args))

    feat_dim = config.MODEL.feat_dim
    tscale = config.MODEL.tscale
    dscale = config.MODEL.dscale
    prop_boundary_ratio = config.MODEL.prop_boundary_ratio
    num_sample = config.MODEL.num_sample
    num_sample_perbin = config.MODEL.num_sample_perbin

    # input and video index — use the locals bound above instead of
    # re-reading config.MODEL.*, matching the style of train_bmn
    inputs = [Input([None, feat_dim, tscale], 'float32', name='feat_input')]
    labels = [Input([None, 1], 'int64', name='video_idx')]

    # data
    infer_dataset = BmnDataset(infer_cfg, 'infer')

    # model: fetch pretrained weights only when no checkpoint is supplied
    model = bmn(tscale,
                dscale,
                prop_boundary_ratio,
                num_sample,
                num_sample_perbin,
                pretrained=args.weights is None)
    model.prepare(metrics=BmnMetric(config, mode='infer'),
                  inputs=inputs,
                  labels=labels,
                  device=device)

    # load checkpoint
    if args.weights is not None:
        assert os.path.exists(args.weights + ".pdparams"), \
            "Given weight dir {} not exist.".format(args.weights)
        logger.info('load test weights from {}'.format(args.weights))
        model.load(args.weights)

    # here use model.eval instead of model.test, as post process is
    # required in our case
    model.evaluate(eval_data=infer_dataset,
                   batch_size=infer_cfg.TEST.batch_size,
                   num_workers=infer_cfg.TEST.num_workers,
                   log_freq=args.log_interval)
    logger.info("[INFER] infer finished")
def train_bmn(args):
    """Train a BMN model (paddle 2.x high-level API).

    Args:
        args: parsed command-line arguments; reads ``device``, ``dynamic``,
            ``save_dir``, ``config_file``, ``resume``, ``valid_interval``
            and ``log_interval``.
    """
    device = paddle.set_device(args.device)
    # Switch to dynamic-graph mode only when requested; the conditional
    # expression form was used purely for its side effect.
    if args.dynamic:
        paddle.disable_static(device)

    # exist_ok avoids the racy isdir-then-makedirs check
    os.makedirs(args.save_dir, exist_ok=True)

    # config setting
    config = parse_config(args.config_file)
    train_cfg = merge_configs(config, 'train', vars(args))
    val_cfg = merge_configs(config, 'valid', vars(args))

    feat_dim = config.MODEL.feat_dim
    tscale = config.MODEL.tscale
    dscale = config.MODEL.dscale
    prop_boundary_ratio = config.MODEL.prop_boundary_ratio
    num_sample = config.MODEL.num_sample
    num_sample_perbin = config.MODEL.num_sample_perbin

    # data
    train_dataset = BmnDataset(train_cfg, 'train')
    val_dataset = BmnDataset(val_cfg, 'valid')

    # model: train from scratch unless a resume checkpoint is given below
    model = bmn(tscale,
                dscale,
                feat_dim,
                prop_boundary_ratio,
                num_sample,
                num_sample_perbin,
                mode='train',
                pretrained=False)
    optim = optimizer(config, parameter_list=model.parameters())
    model.prepare(optimizer=optim, loss=BmnLoss(tscale, dscale))

    # if resume weights is given, load resume weights directly
    if args.resume is not None:
        model.load(args.resume)

    model.fit(train_data=train_dataset,
              eval_data=val_dataset,
              batch_size=train_cfg.TRAIN.batch_size,
              epochs=train_cfg.TRAIN.epoch,
              eval_freq=args.valid_interval,
              log_freq=args.log_interval,
              save_dir=args.save_dir,
              shuffle=train_cfg.TRAIN.use_shuffle,
              num_workers=train_cfg.TRAIN.num_workers,
              drop_last=True)
def infer_bmn(args):
    """Run BMN inference and post-processing (paddle 2.x high-level API).

    Args:
        args: parsed command-line arguments; reads ``device``, ``dynamic``,
            ``config_file``, ``weights`` and ``log_interval``.
    """
    device = paddle.set_device(args.device)
    # Switch to dynamic-graph mode only when requested; the conditional
    # expression form was used purely for its side effect.
    if args.dynamic:
        paddle.disable_static(device)

    # config setting
    config = parse_config(args.config_file)
    infer_cfg = merge_configs(config, 'infer', vars(args))

    feat_dim = config.MODEL.feat_dim
    tscale = config.MODEL.tscale
    dscale = config.MODEL.dscale
    prop_boundary_ratio = config.MODEL.prop_boundary_ratio
    num_sample = config.MODEL.num_sample
    num_sample_perbin = config.MODEL.num_sample_perbin

    # data
    infer_dataset = BmnDataset(infer_cfg, 'infer')

    # model: fetch pretrained weights only when no checkpoint is supplied
    model = bmn(tscale,
                dscale,
                feat_dim,
                prop_boundary_ratio,
                num_sample,
                num_sample_perbin,
                mode='infer',
                pretrained=args.weights is None)
    model.prepare(metrics=BmnMetric(config, mode='infer'))

    # load checkpoint
    if args.weights is not None:
        assert os.path.exists(args.weights + ".pdparams"), \
            "Given weight dir {} not exist.".format(args.weights)
        logger.info('load test weights from {}'.format(args.weights))
        model.load(args.weights)

    # here use model.eval instead of model.test, as post process is
    # required in our case
    model.evaluate(eval_data=infer_dataset,
                   batch_size=infer_cfg.TEST.batch_size,
                   num_workers=infer_cfg.TEST.num_workers,
                   log_freq=args.log_interval)
    logger.info("[INFER] infer finished")
def test_bmn(args):
    """Evaluate a BMN model on the test split (paddle 2.x high-level API).

    Args:
        args: parsed command-line arguments; reads ``device``, ``dynamic``,
            ``config_file``, ``weights`` and ``log_interval``.
    """
    device = paddle.set_device(args.device)
    # Switch to dynamic-graph mode only when requested; the conditional
    # expression form was used purely for its side effect.
    if args.dynamic:
        paddle.disable_static(device)

    # config setting
    config = parse_config(args.config_file)
    eval_cfg = merge_configs(config, 'test', vars(args))

    feat_dim = config.MODEL.feat_dim
    tscale = config.MODEL.tscale
    dscale = config.MODEL.dscale
    prop_boundary_ratio = config.MODEL.prop_boundary_ratio
    num_sample = config.MODEL.num_sample
    num_sample_perbin = config.MODEL.num_sample_perbin

    # data
    eval_dataset = BmnDataset(eval_cfg, 'test')

    # model: fetch pretrained weights only when no checkpoint is supplied
    model = bmn(tscale,
                dscale,
                feat_dim,
                prop_boundary_ratio,
                num_sample,
                num_sample_perbin,
                mode='test',
                pretrained=args.weights is None)
    model.prepare(loss=BmnLoss(tscale, dscale),
                  metrics=BmnMetric(config, mode='test'))

    # load checkpoint
    if args.weights is not None:
        assert os.path.exists(args.weights + '.pdparams'), \
            "Given weight dir {} not exist.".format(args.weights)
        logger.info('load test weights from {}'.format(args.weights))
        model.load(args.weights)

    model.evaluate(eval_data=eval_dataset,
                   batch_size=eval_cfg.TEST.batch_size,
                   num_workers=eval_cfg.TEST.num_workers,
                   log_freq=args.log_interval)
    logger.info("[EVAL] eval finished")