be.enable_winograd = 4  # default to winograd 4 for fast autotune

# directory to store VGG weights
cache_dir = get_data_cache_dir(args.data_dir, subdir='pascalvoc_cache')

# build data loader
# get config file for PASCALVOC
config = PASCALVOC(args.manifest['train'], args.manifest_root,
                   width=args.width, height=args.height,
                   rois_per_img=rpn_rois_per_img, inference=False)
config['subset_fraction'] = float(args.subset_pct / 100.0)
train_set = faster_rcnn.build_dataloader(config, frcn_rois_per_img)

# build the Faster-RCNN model
model = faster_rcnn.build_model(train_set, frcn_rois_per_img, inference=False)

# set up the costs for the different branches, respectively
weights = 1.0 / (rpn_rois_per_img)
roi_w = 1.0 / (frcn_rois_per_img)

frcn_tree_cost = Multicost(costs=[GeneralizedCostMask(costfunc=CrossEntropyMulti(), weights=roi_w),
                                  GeneralizedCostMask(costfunc=SmoothL1Loss(), weights=roi_w)
                                  ], weights=[1, 1])

cost = Multicost(costs=[GeneralizedCostMask(costfunc=CrossEntropyMulti(), weights=weights),
                        GeneralizedCostMask(costfunc=SmoothL1Loss(sigma=3.0), weights=weights),
                        frcn_tree_cost,
                        ], weights=[1, 1, 1])
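# --- A minimal sketch of the fitting step that would typically follow the cost
# setup above, reusing the `model`, `train_set`, and `cost` objects defined there.
# The optimizer hyperparameters and the args.epochs / args.callback_args flags are
# assumptions for illustration, not values taken from the original script.
from neon.optimizers import GradientDescentMomentum
from neon.callbacks.callbacks import Callbacks

opt = GradientDescentMomentum(0.001, 0.9, wdecay=0.0005)  # assumed hyperparameters
callbacks = Callbacks(model, **args.callback_args)

# train the joint RPN + Fast R-CNN branches against the combined masked cost
model.fit(train_set, optimizer=opt, num_epochs=args.epochs,
          cost=cost, callbacks=callbacks)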
# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))

# build data loader
cache_dir = get_data_cache_dir(args.data_dir, subdir='pascalvoc_cache')
config = PASCALVOC(args.manifest['val'], args.manifest_root,
                   width=args.width, height=args.height,
                   rois_per_img=rpn_rois_per_img, inference=True)
valid_set = faster_rcnn.build_dataloader(config, frcn_rois_per_img)
num_classes = valid_set.num_classes

# build the Faster-RCNN network
(model, proposalLayer) = faster_rcnn.build_model(valid_set, frcn_rois_per_img, inference=True)

# load parameters and initialize model
model.load_params(args.model_file, load_states=False)
model.initialize(dataset=valid_set)

# normalize the model by the bbtarget mean and std if needed.
# if a full training run was completed using train.py, then normalization
# was already performed prior to saving the model.
if args.normalize:
    model = util.scale_bbreg_weights(model, [0.0, 0.0, 0.0, 0.0],
                                     [0.1, 0.1, 0.2, 0.2], num_classes)

# run inference
# detection parameters
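# --- The "detection parameters" step normally sets score and overlap thresholds
# and prunes overlapping detections with non-maximum suppression (NMS). The sketch
# below is a generic numpy NMS for illustration only; it is not the routine used by
# this example, and the default overlap threshold is an assumption.
import numpy as np

def nms(boxes, scores, overlap_thresh=0.3):
    """Greedy NMS over (N, 4) boxes in (x1, y1, x2, y2) form; returns kept indices."""
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # process highest-scoring boxes first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # intersection of the kept box with the remaining candidates
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        iou = (w * h) / (areas[i] + areas[order[1:]] - w * h)
        # drop candidates that overlap the kept box beyond the threshold
        order = order[1:][iou <= overlap_thresh]
    return keep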