def setup_cfg(args, num_classes=5):
    """Load config from file and command-line arguments and apply
    project-specific overrides.

    Args:
        args: parsed CLI namespace providing ``config_file``, ``opts``
            and ``confidence_threshold`` attributes.
        num_classes: number of object classes written to every head
            (defaults to 5, matching the previously hard-coded value,
            so existing callers are unaffected).

    Returns:
        A frozen detectron2-style ``CfgNode``.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    #############################################################
    # Added code (comment translated from Korean: "added code")
    # NOTE(review): hard-coded checkpoint path — consider passing it
    # via --opts MODEL.WEIGHTS instead of baking it in here.
    cfg.MODEL.WEIGHTS = "/data_2/jongwon/output/stefan_dcn/model_0043999.pth"
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_classes
    cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES = num_classes
    cfg.MODEL.RETINANET.NUM_CLASSES = num_classes
    cfg.MODEL.FCOS.NUM_CLASSES = num_classes
    #############################################################
    # Set score_threshold for builtin models
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
    cfg.MODEL.FCOS.INFERENCE_TH_TEST = args.confidence_threshold
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
    cfg.freeze()
    return cfg
def setup(args):
    """Build a frozen config from CLI arguments and run the standard
    detectron2 setup (logging, seeding, output directory)."""
    config = get_cfg()
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    default_setup(config, args)
    return config
def setup_cfg(args):
    """Load a config from file plus command-line overrides and apply a
    uniform score threshold to the builtin model heads."""
    conf = get_cfg()
    conf.merge_from_file(args.config_file)
    conf.merge_from_list(args.opts)
    threshold = args.confidence_threshold
    # Same confidence cutoff for every supported architecture.
    conf.MODEL.RETINANET.SCORE_THRESH_TEST = threshold
    conf.MODEL.ROI_HEADS.SCORE_THRESH_TEST = threshold
    conf.MODEL.FCOS.INFERENCE_TH_TEST = threshold
    conf.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = threshold
    conf.freeze()
    return conf
def setup(args):
    """Create the training/eval config and perform basic setups.

    When ``--eval-only`` is requested, a specific local checkpoint and a
    larger per-batch image count are forced before freezing.
    """
    config = get_cfg()
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    if args.eval_only:
        # Evaluation uses this fixed snapshot rather than cfg.MODEL.WEIGHTS
        # from the yaml/opts.
        config.MODEL.WEIGHTS = "/root/centermask2/log_50_50/CenterMask-R-50-FPN-ms-3x/model_0009999.pth"
        config.SOLVER.IMS_PER_BATCH = 6
    config.freeze()
    default_setup(config, args)
    return config
def setup(args):
    """Create a CPU inference config from module-level constants.

    ``CONFIG_FILE_PATH``, ``WEIGHT_PATH`` and ``THRESH_TEST`` are expected
    to be defined at module scope; ``args.opts`` may still override values.
    """
    config = get_cfg()
    config.merge_from_file(CONFIG_FILE_PATH)
    config.merge_from_list(args.opts)
    config.MODEL.WEIGHTS = WEIGHT_PATH
    # One shared test-time threshold across all supported heads.
    for _ in (None,):
        config.MODEL.ROI_HEADS.SCORE_THRESH_TEST = THRESH_TEST
        config.MODEL.RETINANET.SCORE_THRESH_TEST = THRESH_TEST
        config.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = THRESH_TEST
        config.MODEL.FCOS.INFERENCE_TH_TEST = THRESH_TEST
    config.MODEL.DEVICE = 'cpu'
    config.freeze()
    default_setup(config, args)
    return config
def loadModel(self, nomeroffnet_path = "../", subdir="./NomeroffNet/configs/centermask2/numberplates/", config_file='centermask_numberplate_V_39_eSE_FPN_ms_3x.yaml'):
    """ Create configs and perform basic setups. TODO: create folder config/centermask2/ and put all architecture them """
    # Make the bundled centermask2 package importable before importing from it.
    sys.path.append(os.path.join(nomeroffnet_path, "centermask2"))
    from centermask.config import get_cfg

    # On CPU-only machines a dedicated yaml variant is used.
    if get_mode() == "cpu":
        config_file = f"cpu_{config_file}"
    resolved = os.path.join(nomeroffnet_path, subdir, config_file)

    config = get_cfg()
    config.merge_from_file(resolved)
    config.freeze()
    self.predictor = DefaultPredictor(config)
def setup_cfg(params):
    """Build a frozen config from a parameter dictionary.

    Expected keys: ``is_config_file`` (yaml path), ``is_weight_path``
    (checkpoint path), ``class_names`` (list, its length sets the class
    count) and ``is_thresh`` (test-time score threshold).
    """
    config = get_cfg()
    config.merge_from_file(params["is_config_file"])
    config.merge_from_list([])

    config.MODEL.WEIGHTS = params["is_weight_path"]

    # Same class count for every head type.
    n_classes = len(params["class_names"])
    config.MODEL.ROI_HEADS.NUM_CLASSES = n_classes
    config.MODEL.SEM_SEG_HEAD.NUM_CLASSES = n_classes
    config.MODEL.RETINANET.NUM_CLASSES = n_classes
    config.MODEL.FCOS.NUM_CLASSES = n_classes

    # Same score threshold for builtin models.
    thresh = params["is_thresh"]
    config.MODEL.RETINANET.SCORE_THRESH_TEST = thresh
    config.MODEL.ROI_HEADS.SCORE_THRESH_TEST = thresh
    config.MODEL.FCOS.INFERENCE_TH_TEST = thresh
    config.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = thresh

    config.freeze()
    return config
def setup(args):
    """Create configs and perform basic setups.

    Overrides learning rate, batch size, class counts and a custom log
    file path on top of the yaml/opts values.
    """
    config = get_cfg()
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)

    config.SOLVER.BASE_LR = 0.005
    config.SOLVER.IMS_PER_BATCH = 2
    # cfg.SOLVER.CHECKPOINT_PERIOD = 5000
    # cfg.SOLVER.MAX_ITER = 200
    config.MODEL.ROI_HEADS.NUM_CLASSES = 5
    config.MODEL.FCOS.NUM_CLASSES = 5
    # Custom log file lives inside the configured output directory.
    config.MY_CUSTOM.LOG_FILE = os.path.join(config.OUTPUT_DIR, 'my_log.txt')

    config.freeze()
    # if you don't like any of the default setup, write your own setup code
    default_setup(config, args)
    return config
def setup(args):
    """Create configs and perform basic setups.

    Forces five classes on every head and points the input pipeline at an
    Albumentations config file.
    """
    config = get_cfg()
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)

    # Same class count applied to each head type.
    for head in (config.MODEL.ROI_HEADS, config.MODEL.SEM_SEG_HEAD,
                 config.MODEL.RETINANET, config.MODEL.FCOS):
        head.NUM_CLASSES = 5

    #cfg.DATALOADER.NUM_WORKERS = 0
    config.INPUT.ALBUMENTATIONS = "mapper/albu-config.json"
    config.freeze()
    default_setup(config, args)
    return config
def setup(args):
    """Register the carplate datasets, then build the training config.

    NOTE: the config is deliberately left unfrozen (the original
    ``cfg.freeze()`` call is commented out).
    """
    # Dataset registration; the lambda default binds the split name at
    # definition time so each entry loads its own split.
    DatasetCatalog.register(
        "carplate_train", lambda x='train': get_carplate_dicts(x, ROOT, 0.001))
    DatasetCatalog.register("carplate_val",
                            lambda x='val': get_carplate_dicts(x, ROOT, 0.001))
    MetadataCatalog.get("carplate_val").set(thing_classes=["carplate"])
    # carplate_metadata = MetadataCatalog.get("carplate_train")
    MetadataCatalog.get("carplate_val").set(evaluator_type='coco')

    config = get_cfg()
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)

    config.DATASETS.TRAIN = ("carplate_train", )
    config.DATASETS.TEST = ("carplate_val", )
    config.MODEL.DEVICE = 'cuda'
    config.TEST.EVAL_PERIOD = 1000

    # Solver / schedule overrides.
    config.SOLVER.WARMUP_ITERS = 1000
    config.SOLVER.CHECKPOINT_PERIOD = 3000
    config.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 64
    config.DATALOADER.NUM_WORKERS = 2
    config.SOLVER.IMS_PER_BATCH = 1
    config.MODEL.ROI_HEADS.NUM_CLASSES = 1
    config.SOLVER.GAMMA = 0.05
    config.SOLVER.MAX_ITER = 30000
    config.SOLVER.STEPS = (6000, 10000, 15000, 19000, 25000, 29000)
    config.SOLVER.BASE_LR = 0.00005

    config.MODEL.WEIGHTS = os.path.join(ROOT, WEIGHTS, "X-101-32x8d.pkl")
    # cfg.freeze()
    default_setup(config, args)
    return config
#Register Datasets DatasetCatalog.register('openimages_train', get_train_dicts) MetadataCatalog.get('openimages_train').set(thing_classes=classes) openimages_train_metadata = MetadataCatalog.get('openimages_train') #Visualizing datasets # train_dicts = get_train_dicts() # for d in random.sample(train_dicts, 10): # print(d) # img = cv2.imread(d["file_name"]) # visualizer = Visualizer(img[:,:,::-1], metadata=openimages_train_metadata, scale=0.5) # vis = visualizer.draw_dataset_dict(d) # cv2.imshow("image", vis.get_image()[:,:,::-1]) # cv2.waitKey() cfg = get_cfg() #Training Configs cfg.merge_from_file( 'configs/centermask/Base-CenterMask-Lite-EfficientNet.yml') cfg.MODEL.WEIGHTS = 'C:\\Users\\Admin\\Documents\\detectron2\\projects\\CenterMask2\\output\\centermask\\CenterMask-Lite-Efficientnet-2x\\efficientnet-pretrained\\model_final_wo_solver_states.pth' cfg.DATASETS.TRAIN = ('openimages_train', ) cfg.DATASETS.TEST = () cfg.DATALOADER.NUM_WORKERS = 2 cfg.SOLVER.IMS_PER_BATCH = 2 cfg.SOLVER.MAX_ITER = 300000 cfg.SOLVER.BASE_LR = 0.00003 cfg.SOLVER.GAMMA = 0.2 cfg.SOLVER.STEPS = ( 150000, 220000,