def test(cfg, dataset_name):
    cfg.DATASETS.TEST = (dataset_name, )
    predictor = DefaultPredictor(cfg)
    evaluator_FLIR = FLIREvaluator(dataset_name, cfg, False, output_dir=out_folder,
                                   out_pr_name='pr_val.png')
    val_loader = build_detection_test_loader(cfg, dataset_name)
    inference_on_dataset(predictor.model, val_loader, evaluator_FLIR)

def test(cfg, dataset_name, file_name='FLIR_thermal_only_result.out'):
    cfg.DATASETS.TEST = (dataset_name, )
    predictor = DefaultPredictor(cfg)
    out_name = out_folder + file_name
    evaluator_FLIR = FLIREvaluator(dataset_name, cfg, False, output_dir=out_folder,
                                   save_eval=True, out_eval_path=out_name)
    val_loader = build_detection_test_loader(cfg, dataset_name)
    inference_on_dataset(predictor.model, val_loader, evaluator_FLIR)

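# Example call (a minimal sketch, not part of the original scripts): it assumes the FLIR
# validation split is registered via register_coco_instances, as done later in this
# section, and that out_folder exists and ends with a path separator.
register_coco_instances('FLIR_val', {}, val_json_path, val_folder)
test(cfg, 'FLIR_val', file_name='FLIR_val_result.out')
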
def test_during_train(trainer, cfg, dataset_name):
    # cfg is passed in explicitly here rather than read from module scope
    cfg.DATASETS.TEST = (dataset_name, )
    evaluator_FLIR = FLIREvaluator(dataset_name, cfg, False, output_dir=out_folder,
                                   out_pr_name='pr_val.png')
    val_loader = build_detection_test_loader(cfg, dataset_name)
    inference_on_dataset(trainer.model, val_loader, evaluator_FLIR)

def test_during_train(cfg, dataset_name, save_eval_name, save_folder):
    cfg.DATASETS.TEST = (dataset_name, )
    trainer = DefaultTrainer(cfg)
    # resume from the last checkpoint in cfg.OUTPUT_DIR, or fall back to cfg.MODEL.WEIGHTS;
    # without this the trainer's model would be evaluated with freshly initialized weights
    trainer.resume_or_load(resume=True)
    evaluator_FLIR = FLIREvaluator(dataset_name, cfg, False, output_dir=save_folder,
                                   save_eval=True, out_eval_path=(save_folder + save_eval_name))
    val_loader = build_detection_test_loader(cfg, dataset_name)
    inference_on_dataset(trainer.model, val_loader, evaluator_FLIR)

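# Example call (a sketch, not from the original code): evaluate the current weights on the
# test split and write the result next to the other outputs; dataset_test and out_folder
# are assumed to be defined as in the training snippet further below.
test_during_train(cfg, dataset_test, 'FLIR_test_eval.out', out_folder)
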
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
cfg.DATASETS.TEST = (dataset, )
cfg.INPUT.FORMAT = 'BGR'
cfg.INPUT.NUM_IN_CHANNELS = 3
cfg.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675]
cfg.MODEL.PIXEL_STD = [1.0, 1.0, 1.0]

# Read detection results
det_1 = json.load(open(det_file_1, 'r'))
det_2 = json.load(open(det_file_2, 'r'))
det_3 = json.load(open(det_file_3, 'r'))

evaluator = FLIREvaluator(dataset, cfg, False, output_dir=out_folder, save_eval=True,
                          out_eval_path='out/mAP/FLIR_' + data_set + '_var_box_fusion_gnll.out')
"""
Method list:
    'bayesian_prior_wt_score_box': for tuning different background priors
    'bayesian_wt_score_box'
    'sumLogits'
    'sumLogits_softmax'
    'avgLogits_softmax'
    'baysian_avg_bbox'
    'avg_score'
    'avg_score_wt_score_box'
    'avg_score_var_score_box'
    'avg_score_bbox': same as top-k voting
    'avg_score_avg_box'
"""

cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set the score threshold for this model
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
cfg.DATASETS.TEST = (dataset, )
cfg.INPUT.FORMAT = 'BGR'
cfg.INPUT.NUM_IN_CHANNELS = 3
cfg.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675]
cfg.MODEL.PIXEL_STD = [1.0, 1.0, 1.0]

# Read detection results
det_1 = json.load(open(det_file_1, 'r'))
det_2 = json.load(open(det_file_2, 'r'))
det_3 = json.load(open(det_file_3, 'r'))

evaluator = FLIREvaluator(dataset, cfg, False, output_dir=out_folder, save_eval=True,
                          out_eval_path='out/mAP/FLIR_Baysian_' + data_set + '_avg_box_all.out')
"""
Method list:
    'bayesian_prior_wt_score_box': for tuning different background priors
    'bayesian_wt_score_box'
    'sumLogits_softmax'
    'avgLogits_softmax'
    'baysian_avg_bbox'
    'avg_score'
    'avg_score_wt_score_box'
    'avg_score_bbox': same as top-k voting
    'pooling'
    'bayesian'
"""

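# The strings above select the fusion rule. The call below is a sketch that mirrors the
# commented invocation appearing later in this section; apply_late_fusion_and_evaluate is
# assumed to be importable from the repo's fusion utilities.
method = 'bayesian_wt_score_box'
result = apply_late_fusion_and_evaluate(cfg, evaluator, det_1, det_2, det_3, method)
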
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
out_name = 'out_model_iter_' + str(idx * eval_every_iter) + '.pth'
out_model_path = os.path.join(out_folder, out_name)
trainer.train()
torch.save(trainer.model.state_dict(), out_model_path)

# Evaluation on training and validation sets
test(cfg, dataset_train)
test(cfg, dataset_test)
del trainer

# Test on training set
cfg.DATASETS.TEST = (dataset_train, )
predictor = DefaultPredictor(cfg)
evaluator = FLIREvaluator(dataset_train, cfg, False, output_dir=out_folder, save_eval=True,
                          out_eval_path='FLIR_train_eval.out')
val_loader = build_detection_test_loader(cfg, dataset_train)
inference_on_dataset(predictor.model, val_loader, evaluator)

# Test on evaluation set (written to a separate file so the training-set results are not overwritten)
cfg.DATASETS.TEST = (dataset_test, )
predictor = DefaultPredictor(cfg)
evaluator = FLIREvaluator(dataset_test, cfg, False, output_dir=out_folder, save_eval=True,
                          out_eval_path='FLIR_test_eval.out')
val_loader = build_detection_test_loader(cfg, dataset_test)
inference_on_dataset(predictor.model, val_loader, evaluator)

cfg.MODEL.WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_R_101_FPN_3x/137851257/model_final_f6e8b1.pkl" #cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "good_model/out_model_iter_32000.pth") cfg.MODEL.ROI_HEADS.NUM_CLASSES = 80 cfg.DATASETS.TEST = (dataset, ) cfg.INPUT.FORMAT = 'BGR' cfg.INPUT.NUM_IN_CHANNELS = 3 cfg.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675] cfg.MODEL.PIXEL_STD = [1.0, 1.0, 1.0] # Read detection results val_1 = json.load(open(val_file_1, 'r')) val_2 = json.load(open(val_file_2, 'r')) #val_3 = json.load(open(det_file_3, 'r')) evaluator = FLIREvaluator(dataset, cfg, False, output_dir=out_folder, save_eval=True, out_eval_path='out/mAP/FLIR_Baysian_Day.out') save_file_name = 'train_labels_2_model.npz' """ print('Perpare training data ... ') X_train, Y_train = train_late_fusion(det_1, det_2, anno_train_gt) np.savez(save_file_name, X=X_train, Y=Y_train) """ print('Loading saved data ...') train_data = np.load(save_file_name) X_train = train_data['X'] Y_train = train_data['Y'] #print('Perpare validation data ... ') #X_val, Y_val = train_late_fusion(val_1, val_2, anno_val_gt)
trainer.resume_or_load(resume=False)
trainer.train()

cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set the testing threshold for this model

from detectron2.evaluation import FLIREvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
from tools.plain_train_net import do_test

# Test on training set
cfg.DATASETS.TEST = (dataset, )
predictor = DefaultPredictor(cfg)
evaluator = FLIREvaluator(dataset, cfg, False, output_dir=out_folder, out_pr_name='pr_train.png')
val_loader = build_detection_test_loader(cfg, dataset)
inference_on_dataset(predictor.model, val_loader, evaluator)

# Test on validation set
dataset = 'FLIR_val'
cfg.DATASETS.TEST = (dataset, )
register_coco_instances(dataset, {}, val_json_path, val_folder)
FLIR_metadata = MetadataCatalog.get(dataset)
dataset_dicts = DatasetCatalog.get(dataset)
# The original snippet is cut off at the evaluator call; the continuation below is assumed
# by analogy with the training-set block above.
evaluator = FLIREvaluator(dataset, cfg, False, output_dir=out_folder, out_pr_name='pr_val.png')
val_loader = build_detection_test_loader(cfg, dataset)
inference_on_dataset(predictor.model, val_loader, evaluator)

img_folder = '../../../Datasets/FLIR/val/thermal_8_bit/'
dataset_train = FLIRDataset(X_train=X_train, Y_train=Y_train, img_folder=img_folder)
batch_size = len(X_train)
train_loader = DataLoader(
    dataset_train,
    batch_size=batch_size,
    shuffle=to_shuffle,
    num_workers=16,
)
cfg = get_cfg_function(out_folder)
evaluator = FLIREvaluator(
    dataset, cfg, False,
    output_dir=out_folder,
    save_eval=True,
    out_eval_path='out/mAP/FLIR_pytorch_learning_fusion.out')

device = torch.device('cuda')
torch.cuda.set_device(0)
model = learnFusionModel(use_bias=use_bias, random_init=random_init)
model.train()
model = model.to(device)
#optimizer = torch.optim.Adam(model.parameters(), lr=lr)
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)

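# A minimal sketch of the optimization loop this setup implies; it is not part of the
# original snippet. The L1 loss is an assumption (suggested only by the 'learned_l1'
# output name used elsewhere in this section), as are num_epochs and the model's
# (detector scores in, fused scores out) interface.
criterion = torch.nn.L1Loss()
num_epochs = 100  # assumed; not specified above
for epoch in range(num_epochs):
    for X_batch, Y_batch in train_loader:
        X_batch = X_batch.float().to(device)
        Y_batch = Y_batch.float().to(device)
        optimizer.zero_grad()
        pred = model(X_batch)            # learned fusion of the stacked detector scores
        loss = criterion(pred, Y_batch)
        loss.backward()
        optimizer.step()
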
cfg = get_cfg()
cfg.OUTPUT_DIR = out_folder
cfg.merge_from_file("./configs/COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set the testing threshold for this model
cfg.MODEL.WEIGHTS = "output_val/model_0009999.pth"

# Train config
cfg.DATALOADER.NUM_WORKERS = 2
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512  # default: 512
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 17
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

cfg.DATASETS.TEST = (dataset, )
predictor = DefaultPredictor(cfg)

from detectron2.evaluation import FLIREvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
from tools.plain_train_net import do_test

# Test on validation set
dataset = 'FLIR_val'
cfg.DATASETS.TEST = (dataset, )
register_coco_instances(dataset, {}, val_json_path, val_folder)
FLIR_metadata = MetadataCatalog.get(dataset)
dataset_dicts = DatasetCatalog.get(dataset)
evaluator = FLIREvaluator(dataset, cfg, False, output_dir=out_folder)
val_loader = build_detection_test_loader(cfg, dataset)
inference_on_dataset(predictor.model, val_loader, evaluator)

cfg.INPUT.FORMAT = 'BGR'
cfg.INPUT.NUM_IN_CHANNELS = 3
cfg.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675]
cfg.MODEL.PIXEL_STD = [1.0, 1.0, 1.0]

# Read detection results
det_1 = json.load(open(det_file_1, 'r'))
det_2 = json.load(open(det_file_2, 'r'))
det_3 = json.load(open(det_file_3, 'r'))
val_1 = json.load(open(val_file_1, 'r'))
val_2 = json.load(open(val_file_2, 'r'))
#val_3 = json.load(open(det_file_3, 'r'))

evaluator = FLIREvaluator(
    dataset, cfg, False,
    output_dir=out_folder,
    save_eval=True,
    out_eval_path='out/mAP/FLIR_learned_l1_w_o_bias.out')

# Other options: 'baysian_wt_score_box', 'sumLogits_softmax', 'avgLogits_softmax',
# 'baysian_avg_bbox', 'pooling', 'baysian', 'nms'
method = 'avg_score'
#result = apply_late_fusion_and_evaluate(cfg, evaluator, det_1, det_2, det_3, method)

save_file_name = 'train_labels_2_model_train2.npz'
"""
# Get training labels
print('Prepare training data ... ')
X_train, Y_train = train_late_fusion(det_1, det_2, anno_train_gt)
np.savez(save_file_name, X=X_train, Y=Y_train)
"""
print('Loading saved data ...')
train_data = np.load(save_file_name)
X_train = train_data['X']