    val_loader = build_detection_test_loader(
        dataset_dicts,
        mapper=DatasetMapper(is_train=False,
                             augmentations=[],
                             image_format=cfg.INPUT.FORMAT,
                             precomputed_proposal_topk=500))
else:
    val_loader = build_detection_test_loader(cfg, dataset)

if last_only:
    DetectionCheckpointer(model).load(
        os.path.join(cfg.OUTPUT_DIR, "model_final.pth"))
    # DetectionCheckpointer(model).load(os.path.join(cfg.OUTPUT_DIR, "model_0099999.pth"))
    evaluator = COCOEvaluator(dataset, ("bbox",), False, output_dir=outdir)
    result = inference_on_dataset(model, val_loader, evaluator)
    print_csv_format(result)
    with open(outdir + "/evaluation_" + dataset + ".json", "w") as outfile:
        json.dump(result, outfile)
else:
    files = glob.glob(cfg.OUTPUT_DIR + "/model_*.pth")
    # remove checkpoints for which evaluation has already been done
    already_evaluated = glob.glob(outdir + "/evaluation_*")
    # NOTE: str.strip() removes a *set* of characters, not a prefix/suffix,
    # so this name manipulation is fragile; see the safer variant right after this snippet.
    files = [
        f for f in files
        if outdir + "/evaluation_" +
        f.strip(cfg.OUTPUT_DIR).strip("/model_").strip(".pth") + ".json"
        not in already_evaluated
    ]
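# A safer way to derive the "evaluation_<iteration>.json" name is to take the
# checkpoint's basename instead of chaining str.strip(). A minimal sketch; the
# variables cfg/outdir are assumed from the snippet above and the helper
# eval_json_path is introduced here purely for illustration:
import glob
import os

files = glob.glob(os.path.join(cfg.OUTPUT_DIR, "model_*.pth"))
already_evaluated = set(glob.glob(os.path.join(outdir, "evaluation_*")))


def eval_json_path(ckpt_path):
    # "model_0099999.pth" -> "<outdir>/evaluation_0099999.json"
    iteration = os.path.splitext(os.path.basename(ckpt_path))[0].replace("model_", "")
    return os.path.join(outdir, "evaluation_" + iteration + ".json")


files = [f for f in files if eval_json_path(f) not in already_evaluated]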
            LossEvalHook(
                cfg.TEST.EVAL_PERIOD, self.model,
                build_detection_test_loader(self.cfg,
                                            self.cfg.DATASETS.TEST[0],
                                            DatasetMapper(self.cfg, True))))
        return hooks


# Training
trainer = MyTrainer(cfg)
trainer.resume_or_load(resume=False)
last_results = trainer.train()
with open(f'{OUTPUT_DIR}/model_trained_results_last.pkl', 'wb') as f:
    pickle.dump(last_results, file=f)

# Evaluate
print('Evaluating...')
evaluator = COCOEvaluator("val_kitti-mots", ("segm", "bbox"), False,
                          output_dir=OUTPUT_DIR)
val_loader = build_detection_test_loader(cfg, "val_kitti-mots")
results = inference_on_dataset(trainer.model, val_loader, evaluator)
with open(f'{OUTPUT_DIR}/model_trained_results.pkl', 'wb') as f:
    pickle.dump(results, file=f)
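# The pickled results can be inspected afterwards: COCOEvaluator returns an
# OrderedDict keyed by task ("bbox", "segm"), each mapping metric names such as
# "AP" and "AP50" to floats. A minimal sketch, assuming the path written above:
import pickle

with open(f'{OUTPUT_DIR}/model_trained_results.pkl', 'rb') as f:
    results = pickle.load(f)
print("bbox AP:", results["bbox"]["AP"])
print("segm AP:", results["segm"]["AP"])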
    nargs=argparse.REMAINDER,
)
args = parser.parse_args()
logger = setup_logger()
logger.info("Command line arguments: " + str(args))
cfg = setup_cfg(args)

# create a torch model
torch_model = build_model(cfg)
DetectionCheckpointer(torch_model).resume_or_load(cfg.MODEL.WEIGHTS)

# get a sample data batch
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
first_batch = next(iter(data_loader))

# convert and save caffe2 model
caffe2_model = export_caffe2_model(cfg, torch_model, first_batch)
caffe2_model.save_protobuf(args.output)
# draw the caffe2 graph
caffe2_model.save_graph(os.path.join(args.output, "model.svg"), inputs=first_batch)

# run evaluation with the converted model
if args.run_eval:
    dataset = cfg.DATASETS.TEST[0]
    data_loader = build_detection_test_loader(cfg, dataset)
    # NOTE: hard-coded evaluator. change to the evaluator for your dataset
    evaluator = COCOEvaluator(dataset, cfg, True, args.output)
    metrics = inference_on_dataset(caffe2_model, data_loader, evaluator)
    print_csv_format(metrics)
data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
first_batch = next(iter(data_loader))

# convert and save caffe2 model
tracer = Caffe2Tracer(cfg, torch_model, first_batch)
if args.format == "caffe2":
    caffe2_model = tracer.export_caffe2()
    caffe2_model.save_protobuf(args.output)
    # draw the caffe2 graph
    caffe2_model.save_graph(os.path.join(args.output, "model.svg"), inputs=first_batch)
elif args.format == "onnx":
    onnx_model = tracer.export_onnx()
    onnx.save(onnx_model, os.path.join(args.output, "model.onnx"))
elif args.format == "torchscript":
    ts_model = tracer.export_torchscript()
    ts_model.save(os.path.join(args.output, "model.ts"))
    from detectron2.export.torchscript import dump_torchscript_IR
    dump_torchscript_IR(ts_model, args.output)

# run evaluation with the converted model
if args.run_eval:
    assert args.format == "caffe2", "Python inference in other format is not yet supported."
    dataset = cfg.DATASETS.TEST[0]
    data_loader = build_detection_test_loader(cfg, dataset)
    # NOTE: hard-coded evaluator. change to the evaluator for your dataset
    evaluator = COCOEvaluator(dataset, output_dir=args.output)
    metrics = inference_on_dataset(caffe2_model, data_loader, evaluator)
    print_csv_format(metrics)
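# Quick sanity checks on the exported files; a minimal sketch assuming the
# "model.onnx" / "model.ts" paths written by the snippet above:
import os
import onnx
import torch

if args.format == "onnx":
    # structural validation of the exported ONNX graph
    onnx.checker.check_model(onnx.load(os.path.join(args.output, "model.onnx")))
elif args.format == "torchscript":
    # reload the TorchScript module to confirm it round-trips
    reloaded = torch.jit.load(os.path.join(args.output, "model.ts"))
    print(reloaded)  # prints the module structure of the reloaded model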
cfg.merge_from_file(model_zoo.get_config_file(
    "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml"))
cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml")  # Let training initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
cfg.SOLVER.MAX_ITER = 1000   # enough for this toy dataset; you may need to train longer for a practical dataset
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256  # faster, and good enough for this toy dataset (default: 512)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class (balloon)
cfg.DATASETS.TRAIN = ('my_dataset',)
cfg.DATASETS.TEST = ()
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

trainer = DefaultTrainer(cfg)
# NOTE: this assigns a DataLoader instance to an attribute; it does NOT make the
# trainer use the custom mapper (see the subclassing sketch below).
trainer.build_train_loader = build_detection_train_loader(cfg, mapper=train_mapper)
trainer.resume_or_load(resume=False)
trainer.train()

# output config
from contextlib import redirect_stdout
with open('test.yaml', 'w') as f:
    with redirect_stdout(f):
        print(cfg.dump())

from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader

evaluator = COCOEvaluator("test_dataset", cfg, False, output_dir="./output/")
val_loader = build_detection_test_loader(cfg, "test_dataset")
inference_on_dataset(trainer.model, val_loader, evaluator)
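# To actually apply a custom mapper during training, override build_train_loader
# on a DefaultTrainer subclass. A minimal sketch; the class name MapperTrainer is
# assumed, and train_mapper is the mapper function used above:
from detectron2.data import build_detection_train_loader
from detectron2.engine import DefaultTrainer


class MapperTrainer(DefaultTrainer):
    @classmethod
    def build_train_loader(cls, cfg):
        # called by DefaultTrainer.__init__ to build the training data loader
        return build_detection_train_loader(cfg, mapper=train_mapper)


# trainer = MapperTrainer(cfg) would then use train_mapper for every training batch.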
    cv2_imshow(out.get_image()[:, :, ::-1])
else:
    plt.imshow(out.get_image()[:, :, ::-1])
    plt.show()

# A more robust way to evaluate the model is to use a metric called Average Precision (AP),
# already implemented in the detectron2 package. If you want more detail on what AP is,
# you can take a look [here](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.average_precision_score.html#sklearn.metrics.average_precision_score)
# and [here](https://en.wikipedia.org/w/index.php?title=Information_retrieval&oldid=793358396#Average_precision).

# ### #TODO: expand on how to interpret AP

# In[ ]:


# In[36]:

if IN_COLAB:
    evaluator = COCOEvaluator(f"{DATASET_NAME}_valid", cfg, False,
                              output_dir="/content/eval_output/")
else:
    evaluator = COCOEvaluator(f"{DATASET_NAME}_valid", cfg, False,
                              output_dir="eval_output/")
val_loader = build_detection_test_loader(cfg, f"{DATASET_NAME}_valid")
print(inference_on_dataset(predictor.model, val_loader, evaluator))

# another equivalent way to evaluate the model is to use `trainer.test`

# # Let's test our newly trained model on a new video
# ## We download a video from a URL
def main():
    # register_coco_instances(f"sugar_beet_train", {}, f"/netscratch/naeem/structured_cwc/instances_train{year}.json",
    #                         f"/netscratch/naeem/structured_cwc/train/img/")
    # register_coco_instances(f"sugar_beet_valid", {}, f"/netscratch/naeem/structured_cwc/instances_valid{year}.json",
    #                         f"/netscratch/naeem/structured_cwc/valid/img/")
    register_coco_instances(
        "sugar_beet_train", {},
        "/home/robot/datasets/structured_cwc/instances_train2016.json",
        "/home/robot/datasets/structured_cwc/train/img/")
    register_coco_instances(
        "sugar_beet_valid", {},
        "/home/robot/datasets/structured_cwc/instances_valid2016.json",
        "/home/robot/datasets/structured_cwc/valid/img/")
    register_coco_instances(
        "sugar_beet_test", {},
        "/home/robot/datasets/structured_cwc/instances_test2016.json",
        "/home/robot/datasets/structured_cwc/test/img/")

    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml"))
    cfg.DATASETS.TRAIN = ("sugar_beet_train",)
    cfg.DATASETS.TEST = ("sugar_beet_test",)
    cfg.DATALOADER.NUM_WORKERS = 8
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml"
    )  # Let training initialize from model zoo
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.001  # pick a good LR
    cfg.SOLVER.MAX_ITER = 10000  # you may need to train longer for a practical dataset
    # cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for a toy dataset (default: 512)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
    # cfg.OUTPUT_DIR = '/home/robot/datasets/MRCNN_training'
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    # trainer = DefaultTrainer(cfg)
    # trainer.resume_or_load(resume=True)
    # trainer.train()

    # cfg already contains everything we've set previously. Now we change it a little bit for inference:
    cfg.MODEL.WEIGHTS = os.path.join(
        '/home/robot/git/detectron2/output/model_final.pth')
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set a custom testing threshold
    predictor = DefaultPredictor(cfg)

    evaluator = COCOEvaluator("sugar_beet_valid", cfg, False,
                              output_dir="/home/robot/datasets/MRCNN_training")
    val_loader = build_detection_test_loader(cfg, "sugar_beet_valid")
    # grad_cam = GradCam(model=trainer.model,
    #                    feature_module=trainer.model.layer4,
    #                    target_layer_names=["2"], use_cuda=True)
    # print(inference_on_dataset(trainer.model, val_loader, evaluator))
    dataset_dicts = DatasetCatalog.get("sugar_beet_valid")

    def get_label(rgb_path):
        data_root, file_name = os.path.split(
            os.path.split(rgb_path)[0])[0], os.path.split(rgb_path)[1]
        return os.path.join(data_root, 'lbl', file_name)

    c = 0
    for d in random.sample(dataset_dicts, 10):
        im = cv2.imread(d["file_name"])
        lbl = cv2.imread(get_label(d["file_name"]))
        outputs = predictor(im)
        # outputs = grad_cam(im, 0)
        v = Visualizer(
            im[:, :, ::-1],
            scale=0.5,
            instance_mode=ColorMode.IMAGE_BW  # remove the colors of unsegmented pixels; only available for segmentation models
        )
        out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        img = out.get_image()
        print(img.shape)
        img = Image.fromarray(
            np.concatenate([
                img[:, :, ::-1],
                cv2.resize(lbl, (img.shape[1], img.shape[0]),
                           interpolation=cv2.INTER_AREA)
            ], axis=1))
        img.save(f"{cfg.OUTPUT_DIR}/output{c}.jpeg")
        c = c + 1
# Saving the dicts to a JSON file:
json_path = os.path.join(output_folder_path, "predictions.json")
with open(json_path, "w") as json_file:
    json.dump(images_predictions, json_file, indent=2)

# Fixing the image ids for PASCAL evaluation:
for image in images:
    image["image_id"] = os.path.splitext(os.path.split(image["image_id"])[-1])[0]

# Evaluating the predictions:
# Setting up the COCO evaluation:
coco_evaluator = COCOEvaluator(dataset_name, cfg, distributed=True,
                               output_dir=output_folder_path)

# Setting up the PASCAL VOC evaluation:
my_dataset.dirname = os.path.join(output_folder_path, "PascalVOCAnnotations")
my_dataset.split = 'test'
my_dataset.year = 2012
dataset.to_pascal(my_dataset.dirname)  # Converting the dataset to PASCAL VOC
pascal_evaluator = PascalVOCDetectionEvaluator(dataset_name)

# COCO evaluation:
coco_evaluator.reset()
coco_evaluator.process(images, outputs_list)
coco_results = coco_evaluator.evaluate()
# Dumping the COCO evaluation results:
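# PascalVOCDetectionEvaluator reads dirname/year/split (and thing_classes) from the
# dataset's entry in MetadataCatalog rather than from attributes of a dataset object,
# so the registered dataset usually needs that metadata set explicitly. A minimal
# sketch, assuming dataset_name and the VOC-style folder written above:
from detectron2.data import MetadataCatalog

MetadataCatalog.get(dataset_name).set(
    dirname=os.path.join(output_folder_path, "PascalVOCAnnotations"),
    year=2012,
    split="test",
)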
def d2_train_model(train_json_path, train_images_dir, val_json_path,
                   val_images_dir, ims_per_batch, model_lr, bach_size_per_img,
                   max_train_iter, num_workers, num_labels):
    ########################## hyper-parameter setting:
    ## 1. models:
    model_name = "mask_rcnn_R_50_FPN_3x.yaml"
    cfgFile = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"

    # train_json_path = "/home/user/qunosen/2_project/4_train/2_zhuchao/1_test/data/train.json"
    # val_json_path = "/home/user/qunosen/2_project/4_train/2_zhuchao/1_test/data/val.json"
    # train_images_dir = "/home/user/qunosen/2_project/4_train/2_zhuchao/1_test/data/train_images"
    # val_images_dir = "/home/user/qunosen/2_project/4_train/2_zhuchao/1_test/data/val_images"

    regist_train_name = 'zc_data'
    regist_val_name = 'zc_val_data'
    work_root = os.getcwd()
    log_file = os.path.join(work_root, "log_dat.txt")

    ##############################
    d2_start = time.clock()
    datetime_now = datetime.datetime.now()
    log = ("###" * 100 + "\n") * 5 + " %s\n" % (
        str(datetime_now)) + "model_name: %s ..." % model_name
    print(log)
    print(log, file=open(log_file, "a"))

    log = "parameter setting:\n model_to_try:%s\n num_labels: %d\n ims_per_batch:%d\n num_workers:%d\n model_lr:%s\n max_train_iter:%d\n bach_size_per_img:%d\n" % \
        (model_name, num_labels, ims_per_batch, num_workers, str(model_lr), max_train_iter, bach_size_per_img)
    print(log)
    print(log, file=open(log_file, "a"))

    # new_root = os.path.join(work_root, model_name)
    new_root = os.path.join(
        work_root,
        str(model_name) + "_%s_%s_%s_%s" % (str(model_lr), str(bach_size_per_img),
                                            str(max_train_iter), str(ims_per_batch)))
    if not os.path.exists(new_root):
        os.makedirs(new_root)
    os.chdir(new_root)

    if regist_train_name in DatasetCatalog._REGISTERED:
        log = 'regist_data exists before: %s , and try to del.... ' % regist_train_name
        print(log)
        print(log, file=open(log_file, "a"))
        DatasetCatalog._REGISTERED.pop(regist_train_name)
    else:
        log = 'regist_data : %s .... ' % regist_train_name
        print(log)
        print(log, file=open(log_file, "a"))
    register_coco_instances(regist_train_name, {}, train_json_path, train_images_dir)

    if regist_val_name in DatasetCatalog._REGISTERED:
        log = 'regist_data exists before: %s , and try to del.... ' % regist_val_name
        print(log)
        print(log, file=open(log_file, "a"))
        DatasetCatalog._REGISTERED.pop(regist_val_name)
    else:
        log = 'regist_data : %s .... ' % regist_val_name
        print(log)
        print(log, file=open(log_file, "a"))
    register_coco_instances(regist_val_name, {}, val_json_path, val_images_dir)

    train_metadata = MetadataCatalog.get(regist_train_name)
    val_metadata = MetadataCatalog.get(regist_val_name)
    trainset_dicts = DatasetCatalog.get(regist_train_name)
    # print(trainset_dicts)
    valset_dicts = DatasetCatalog.get(regist_val_name)
    # print(valset_dicts)

    # #### training:
    cfg = get_cfg()
    mode_config = cfgFile
    log = "model_to_train: %s ..." % mode_config
    print(log)
    print(log, file=open(log_file, "a"))

    cfg.merge_from_file(model_zoo.get_config_file(mode_config))
    cfg.DATASETS.TRAIN = (regist_train_name,)
    cfg.DATASETS.TEST = (regist_val_name,)  # no metrics implemented for this dataset
    cfg.DATALOADER.NUM_WORKERS = num_workers
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(mode_config)  ## out_model: ./output/model_final.pth
    # cfg.MODEL.WEIGHTS = "./output/model_final.pth"  # initialize from model zoo
    cfg.SOLVER.IMS_PER_BATCH = ims_per_batch
    cfg.SOLVER.BASE_LR = model_lr
    cfg.SOLVER.MAX_ITER = max_train_iter  # 300 iterations seems good enough, but you can certainly train longer
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = bach_size_per_img  # faster, and good enough for this toy dataset
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = num_labels  # len(select_cats), e.g. 5 classes ['chair', 'table', 'swivelchair', 'sofa', 'bed']
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    model_path = os.path.join(new_root, 'output/model_final.pth')
    if os.path.exists(model_path):
        log = "model_save: %s" % model_path
        print(log)
        print(log, file=open(log_file, "a"))

        #### predict
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
        cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.2  # set the testing threshold for this model
        cfg.DATASETS.TEST = (regist_val_name,)
        predictor = DefaultPredictor(cfg)
        # print(predictor)

        out_model_dir = os.path.join(new_root, "output")
        out_dir = os.path.join(out_model_dir, 'result_' + str(model_name))
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

        test_dir = val_images_dir  # os.path.join(work_dir, "./val_images")
        # test_dir = '/home/user/jupyter/dataset/coco/select_coco/my_dataset/images'
        imgs_list = [
            os.path.join(test_dir, file_name)
            for file_name in os.listdir(test_dir)
            if file_name.endswith(".jpg") or file_name.endswith(".png")
            or file_name.endswith(".bmp")
        ]

        for d in imgs_list:
            im = cv2.imread(d)
            outputs = predictor(im)
            v = Visualizer(im[:, :, ::-1],
                           metadata=train_metadata,
                           scale=0.9,
                           instance_mode=ColorMode.IMAGE_BW)
            v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
            predict_file = os.path.join(
                out_dir, os.path.splitext(os.path.basename(d))[0] + "_predict.png")
            cv2.imwrite(predict_file, v.get_image()[:, :, ::-1])
            if os.path.exists(predict_file):
                print("Done: %s" % predict_file)

        #### evaluate
        evaluator = COCOEvaluator(regist_val_name, cfg, False, output_dir="./output/")
        val_loader = build_detection_test_loader(cfg, regist_val_name)
        # inference_on_dataset(trainer.model, val_loader, evaluator)
        my_eval = inference_on_dataset(trainer.model, val_loader, evaluator)
        print(my_eval)
        log = ("%s evaluate: \n" % (model_name), my_eval)
        print(log, file=open(log_file, "a"))

        ###############
        DatasetCatalog._REGISTERED.pop(regist_train_name)
        DatasetCatalog._REGISTERED.pop(regist_val_name)
        log = "clean regist_data: %s and %s" % (regist_train_name, regist_val_name)
        print(log)
        print(log, file=open(log_file, "a"))

        d2_end = time.clock()
        log = "model %s : it takes %s ." % (model_name, str(d2_end - d2_start))
        print(log)
        print(log, file=open(log_file, "a"))
        os.chdir(work_root)
    else:
        print("NotFound: {}".format(model_path))
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512  # faster, and good enough for this toy dataset
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3  # number of classes in this dataset
# cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[8], [16], [32], [64], [128]]
# cfg.MODEL.RPN.IN_FEATURES = ['p1', 'p2', 'p3', 'p4', 'p5', 'p6']
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
# trainer.train()

cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # testing threshold
cfg.DATASETS.TEST = ("TestSet",)
predictor = DefaultPredictor(cfg)

evaluator = COCOEvaluator("TestSet", output_dir='./output')
val_loader = build_detection_test_loader(cfg, "TestSet")
inference_on_dataset(predictor.model, val_loader, evaluator)

# Randomly pick a few images and visualize the predictions
predVisRoot = '/home/yu/Documents/IncubitChallenge/Data1/predVis'
meta_dicts = MetadataCatalog.get('TestSet')

import random
import cv2
import matplotlib.pyplot as plt
from detectron2.utils.visualizer import Visualizer
from detectron2.utils.visualizer import ColorMode

dataset_dicts = myDataFuncTest()
for d in myDataFuncTest():
def build_evaluator(cfg):
    evaluator = COCOEvaluator(_name, cfg, False, output_dir=cfg.OUTPUT_DIR)
    val_loader = build_detection_test_loader(cfg, _name)
    return evaluator, val_loader
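# Hypothetical usage of the helper above: run COCO evaluation on the dataset
# referred to by the module-level _name, with an already built and loaded model
# (cfg and model are assumed to exist in the calling code):
from detectron2.evaluation import inference_on_dataset

evaluator, val_loader = build_evaluator(cfg)
results = inference_on_dataset(model, val_loader, evaluator)
print(results)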
def main(args):
    # Register datasets
    print("Registering wheat_detection_train")
    DatasetCatalog.register(
        "wheat_detection_train",
        lambda path=args.train_annot_fp: get_detectron_dicts(path))
    MetadataCatalog.get("wheat_detection_train").set(thing_classes=["Wheat"])
    print("Registering wheat_detection_val")
    DatasetCatalog.register(
        "wheat_detection_val",
        lambda path=args.val_annot_fp: get_detectron_dicts(path))
    MetadataCatalog.get("wheat_detection_val").set(thing_classes=["Wheat"])

    # Set up configurations
    cfg = get_cfg()
    if not args.model_dir:
        cfg.merge_from_file(
            model_zoo.get_config_file(f"COCO-Detection/{args.model}.yaml"))
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
            f"COCO-Detection/{args.model}.yaml")
        cfg.DATASETS.TRAIN = ("wheat_detection_train",)
        cfg.DATASETS.TEST = ("wheat_detection_val",)
        cfg.SOLVER.IMS_PER_BATCH = args.ims_per_batch
        cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
        cfg.SOLVER.BASE_LR = args.lr
        cfg.SOLVER.MAX_ITER = args.max_iter
        cfg.SOLVER.WARMUP_ITERS = args.warmup_iters
        cfg.SOLVER.GAMMA = args.gamma
        cfg.SOLVER.STEPS = args.lr_decay_steps
        cfg.DATALOADER.NUM_WORKERS = 6
        cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
        cfg.MODEL.RETINANET.NUM_CLASSES = 1
        cfg.OUTPUT_DIR = f"{args.model}__iter-{args.max_iter}__lr-{args.lr}"
        if os.path.exists(cfg.OUTPUT_DIR):
            shutil.rmtree(cfg.OUTPUT_DIR)
        os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
        # Save config
        with open(os.path.join(cfg.OUTPUT_DIR, "config.yaml"), "w") as f:
            f.write(cfg.dump())
    else:
        print("Loading model from ", args.model_dir)
        cfg.merge_from_file(os.path.join(args.model_dir, "config.yaml"))
        cfg.MODEL.WEIGHTS = os.path.join(args.model_dir, "model_final.pth")
        cfg.OUTPUT_DIR = args.model_dir

    # Train
    setup_logger(output=os.path.join(cfg.OUTPUT_DIR, "terminal_output.log"))
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    if args.train:
        trainer.train()

    # Evaluate
    if args.eval:
        evaluator = COCOEvaluator("wheat_detection_val", cfg, False,
                                  output_dir=cfg.OUTPUT_DIR)
        eval_results = trainer.test(cfg=cfg, model=trainer.model,
                                    evaluators=evaluator)
        with open(os.path.join(cfg.OUTPUT_DIR, "eval_results.json"), "w") as f:
            json.dump(eval_results, f)
register_datadict(datadic_train, "sample_fashion_train")
register_datadict(datadic_test, "sample_fashion_test")

# cfg = setup(args)
cfg = get_cfg()
# Add Solver etc.
add_imaterialist_config(cfg)
# Merge from config file.
config_file = "/home/dyt811/Git/cvnnig/iMaterialist2020/configs/config.yaml"
cfg.merge_from_file(config_file)
# Load the final weight.
cfg.MODEL.WEIGHTS = str(path_model / "model_0109999.pth")
cfg.OUTPUT_DIR = str(path_output)

trainer = DefaultTrainer(cfg)
# load weights
trainer.resume_or_load(resume=False)

# Evaluate performance using AP metric implemented in COCO API
evaluator = COCOEvaluator("sample_fashion_test", cfg, False,
                          output_dir=str(path_output))
val_loader = build_detection_test_loader(cfg, "sample_fashion_test",
                                         mapper=iMatDatasetMapper(cfg))
inference_on_dataset(trainer.model, val_loader, evaluator)
cfg.SOLVER.MAX_ITER = 50000  # you can certainly train longer
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 5  # 5 classes (Plate, Carrot, Celery, Pretzel, Gripper)

os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()

cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.9  # set the testing threshold for this model
cfg.DATASETS.TEST = ("test_set",)  # note the trailing comma: this must be a tuple, not a string
predictor = DefaultPredictor(cfg)

# for d in random.sample(dataset_dicts_test, 10):
#     im = cv2.imread(d["file_name"])
#     outputs = predictor(im)
#     v = Visualizer(im[:, :, ::-1],
#                    metadata=test_metadata,
#                    scale=0.8,
#                    instance_mode=ColorMode.IMAGE_BW  # remove the colors of unsegmented pixels
#                    )
#     v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
#     cv2.imshow('Output Image', v.get_image()[:, :, ::-1])
#     cv2.waitKey(0)

# Evaluate Performance
evaluator = COCOEvaluator("test_set", ("bbox", "segm"), False, output_dir="./output/")
test_loader = build_detection_test_loader(cfg, "test_set")
print(inference_on_dataset(trainer.model, test_loader, evaluator))
# another equivalent way to evaluate the model is to use `trainer.test`
def build_evaluator(cls, cfg, dataset_name):
    return COCOEvaluator("nflimpact_test", ('bbox',),
                         use_fast_impl=False, output_dir="output")
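# This signature matches DefaultTrainer.build_evaluator, so the enclosing trainer
# subclass might look like the sketch below (the class name ImpactTrainer is assumed):
from detectron2.engine import DefaultTrainer
from detectron2.evaluation import COCOEvaluator


class ImpactTrainer(DefaultTrainer):
    @classmethod
    def build_evaluator(cls, cfg, dataset_name):
        # used by ImpactTrainer.test() / periodic evaluation during training
        return COCOEvaluator("nflimpact_test", ('bbox',),
                             use_fast_impl=False, output_dir="output")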
    labels=['GT', 'MRCNN'])
    plt.savefig(folder + "/" + op + "/" +
                os.path.basename(d['file_name'])[:-4] + '_compare.pdf')
    plt.close()
    performance_cons_off["name"] = d["file_name"]
    performance.append(performance_cons_off)

#%% performance
np.array([performance[i]["f1_score"] for i in range(len(performance))]).mean()

#%%
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader

evaluator = COCOEvaluator("balloon_val", cfg, False, output_dir="./output/")
val_loader = build_detection_test_loader(cfg, "balloon_val")
inference_on_dataset(trainer.model, val_loader, evaluator)
# another equivalent way is to use trainer.test

#%%
detectron2_neurons.__name

#%%
from skimage.draw import polygon
from caiman.base.rois import nf_match_neurons_in_binary_masks
import matplotlib.pyplot as plt

im = cv2.imread(d["file_name"])
outputs = predictor(im)
v = Visualizer(
    outputs = predictor(img_thermal)
    name = files_names[i].split('.')[0]
    out_name = out_folder + '/' + name + '_' + model + '_result.jpg'
    v = Visualizer(img_thermal[:, :, ::-1],
                   MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
    v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
    #cv2.imshow('img', v.get_image()[:, :, ::-1])
    v.save(out_name)
    #cv2.waitKey()
    #pdb.set_trace()
"""

#cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set the testing threshold for this model
cfg.DATASETS.TEST = ("FLIR",)
predictor = DefaultPredictor(cfg)

from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader
from tools.plain_train_net import do_test

#pdb.set_trace()
evaluator = COCOEvaluator("FLIR", cfg, False, output_dir="./output/")
val_loader = build_detection_test_loader(cfg, "FLIR")
inference_on_dataset(trainer.model, val_loader, evaluator)
#pdb.set_trace()
    v = Visualizer(
        im[:, :, ::-1],
        metadata=shark_metadata,
        scale=0.2,
        # instance_mode=ColorMode.IMAGE_BW  # remove the colors of unsegmented pixels
    )
    v = v.draw_instance_predictions(outputs["instances"].to("cpu"))

    # Print the class ID
    classID = ((dictionary["annotations"])[0])["category_id"]
    print(ClassList[classID])

    img = v.get_image()[:, :, ::-1]
    # cv2_imshow(img)
    filename = "inferenceOutputs/" + dictionary["file_name"]
    cv2.imwrite(filename, img)

##################################
# Evaluation
##################################

# AP
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader

evaluator = COCOEvaluator("shark_val", cfg, False, output_dir="./output/")
# evaluator = COCOEvaluator("shark_train", cfg, False, output_dir="./output/")
val_loader = build_detection_test_loader(cfg, "shark_val")
# val_loader = build_detection_test_loader(cfg, "shark_train")
inference_on_dataset(trainer.model, val_loader, evaluator)
# another equivalent way is to use trainer.test
#cfg.MODEL.WEIGHTS = os.path.join('/home/010796032/PytorchWork/output_waymo', "model_0179999.pth")
cfg.MODEL.WEIGHTS = os.path.join(
    '/home/010796032/MyRepo/Detectron2output/', "model_0029999.pth")
cfg.SOLVER.IMS_PER_BATCH = 4
cfg.SOLVER.LR_SCHEDULER_NAME = 'WarmupCosineLR'
cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
cfg.SOLVER.MAX_ITER = 180000  # 140000  # you may need to train longer for a practical dataset
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512  # 128  # faster, and good enough for this toy dataset (default: 512)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(FULL_LABEL_CLASSES)  # 12  # KITTI has 9 classes (including "don't care")
cfg.TEST.EVAL_PERIOD = 20000  # 5000
#cfg.INPUT.ROTATION_ANGLES = [0, 90, 180]

now = datetime.now()
current_time = now.strftime("%H:%M:%S")
print("Current Time =", current_time)

predictor = DefaultPredictor(cfg)
evaluator = COCOEvaluator("waymococo2_val", cfg, False, output_dir=outputpath)
val_loader = build_detection_test_loader(cfg, "waymococo2_val")
inference_on_dataset(predictor.model, val_loader, evaluator)

# os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
# trainer = Trainer(cfg)  # DefaultTrainer(cfg)
# trainer.resume_or_load(resume=True)
# trainer.train()
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = args.batch

# CAUTION!!! If you set this to a value different from 0, the training stage takes a really long
# time. We should check how to plot train and validation loss curves properly.
# ALSO, I THINK this computes the TEST loss (not validation loss), so it's better to SET IT TO 0
cfg.TEST.EVAL_PERIOD = 0  # frequency of validation loss computations (to plot curves)

if args.augm:
    trainer = MyTrainerAugm(cfg)
else:
    trainer = MyTrainer(cfg)

trainer.resume_or_load(resume=True)
trainer.train()

###-------INFERENCE AND EVALUATION---------------------------
cfg.MODEL.WEIGHTS = os.path.join(
    cfg.OUTPUT_DIR, "model_final.pth")  # path to the model we just trained

### MAP #####
evaluator = COCOEvaluator(dataset + '_test', cfg, False, output_dir=cfg.OUTPUT_DIR)
val_loader = build_detection_test_loader(cfg, dataset + '_test')
print('---------------------------------------------------------')
print('Evaluation with model ', model_path)
print(inference_on_dataset(trainer.model, val_loader, evaluator))
print('---------------------------------------------------------')
# carplate_metadata = MetadataCatalog.get("carplate_train")
MetadataCatalog.get("carplate_val").set(evaluator_type='coco')

cfg = get_cfg()
cfg.merge_from_file(os.path.join(ROOT, CONFIG, "mask_rcnn_R_50_FPN_3x.yaml"))
cfg.DATASETS.TRAIN = ("carplate",)
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 2
cfg.MODEL.DEVICE = DEVICE
cfg.MODEL.WEIGHTS = os.path.join(
    ROOT, WEIGHTS, "mask_rcnn_R50_model_final.pth")  # Let training initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
cfg.SOLVER.MAX_ITER = 300  # 300 iterations seems good enough for this toy dataset; you may need to train longer for a practical dataset
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset (default: 512)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1  # only has one class (carplate)
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
# trainer.train()

from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader

evaluator = COCOEvaluator("carplate_val", cfg, False, output_dir="./output/")
val_loader = build_detection_test_loader(cfg, "carplate_val")
inference_on_dataset(trainer.model, val_loader, evaluator)
# another equivalent way is to use trainer.test
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
    if output_folder is None:
        os.makedirs("coco_eval", exist_ok=True)
        output_folder = "coco_eval"
    return COCOEvaluator(dataset_name, cfg, False, output_folder)
def do_evaluation(dataset, trainer, cfg):
    sigmas = [0.025, 0.025, 0.025, 0.025, 0.025, 0.025, 0.107, 0.107]
    # TODO: tune evaluator
    # evaluator = COCOEvaluator(dataset, cfg, False, output_dir="./output/")
    evaluator = COCOEvaluator(dataset_name=dataset,
                              tasks=("bbox", "keypoints"),
                              distributed=True,
                              output_dir="./output/",
                              use_fast_impl=True,
                              kpt_oks_sigmas=sigmas)
    data_loader = build_detection_test_loader(cfg, dataset)
    # takes inputs (through the data loader, 2nd arg), runs the model on them,
    # and aggregates the metrics with the evaluator
    print(inference_on_dataset(trainer.model, data_loader, evaluator))
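# The OKS sigmas should have one entry per keypoint, so with the 8 sigmas above the
# keypoint head and test config are usually kept consistent with them. A minimal
# sketch, assuming an 8-keypoint dataset:
cfg.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 8
cfg.TEST.KEYPOINT_OKS_SIGMAS = [0.025] * 6 + [0.107] * 2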
cfg.DATASETS.TEST = ("weapons_val",)
cfg.DATALOADER.NUM_WORKERS = 4
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.001
cfg.SOLVER.WARMUP_ITERS = 1000
cfg.SOLVER.MAX_ITER = 1500
cfg.SOLVER.STEPS = (1000, 1500)
cfg.SOLVER.GAMMA = 0.05
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 8
cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(classes)
cfg.TEST.EVAL_PERIOD = 500
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.85
cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS = False
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

trainer = CocoTrainer(cfg)
trainer.resume_or_load(resume=False)
predictor = DefaultPredictor(cfg)

evaluator = COCOEvaluator("weapons_val", cfg, False, output_dir="./output/")
val_loader = build_detection_test_loader(cfg, "weapons_val")
inference_on_dataset(trainer.model, val_loader, evaluator)

experiment_folder = './output/'
    path_save_name = "/content/drive/My Drive/retina_0/" + mm
    re.save(path_save_name)
    plt.imshow(aa)

# making new
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
cfg.DATASETS.TEST = ("dataset_train0",)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set the testing threshold for this model
predictor = DefaultPredictor(cfg)
test_metadata = MetadataCatalog.get("dataset_train0")

from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_test_loader
from detectron2.evaluation import COCOEvaluator, inference_on_dataset

evaluator = COCOEvaluator("dataset_train0", cfg, False, output_dir="./output/")
val_loader = build_detection_test_loader(cfg, "dataset_train0")
inference_on_dataset(trainer.model, val_loader, evaluator)

"""## second"""

cfg = get_cfg()
# cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml"))
cfg.merge_from_file(model_zoo.get_config_file("COCO-Detection/retinanet_R_50_FPN_3x.yaml"))
cfg.DATASETS.TRAIN = ("dataset_val1",)
cfg.DATASETS.TEST = ("dataset_train1",)
cfg.DATALOADER.NUM_WORKERS = 1
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Detection/retinanet_R_50_FPN_3x.yaml")  # Let training initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 4
cfg.SOLVER.BASE_LR = 0.00025
cfg.MODEL.RETINANET.NUM_CLASSES = 2
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 4
# Let training initialize from model zoo
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')
cfg.SOLVER.IMS_PER_BATCH = 4
cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
cfg.SOLVER.MAX_ITER = 1000  # enough for this toy dataset; you will need to train longer for a practical dataset
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset (default: 512)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 20
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()

# Inference should use the config with parameters that are used in training
# cfg now already contains everything we've set previously. We change it a little bit for inference:
cfg.MODEL.WEIGHTS = os.path.join(
    cfg.OUTPUT_DIR, 'model_final.pth')  # path to the model we just trained
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set a custom testing threshold
predictor = DefaultPredictor(cfg)

evaluator = COCOEvaluator('my_dataset_val', ('segm',), False, output_dir='./output/')
val_loader = build_detection_test_loader(cfg, 'my_dataset_val')
print(inference_on_dataset(trainer.model, val_loader, evaluator))
# NOTE: build_model(cfg) returns a model with freshly initialized weights;
# a checkpoint should be loaded before testing (see the sketch below)
print(trainer.test(cfg, build_model(cfg)))
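# A sketch of evaluating via trainer.test with the trained checkpoint actually
# loaded into the freshly built model and the evaluator passed in explicitly
# (the dataset name 'my_dataset_val' and cfg.MODEL.WEIGHTS are as set above):
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.modeling import build_model

cfg.DATASETS.TEST = ('my_dataset_val',)
model = build_model(cfg)
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)  # load model_final.pth
print(trainer.test(cfg, model, evaluators=[evaluator]))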
    image_rgb_i = cv2.merge([r, g, b])
    plt.imshow(image_rgb_i)
    plt.show()

########### evaluate ###########
# We can also evaluate its performance using the AP metric implemented in the COCO API.
from detectron2.evaluation import COCOEvaluator, inference_on_dataset
from detectron2.data import build_detection_test_loader

#evaluator = COCOEvaluator("my_dataset_val", ("bbox", "segm"), False, output_dir="./output/")
evaluator = COCOEvaluator("balloon_val", ("bbox", "segm"), False, output_dir="./output/")
#val_loader = build_detection_test_loader(cfg, "my_dataset_val")
val_loader = build_detection_test_loader(cfg, "balloon_val")
print(inference_on_dataset(trainer.model, val_loader, evaluator))
# another equivalent way to evaluate the model is to use `trainer.test`
########### end of eval ###########

# Inference with a keypoint detection model
#cfg = get_cfg()  # get a fresh new config
#cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml"))
#cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set threshold for this model
#cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")
#predictor = DefaultPredictor(cfg)
#outputs = predictor(im)
#v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "COCO-InstanceSegmentation/mask_rcnn_X_101_32x8d_FPN_3x.yaml"
)  # Let training initialize from model zoo
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.0025  # pick a good LR
cfg.SOLVER.MAX_ITER = 100  # 300 iterations seems good enough for this toy dataset; you may need to train longer for a practical dataset
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512  # faster, and good enough for this toy dataset (default: 512)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
# cfg.INPUT.MASK_FORMAT = 'rle'
# cfg.INPUT.MASK_FORMAT = 'bitmask'
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()

# EVALUATION
evaluator = COCOEvaluator("kitti-mots", cfg, False, output_dir="./output/")
val_loader = build_detection_test_loader(cfg, "kitti-mots")
inference_on_dataset(trainer.model, val_loader, evaluator)
# another equivalent way is to use trainer.test

'''for d in random.sample(dataset_dicts, 5):
    img = cv2.imread(d["file_name"])
    visualizer = Visualizer(
        img[:, :, ::-1], metadata=MetadataCatalog.get("mots"), scale=0.5
    )
    vis = visualizer.draw_dataset_dict(d)
    cv2.imshow("here", vis.get_image()[:, :, ::-1])
    cv2.waitKey(0)
    cv2.destroyAllWindows()'''
def build_evaluator(cls, cfg, dataset_name, output_folder=None):
    if output_folder is None:
        output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
    return COCOEvaluator(dataset_name, cfg, True, output_folder)
def main():
    print('Starting ')
    if TRAIN:
        print('Training with the following config \n')
        print('Base Learning Rate: ', BASE_LEARNING_RATE)
        print('Momentum: ', MOMENTUM)
        print('Weight Decay', WEIGHT_DECAY)
        print('Number of Epochs: ', NUMBER_OF_EPOCHS)
        print('Crop: ', CROP_ENABLE)
        if CROP_ENABLE:
            print('Crop size and type: ', CROP_SIZE, ' ', CROP_TYPE)
        if INPUT_SIZE_ENABLE:
            print('Input size min: ', MIN_SIZE_TEST)
            print('Input size max: ', MAX_SIZE_TEST)
    else:
        print('Inference on ', VAL_SET)
        print('FLIP + CROP')
    print('\n\n')

    # Configuration
    cfg = get_cfg()
    if GET_VALIDATION_PLOTS:
        cfg.TEST.EVAL_PERIOD = EVAL_PERIOD
    if VAL_SET == 'KITTI-MOTS':
        cfg.OUTPUT_DIR = PATH_KITTI_MOTS + '/EPOCHS_' + str(NUMBER_OF_EPOCHS)
    elif VAL_SET == 'MOTSChallenge':
        cfg.OUTPUT_DIR = PATH_MOTSCHALLENGE + '/EPOCHS_'
    cfg.merge_from_file(model_zoo.get_config_file(MODEL))
    cfg.DATALOADER.NUM_WORKERS = 2
    if USE_KITTI_MOTS_WEIGHTS:
        cfg.MODEL.WEIGHTS = KITTI_MOTS_WEIGHTS
    else:
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(MODEL)
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.LR_SCHEDULER_NAME = LEARNING_RATE_SCHEDULER
    cfg.SOLVER.BASE_LR = BASE_LEARNING_RATE
    cfg.SOLVER.MAX_ITER = NUMBER_OF_EPOCHS
    # cfg.SOLVER.MOMENTUM = MOMENTUM
    # cfg.SOLVER.WEIGHT_DECAY = WEIGHT_DECAY
    # cfg.SOLVER.STEPS = (MIN_STEP, MAX_STEP)
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = BATCH_SIZE
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = SCORE_THRESHOLD
    if TRAIN_SET == 'KITTI-MOTS':
        cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    if INPUT_SIZE_ENABLE:
        cfg.INPUT.MIN_SIZE_TRAIN = MIN_SIZE_TRAIN
        cfg.INPUT.MAX_SIZE_TRAIN = MAX_SIZE_TRAIN
        cfg.INPUT.MIN_SIZE_TEST = MIN_SIZE_TEST
        cfg.INPUT.MAX_SIZE_TEST = MAX_SIZE_TEST
    if CROP_ENABLE:
        cfg.INPUT.CROP.ENABLE = CROP_ENABLE
        cfg.INPUT.CROP.TYPE = CROP_TYPE
        cfg.INPUT.CROP.SIZE = CROP_SIZE
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    # Finish configuration depending on the procedure we are performing
    datasets = ['dataset-train', 'dataset-val']
    cfg.DATASETS.TRAIN = (datasets[0],)
    cfg.DATASETS.TEST = (datasets[1],)

    # Register the datasets
    registered = DATASETS_REGISTERED
    if not registered:
        for d in datasets:
            DatasetCatalog.register(d, lambda d=d: dataset(d))
            MetadataCatalog.get(d).set(
                thing_classes=["Pedestrian", "None", "Car"])
    kitti_mots_metadata_train = MetadataCatalog.get(datasets[0])
    kitti_mots_metadata_validation = MetadataCatalog.get(datasets[1])
    MetadataCatalog.get(cfg.DATASETS.TRAIN[0]).set(crop=True, flip=True,
                                                   saturation=False)

    # Set up trainer
    if GET_VALIDATION_PLOTS:
        trainer = KITTITrainer(cfg)
    else:
        trainer = CTrainer(cfg)
    """trainer = KITTITrainer(cfg)"""
    trainer.resume_or_load(resume=False)

    # Train if wanted
    if TRAIN:
        print('Start training')
        trainer.train()
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")

    # Start evaluation
    if GET_VALIDATION_PLOTS:
        trainer.test(cfg, trainer.model)
        plot_loss_curve(cfg, MODEL_NAME)
    else:
        evaluator = COCOEvaluator(datasets[1], cfg, False,
                                  output_dir=cfg.OUTPUT_DIR)
        val_loader = build_detection_test_loader(cfg, datasets[1])
        inference_on_dataset(trainer.model, val_loader, evaluator)