def task_b_MOTS_training(model_name, model_file):
    """Fine-tune `model_file` on MOTS_train, evaluate on KITTIMOTS_val,
    plot the losses and save qualitative predictions.

    Args:
        model_name: label used for the results directory and output image names.
        model_file: detectron2 model-zoo config path.
    """
    print('Running task B for model', model_name)
    save_dir = os.path.join('./results_week_5_task_b', model_name)
    os.makedirs(save_dir, exist_ok=True)

    # Build the configuration on top of the model-zoo baseline.
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    cfg.DATASETS.TRAIN = ('MOTS_train',)
    cfg.DATASETS.TEST = ('KITTIMOTS_val',)
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.OUTPUT_DIR = save_dir
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = 200
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.TEST.SCORE_THRESH = 0.5

    # Training with a validation-loss hook.
    print('Training')
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # NOTE(review): swaps the last two registered hooks — presumably so the
    # validation-loss hook runs before the final writer hook; confirm intent.
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Evaluation using the weights captured by the validation-loss hook.
    print('Evaluating')
    evaluator = COCOEvaluator('KITTIMOTS_val', cfg, False, output_dir=save_dir)
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    print('Plotting losses')
    draw_loss(cfg, cfg.SOLVER.MAX_ITER, model_name, save_dir)

    # Qualitative results: visualize predictions for selected validation frames.
    print('Getting qualitative results')
    predictor = DefaultPredictor(cfg)
    predictor.model.load_state_dict(trainer.model.state_dict())
    samples = kitti_val()
    samples = samples[220:233] + samples[1995:2100]
    for idx, sample in enumerate(samples):
        file_name = sample['file_name']
        print('Prediction on image ' + file_name)
        img = cv2.imread(file_name)
        outputs = predictor(img)
        viz = Visualizer(
            img[:, :, ::-1],
            metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
            scale=0.8,
            instance_mode=ColorMode.IMAGE)
        viz = viz.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(
            os.path.join(save_dir, 'Inference_' + model_name + '_inf_' + str(idx) + '.png'),
            viz.get_image()[:, :, ::-1])
def task_b(model_name, model_file):
    """Fine-tune `model_file` for task B, evaluate on kitti-mots-val and
    produce loss plots plus qualitative results.

    Args:
        model_name: label used for the output directory.
        model_file: detectron2 model-zoo config path.
    """
    save_path = Path("output/task_b") / model_name
    save_path.mkdir(parents=True, exist_ok=True)

    # Start from the shared base configuration and override the solver knobs.
    cfg = base_cfg(model_file, save_path)
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = 1000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.TEST.SCORE_THRESH = 0.5

    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # NOTE(review): swaps the last two registered hooks — presumably so the
    # validation-loss hook runs before the final writer hook; confirm intent.
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Evaluate with the weights captured by the validation-loss hook.
    evaluator = COCOEvaluator("kitti-mots-val", cfg, False, output_dir=save_path)
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])

    plot_validation_loss(cfg, cfg.SOLVER.MAX_ITER, model_name, save_path)
    get_qualitative_results(cfg, save_path)
def train(output, iou=None, nms=None, rpn=None):
    """Fine-tune a Cityscapes Mask R-CNN on KITTI-MOTS and evaluate it.

    Args:
        output: directory for checkpoints and evaluation artifacts.
        iou: optional (low, high) pair overriding RPN IoU thresholds.
        nms: optional RPN NMS threshold override.
        rpn: optional (train, test) pair overriding pre-NMS top-k proposals.
    """
    batch_size = 2

    # Re-register the dataset splits from scratch for this run.
    DatasetCatalog.clear()
    register_kitti_mots_dataset(
        "datasets/KITTI-MOTS/training/image_02",
        "datasets/KITTI-MOTS/instances_txt",
        ("kitti_mots_train", "kitti_mots_test"),
        image_extension="png")

    cfg_file = "Cityscapes/mask_rcnn_R_50_FPN.yaml"
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(cfg_file))
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(cfg_file)
    cfg.SEED = 42
    cfg.DATASETS.TRAIN = ("kitti_mots_train",)
    cfg.DATASETS.TEST = ("kitti_mots_test",)
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.SOLVER.IMS_PER_BATCH = batch_size
    cfg.SOLVER.BASE_LR = 0.0002 * batch_size / 16  # pick a good LR
    cfg.SOLVER.MAX_ITER = 7500
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = output

    # Apply the optional RPN hyper-parameter overrides.
    if iou is not None:
        cfg.MODEL.RPN.IOU_THRESHOLDS = [iou[0], iou[1]]
    if nms is not None:
        cfg.MODEL.RPN.NMS_THRESH = nms
    if rpn is not None:
        cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN = rpn[0]
        cfg.MODEL.RPN.PRE_NMS_TOPK_TEST = rpn[1]

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # NOTE(review): swaps the last two registered hooks — presumably so the
    # validation-loss hook runs before the final writer hook; confirm intent.
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=True)
    trainer.train()

    evaluator = COCOEvaluator("kitti_mots_test", cfg, False, output_dir=output)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    plot_losses(cfg)
def main(args):
    """Train a detector on the license-plates VOC dataset and return the
    training result.

    NOTE(review): `opt` and `data_path` are read from the enclosing module,
    not from `args` — confirm this is intentional.
    """
    # Register licenseplates dataset splits.
    register_licenseplates_voc("licenseplates_train", data_path, "train")
    register_licenseplates_voc("licenseplates_test", data_path, "test")

    # Setup model configuration
    cfg = setup_cfg(opt, data_path)
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # NOTE(review): swaps the last two registered hooks — presumably so the
    # validation-loss hook runs before the final writer hook; confirm intent.
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]

    # Resume only when the given checkpoint directory actually exists.
    resume = opt.resume_training if os.path.isdir(opt.resume_training) else False
    trainer.resume_or_load(resume=resume)
    return trainer.train()
# NOTE(review): fragment of a larger training function — `cfg`, `batch_size`,
# `output_dir` and `opts` are defined earlier, outside this excerpt.
cfg.DATASETS.TEST = ("kitti_mots_test", )
cfg.DATALOADER.NUM_WORKERS = 4
cfg.SOLVER.IMS_PER_BATCH = batch_size
cfg.SOLVER.BASE_LR = 0.0002 * batch_size / 16  # pick a good LR
cfg.SOLVER.MAX_ITER = 5000
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
cfg.OUTPUT_DIR = output_dir
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
# Train with a validation-loss hook attached.
trainer = DefaultTrainer(cfg)
val_loss = ValidationLoss(cfg)
trainer.register_hooks([val_loss])
# NOTE(review): swaps the last two registered hooks — presumably so the
# validation-loss hook runs before the final writer hook; confirm intent.
trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
trainer.resume_or_load(resume=True)
trainer.train()
# Evaluation and plotting are skipped in train-only mode.
if not opts.train_only:
    evaluator = COCOEvaluator("kitti_mots_test", cfg, False, output_dir=output_dir)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    plot_losses(cfg)
# Build a predictor that reuses the freshly trained weights.
predictor = DefaultPredictor(cfg)
predictor.model.load_state_dict(trainer.model.state_dict())
def experiment_1(exp_name, model_file):
    """Run Task B experiment `exp_name`: fine-tune `model_file` on the KITTI
    train split, evaluate on the test split and save qualitative predictions.

    Args:
        exp_name: label used for the results directory and output file names.
        model_file: detectron2 model-zoo config path.
    """
    print('Running Task B experiment', exp_name)
    save_dir = os.path.join('./results_week_6_task_b', exp_name)
    os.makedirs(save_dir, exist_ok=True)

    # Loading data
    print('Loading data')
    kittiloader = KittiMots()

    def rkitti_train():
        return kittiloader.get_dicts(flag='train', method='complete', percentage=1.0)

    def rkitti_val():
        return kittiloader.get_dicts(flag='val')

    def rkitti_test():
        return kittiloader.get_dicts(flag='test')

    # Register all three splits with identical class metadata.
    for split_name, loader_fn in (('KITTI_train', rkitti_train),
                                  ('KITTI_val', rkitti_val),
                                  ('KITTI_test', rkitti_test)):
        DatasetCatalog.register(split_name, loader_fn)
        MetadataCatalog.get(split_name).set(
            thing_classes=list(KITTI_CATEGORIES.keys()))

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    cfg.DATASETS.TRAIN = ('KITTI_train', )
    cfg.DATASETS.TEST = ('KITTI_val', )
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = save_dir
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = 4000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.TEST.SCORE_THRESH = 0.5

    # Training
    print('Training')
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # NOTE(review): swaps the last two registered hooks — presumably so the
    # validation-loss hook runs before the final writer hook; confirm intent.
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Evaluation on the held-out test split with the hook-captured weights.
    print('Evaluating')
    cfg.DATASETS.TEST = ('KITTI_test', )
    evaluator = COCOEvaluator('KITTI_test', cfg, False, output_dir=save_dir)
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    print('Plotting losses')
    plot_validation_loss(cfg, cfg.SOLVER.MAX_ITER, exp_name, save_dir, 'validation_loss.png')

    # Qualitative results: visualize some results
    print('Getting qualitative results')
    predictor = DefaultPredictor(cfg)
    predictor.model.load_state_dict(trainer.model.state_dict())
    samples = rkitti_test()
    samples = [samples[idx] for idx in TEST_INFERENCE_VALUES]
    for i, sample in enumerate(samples):
        file_name = sample['file_name']
        print('Prediction on image ' + file_name)
        img = cv2.imread(file_name)
        outputs = predictor(img)
        viz = Visualizer(img[:, :, ::-1],
                         metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
                         scale=0.8,
                         instance_mode=ColorMode.IMAGE)
        viz = viz.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(
            os.path.join(save_dir, 'Inference_' + exp_name + '_inf_' + str(i) + '.png'),
            viz.get_image()[:, :, ::-1])
def training_loop(SAVE_PATH, model_name, model_file, hyperparams, dataloader, checkpoint=None, visualize=True): # Load model and configuration print('Loading Model') cfg = get_cfg() cfg.merge_from_file(model_zoo.get_config_file(model_file)) cfg.DATASETS.TRAIN = ('MOTS_train', ) cfg.DATASETS.TEST = ('KITTIMOTS_val', ) cfg.DATALOADER.NUM_WORKERS = 0 cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 cfg.OUTPUT_DIR = SAVE_PATH if checkpoint: last_checkpoint = torch.load(checkpoint) new_path = checkpoint.split('.')[0]+'_modified.pth' last_checkpoint['iteration'] = -1 torch.save(last_checkpoint,new_path) cfg.MODEL.WEIGHTS = new_path else: cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file) cfg.SOLVER.IMS_PER_BATCH = hyperparams['batch'] cfg.SOLVER.BASE_LR = hyperparams['lr'] cfg.SOLVER.LR_SCHEDULER_NAME = hyperparams['scheduler'] cfg.MODEL.RPN.IOU_THRESHOLDS = hyperparams['iou'] cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN = hyperparams['top_k_train'] cfg.SOLVER.MAX_ITER = 1000 cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256 cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3 cfg.TEST.SCORE_THRESH = 0.5 # Training print('Training') trainer = DefaultTrainer(cfg) val_loss = ValidationLoss(cfg) trainer.register_hooks([val_loss]) trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1] trainer.resume_or_load(resume=False) trainer.train() # Evaluation print('Evaluating') evaluator = COCOEvaluator('KITTIMOTS_val', cfg, False, output_dir=SAVE_PATH) trainer.model.load_state_dict(val_loss.weights) trainer.test(cfg, trainer.model, evaluators=[evaluator]) print('Plotting losses') plot_validation_loss(cfg, cfg.SOLVER.MAX_ITER, model_name, SAVE_PATH) if visualize: # Qualitative results: visualize some results print('Getting qualitative results') predictor = DefaultPredictor(cfg) predictor.model.load_state_dict(trainer.model.state_dict()) def kitti_val(): return dataloader.get_dicts(train_flag=False) inputs = kitti_val() inputs = inputs[:20] + inputs[-20:] for i, input in enumerate(inputs): file_name = 
input['file_name'] print('Prediction on image ' + file_name) img = cv2.imread(file_name) outputs = predictor(img) v = Visualizer( img[:, :, ::-1], metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=0.8, instance_mode=ColorMode.IMAGE) v = v.draw_instance_predictions(outputs['instances'].to('cpu')) cv2.imwrite(os.path.join(SAVE_PATH, 'Inference_' + model_name + '_inf_' + str(i) + '.png'), v.get_image()[:, :, ::-1])
def KITTIMOTS_training_and_evaluation_task(model_name, model_file):
    """Fine-tune `model_file` on KITTI-MOTS for roughly one pass over the
    train split, evaluate it and save qualitative inference results.

    Args:
        model_name: label used for the per-model results directory and images.
        model_file: detectron2 model-zoo config path.
    """
    path = os.path.join(SAVE_PATH, 'train_task', model_name)
    if not os.path.exists(path):
        os.makedirs(path)

    # Load Data
    print('Loading Data.')
    dataloader = KITTIMOTS_Dataloader()

    def kittimots_train():
        return dataloader.get_dicts(train_flag=True)

    def kittimots_test():
        return dataloader.get_dicts(train_flag=False)

    # Register both splits with identical class metadata.
    for split_name, loader_fn in (("KITTIMOTS_train", kittimots_train),
                                  ("KITTIMOTS_test", kittimots_test)):
        DatasetCatalog.register(split_name, loader_fn)
        MetadataCatalog.get(split_name).set(
            thing_classes=list(KITTI_CATEGORIES.keys()))

    NUM_IMGS = len(kittimots_train())
    print(NUM_IMGS)

    # PARAMETERS
    print('Loading Model.')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    cfg.DATASETS.TRAIN = ('KITTIMOTS_train', )
    cfg.DATASETS.TEST = ('KITTIMOTS_test', )
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    # Enough iterations to cover the train split once at this batch size.
    cfg.SOLVER.MAX_ITER = NUM_IMGS // cfg.SOLVER.IMS_PER_BATCH + 1
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2

    # Training
    print('Training....')
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # NOTE(review): swaps the last two registered hooks — presumably so the
    # validation-loss hook runs before the final writer hook; confirm intent.
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    # EVALUATION
    print('Evaluating....')
    # NOTE(review): evaluation artifacts go to "./output/", not `path` —
    # verify this mismatch with the results directory is intended.
    evaluator = COCOEvaluator("KITTIMOTS_test", cfg, False, output_dir="./output/")
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    plot_validation_loss(cfg)

    # Qualitative results
    print('Inference on trained model')
    predictor = DefaultPredictor(cfg)
    predictor.model.load_state_dict(trainer.model.state_dict())
    dataloader = Inference_Dataloader()
    dataset = dataloader.load_data()
    print('Getting Qualitative Results...')
    for i, img_path in enumerate(dataset['test'][:20]):
        img = cv2.imread(img_path)
        outputs = predictor(img)
        viz = Visualizer(img[:, :, ::-1],
                         metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
                         scale=0.8,
                         instance_mode=ColorMode.IMAGE)
        viz = viz.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(
            os.path.join(path, 'Inference_' + model_name + '_trained_' + str(i) + '.png'),
            viz.get_image()[:, :, ::-1])
def task_b_MOTS_and_KITTI_training(model_name, model_file):
    """Fine-tune `model_file` on the combined MOTS+KITTI train split,
    evaluate on KITTIMOTS_val, plot losses and save qualitative predictions.

    Args:
        model_name: label used for the results directory and output file names.
        model_file: detectron2 model-zoo config path.

    Cleanup vs. previous revision: removed the block of commented-out
    hyperparameter experiments and the leftover `print(cfg)` debug dump of
    the full configuration.
    """
    print('Running task B for model', model_name)
    SAVE_PATH = os.path.join('./results_week_5_task_c', model_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.DATASETS.TRAIN = ('MOTS_KITTI_train', )
    cfg.DATASETS.TEST = ('KITTIMOTS_val', )
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.LR_SCHEDULER_NAME = "WarmupCosineLR"
    cfg.SOLVER.MAX_ITER = 1000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.TEST.SCORE_THRESH = 0.5

    # Training
    print('Training')
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # NOTE(review): swaps the last two registered hooks — presumably so the
    # validation-loss hook runs before the final writer hook; confirm intent.
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Evaluation with the weights captured by the validation-loss hook.
    print('Evaluating')
    evaluator = COCOEvaluator('KITTIMOTS_val', cfg, False, output_dir=SAVE_PATH)
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    print('Plotting losses')
    plot_validation_loss(cfg, cfg.SOLVER.MAX_ITER, model_name, SAVE_PATH)

    # Qualitative results: visualize some results
    print('Getting qualitative results')
    predictor = DefaultPredictor(cfg)
    predictor.model.load_state_dict(trainer.model.state_dict())
    inputs = kitti_val()
    inputs = inputs[220:233] + inputs[1995:2100]
    for i, sample in enumerate(inputs):
        file_name = sample['file_name']
        print('Prediction on image ' + file_name)
        img = cv2.imread(file_name)
        outputs = predictor(img)
        v = Visualizer(img[:, :, ::-1],
                       metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
                       scale=0.8,
                       instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(
            os.path.join(SAVE_PATH, 'Inference_' + model_name + '_inf_' + str(i) + '.png'),
            v.get_image()[:, :, ::-1])