def Model(self, model_name="resnet18", gpu_devices=[0]): ''' User function: Set Model parameters Available Models resnet18 resnet34 resnet50 resnet101 resnet152 Args: model_name (str): Select model from available models gpu_devices (list): List of GPU Device IDs to be used in training Returns: None ''' num_classes = self.system_dict["local"]["dataset_train"].num_classes() if model_name == "resnet18": retinanet = model.resnet18(num_classes=num_classes, pretrained=True) elif model_name == "resnet34": retinanet = model.resnet34(num_classes=num_classes, pretrained=True) elif model_name == "resnet50": retinanet = model.resnet50(num_classes=num_classes, pretrained=True) elif model_name == "resnet101": retinanet = model.resnet101(num_classes=num_classes, pretrained=True) elif model_name == "resnet152": retinanet = model.resnet152(num_classes=num_classes, pretrained=True) if self.system_dict["params"]["use_gpu"]: self.system_dict["params"]["gpu_devices"] = gpu_devices if len(self.system_dict["params"]["gpu_devices"]) == 1: os.environ["CUDA_VISIBLE_DEVICES"] = str( self.system_dict["params"]["gpu_devices"][0]) else: os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([ str(id) for id in self.system_dict["params"]["gpu_devices"] ]) self.system_dict["local"][ "device"] = 'cuda' if torch.cuda.is_available() else 'cpu' retinanet = retinanet.to(self.system_dict["local"]["device"]) retinanet = torch.nn.DataParallel(retinanet).to( self.system_dict["local"]["device"]) retinanet.training = True retinanet.train() retinanet.module.freeze_bn() self.system_dict["local"]["model"] = retinanet
def export( checkpoint: str, output_path, num_classes: Optional[int] = 1, model_arch: Optional[str] = "resnet-50", input_size: Optional[Tuple[int, int]] = (512, 512), batch_size: Optional[int] = 1, verbose: Optional[bool] = False, ): assert output_path.endswith( ".onnx"), "`output_path` must be path to the output `onnx` file" if model_arch == "resnet-18": net = model.resnet18(num_classes) elif model_arch == "resnet-34": net = model.resnet34(num_classes) elif model_arch == "resnet-50": net = model.resnet50(num_classes) elif model_arch == "resnet-101": net = model.resnet101(num_classes) elif model_arch == "resnet-152": net = model.resnet152(num_classes) else: raise NotImplementedError device = torch.device( "cuda:0") if torch.cuda.is_available() else torch.device("cpu") logger.info(f"using device: {device}") net = net.to(device) state_dict = torch.load(checkpoint, map_location=device) state_dict = remove_module(state_dict) net.load_state_dict(state_dict) logger.info(f"successfully loaded saved checkpoint.") dummy_input = torch.randn(batch_size, 3, input_size[0], input_size[1]) net.eval() net.export = True dummy_input = dummy_input.to(device) logger.info(f"exporting to {output_path}...") torch.onnx.export( net, dummy_input, output_path, opset_version=11, verbose=verbose, input_names=["input"], output_names=["anchors", "classification", "regression"], ) logger.info("export complete")
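# export() above relies on a remove_module() helper to strip the "module."
# prefix that torch.nn.DataParallel adds to state-dict keys before loading the
# checkpoint into a bare model. A minimal sketch of such a helper (assumed
# behaviour; the project's actual implementation is not shown here):
from collections import OrderedDict

def remove_module(state_dict):
    cleaned = OrderedDict()
    for key, value in state_dict.items():
        # e.g. "module.conv1.weight" -> "conv1.weight"
        cleaned[key[len("module."):] if key.startswith("module.") else key] = value
    return cleaned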
def Model(self, model_name="resnet18", gpu_devices=[0]): num_classes = self.system_dict["local"]["dataset_train"].num_classes() if model_name == "resnet18": retinanet = model.resnet18(num_classes=num_classes, pretrained=True) elif model_name == "resnet34": retinanet = model.resnet34(num_classes=num_classes, pretrained=True) elif model_name == "resnet50": retinanet = model.resnet50(num_classes=num_classes, pretrained=True) elif model_name == "resnet101": retinanet = model.resnet101(num_classes=num_classes, pretrained=True) elif model_name == "resnet152": retinanet = model.resnet152(num_classes=num_classes, pretrained=True) if self.system_dict["params"]["use_gpu"]: self.system_dict["params"]["gpu_devices"] = gpu_devices if len(self.system_dict["params"]["gpu_devices"]) == 1: os.environ["CUDA_VISIBLE_DEVICES"] = str( self.system_dict["params"]["gpu_devices"][0]) else: os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([ str(id) for id in self.system_dict["params"]["gpu_devices"] ]) self.system_dict["local"][ "device"] = 'cuda' if torch.cuda.is_available() else 'cpu' retinanet = retinanet.to(self.system_dict["local"]["device"]) retinanet = torch.nn.DataParallel(retinanet).to( self.system_dict["local"]["device"]) retinanet.training = True retinanet.train() retinanet.module.freeze_bn() self.system_dict["local"]["model"] = retinanet
def main(args=None): parser = argparse.ArgumentParser( description='Simple training script for training a RetinaNet network.') parser.add_argument("--data_config", type=str, default="data/retina_label/custom.data", help="path to data config file") parser.add_argument( "--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation") parser.add_argument( '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50) parser.add_argument('--epochs', help='Number of epochs', type=int, default=15) parser.add_argument("--batch_size", type=int, default=4, help="size of each image batch") parser.add_argument('--pretrained_model', type=str, default=None, help='load pretrained model') parser.add_argument('--optim_scheduler', type=str, default=None, help='load pretrained optimizer and scheduler') parser.add_argument( "--attack_type", type=str, default="Normal", help="type of adversarial attack; Normal or FGSM or PGD") parser.add_argument("--eps", type=str, default='2', help="epsilon value for FGSM") parser.add_argument("--alpha", type=float, default=0.5) parser.add_argument( "--sign_grad", type=bool, default=True, help="whether use signed gradient and alpha=2.5*eps/iter in PGD") parser.add_argument("--iterations", type=int, default=10) parser.add_argument("--irl", type=int, default=0) parser.add_argument("--irl_noise_type", type=str, default='in_domain') parser.add_argument("--irl_loss_type", type=int, default=1) parser.add_argument("--irl_attack_type", type=str, default='fgsm', help="type of attack to be implemented in small case") parser.add_argument("--irl_alpha", type=float, default='0.8') parser.add_argument("--irl_beta", type=float, default='0.2') parser.add_argument("--irl_gamma", type=float, default='1') parser.add_argument("--irl_alt", type=int, default=0) parser.add_argument( "--irl_avg", type=int, default=0, help="Set true to average over all layers in irl distance loss") parser.add_argument( "--mix_thre", type=float, default=0.5, help= "percentage of clean data in each mixed batch; range:[0,1], the larger, the more clean data there are in each batch" ) parser.add_argument("--checkpoint_interval", type=int, default=1, help="interval between saving model weights") parser.add_argument("--evaluation_interval", type=int, default=1, help="interval evaluations on validation set") parser.add_argument("--evaluation_attack_interval", type=int, default=3, help="interval evaluations on validation set") parser.add_argument("--evalute_attacktype", type=str, default='FGSM', help="FGSM/Randn/Normal") parser = parser.parse_args(args) print(parser) eps = convert_eps(parser.eps) training_name = train_name(parser, eps) os.makedirs(f"checkpoints/retina/{training_name}", exist_ok=False) print(f"checkpoints stored as {training_name}") # Get data configuration data_config = parse_data_config(parser.data_config) train_path = data_config["train"] val_path = data_config["val"] class_names = data_config["names"] dataset_train = CSVDataset(train_file=train_path, class_list=class_names, transform=transforms.Compose( [Augmenter(), Resizer()])) dataset_val = CSVDataset(train_file=val_path, class_list=class_names, transform=transforms.Compose([Resizer()])) sampler = AspectRatioBasedSampler(dataset_train, batch_size=parser.batch_size, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=parser.n_cpu, collate_fn=collater, batch_sampler=sampler) # sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=parser.batch_size, drop_last=False) # 
dataloader_val = DataLoader(dataset_val, num_workers=parser.n_cpu, collate_fn=collater, batch_sampler=sampler_val) if parser.pretrained_model: retinanet = torch.load(parser.pretrained_model) else: if parser.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 101: retinanet = model.resnet101( num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 152: retinanet = model.resnet152( num_classes=dataset_train.num_classes(), pretrained=True) else: raise ValueError( 'Unsupported model depth, must be one of 18, 34, 50, 101, 152') if torch.cuda.is_available(): retinanet = retinanet.cuda() retinanet = torch.nn.DataParallel(retinanet).cuda() else: retinanet = torch.nn.DataParallel(retinanet) use_irl = bool(parser.irl) irl_alt = bool(parser.irl_alt) irl_avg = bool(parser.irl_avg) if use_irl: irl_obj = IRL(noise_types=[parser.irl_noise_type], adv_attack_type=parser.irl_attack_type, model_type='retina', loss_type=parser.irl_loss_type, epsilon=eps, alpha=parser.alpha, iterations=parser.iterations) act_file_name = ('retina_fnl_layers-resnet4_loss-type' + str(parser.irl_loss_type) + '_' + parser.irl_noise_type + '_alt' + str(parser.irl_alt)) act_file_name += f"-alpha{parser.irl_alpha}-beta{parser.irl_beta}-gamma{parser.irl_gamma}_activations.txt" print("Saving activations in: ", str(act_file_name)) retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr=1e-5) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) if parser.optim_scheduler is not None: optim_scheduler = torch.load(parser.optim_scheduler) optimizer.load_state_dict(optim_scheduler['optimizer']) scheduler.load_state_dict(optim_scheduler['scheduler']) loss_hist = collections.deque(maxlen=500) retinanet.train() retinanet.module.freeze_bn() print('Num training images: {}'.format(len(dataset_train))) print('Starting training.') device = torch.device("cuda" if torch.cuda.is_available() else "cpu") for epoch_num in range(parser.epochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] for iter_num, data in enumerate(dataloader_train): optimizer.zero_grad() batch_mixed = mix_batch(retinanet, data['img'], data['annot'], data['img'].shape[0], epsilon=eps, alpha=parser.alpha, mix_thre=parser.mix_thre, attack_type=parser.attack_type, model_type='retina', sign_grad=parser.sign_grad) if use_irl and (not irl_alt or epoch_num % 2 == 1): classification_loss, regression_loss, activations = retinanet( [Variable(batch_mixed.to(device)), data['annot']], send_activations=True) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() noise_loss, distance_loss = irl_obj.compute_losses( model=retinanet, images=data['img'], targets=data['annot'], activations=activations, epoch_num=epoch_num, batch_num=iter_num, training_name=act_file_name, avg_layers=irl_avg) regular_loss = classification_loss + regression_loss loss = parser.irl_alpha * regular_loss + parser.irl_beta * noise_loss + parser.irl_gamma * distance_loss else: classification_loss, regression_loss = retinanet( [Variable(batch_mixed.to(device)), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 
0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) if iter_num % 500 == 0: if use_irl and (not irl_alt or epoch_num % 2 == 1): print( 'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Noise Loss: {:1.5f} | Distance Loss: {:1.5f} | Running loss: {:1.5f}' .format(epoch_num, iter_num, float(parser.irl_alpha * classification_loss), float(parser.irl_alpha * regression_loss), float(parser.irl_beta * noise_loss), float(parser.irl_gamma * distance_loss), np.mean(loss_hist))) del noise_loss del distance_loss else: print( 'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}' .format(epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist))) del classification_loss del regression_loss scheduler.step(np.mean(epoch_loss)) if epoch_num % parser.checkpoint_interval == 0: torch.save( retinanet.module, f"checkpoints/retina/{training_name}/ckpt_{epoch_num}.pt") torch.save( { 'optimizer': optimizer.state_dict(), 'scheduler': scheduler.state_dict() }, f"checkpoints/retina/{training_name}/optim_scheduler_{epoch_num}.pt" ) if epoch_num % parser.evaluation_interval == 0: print("\n------Evaluating model------") AP, mAP = csv_eval.evaluate(dataset_val, retinanet) print('Epoch: {} | AP: {} | mAP: {}'.format(epoch_num, AP, mAP)) # write logs of the model to log.txt, format: epoch number, mAP, AP per class print( f"{epoch_num},{mAP},{AP[0][0]},{AP[1][0]},{AP[2][0]},{AP[3][0]},{AP[4][0]},{AP[5][0]},{AP[6][0]},{AP[7][0]},{AP[8][0]},{AP[9][0]}\n" ) with open(f"checkpoints/retina/{training_name}/log.txt", 'a+') as log: log.write( f"{epoch_num},{mAP},{AP[0][0]},{AP[1][0]},{AP[2][0]},{AP[3][0]},{AP[4][0]},{AP[5][0]},{AP[6][0]},{AP[7][0]},{AP[8][0]},{AP[9][0]}\n" ) # Evaluating the model on noise now if parser.evalute_attacktype and epoch_num % parser.evaluation_attack_interval == 0: print("\n-------Evaluating on noise-----") AP_n, mAP_n = csv_eval.evaluate( dataset_val, retinanet, perturbed=parser.evalute_attacktype, _epsilon=eps) print('Noise Epoch: {} | AP: {} | mAP: {}'.format( epoch_num, AP_n, mAP_n)) with open(f"checkpoints/retina/{training_name}/log_attack.txt", 'a+') as log: log.write( f"{epoch_num},{mAP_n},{AP_n[0][0]},{AP_n[1][0]},{AP_n[2][0]},{AP_n[3][0]},{AP_n[4][0]},{AP_n[5][0]},{AP_n[6][0]},{AP_n[7][0]},{AP_n[8][0]},{AP_n[9][0]}\n" )
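# The --attack_type/--eps/--alpha options above drive adversarial training via
# mix_batch(), whose implementation is not shown. A minimal FGSM-style sketch of
# how such a perturbation is usually built (generic; loss_fn, images and targets
# are placeholders, not the RetinaNet loss interface used above):
import torch

def fgsm_perturb(model, images, targets, loss_fn, eps):
    images = images.clone().detach().requires_grad_(True)
    loss = loss_fn(model(images), targets)
    loss.backward()
    # One signed-gradient ascent step on the input, detached so the training
    # step does not backpropagate through the attack.
    return (images + eps * images.grad.sign()).detach()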
def main(args=None): parser = argparse.ArgumentParser( description='Simple training script for training a RetinaNet network.') parser.add_argument( '--csv_train', help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)') parser.add_argument( '--csv_val', help= 'Path to file containing validation annotations (optional, see readme)' ) parser.add_argument('--configfile', help='Path to the config file', default='config.txt', type=str) parser.add_argument( '--model', help= 'Path to the pretrained model file state dict where training must start from, ' 'if you want to use a pretrained retinanet.', default=None, type=str) parser = parser.parse_args(args) configs = configparser.ConfigParser() configs.read(parser.configfile) try: batchsize = int(configs['TRAINING']['batchsize']) depth = int(configs['TRAINING']['depth']) maxepochs = int(configs['TRAINING']['maxepochs']) maxside = int(configs['TRAINING']['maxside']) minside = int(configs['TRAINING']['minside']) savepath = configs['TRAINING']['savepath'] lr_start = float(configs['TRAINING']['lr_start']) lr_reduce_on_plateau_factor = float( configs['TRAINING']['lr_reduce_on_plateau_factor']) lr_reduce_on_plateau_patience = int( configs['TRAINING']['lr_reduce_on_plateau_patience']) earlystopping_patience = int( configs['TRAINING']['earlystopping_patience']) try: ratios = json.loads(configs['MODEL']['ratios']) scales = json.loads(configs['MODEL']['scales']) except Exception as e: print(e) print('USING DEFAULT RATIOS AND SCALES') ratios = None scales = None except Exception as e: print(e) print( 'CONFIG FILE IS INVALID. PLEASE REFER TO THE EXAMPLE CONFIG FILE AT config.txt' ) sys.exit() model_save_dir = datetime.now().strftime( "%d_%b_%Y_%H_%M") if savepath == 'datetime' else savepath if not os.path.exists(model_save_dir): os.makedirs(model_save_dir, exist_ok=True) # Copy the config file into the model save directory shutil.copy(parser.configfile, os.path.join(model_save_dir, 'config.txt')) # Create the data loaders if parser.csv_train is None: raise ValueError('Must provide --csv_train,') if parser.csv_classes is None: raise ValueError('Must provide --csv_classes,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose([ Normalizer(), Augmenter(), Resizer(min_side=minside, max_side=maxside) ])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose([ Normalizer(), Resizer(min_side=minside, max_side=maxside) ])) sampler = AspectRatioBasedSampler(dataset_train, batch_size=batchsize, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler) dataloader_val = None if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val) # Create the model if depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True, ratios=ratios, scales=scales, no_nms=False) elif depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True, ratios=ratios, scales=scales, no_nms=False) elif depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True, 
ratios=ratios, scales=scales, no_nms=False) elif depth == 101: retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True, ratios=ratios, scales=scales, no_nms=False) elif depth == 152: retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True, ratios=ratios, scales=scales, no_nms=False) else: raise ValueError( 'Unsupported model depth, must be one of 18, 34, 50, 101, 152') use_gpu = True if use_gpu: if torch.cuda.is_available(): retinanet = retinanet.cuda() if torch.cuda.is_available(): retinanet = torch.nn.DataParallel(retinanet).cuda() else: retinanet = torch.nn.DataParallel(retinanet) retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr=lr_start) scheduler = optim.lr_scheduler.ReduceLROnPlateau( optimizer, patience=lr_reduce_on_plateau_patience, verbose=True, factor=lr_reduce_on_plateau_factor, cooldown=1, min_lr=1e-10) loss_hist = collections.deque(maxlen=500) if (parser.model): print( f'TRYING TO LOAD PRETRAINED MODEL AVAILABLE AT: {parser.model}. MAKE SURE THE MODEL CONFIGS MATCH!!!!!' ) if torch.cuda.is_available(): retinanet.load_state_dict(torch.load(parser.model)) else: retinanet.load_state_dict( torch.load(parser.model, map_location=torch.device('cpu'))) print(f'LOADED PRETRAINED MODEL : {parser.model}') retinanet.train() retinanet.module.freeze_bn() earlystopping = EarlyStopping(patience=earlystopping_patience, verbose=True, delta=1e-10, path=os.path.join(model_save_dir, 'best_model.pt')) print('Num training images: {}'.format(len(dataset_train))) loss_dict = OrderedDict() val_loss_dict = OrderedDict() for epoch_num in range(maxepochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] epoch_val_loss = [] for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() if torch.cuda.is_available(): classification_loss, regression_loss = retinanet( [data['img'].cuda().float(), data['annot']]) else: classification_loss, regression_loss = retinanet( [data['img'].float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) print( 'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}' .format(epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist)), end='\r', flush=True) del classification_loss del regression_loss except Exception as e: print(e) continue if (len(epoch_loss)): loss_dict[epoch_num] = np.mean(epoch_loss) print('') if dataloader_val is not None: print('Evaluating dataset') for iter_num, data in enumerate(dataloader_val): try: with torch.no_grad(): if torch.cuda.is_available(): val_classification_loss, val_regression_loss = retinanet( [data['img'].cuda().float(), data['annot']]) else: val_classification_loss, val_regression_loss = retinanet( [data['img'].float(), data['annot']]) val_classification_loss = val_classification_loss.mean( ) val_regression_loss = val_classification_loss.mean() val_loss = val_classification_loss + val_regression_loss print('Validation Loss: {:1.5f}'.format(val_loss), end='\r', flush=True) epoch_val_loss.append(float(val_loss)) except Exception as e: print(e) continue print('') if (len(epoch_val_loss)): val_loss_dict[epoch_num] = np.mean(epoch_val_loss) retinanet.eval() mAP = 
csv_eval.evaluate(dataset_val, retinanet) print('-----------------') print(mAP) print('-----------------') scheduler.step(np.mean(epoch_loss)) model_save_path = os.path.join(model_save_dir, f'retinanet_{epoch_num}.pt') save_model(retinanet, model_save_path) print(f'Saved model of epoch {epoch_num} to {model_save_path}') earlystopping(val_loss_dict[epoch_num], retinanet) if earlystopping.early_stop: print("Early stopping") break retinanet.eval() save_model(retinanet, os.path.join(model_save_dir, 'model_final.pt')) with open(os.path.join(model_save_dir, 'loss_history.txt'), 'w') as f: for epoch_num, loss in loss_dict.items(): f.write(f'{epoch_num}:{loss} \n') with open(os.path.join(model_save_dir, 'val_loss_history.txt'), 'w') as f: for epoch_num, loss in val_loss_dict.items(): f.write(f'{epoch_num}:{loss} \n') # Write configs to model save directory configs = configparser.ConfigParser() configs.read(os.path.join(model_save_dir, 'config.txt')) configs['TRAINING']['num_classes'] = str(dataset_train.num_classes()) for iter_num, data in enumerate(dataloader_train): configs['MODEL']['input_shape'] = str( list(data['img'].float().numpy().shape[1:])) break # Write class mapping to the model configs. with open(parser.csv_classes, 'r') as f: labels = load_classes_from_csv_reader(csv.reader(f, delimiter=',')) configs['LABELMAP'] = {str(i): str(j) for i, j in labels.items()} with open(os.path.join(model_save_dir, 'config.txt'), 'w') as configfile: configs.write(configfile)
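# EarlyStopping above is used as a callable that tracks the best validation loss
# and checkpoints the best model. A minimal sketch of such a helper
# (hypothetical; the imported implementation may differ in details):
import torch

class EarlyStopping:
    def __init__(self, patience=5, verbose=False, delta=0.0, path='best_model.pt'):
        self.patience, self.verbose, self.delta, self.path = patience, verbose, delta, path
        self.best_loss = float('inf')
        self.counter = 0
        self.early_stop = False

    def __call__(self, val_loss, model):
        if val_loss < self.best_loss - self.delta:
            # Improvement: reset the counter and checkpoint the model.
            self.best_loss = val_loss
            self.counter = 0
            torch.save(model.state_dict(), self.path)
            if self.verbose:
                print(f'Validation loss improved to {val_loss:.6f}; checkpoint saved.')
        else:
            self.counter += 1
            if self.counter >= self.patience:
                self.early_stop = True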
def main(args=None): parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.') parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.', default='show') parser.add_argument('--coco_path', help='Path to COCO directory', default='/mnt/marathon') parser.add_argument('--image_size', help='image size', type=int, nargs=2, default=IMAGE_SIZE) parser.add_argument('--limit', help='limit', type=int, nargs=2, default=(0, 0)) parser.add_argument('--batch_size', help='batch size', type=int, default=BATCH_SIZE) parser.add_argument('--num_works', help='num works', type=int, default=NUM_WORKERS) parser.add_argument('--num_classes', help='num classes', type=int, default=3) parser.add_argument('--merge_val', help='merge_val', type=int, default=MERGE_VAL) parser.add_argument('--do_aug', help='do_aug', type=int, default=DO_AUG) parser.add_argument('--lr_choice', default=LR_CHOICE, choices=['lr_scheduler', 'lr_map', 'lr_fn'], type=str) parser.add_argument('--lr', help='lr', type=float, default=LR) parser.add_argument("--lr_map", dest="lr_map", action=StoreDictKeyPair, default=LR_MAP) parser.add_argument("--lr_fn", dest="lr_fn", action=StoreDictKeyPair, default=LR_FN) parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=DEPTH) parser.add_argument('--epochs', help='Number of epochs', type=int, default=EPOCHS) parser = parser.parse_args(args) print('dataset:', parser.dataset) print('depth:', parser.depth) print('epochs:', parser.epochs) print('image_size:', parser.image_size) print('batch_size:', parser.batch_size) print('num_works:', parser.num_works) print('merge_val:', parser.merge_val) print('do_aug:', parser.do_aug) print('lr_choice:', parser.lr_choice) print('lr:', parser.lr) print('lr_map:', parser.lr_map) print('lr_fn:', parser.lr_fn) print('num_classes:', parser.num_classes) print('limit:', parser.limit) # Create the data loaders # dataset_train, _ = torch.utils.data.random_split(dataset_train, [NUM_COCO_DATASET_TRAIN, len(dataset_train) - NUM_COCO_DATASET_TRAIN]) # dataset_val, _ = torch.utils.data.random_split(dataset_val, [NUM_COCO_DATASET_VAL, len(dataset_val) - NUM_COCO_DATASET_VAL]) transform_train = None transform_vail = None collate_fn = None if parser.do_aug: transform_train = get_augumentation('train', parser.image_size[0], parser.image_size[1]) transform_vail = get_augumentation('test', parser.image_size[0], parser.image_size[1]) collate_fn = detection_collate else: transform_train = transforms.Compose([ # Normalizer(), # Augmenter(), Resizer(*parser.image_size)]) transform_vail = transforms.Compose([ # Normalizer(), Resizer(*parser.image_size)]) collate_fn = collater if parser.dataset == 'h5': dataset_train = H5CoCoDataset('{}/train_small.hdf5'.format(parser.coco_path), 'train_small') dataset_val = H5CoCoDataset('{}/test.hdf5'.format(parser.coco_path), 'test') else: dataset_train = CocoDataset(parser.coco_path, set_name='train_small', do_aug=parser.do_aug, transform=transform_train, limit_len=parser.limit[0]) dataset_val = CocoDataset(parser.coco_path, set_name='test', do_aug=parser.do_aug, transform=transform_vail, limit_len=parser.limit[1]) # 混合val if parser.merge_val: dataset_train += dataset_val print('training images: {}'.format(len(dataset_train))) print('val images: {}'.format(len(dataset_val))) steps_pre_epoch = len(dataset_train) // parser.batch_size print('steps_pre_epoch:', steps_pre_epoch) sampler = AspectRatioBasedSampler(dataset_train, 
batch_size=parser.batch_size, drop_last=False) dataloader_train = DataLoader(dataset_train, batch_size=1, num_workers=parser.num_works, shuffle=False, collate_fn=collate_fn, batch_sampler=sampler) # Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes=parser.num_classes, pretrained=PRETRAINED) elif parser.depth == 34: retinanet = model.resnet34(num_classes=parser.num_classes, pretrained=PRETRAINED) elif parser.depth == 50: retinanet = model.resnet50(num_classes=parser.num_classes, pretrained=PRETRAINED) elif parser.depth == 101250: retinanet = model.resnet101with50weight(num_classes=parser.num_classes, pretrained=PRETRAINED) elif parser.depth == 101: retinanet = model.resnet101(num_classes=parser.num_classes, pretrained=PRETRAINED) elif parser.depth == 152: retinanet = model.resnet152(num_classes=parser.num_classes, pretrained=PRETRAINED) else: raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152') retinanet = retinanet.cuda() retinanet = torch.nn.DataParallel(retinanet).cuda() retinanet.training = True if parser.lr_choice == 'lr_map': lr_now = lr_change_map(1, 0, parser.lr_map) elif parser.lr_choice == 'lr_fn': lr_now = float(parser.lr_fn['LR_START']) elif parser.lr_choice == 'lr_scheduler': lr_now = parser.lr # optimizer = optim.Adam(retinanet.parameters(), lr=lr_now) optimizer = optim.AdamW(retinanet.parameters(), lr=lr_now) # optimizer = optim.SGD(retinanet.parameters(), lr=lr_now, momentum=0.9, weight_decay=5e-4) # optimizer = optim.SGD(retinanet.parameters(), lr=lr_now) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=PATIENCE, factor=FACTOR, verbose=True) loss_hist = collections.deque(maxlen=500) retinanet.train() retinanet.module.freeze_bn() iteration_loss_path = 'iteration_loss.csv' if os.path.isfile(iteration_loss_path): os.remove(iteration_loss_path) epoch_loss_path = 'epoch_loss.csv' if os.path.isfile(epoch_loss_path): os.remove(epoch_loss_path) eval_train_path = 'eval_train_result.csv' if os.path.isfile(eval_train_path): os.remove(eval_train_path) eval_val_path = 'eval_val_result.csv' if os.path.isfile(eval_val_path): os.remove(eval_val_path) USE_KAGGLE = True if os.environ.get('KAGGLE_KERNEL_RUN_TYPE', False) else False if USE_KAGGLE: iteration_loss_path = '/kaggle/working/' + iteration_loss_path epoch_loss_path = '/kaggle/working/' + epoch_loss_path eval_val_path = '/kaggle/working/' + eval_val_path eval_train_path = '/kaggle/working/' + eval_train_path with open(epoch_loss_path, 'a+') as epoch_loss_file, \ open(iteration_loss_path, 'a+') as iteration_loss_file, \ open(eval_train_path, 'a+') as eval_train_file, \ open(eval_val_path, 'a+') as eval_val_file: epoch_loss_file.write('epoch_num,mean_epoch_loss\n') iteration_loss_file.write('epoch_num,iteration,classification_loss,regression_loss,iteration_loss\n') eval_train_file.write('epoch_num,map50\n') eval_val_file.write('epoch_num,map50\n') for epoch_num in range(parser.epochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] for iter_num, data in enumerate(dataloader_train): optimizer.zero_grad() classification_loss, regression_loss = retinanet([data['img'].cuda().float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) iteration_loss = np.mean(loss_hist) 
print('\rEpoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'.format( epoch_num+1, iter_num+1, float(classification_loss), float(regression_loss), iteration_loss), end=' ' * 50) iteration_loss_file.write('{},{},{:1.5f},{:1.5f},{:1.5f}\n'.format(epoch_num+1, epoch_num * steps_pre_epoch + (iter_num+1), float(classification_loss), float(regression_loss), iteration_loss)) iteration_loss_file.flush() del classification_loss del regression_loss mean_epoch_loss = np.mean(epoch_loss) epoch_loss_file.write('{},{:1.5f}\n'.format(epoch_num+1, mean_epoch_loss)) epoch_loss_file.flush() if parser.lr_choice == 'lr_map': lr_now = lr_change_map(epoch_num+1, lr_now, parser.lr_map) adjust_learning_rate(optimizer, lr_now) elif parser.lr_choice == 'lr_fn': lr_now = lrfn(epoch_num+1, parser.lr_fn) adjust_learning_rate(optimizer, lr_now) elif parser.lr_choice == 'lr_scheduler': scheduler.step(mean_epoch_loss) # if parser.dataset != 'show': # print('Evaluating dataset_train') # coco_eval.evaluate_coco(dataset_train, retinanet, parser.dataset, parser.do_aug, eval_train_file, epoch_num) print('Evaluating dataset_val') coco_eval.evaluate_coco(dataset_val, retinanet, parser.dataset, parser.do_aug, eval_val_file, epoch_num) return parser
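# lr_change_map(), lrfn() and adjust_learning_rate() above are project helpers
# that are not shown. A minimal sketch of what they are assumed to do: pick a
# learning rate for the coming epoch and overwrite it in every parameter group.
def adjust_learning_rate(optimizer, new_lr):
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr

def lr_change_map(epoch, current_lr, lr_map):
    # lr_map is assumed to map epoch thresholds to learning rates,
    # e.g. {'1': 1e-4, '10': 1e-5}: use the last threshold <= epoch.
    new_lr = current_lr
    for start_epoch, lr in sorted(lr_map.items(), key=lambda kv: int(kv[0])):
        if epoch >= int(start_epoch):
            new_lr = float(lr)
    return new_lr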
def main(args=None): parser = argparse.ArgumentParser( description= 'RegiGraph Pytorch Implementation Training Script. - Ahmed Nassar (ETHZ, IRISA).' ) parser.add_argument("--batch_size", type=int, default=4, help="The number of images per batch") parser.add_argument("--lr", type=float, default=1e-4) parser.add_argument( '--dataset_root', default='../datasets', help= 'Dataset root directory path [../datasets/VOC, ../datasets/mapillary]') parser.add_argument('--dataset', default='Pasadena', choices=['Pasadena', 'Pasadena_Aerial', 'mapillary'], type=str, help='Pasadena, Pasadena_Aerial or mapillary') parser.add_argument("--overfit", type=int, default="0") parser.add_argument( '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50) parser.add_argument("--num_epochs", type=int, default=100) parser.add_argument("--log_path", type=str, default="tensorboard/") parser.add_argument("--saved_path", type=str, default="trained_models") parser.add_argument("--test_interval", type=int, default=1, help="Number of epoches between testing phases") parser.add_argument( "--es_min_delta", type=float, default=0.0, help= "Early stopping's parameter: minimum change loss to qualify as an improvement" ) parser.add_argument( "--es_patience", type=int, default=0, help= "Early stopping's parameter: number of epochs with no improvement after which training will be stopped. Set to 0 to disable this technique." ) parser.add_argument("--cluster", type=int, default=0) opt = parser.parse_args(args) if torch.cuda.is_available(): num_gpus = torch.cuda.device_count() torch.cuda.manual_seed(123) else: torch.manual_seed(123) if (opt.dataset == 'Pasadena' or opt.dataset == 'mapillary' or opt.dataset == 'Pasadena_Aerial'): train_dataset = VOCDetection(root=opt.dataset_root, overfit=opt.overfit, image_sets="trainval", transform=transforms.Compose([ Normalizer(), Augmenter(), Resizer() ]), dataset_name=opt.dataset) valid_dataset = VOCDetection(root=opt.dataset_root, overfit=opt.overfit, image_sets="val", transform=transforms.Compose( [Normalizer(), Resizer()]), dataset_name=opt.dataset) else: raise ValueError( 'Dataset type not understood (must be csv or coco), exiting.') # sampler = AspectRatioBasedSampler(train_dataset, batch_size=2, drop_last=False) training_params = { "batch_size": opt.batch_size, "shuffle": False, "drop_last": True, "collate_fn": collater, "num_workers": 4 } training_generator = DataLoader(train_dataset, **training_params) if valid_dataset is not None: test_params = { "batch_size": opt.batch_size, "shuffle": False, "drop_last": False, "collate_fn": collater, "num_workers": 4 } # sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) test_generator = DataLoader(valid_dataset, **test_params) # Create the model if opt.depth == 18: retinanet = model.resnet18(num_classes=train_dataset.num_classes(), pretrained=True) elif opt.depth == 34: retinanet = model.resnet34(num_classes=train_dataset.num_classes(), pretrained=True) elif opt.depth == 50: retinanet = model.resnet50(num_classes=train_dataset.num_classes(), pretrained=True) elif opt.depth == 101: retinanet = model.resnet101(num_classes=train_dataset.num_classes(), pretrained=True) elif opt.depth == 152: retinanet = model.resnet152(num_classes=train_dataset.num_classes(), pretrained=True) else: raise ValueError( 'Unsupported model depth, must be one of 18, 34, 50, 101, 152') use_gpu = True if use_gpu: if torch.cuda.is_available(): retinanet = retinanet.cuda() if torch.cuda.is_available(): retinanet = 
torch.nn.DataParallel(retinanet).cuda() else: retinanet = torch.nn.DataParallel(retinanet) if os.path.isdir(opt.log_path): shutil.rmtree(opt.log_path) os.makedirs(opt.log_path) if not os.path.isdir(opt.saved_path): os.makedirs(opt.saved_path) retinanet.training = True writer = SummaryWriter(opt.log_path + "regigraph_bs_" + str(opt.batch_size) + "_dataset_" + opt.dataset + "_backbone_" + str(opt.depth)) optimizer = optim.Adam(retinanet.parameters(), lr=1e-5) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) loss_hist = collections.deque(maxlen=500) best_loss = 1e5 best_epoch = 0 retinanet.train() retinanet.module.freeze_bn() print('Num training images: {}'.format(len(train_dataset))) num_iter_per_epoch = len(training_generator) for epoch in range(opt.num_epochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] progress_bar = tqdm(training_generator) for iter, data in enumerate(progress_bar): try: optimizer.zero_grad() if torch.cuda.is_available(): classification_loss, regression_loss, graph_loss = retinanet( [ data['img'].cuda().float(), data['annot'], data['geo'], data['batch_map'] ]) else: classification_loss, regression_loss, graph_loss = retinanet( [ data['img'].float(), data['annot'], data['geo'], data['batch_map'] ]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() graph_loss = graph_loss.mean() loss = classification_loss + regression_loss + graph_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) total_loss = np.mean(epoch_loss) if opt.cluster == 0: progress_bar.set_description( 'Epoch: {}/{}. Iteration: {}/{}. Cls loss: {:.5f}. Reg loss: {:.5f}. Graph loss: {:.5f}. 
Batch loss: {:.5f} Total loss: {:.5f}' .format(epoch + 1, opt.num_epochs, iter + 1, num_iter_per_epoch, classification_loss, regression_loss, graph_loss, float(loss), total_loss)) writer.add_scalar('Train/Total_loss', total_loss, epoch * num_iter_per_epoch + iter) writer.add_scalar('Train/Regression_loss', regression_loss, epoch * num_iter_per_epoch + iter) writer.add_scalar('Train/Classfication_loss (focal loss)', classification_loss, epoch * num_iter_per_epoch + iter) writer.add_scalar('Train/Graph_loss', graph_loss, epoch * num_iter_per_epoch + iter) del classification_loss del regression_loss del graph_loss except Exception as e: print(e) continue scheduler.step(np.mean(epoch_loss)) if epoch % opt.test_interval == 0: retinanet.eval() loss_regression_ls = [] loss_classification_ls = [] loss_graph_ls = [] for iter, data in enumerate(test_generator): with torch.no_grad(): if torch.cuda.is_available(): classification_loss, regression_loss, graph_loss = retinanet( [ data['img'].cuda().float(), data['annot'], data['geo'], data['batch_map'] ]) else: classification_loss, regression_loss, graph_loss = retinanet( [ data['img'].float(), data['annot'], data['geo'], data['batch_map'] ]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() graph_loss = graph_loss.mean() loss_classification_ls.append(float(classification_loss)) loss_regression_ls.append(float(regression_loss)) loss_graph_ls.append(float(graph_loss)) # print(len(loss_classification_ls),len(loss_regression_ls),len(loss_graph_ls)) cls_loss = np.mean(loss_classification_ls) reg_loss = np.mean(loss_regression_ls) gph_loss = np.mean(loss_graph_ls) loss = cls_loss + reg_loss + gph_loss print( '- Val Epoch: {}/{}. Classification loss: {:1.5f}. Regression loss: {:1.5f}. * Graph loss: {:1.5f}. Total loss: {:1.5f}' .format(epoch + 1, opt.num_epochs, cls_loss, reg_loss, gph_loss, np.mean(loss))) writer.add_scalar('Test/Total_loss', loss, epoch) writer.add_scalar('Test/Regression_loss', reg_loss, epoch) writer.add_scalar('Test/Graph_loss (graph loss)', gph_loss, epoch) writer.add_scalar('Test/Classfication_loss (focal loss)', cls_loss, epoch) if loss + opt.es_min_delta < best_loss: best_loss = loss best_epoch = epoch # mAP = csv_eval.evaluate(valid_dataset, retinanet) # print(mAP) torch.save( retinanet.module, os.path.join( opt.saved_path, "regigraph_bs_" + str(opt.batch_size) + "_dataset_" + opt.dataset + "_epoch_" + str(epoch + 1) + "_backbone_" + str(opt.depth) + ".pth")) # Early stopping if epoch - best_epoch > opt.es_patience > 0: print( "Stop training at epoch {}. The lowest loss achieved is {}" .format(epoch, loss)) break writer.close()
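# collater above (shared by several of these training scripts) has to batch
# samples whose annotation counts differ. A minimal sketch of such a collate
# function (an assumption about its behaviour: images are already resized to a
# common shape, annotations are Nx5 tensors padded with -1):
import torch

def collate_padded(batch):
    images = torch.stack([sample['img'] for sample in batch], dim=0)
    max_annots = max(sample['annot'].shape[0] for sample in batch)
    padded = torch.full((len(batch), max(max_annots, 1), 5), -1.0)
    for i, sample in enumerate(batch):
        n = sample['annot'].shape[0]
        if n > 0:
            padded[i, :n, :] = sample['annot']
    return {'img': images, 'annot': padded}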
def main(args=None): parser = argparse.ArgumentParser( description='Simple training script for training a RetinaNet network.') parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.') # parser.add_argument('--coco_path', help='Path to COCO directory') parser.add_argument( '--dataset_root', default='/root/data/VOCdevkit/', help= 'Dataset root directory path [/root/data/VOCdevkit/, /root/data/coco/, /root/data/FLIR_ADAS]' ) parser.add_argument( '--csv_train', help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)') parser.add_argument( '--csv_val', help= 'Path to file containing validation annotations (optional, see readme)' ) parser.add_argument( '--resume', default=None, type=str, help='Checkpoint state_dict file to resume training from') parser.add_argument( '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50) parser.add_argument('--batch_size', default=16, type=int, help='Batch size for training') parser.add_argument('--epochs', help='Number of epochs', type=int, default=100) parser.add_argument('--lr', '--learning_rate', default=1e-4, type=float, help='initial learning rate') parser.add_argument('--weight_decay', default=5e-4, type=float, help='Weight decay') parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)') parser.add_argument("--log", default=False, action="store_true", help="Write log file.") parser = parser.parse_args(args) network_name = 'RetinaNet-Res{}'.format(parser.depth) # print('network_name:', network_name) net_logger = logging.getLogger('Network Logger') formatter = logging.Formatter(LOGGING_FORMAT) streamhandler = logging.StreamHandler() streamhandler.setFormatter(formatter) net_logger.addHandler(streamhandler) if parser.log: net_logger.setLevel(logging.INFO) # logging.basicConfig(level=logging.DEBUG, format=LOGGING_FORMAT, # filename=os.path.join('log', '{}.log'.format(network_name)), filemode='a') filehandler = logging.FileHandler(os.path.join( 'log', '{}.log'.format(network_name)), mode='a') filehandler.setFormatter(formatter) net_logger.addHandler(filehandler) net_logger.info('Network Name: {:>20}'.format(network_name)) # Create the data loaders if parser.dataset == 'coco': if parser.dataset_root is None: raise ValueError( 'Must provide --dataset_root when training on COCO,') dataset_train = CocoDataset(parser.dataset_root, set_name='train2017', transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) dataset_val = CocoDataset(parser.dataset_root, set_name='val2017', transform=transforms.Compose( [Normalizer(), Resizer()])) elif parser.dataset == 'FLIR': if parser.dataset_root is None: raise ValueError( 'Must provide --dataset_root when training on FLIR,') _scale = 1.2 dataset_train = FLIRDataset(parser.dataset_root, set_name='train', transform=transforms.Compose([ Normalizer(), Augmenter(), Resizer(min_side=int(512 * _scale), max_side=int(640 * _scale), logger=net_logger) ])) dataset_val = FLIRDataset(parser.dataset_root, set_name='val', transform=transforms.Compose([ Normalizer(), Resizer(min_side=int(512 * _scale), max_side=int(640 * _scale)) ])) elif parser.dataset == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on COCO,') if parser.csv_classes is None: raise ValueError( 'Must provide --csv_classes when training on COCO,') dataset_train = 
CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Resizer()])) else: raise ValueError( 'Dataset type not understood (must be FLIR, COCO or csv), exiting.' ) # Original RetinaNet code # sampler = AspectRatioBasedSampler(dataset_train, batch_size=2, drop_last=False) # dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler) # if dataset_val is not None: # sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) # dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val) dataloader_train = DataLoader(dataset_train, batch_size=parser.batch_size, num_workers=parser.workers, shuffle=True, collate_fn=collater, pin_memory=True) dataloader_val = DataLoader(dataset_val, batch_size=1, num_workers=parser.workers, shuffle=False, collate_fn=collater, pin_memory=True) build_param = {'logger': net_logger} if parser.resume is not None: net_logger.info('Loading Checkpoint : {}'.format(parser.resume)) retinanet = torch.load(parser.resume) s_b = parser.resume.rindex('_') s_e = parser.resume.rindex('.') start_epoch = int(parser.resume[s_b + 1:s_e]) + 1 net_logger.info('Continue on {} Epoch'.format(start_epoch)) else: # Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True, **build_param) elif parser.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True, **build_param) elif parser.depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True, **build_param) elif parser.depth == 101: retinanet = model.resnet101( num_classes=dataset_train.num_classes(), pretrained=True, **build_param) elif parser.depth == 152: retinanet = model.resnet152( num_classes=dataset_train.num_classes(), pretrained=True, **build_param) else: raise ValueError( 'Unsupported model depth, must be one of 18, 34, 50, 101, 152') start_epoch = 0 use_gpu = True if use_gpu: if torch.cuda.is_available(): retinanet = retinanet.cuda() if torch.cuda.is_available(): retinanet = torch.nn.DataParallel(retinanet).cuda() else: retinanet = torch.nn.DataParallel(retinanet) retinanet.training = True net_logger.info('Weight Decay : {}'.format(parser.weight_decay)) net_logger.info('Learning Rate : {}'.format(parser.lr)) # optimizer = optim.Adam(retinanet.parameters(), lr=1e-5) optimizer = optim.Adam(retinanet.parameters(), lr=parser.lr, weight_decay=parser.weight_decay) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) loss_hist = collections.deque(maxlen=500) retinanet.train() retinanet.module.freeze_bn() # print('Num training images: {}'.format(len(dataset_train))) net_logger.info('Num Training Images: {}'.format(len(dataset_train))) for epoch_num in range(start_epoch, parser.epochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() # print(data['img'][0,:,:,:].shape) # print(data['annot']) if torch.cuda.is_available(): classification_loss, regression_loss = retinanet( [data['img'].cuda().float(), data['annot']]) else: classification_loss, regression_loss = 
retinanet( [data['img'].float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) if (iter_num % 10 == 0): _log = 'Epoch: {} | Iter: {} | Class loss: {:1.5f} | BBox loss: {:1.5f} | Running loss: {:1.5f}'.format( epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist)) net_logger.info(_log) del classification_loss del regression_loss except Exception as e: print(e) continue if (epoch_num + 1) % 1 == 0: test(dataset_val, retinanet, epoch_num, parser, net_logger) # if parser.dataset == 'coco': # print('Evaluating dataset') # coco_eval.evaluate_coco(dataset_val, retinanet) # elif parser.dataset == 'csv' and parser.csv_val is not None: # print('Evaluating dataset') # mAP = csv_eval.evaluate(dataset_val, retinanet) scheduler.step(np.mean(epoch_loss)) print('Learning Rate:', str(scheduler._last_lr)) torch.save( retinanet.module, os.path.join( 'saved', '{}_{}_{}.pt'.format(parser.dataset, network_name, epoch_num))) retinanet.eval() torch.save(retinanet, 'model_final.pt')
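# The resume branch above recovers the starting epoch from the checkpoint file
# name via the last "_" and ".". A tiny worked example of that parsing (the file
# name is illustrative, not a real checkpoint):
resume_path = 'saved/FLIR_RetinaNet-Res50_12.pt'
s_b = resume_path.rindex('_')
s_e = resume_path.rindex('.')
start_epoch = int(resume_path[s_b + 1:s_e]) + 1   # -> 13, i.e. continue on epoch 13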
def main(args=None): parser = argparse.ArgumentParser( description='Simple training script for training a RetinaNet network.') parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.') parser.add_argument('--coco_path', help='Path to COCO directory') parser.add_argument( '--csv_train', help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)') parser.add_argument( '--csv_val', help= 'Path to file containing validation annotations (optional, see readme)' ) parser.add_argument('--model_save_path', help='Path to save model', type=str) parser.add_argument( '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50) parser.add_argument('--epochs', help='Number of epochs', type=int, default=100) parser = parser.parse_args(args) # Create the data loaders if parser.dataset == 'coco': if parser.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') dataset_train = CocoDataset(parser.coco_path, set_name='train2017', transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) dataset_val = CocoDataset(parser.coco_path, set_name='val2017', transform=transforms.Compose( [Normalizer(), Resizer()])) elif parser.dataset == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on COCO,') if parser.csv_classes is None: raise ValueError( 'Must provide --csv_classes when training on COCO,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Resizer()])) else: raise ValueError( 'Dataset type not understood (must be csv or coco), exiting.') sampler = AspectRatioBasedSampler(dataset_train, batch_size=8, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val) # Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 101: retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 152: retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True) else: raise ValueError( 'Unsupported model depth, must be one of 18, 34, 50, 101, 152') use_gpu = True if use_gpu: if torch.cuda.is_available(): retinanet = retinanet.cuda() if torch.cuda.is_available(): retinanet = torch.nn.DataParallel(retinanet).cuda() else: retinanet = torch.nn.DataParallel(retinanet) retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr=1e-5) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) loss_hist = collections.deque(maxlen=500) retinanet.train() retinanet.module.freeze_bn() 
print('Num training images: {}'.format(len(dataset_train))) # add draw tensorboard code writer = SummaryWriter(log_dir='./logs/416*416/', flush_secs=60) # if Cuda: # graph_inputs = torch.from_numpy(np.random.rand(1, 3, input_shape[0], input_shape[1])).type( # torch.FloatTensor).cuda() # else: # graph_inputs = torch.from_numpy(np.random.rand(1, 3, input_shape[0], input_shape[1])).type(torch.FloatTensor) # writer.add_graph(model, (graph_inputs,)) # add gap save model count variable n = 0 for epoch_num in range(parser.epochs): n += 1 retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] ### begin calculate train loss for iter_num, data in enumerate(dataloader_train): # try: optimizer.zero_grad() if torch.cuda.is_available(): classification_loss, regression_loss = retinanet( [data['img'].cuda().float(), data['annot']]) else: classification_loss, regression_loss = retinanet( [data['img'].float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) print( 'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}' .format(epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist))) del classification_loss del regression_loss # except Exception as e: # print(e) # continue ### begin calculate valid loss for iter_num, data in enumerate(dataloader_val): # try: optimizer.zero_grad() if torch.cuda.is_available(): classification_loss, regression_loss = retinanet( [data['img'].cuda().float(), data['annot']]) else: classification_loss, regression_loss = retinanet( [data['img'].float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss_hist.append(float(loss)) print( 'Epoch: {} | Iteration: {} | Valid-Classification loss: {:1.5f} | Valid-Regression loss: {:1.5f} | Running Valid loss: {:1.5f}' .format(epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist))) del classification_loss del regression_loss if parser.dataset == 'coco': print('Evaluating dataset') coco_eval.evaluate_coco(dataset_val, retinanet) elif parser.dataset == 'csv' and parser.csv_val is not None: print('Evaluating dataset') mAP = csv_eval.evaluate(dataset_val, retinanet) print('Epoch: {} | mAP: {:.3f}'.format(epoch_num, float(mAP))) scheduler.step(np.mean(epoch_loss)) if n % 10 == 0: torch.save( retinanet.module, parser.model_save_path + '/' + '{}_retinanet_{}_{:.3f}.pt'.format( parser.dataset, epoch_num, mAP)) retinanet.eval() torch.save(retinanet, parser.model_save_path + '/' + 'model_final.pt')
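# The validation loop above still calls optimizer.zero_grad() and builds the
# autograd graph. A leaner sketch of the same pass wrapped in torch.no_grad()
# (the model is deliberately left in train mode so it keeps returning losses
# rather than detections):
import torch

with torch.no_grad():
    for iter_num, data in enumerate(dataloader_val):
        imgs = data['img'].cuda().float() if torch.cuda.is_available() else data['img'].float()
        classification_loss, regression_loss = retinanet([imgs, data['annot']])
        val_loss = classification_loss.mean() + regression_loss.mean()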
def main(args=None): parser = argparse.ArgumentParser( description='Simple training script for training a RetinaNet network.') parser.add_argument( '--dataset', help='Dataset type, must be one of csv or coco.') # dataset type parser.add_argument('--coco_path', help='Path to COCO directory') parser.add_argument( '--csv_train', help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)') parser.add_argument( '--csv_val', help= 'Path to file containing validation annotations (optional, see readme)' ) parser.add_argument( '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50) # select the pretrained backbone depth parser.add_argument('--epochs', help='Number of epochs', type=int, default=100) parser = parser.parse_args(args)
# Create the data loaders if parser.dataset == 'coco': if parser.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') dataset_train = CocoDataset(parser.coco_path, set_name='train2017', transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) dataset_val = CocoDataset(parser.coco_path, set_name='val2017', transform=transforms.Compose( [Normalizer(), Resizer()])) elif parser.dataset == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on CSV,') if parser.csv_classes is None: raise ValueError( 'Must provide --csv_classes when training on CSV,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Resizer()])) else: raise ValueError( 'Dataset type not understood (must be csv or coco), exiting.')
# The sampler decides the ordering and batch size of the training images and returns them grouped into batches sampler = AspectRatioBasedSampler(dataset_train, batch_size=2, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val)
# Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 101: retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 152: retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True) else: raise ValueError( 'Unsupported model depth, must be one of 18, 34, 50, 101, 152')
use_gpu = True if use_gpu: retinanet = retinanet.cuda() # run on multiple GPUs retinanet = torch.nn.DataParallel(retinanet).cuda() retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr=1e-5) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) # collections: module of specialized container datatypes, alternatives to Python's built-in dict, list, set and tuple # collections.deque: a double-ended queue, here capped at 500 entries loss_hist = collections.deque(maxlen=500) # model.train() enables BatchNorm and Dropout; model.eval() disables them
retinanet.train() retinanet.module.freeze_bn() print('Num training images: {}'.format(len(dataset_train))) for epoch_num in range(parser.epochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() classification_loss, regression_loss = retinanet( [data['img'].cuda().float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue # backpropagation loss.backward() # gradient clipping: gradients whose norm exceeds the threshold (0.1 here) are scaled down to it torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) # update all parameters; once gradients have been computed by backward() this can be called optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) print( 'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}' .format(epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist))) del classification_loss del regression_loss except Exception as e: print(e) continue
if parser.dataset == 'coco': print('Evaluating dataset') coco_eval.evaluate_coco(dataset_val, retinanet) elif parser.dataset == 'csv' and parser.csv_val is not None: print('Evaluating dataset') mAP = csv_eval.evaluate(dataset_val, retinanet) # optimizer.step() is usually called once per mini-batch, while scheduler.step() is called once per epoch # optimizer.step() is what actually updates the model; scheduler.step() only adjusts the learning rate scheduler.step(np.mean(epoch_loss)) torch.save(retinanet.module, '{}_retinanet_{}.pt'.format(parser.dataset, epoch_num)) retinanet.eval() torch.save(retinanet, 'model_final.pt')
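A quick usage sketch for the script above, assuming it is saved as a module that exposes main(); the CSV annotation and class-list paths are placeholders:

# Illustrative invocation; all paths are placeholders.
if __name__ == '__main__':
    main([
        '--dataset', 'csv',
        '--csv_train', 'annotations/train.csv',
        '--csv_classes', 'annotations/classes.csv',
        '--csv_val', 'annotations/val.csv',
        '--depth', '50',
        '--epochs', '100',
    ])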
def main(args=None): parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.') parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.') parser.add_argument('--coco_path', help='Path to COCO directory') parser.add_argument('--csv_train', help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)') parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)') parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50) parser.add_argument('--config', help='Config file path that contains scale and ratio values', type=str) parser.add_argument('--epochs', help='Number of epochs', type=int, default=50) parser.add_argument('--init-lr', help='Initial learning rate for training process', type=float, default=1e-3) parser.add_argument('--batch-size', help='Number of input images per step', type=int, default=1) parser.add_argument('--num-workers', help='Number of worker used in dataloader', type=int, default=1) # For resuming training from saved checkpoint parser.add_argument('--resume', help='Whether to resume training from checkpoint', action='store_true') parser.add_argument('--saved-ckpt', help='Resume training from this checkpoint', type=str) parser.add_argument('--multi-gpus', help='Allow to use multi gpus for training task', action='store_true') parser.add_argument('--snapshots', help='Location to save training snapshots', type=str, default="snapshots") parser.add_argument('--log-dir', help='Location to save training logs', type=str, default="logs") parser.add_argument('--expr-augs', help='Allow to use use experiment augmentation methods', action='store_true') parser.add_argument('--aug-methods', help='(Experiment) Augmentation methods to use, separate by comma symbol', type=str, default="rotate,hflip,brightness,contrast") parser.add_argument('--aug-prob', help='Probability of applying (experiment) augmentation in range [0.,1.]', type=float, default=0.5) parser = parser.parse_args(args) train_transforms = [Normalizer(), Resizer(), Augmenter()] # Define transform methods if parser.expr_augs: aug_map = get_aug_map(p=parser.aug_prob) aug_methods = parser.aug_methods.split(",") for aug in aug_methods: if aug in aug_map.keys(): train_transforms.append(aug_map[aug]) else: print(f"{aug} is not available.") # Create the data loaders if parser.dataset == 'coco': if parser.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') dataset_train = CocoDataset(parser.coco_path, set_name='train2017', transform=transforms.Compose(train_transforms)) dataset_val = CocoDataset(parser.coco_path, set_name='val2017', transform=transforms.Compose([Normalizer(), Resizer()])) elif parser.dataset == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on COCO,') if parser.csv_classes is None: raise ValueError('Must provide --csv_classes when training on COCO,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose(train_transforms)) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Resizer()])) else: raise ValueError('Dataset type not understood (must be 
csv or coco), exiting.') sampler = AspectRatioBasedSampler(dataset_train, batch_size=parser.batch_size, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=parser.num_workers, collate_fn=collater, batch_sampler=sampler) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=parser.batch_size, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=parser.num_workers, collate_fn=collater, batch_sampler=sampler_val) config = dict({"scales": None, "ratios": None}) if parser.config: config = load_config(parser.config, config) if parser.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True, ratios=config["ratios"], scales=config["scales"]) elif parser.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True, ratios=config["ratios"], scales=config["scales"]) elif parser.depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True, ratios=config["ratios"], scales=config["scales"]) elif parser.depth == 101: retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True, ratios=config["ratios"], scales=config["scales"]) elif parser.depth == 152: retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True, ratios=config["ratios"], scales=config["scales"]) else: raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152') optimizer = optim.Adam(retinanet.parameters(), lr=parser.init_lr) if parser.resume: if not parser.saved_ckpt: print("No saved checkpoint provided for resuming training. Exiting now...") return if not os.path.exists(parser.saved_ckpt): print("Invalid saved checkpoint path. Exiting now...") return # Restore last state retinanet, optimizer, start_epoch = load_ckpt(parser.saved_ckpt, retinanet, optimizer) if parser.epochs <= start_epoch: print("Number of epochs must be higher than number of trained epochs of saved checkpoint.") return use_gpu = True if use_gpu: print("Using GPU for training process") if torch.cuda.is_available(): if parser.multi_gpus: print("Using multi-gpus for training process") retinanet = torch.nn.DataParallel(retinanet.cuda(), device_ids=[0,1]) else: retinanet = torch.nn.DataParallel(retinanet.cuda()) else: retinanet = torch.nn.DataParallel(retinanet) retinanet.training = True scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=1, verbose=True) loss_hist = collections.deque(maxlen=500) retinanet.train() retinanet.module.freeze_bn() print('Num training images: {}'.format(len(dataset_train))) # Tensorboard writer writer = SummaryWriter(parser.log_dir) # Save snapshots dir if not os.path.exists(parser.snapshots): os.makedirs(parser.snapshots) best_mAP = 0 start_epoch = 0 if not parser.resume else start_epoch for epoch_num in range(start_epoch, parser.epochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] epoch_csf_loss = [] epoch_reg_loss = [] for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() if torch.cuda.is_available(): with torch.cuda.device(0): classification_loss, regression_loss = retinanet([data['img'].cuda().float(), data['annot']]) else: classification_loss, regression_loss = retinanet([data['img'].float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss epoch_csf_loss.append(float(classification_loss)) 
epoch_reg_loss.append(float(regression_loss)) if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) print( '\rEpoch: {}/{} | Iteration: {}/{} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'.format( (epoch_num + 1), parser.epochs, (iter_num + 1), len(dataloader_train), float(classification_loss), float(regression_loss), np.mean(loss_hist)), end='') del classification_loss del regression_loss except Exception as e: print(e) continue
# writer.add_scalar("Loss/train", loss, epoch_num) _epoch_loss = np.mean(epoch_loss) _epoch_csf_loss = np.mean(epoch_csf_loss) _epoch_reg_loss = np.mean(epoch_reg_loss) if parser.dataset == 'coco': print('Evaluating dataset') coco_eval.evaluate_coco(dataset_val, retinanet) scheduler.step(_epoch_loss) elif parser.dataset == 'csv' and parser.csv_val is not None: print('\nEvaluating dataset') APs = csv_eval.evaluate(dataset_val, retinanet) mAP = round(mean(APs[ap][0] for ap in APs.keys()), 5) print("mAP: %f" %mAP) writer.add_scalar("validate/mAP", mAP, epoch_num) # Handle lr_scheduler with mAP value scheduler.step(mAP) lr = get_lr(optimizer) writer.add_scalar("train/classification-loss", _epoch_csf_loss, epoch_num) writer.add_scalar("train/regression-loss", _epoch_reg_loss, epoch_num) writer.add_scalar("train/loss", _epoch_loss, epoch_num) writer.add_scalar("train/learning-rate", lr, epoch_num)
# Save model file, optimizer and epoch number checkpoint = { 'epoch': epoch_num, 'state_dict': retinanet.state_dict(), 'optimizer': optimizer.state_dict(), } # torch.save(retinanet.module, os.path.join(parser.snapshots, '{}_retinanet_{}.pt'.format(parser.dataset, epoch_num))) # Check whether this epoch's model achieves the highest mAP value is_best = False if best_mAP < mAP: best_mAP = mAP is_best = True save_ckpt(checkpoint, is_best, parser.snapshots, '{}_retinanet_{}.pt'.format(parser.dataset, epoch_num + 1)) print('\n') retinanet.eval() torch.save(retinanet, 'model_final.pt') writer.flush()
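The resume logic above depends on save_ckpt and load_ckpt helpers that are not shown in this snippet. A hypothetical sketch of what they might look like, consistent only with how they are called here (the on-disk layout and the "+ 1" resume convention are assumptions):

import os
import shutil
import torch

def save_ckpt(state, is_best, snapshot_dir, filename):
    # Hypothetical stand-in for the save_ckpt helper used above; only the call
    # signature is taken from the script.
    path = os.path.join(snapshot_dir, filename)
    torch.save(state, path)
    if is_best:
        shutil.copyfile(path, os.path.join(snapshot_dir, 'best_' + filename))

def load_ckpt(path, model, optimizer):
    # Hypothetical counterpart for resuming: restores model/optimizer state and
    # returns the epoch index to resume from.
    ckpt = torch.load(path, map_location='cpu')
    model.load_state_dict(ckpt['state_dict'])
    optimizer.load_state_dict(ckpt['optimizer'])
    return model, optimizer, ckpt['epoch'] + 1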
num_classes=opt.num_class, pretrained=False, conf_threshold=opt.confidence, nms_iou_threshold=opt.nms_threshold, ) elif opt.backbone == "resnet-50": model = model.resnet50( num_classes=opt.num_class, pretrained=False, conf_threshold=opt.confidence, nms_iou_threshold=opt.nms_threshold, ) elif opt.backbone == "resnet-101": model = model.resnet101( num_classes=opt.num_class, pretrained=False, conf_threshold=opt.confidence, nms_iou_threshold=opt.nms_threshold, ) elif opt.backbone == "resnet-152": model = model.resnet152( num_classes=opt.num_class, pretrained=False, conf_threshold=opt.confidence, nms_iou_threshold=opt.nms_threshold, ) else: raise NotImplementedError device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu") logger.info(f"using device {device}")
def main(args=None): parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.') parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.') parser.add_argument('--coco_path', help='Path to COCO directory') parser.add_argument('--csv_train', help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)') parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)') parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50) parser.add_argument('--epochs', help='Number of epochs', type=int, default=100) parser.add_argument('--dcn_layers', type =str, help = 'comma seperated str where laters to be used, 0..3',default = None) parser.add_argument('--use_depth', action='store_true', help='if specified, use depth for deformconv') parser = parser.parse_args(args) use_dcn = [False, False, False, False] if parser.dcn_layers is not None: _t = parser.dcn_layers.split(',') for __t in _t: use_dcn[int(__t)] = True # Create the data loaders if parser.dataset == 'coco': if parser.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') dataset_train = CocoDataset(parser.coco_path, set_name='train2017', transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()])) dataset_val = CocoDataset(parser.coco_path, set_name='val2017', transform=transforms.Compose([Normalizer(), Resizer()])) elif parser.dataset == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on COCO,') if parser.csv_classes is None: raise ValueError('Must provide --csv_classes when training on COCO,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Resizer()])) else: raise ValueError('Dataset type not understood (must be csv or coco), exiting.') sampler = AspectRatioBasedSampler(dataset_train, batch_size=128, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val) # Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True, use_dcn = use_dcn, use_depth = parser.use_depth) elif parser.depth == 101: retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 152: retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True) else: raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152') use_gpu = True writer = SummaryWriter() if use_gpu: if torch.cuda.is_available(): retinanet = retinanet.cuda() if torch.cuda.is_available(): retinanet = 
torch.nn.DataParallel(retinanet).cuda() else: retinanet = torch.nn.DataParallel(retinanet) retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr=1e-5) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) loss_hist = collections.deque(maxlen=500) retinanet.train() retinanet.module.freeze_bn() print('Num training images: {}'.format(len(dataset_train))) #mAP = csv_eval.evaluate(dataset_val, retinanet) global_step = 0 for epoch_num in range(parser.epochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() global_step += 1 if torch.cuda.is_available(): if parser.use_depth and 'depth' in data: classification_loss, regression_loss = retinanet([data['img'].cuda().float(), data['annot']],depth = data['depth'].cuda()) else: classification_loss, regression_loss = retinanet([data['img'].cuda().float(), data['annot']]) else: if parser.use_depth and 'depth' in data: classification_loss, regression_loss = retinanet([data['img'].float(), data['annot']],depth=data['depth']) else: classification_loss, regression_loss = retinanet([data['img'].float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() writer.add_scalar('CLS Loss',classification_loss,global_step) writer.add_scalar('REG Loss',regression_loss,global_step) loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) print( 'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'.format( epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist))) del classification_loss del regression_loss except Exception as e: print(e) continue if parser.dataset == 'coco': print('Evaluating dataset') coco_eval.evaluate_coco(dataset_val, retinanet) elif parser.dataset == 'csv' and parser.csv_val is not None: print('Evaluating dataset') mAP = csv_eval.evaluate(dataset_val, retinanet) scheduler.step(np.mean(epoch_loss)) torch.save(retinanet.module, '{}_retinanet_{}.pt'.format(parser.dataset, epoch_num)) retinanet.eval() torch.save(retinanet, 'model_final.pt') writer.close()
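The --dcn_layers handling above turns a comma-separated string such as "0,2" into a per-stage flag list. A small self-contained check of that parsing; the helper name is illustrative, since the script inlines the logic:

def parse_dcn_layers(spec, num_stages=4):
    # "0,2" enables deformable convolutions in ResNet stages 0 and 2 only.
    use_dcn = [False] * num_stages
    if spec:
        for idx in spec.split(','):
            use_dcn[int(idx)] = True
    return use_dcn

assert parse_dcn_layers('0,2') == [True, False, True, False]
assert parse_dcn_layers(None) == [False, False, False, False]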
def main(): global opt opt = parser.parse_args() init_distributed_mode(opt) dataset = ImageDirectory(opt.image_dir) sampler = torch.utils.data.distributed.DistributedSampler(dataset) loader = torch.utils.data.DataLoader( dataset, sampler=sampler, batch_size=opt.batch_size, num_workers=opt.num_workers, pin_memory=True, shuffle=False, drop_last=True, collate_fn=custom_collate, ) logger.info("Building data done with {} images loaded.".format(len(dataset))) if opt.backbone == "resnet-18": model = arch.resnet18( num_classes=opt.num_class, pretrained=False, conf_threshold=opt.confidence, nms_iou_threshold=opt.nms_threshold, ) elif opt.backbone == "resnet-34": model = arch.resnet34( num_classes=opt.num_class, pretrained=False, conf_threshold=opt.confidence, nms_iou_threshold=opt.nms_threshold, ) elif opt.backbone == "resnet-50": model = arch.resnet50( num_classes=opt.num_class, pretrained=False, conf_threshold=opt.confidence, nms_iou_threshold=opt.nms_threshold, ) elif opt.backbone == "resnet-101": model = arch.resnet101( num_classes=opt.num_class, pretrained=False, conf_threshold=opt.confidence, nms_iou_threshold=opt.nms_threshold, ) elif opt.backbone == "resnet-152": model = arch.resnet152( num_classes=opt.num_class, pretrained=False, conf_threshold=opt.confidence, nms_iou_threshold=opt.nms_threshold, ) else: raise NotImplementedError ckpt = torch.load(opt.weights) model.load_state_dict(ckpt.state_dict()) model.cuda() model.eval() # if opt.rank == 0: # logger.info(model) logger.info(f"successfully loaded saved checkpoint.") model = nn.parallel.DistributedDataParallel( model, device_ids=[opt.gpu_to_work_on], find_unused_parameters=True, ) for i, (batch, filenames) in tqdm(enumerate(loader), total=len(loader)): preds = dict() with torch.no_grad(): img_id, confs, classes, bboxes = model(batch[0].float().cuda()) img_id = img_id.cpu().numpy().tolist() confs = confs.cpu().numpy() classes = classes.cpu().numpy() bboxes = bboxes.cpu().numpy().astype(np.int32) for i, imgid in enumerate(img_id): f = filenames[imgid] pr = { "bbox": bboxes[i].tolist(), "confidence": float(confs[i]), "class_index": int(classes[i]), } if f in preds: preds[f].append(pr) else: preds[f] = [pr] for img_filename, detection in preds.items(): with open(os.path.join(opt.output_dir, img_filename.replace("jpg", "json")), "w") as f: json.dump(detection, f, indent=2)
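The inference script above writes one JSON file per image, each holding a list of detections with bbox, confidence and class_index keys. A minimal sketch of consuming those files; the directory and file name are placeholders:

import json
import os

# Placeholders for the prediction directory and one output file name.
output_dir = 'predictions'
with open(os.path.join(output_dir, 'example_image.json')) as f:
    detections = json.load(f)
for det in detections:
    x1, y1, x2, y2 = det['bbox']
    print(det['class_index'], det['confidence'], (x1, y1, x2, y2))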
def main(args=None): parser = argparse.ArgumentParser(description = 'Simple training script for training a RetinaNet network.') parser.add_argument('--s', help = 'training session', type = int) parser.add_argument('--bs', help = 'batch size', type = int, default = 4) parser.add_argument('--lr', help = 'learning rate', type = float, default = 0.001) parser.add_argument('--save_int', help = 'interval for saving model', type = int) parser.add_argument('--dataset', help = 'Dataset type, must be one of csv or coco.') parser.add_argument('--coco_path', help = 'Path to COCO directory') parser.add_argument('--csv_train', help = 'Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', help = 'Path to file containing class list (see readme)') parser.add_argument('--csv_val', help = 'Path to file containing validation annotations (optional, see readme)') parser.add_argument('--depth', help = 'Resnet depth, must be one of 18, 34, 50, 101, 152', type = int, default = 50) parser.add_argument('--epochs', help = 'Number of epochs', type = int, default = 100) parser.add_argument('--use_tb', help = 'whether to use tensorboard', action = 'store_true') parser.add_argument('--use_aug', help = 'whether to use data augmentation', action = 'store_true') parser = parser.parse_args(args) session = parser.s session_dir = 'session_{:02d}'.format(session) assert os.path.isdir('models'), '[ERROR] models folder not exist' assert os.path.isdir('logs'), '[ERROR] logs folder not exist' model_dir = os.path.join('models', session_dir) logs_dir = os.path.join('logs', session_dir) if not os.path.isdir(model_dir): os.mkdir(model_dir) if not os.path.isdir(logs_dir): os.mkdir(logs_dir) # set up tensorboard logger tb_writer = None if parser.use_tb: tb_writer = SummaryWriter('logs') # Create the data loaders if parser.dataset == 'coco': if parser.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') if parser.use_aug: #transform = transforms.Compose([Normalizer(), Augmenter(), Resizer()])) dataset_train = CocoDataset(parser.coco_path, set_name='train2017', transform = transforms.Compose([Normalizer(), Augmenter(), ToTensor()])) else: dataset_train = CocoDataset(parser.coco_path, set_name='train2017', transform = transforms.Compose([Normalizer(), ToTensor()])) dataset_val = CocoDataset(parser.coco_path, set_name='val2017', transform = transforms.Compose([Normalizer(), ToTensor()])) #transform = transforms.Compose([Normalizer(), Resizer()])) elif parser.dataset == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on COCO,') if parser.csv_classes is None: raise ValueError('Must provide --csv_classes when training on COCO,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Augmenter(), ToTensor()])) #transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Augmenter(), ToTensor()])) #transform=transforms.Compose([Normalizer(), Resizer()])) else: raise ValueError('Dataset type not understood (must be csv or coco), exiting.') sampler = AspectRatioBasedSampler(dataset_train, batch_size = parser.bs, drop_last = False) dataloader_train = DataLoader(dataset_train, num_workers = 0, collate_fn = collater, 
batch_sampler = sampler) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size = parser.bs, drop_last = False) dataloader_val = DataLoader(dataset_val, num_workers = 0, collate_fn = collater, batch_sampler = sampler_val) print('# classes: {}'.format(dataset_train.num_classes)) # Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes = dataset_train.num_classes(), pretrained=True) elif parser.depth == 34: retinanet = model.resnet34(num_classes = dataset_train.num_classes(), pretrained=True) elif parser.depth == 50: retinanet = model.resnet50(num_classes = dataset_train.num_classes(), pretrained=True) elif parser.depth == 101: retinanet = model.resnet101(num_classes = dataset_train.num_classes(), pretrained=True) elif parser.depth == 152: retinanet = model.resnet152(num_classes = dataset_train.num_classes(), pretrained=True) else: raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152') use_gpu = True if use_gpu: retinanet = retinanet.cuda() # disable multi-GPU train retinanet = torch.nn.DataParallel(retinanet).cuda() retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr = parser.lr) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience = 3, verbose = True) loss_hist = collections.deque(maxlen = 500) retinanet.train() #retinanet.module.freeze_bn() if DataParallel activated retinanet.module.freeze_bn() print('Num training images: {}'.format(len(dataset_train))) for epoch_num in range(parser.epochs): retinanet.train() # retinanet.module.freeze_bn() if DataParallel activated retinanet.module.freeze_bn() epoch_loss = [] iter_per_epoch = len(dataloader_train) for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() assert data['img'][0].shape[0] == 3, '[ERROR] data first dim should be 3! 
({})'.format(data['img'][0].shape) # data['img']: (B, C, H, W) # data['annot']: [x1, y1, x2, y2, class_id] classification_loss, regression_loss = retinanet([data['img'].cuda().float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) # epoch starts from 0 if (iter_num + 1) % 1 == 0: print( 'Epoch: {} | Iteration: {} | Total loss: {:1.5f} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'.format( epoch_num, iter_num, float(loss), float(classification_loss), float(regression_loss), np.mean(loss_hist) ) ) # update tensorboard if tb_writer is not None: crt_iter = (epoch_num) * iter_per_epoch + (iter_num + 1) tb_dict = { 'total_loss': float(loss), 'classification_loss': float(classification_loss), 'regression_loss': float(regression_loss) } tb_writer.add_scalars('session_{:02d}/loss'.format(session), tb_dict, crt_iter) del classification_loss del regression_loss except Exception as e: print(e) continue if parser.dataset == 'coco': print('Evaluating dataset') coco_eval.evaluate_coco(dataset_val, retinanet) elif parser.dataset == 'csv' and parser.csv_val is not None: print('Evaluating dataset') mAP = csv_eval.evaluate(dataset_val, retinanet) scheduler.step(np.mean(epoch_loss)) if (epoch_num + 1) % parser.save_int == 0: # retinanet (before DataParallel): <class 'retinanet.model.ResNet'>, no self.module # retinanet (after DataParallel): <class 'torch.nn.parallel.data_parallel.DataParallel>, self.module available # retinanet.module (after DataParallel): <class 'retinanet.model.ResNet'> torch.save(retinanet.module.state_dict(), os.path.join(model_dir, 'retinanet_s{:02d}_e{:03d}.pth'.format(session, epoch_num))) if parser.use_tb: tb_writer.close() retinanet.eval() torch.save(retinanet.module.state_dict(), os.path.join(model_dir, 'retinanet_s{:02d}_e{:03d}.pth'.format(session, epoch_num)))
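Because the snapshots above store retinanet.module.state_dict() (the bare model, without the DataParallel wrapper or the 'module.' key prefix), they can be reloaded into a freshly constructed backbone. A sketch assuming the same model factory module is importable; the path and class count are placeholders:

import torch

# Path and num_classes are placeholders; the .pth file holds a plain state_dict.
retinanet = model.resnet50(num_classes=80, pretrained=False)
state_dict = torch.load('models/session_01/retinanet_s01_e099.pth', map_location='cpu')
retinanet.load_state_dict(state_dict)
retinanet.eval()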
def main(args=None): parser = argparse.ArgumentParser( description= 'Simple paps training script for training a RetinaNet network.') parser.add_argument( '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50) parser.add_argument('--learn_rate', help='learn_rate epochs', type=float, default=0.0008) parser.add_argument('--start_epoch', help='start_epoch', type=int, default=0) parser.add_argument('--end_epoch', help='end_epoch', type=int, default=200) parser.add_argument('--batch_size', help='Number of batchs', type=int, default=64) parser.add_argument('--train_data', help='train data file', default='data/train.npy') parser.add_argument('--test_data', help='test data file', default='data/test.npy') parser.add_argument('--saved_dir', help='saved dir', default='trained_models/resnet101_320/') parser.add_argument('--gpu_num', help='default gpu', type=int, default=3) parser.add_argument('--ismultigpu', help='multi gpu support', type=bool, default=False) parser.add_argument('--freeze_ex_bn', help='freeze batch norm', type=bool, default=False) parser.add_argument('--num_workers', help='cpu core', type=int, default=12) parser.add_argument('--target_threshold', help='target_threshold', type=float, default=0.7) parser.add_argument('--topk', help=' topk', type=int, default=20) parser.add_argument('--filter_option', help=' topk', type=int, default=1) parser = parser.parse_args(args) print('batch_size ', parser.batch_size) print('learn_rate ', parser.learn_rate) print(' start_epoch {} end_epoch {}'.format(parser.start_epoch, parser.end_epoch)) print('ismultigpu', parser.ismultigpu) print('freeze_ex_bn', parser.freeze_ex_bn) print('target_threshold {} topk {} filter_option {}'.format( parser.target_threshold, parser.topk, parser.filter_option)) # GPU 할당 변경하기 GPU_NUM = parser.gpu_num device = torch.device( f'cuda:{GPU_NUM}' if torch.cuda.is_available() else 'cpu') torch.cuda.set_device(device) # change allocation of current GPU print('Current cuda device ', torch.cuda.current_device()) # check resnet101 = models.resnet101(progress=False, pretrained=True) ret_model = model.resnet101(num_classes=2, device=device) ret_model.load_state_dict(resnet101.state_dict(), strict=False) # In Batch norm initial setting, set r to and set to 1 for bias for fast convergence state_dict = ret_model.state_dict() for s in state_dict: if 'bn' in s and 'residualafterFPN' in s: if 'weight' in s: shape = state_dict[s].shape state_dict[s] = torch.zeros(shape) elif 'bias' in s: shape = state_dict[s].shape state_dict[s] = torch.ones(shape) ret_model.load_state_dict(state_dict) # criterion = FocalLoss(device) criterion = PapsLoss(device, parser.target_threshold, parser.topk, parser.filter_option) criterion = criterion.to(device) optimizer = optim.Adam(ret_model.parameters(), lr=1e-7) scheduler = CosineAnnealingWarmUpRestarts(optimizer, T_0=20, T_mult=2, eta_max=parser.learn_rate, T_up=5, gamma=0.5) saved_dir = parser.saved_dir if os.path.isfile(saved_dir + 'model.pt'): state = torch.load(saved_dir + 'model.pt') ret_model.load_state_dict(state['state_dict']) optimizer.load_state_dict(state['optimizer']) scheduler.load_state_dict(state['scheduler']) last_loss = state['loss'] else: last_loss = 0.6 if parser.ismultigpu: ret_model = torch.nn.DataParallel(ret_model, device_ids=[3, 4, 5], output_device=GPU_NUM).to(device) # ret_model = DataParallelModel(ret_model, device_ids = device_ids) ret_model.to(device) # ret_model.module.freeze_bn() batch_size = parser.batch_size dataset_train = PapsDataset('data/', 
set_name='train_2class', transform=train_transforms) train_data_loader = DataLoader(dataset_train, batch_size=batch_size, shuffle=True, num_workers=parser.num_workers, pin_memory=True, collate_fn=collate_fn) dataset_val = PapsDataset('data/', set_name='val_2class', transform=val_transforms) val_data_loader = DataLoader(dataset_val, batch_size=1, shuffle=False, num_workers=4, collate_fn=collate_fn) s_epoch = parser.start_epoch e_epoch = parser.end_epoch ret_model.training = True paps_train.train_paps(dataloader=train_data_loader, model=ret_model, criterion=criterion, saved_dir=saved_dir, optimizer=optimizer, scheduler=scheduler, device=device, s_epoch=s_epoch, e_epoch=e_epoch, last_loss=last_loss) ret_model.training = False # ret_model.eval() paps_eval.evaluate_paps(dataset=dataset_val, dataloader=val_data_loader, model=ret_model, saved_dir=parser.saved_dir, device=device, threshold=0.5)
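paps_train.train_paps and the saved model.pt are not shown here, so the following only sketches the checkpoint layout that the resume branch above expects (state_dict, optimizer, scheduler and loss keys), using the script's own variables; it is an assumption about the helper, not the actual training code:

import torch

# Sketch only: mirrors the keys read back from saved_dir + 'model.pt' when resuming.
state = {
    'state_dict': ret_model.state_dict(),
    'optimizer': optimizer.state_dict(),
    'scheduler': scheduler.state_dict(),
    'loss': last_loss,
}
torch.save(state, saved_dir + 'model.pt')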
def main(args=None): parser = argparse.ArgumentParser( description="Simple training script for training a RetinaNet network." ) parser.add_argument("--dataset", help="Dataset type, must be one of csv or coco.") parser.add_argument("--model", default=None, help="Path to trained model") parser.add_argument("--coco_path", help="Path to COCO directory") parser.add_argument( "--csv_train", help="Path to file containing training annotations (see readme)" ) parser.add_argument( "--csv_classes", help="Path to file containing class list (see readme)" ) parser.add_argument( "--csv_val", help="Path to file containing validation annotations (optional, see readme)", ) parser.add_argument( "--depth", help="Resnet depth, must be one of 18, 34, 50, 101, 152", type=int, default=50, ) parser.add_argument("--epochs", help="Number of epochs", type=int, default=100) parser.add_argument( "--result_dir", default="results", help="Path to store training results", type=str, ) parser.add_argument( "--batch_num", default=8, help="Number of samples in a batch", type=int ) parser = parser.parse_args(args) print(parser) # parameters BATCH_SIZE = parser.batch_num IMAGE_MIN_SIDE = 1440 IMAGE_MAX_SIDE = 2560 # Create the data loaders if parser.dataset == "coco": if parser.coco_path is None: raise ValueError("Must provide --coco_path when training on COCO,") # TODO: parameterize arguments for Resizer, and other transform functions # resizer: min_side=608, max_side=1024 dataset_train = CocoDataset( parser.coco_path, # set_name="train2017", set_name="train_images_full", transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer(passthrough=True),] ), ) dataset_val = CocoDataset( parser.coco_path, # set_name="val2017", set_name="val_images_full", transform=transforms.Compose([Normalizer(), Resizer(passthrough=True),]), ) elif parser.dataset == "csv": if parser.csv_train is None: raise ValueError("Must provide --csv_train when training on COCO,") if parser.csv_classes is None: raise ValueError("Must provide --csv_classes when training on COCO,") dataset_train = CSVDataset( train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]), ) if parser.csv_val is None: dataset_val = None print("No validation annotations provided.") else: dataset_val = CSVDataset( train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Resizer()]), ) else: raise ValueError("Dataset type not understood (must be csv or coco), exiting.") sampler = AspectRatioBasedSampler( dataset_train, batch_size=BATCH_SIZE, drop_last=False ) dataloader_train = DataLoader( dataset_train, num_workers=16, collate_fn=collater, batch_sampler=sampler ) if dataset_val is not None: sampler_val = AspectRatioBasedSampler( dataset_val, batch_size=BATCH_SIZE, drop_last=False ) dataloader_val = DataLoader( dataset_val, num_workers=16, collate_fn=collater, batch_sampler=sampler_val ) # Create the model if parser.depth == 18: retinanet = model.resnet18( num_classes=dataset_train.num_classes(), pretrained=True ) elif parser.depth == 34: retinanet = model.resnet34( num_classes=dataset_train.num_classes(), pretrained=True ) elif parser.depth == 50: retinanet = model.resnet50( num_classes=dataset_train.num_classes(), pretrained=True ) elif parser.depth == 101: retinanet = model.resnet101( num_classes=dataset_train.num_classes(), pretrained=True ) elif parser.depth == 152: retinanet = model.resnet152( num_classes=dataset_train.num_classes(), pretrained=True ) else: raise 
ValueError("Unsupported model depth, must be one of 18, 34, 50, 101, 152") if parser.model: retinanet = torch.load(parser.model) use_gpu = True if use_gpu: if torch.cuda.is_available(): retinanet = retinanet.cuda() if torch.cuda.is_available(): retinanet = torch.nn.DataParallel(retinanet).cuda() else: retinanet = torch.nn.DataParallel(retinanet) retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr=1e-4) scheduler = optim.lr_scheduler.ReduceLROnPlateau( optimizer, patience=3, verbose=True ) loss_hist = collections.deque(maxlen=500) retinanet.train() retinanet.module.freeze_bn() print("Num training images: {}".format(len(dataset_train))) for epoch_num in range(parser.epochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] p_bar = tqdm(dataloader_train) for iter_num, data in enumerate(p_bar): try: optimizer.zero_grad() if torch.cuda.is_available(): classification_loss, regression_loss = retinanet( [data["img"].cuda().float(), data["annot"]] ) else: classification_loss, regression_loss = retinanet( [data["img"].float(), data["annot"]] ) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) mean_loss = np.mean(loss_hist) p_bar.set_description( f"Epoch: {epoch_num} | Iteration: {iter_num} | " f"Class loss: {float(classification_loss.item()):.5f} | " f"Regr loss: {float(regression_loss.item()):.5f} | " f"Running loss: {mean_loss:.5f}" ) del classification_loss del regression_loss except Exception as e: print(e) continue if parser.dataset == "coco": print("Evaluating dataset") coco_eval.evaluate_coco( dataset_val, retinanet, result_dir=parser.result_dir ) elif parser.dataset == "csv" and parser.csv_val is not None: print("Evaluating dataset") mAP = csv_eval.evaluate(dataset_val, retinanet) scheduler.step(np.mean(epoch_loss)) # TODO: Fix string formating mix (adopt homogeneous format) torch.save( retinanet.module, f"{parser.result_dir}/" + "{}_retinanet_{}.pt".format(parser.dataset, epoch_num), ) retinanet.eval() torch.save(retinanet, "model_final.pt")
def main(args=None): parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.') parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.') parser.add_argument('--coco_path', help='Path to COCO directory') parser.add_argument('--csv_train', help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)') parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)') parser.add_argument('--iou',default='05') parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50) parser.add_argument('--epochs', help='Number of epochs', type=int, default=100) parser = parser.parse_args(args) # Create the data loaders if parser.dataset == 'coco': if parser.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') dataset_train = CocoDataset(parser.coco_path, set_name='train2017', transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()])) dataset_val = CocoDataset(parser.coco_path, set_name='val2017', transform=transforms.Compose([Normalizer(), Resizer()])) elif parser.dataset == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on COCO,') if parser.csv_classes is None: raise ValueError('Must provide --csv_classes when training on COCO,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Resizer()])) val_dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Resizer()])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Resizer()])) else: raise ValueError('Dataset type not understood (must be csv or coco), exiting.') sampler = AspectRatioBasedSampler(dataset_train, batch_size=8, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=8, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val) # Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 101: retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 152: retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True) else: raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152') use_gpu = True if use_gpu: if torch.cuda.is_available(): retinanet = retinanet.cuda() if torch.cuda.is_available(): retinanet = torch.nn.DataParallel(retinanet).cuda() else: retinanet = torch.nn.DataParallel(retinanet) retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr=5e-5) lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) 
multistep_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[5,8,11,20], gamma=0.2) loss_hist = collections.deque(maxlen=500) val_loss_hist = collections.deque(maxlen=500) retinanet.train() retinanet.module.freeze_bn() print('Num training images: {}'.format(len(dataset_train))) for epoch_num in range(parser.epochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] val_epoch_loss=[] for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() if torch.cuda.is_available(): classification_loss, regression_loss = retinanet([data['img'].cuda().float(), data['annot']]) else: classification_loss, regression_loss = retinanet([data['img'].float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) print( 'Train: Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f} | Epoch loss: {:1.5f} '.format( epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist),epoch_loss[-1])) del classification_loss del regression_loss except Exception as e: print(e) continue for iter_num, data in enumerate(dataloader_val): try: #optimizer.zero_grad() #retinanet.eval() with torch.no_grad(): if torch.cuda.is_available(): classification_loss, regression_loss = retinanet((data['img'].cuda().float(), data['annot'])) else: classification_loss, regression_loss = retinanet((data['img'].float(), data['annot'])) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue #loss.backward() #torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) #optimizer.step() val_loss_hist.append(float(loss)) val_epoch_loss.append(float(loss)) print( 'Val: Epoch: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f} | Epoch loss: {:1.5f} '.format( epoch_num, float(classification_loss), float(regression_loss), np.mean(val_loss_hist),val_epoch_loss[-1])) del classification_loss del regression_loss except Exception as e: print(e) continue if parser.dataset == 'coco': print('Evaluating dataset') coco_eval.evaluate_coco(dataset_val, retinanet) elif parser.dataset == 'csv' and parser.csv_val is not None: print('Evaluating dataset') #mAP_train = csv_eval.evaluate(val_dataset_train,retinanet,iou_threshold=float(parser.iou)/10) mAP_val = csv_eval.evaluate(dataset_val, retinanet,iou_threshold=float(parser.iou)/10) #writer.add_scalar('train_mAP_Questions',mAP_train[0][0],epoch_num) writer.add_scalar('val_mAP_Questions', mAP_val[0][0], epoch_num) writer.add_scalar('val_loss',np.mean(val_epoch_loss),epoch_num) writer.add_scalar('train_loss',np.mean(epoch_loss),epoch_num) lr_scheduler.step(np.mean(epoch_loss)) #one_scheduler.step() multistep_scheduler.step() torch.save(retinanet.module, '{}_retinanet_{}.pt'.format(parser.iou, epoch_num)) retinanet.eval() torch.save(retinanet, 'model_final.pt')
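csv_eval.evaluate() returns per-class results, which is why the code above indexes mAP_val[0][0] to log the AP of class 0 only. If an average over all classes were wanted instead, it could be computed as below; the dictionary values are made-up placeholders, and the {class_index: (AP, num_annotations)} layout is inferred from how the result is indexed in this collection:

import numpy as np

# Hypothetical per-class result in the assumed {class_index: (AP, num_annotations)} layout.
mAP_val = {0: (0.81, 120), 1: (0.64, 95)}
mean_ap = float(np.mean([ap for ap, _ in mAP_val.values()]))
print('per-class APs:', {k: ap for k, (ap, _) in mAP_val.items()}, '| mAP:', mean_ap)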
def main(args=None): parser = argparse.ArgumentParser( description='Simple training script for training a RetinaNet network.') parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.') parser.add_argument('--coco_path', help='Path to COCO directory') parser.add_argument( '--csv_train', help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)') parser.add_argument( '--csv_val', help= 'Path to file containing validation annotations (optional, see readme)' ) parser.add_argument('--exp_name', help='Path to folder for saving the model and log', type=str) parser.add_argument('--output_folder', help='Path to folder for saving all the experiments', type=str) parser.add_argument( '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50) parser.add_argument('--epochs', help='Number of epochs', type=int, default=100) # 100 parser.add_argument('--batch_size', help='Batch size', type=int, default=2) parser.add_argument('--lr', help='Number of epochs', type=float, default=1e-5) parser.add_argument('--caption', help='Any thing in particular about the experiment', type=str) parser.add_argument('--server', help='seerver name', type=str, default='ultron') parser.add_argument('--detector', help='detection algo', type=str, default='RetinaNet') parser.add_argument('--arch', help='model architecture', type=str) parser.add_argument('--pretrain', default=False, action='store_true') parser.add_argument('--freeze_batchnorm', default=False, action='store_true') parser = parser.parse_args(args) output_folder_path = os.path.join(parser.output_folder, parser.exp_name) if not os.path.exists(output_folder_path): os.makedirs(output_folder_path) PARAMS = { 'dataset': parser.dataset, 'exp_name': parser.exp_name, 'depth': parser.depth, 'epochs': parser.epochs, 'batch_size': parser.batch_size, 'lr': parser.lr, 'caption': parser.caption, 'server': parser.server, 'arch': parser.arch, 'pretrain': parser.pretrain, 'freeze_batchorm': parser.freeze_batchnorm } exp = neptune.create_experiment( name=parser.exp_name, params=PARAMS, tags=[parser.arch, parser.detector, parser.dataset, parser.server]) # Create the data loaders if parser.dataset == 'coco': if parser.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') dataset_train = CocoDataset(parser.coco_path, set_name='train2017', transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) dataset_val = CocoDataset(parser.coco_path, set_name='val2017', transform=transforms.Compose( [Normalizer(), Resizer()])) elif parser.dataset == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on COCO,') if parser.csv_classes is None: raise ValueError( 'Must provide --csv_classes when training on COCO,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Resizer()])) else: raise ValueError( 'Dataset type not understood (must be csv or coco), exiting.') sampler = AspectRatioBasedSampler(dataset_train, batch_size=parser.batch_size, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, 
batch_sampler=sampler) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val) # Create the model if parser.depth == 18 and parser.arch == 'Resnet': retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=parser.pretrain) elif parser.depth == 10 and parser.arch == 'Resnet': retinanet = model.resnet10(num_classes=dataset_train.num_classes(), pretrained=parser.pretrain) elif parser.depth == 18 and parser.arch == 'BiRealNet18': checkpoint_path = None if parser.pretrain: checkpoint_path = '/media/Rozhok/Bi-Real-net/pytorch_implementation/BiReal18_34/models/imagenet_baseline/checkpoint.pth.tar' retinanet = birealnet18(checkpoint_path, num_classes=dataset_train.num_classes()) elif parser.depth == 34 and parser.arch == 'Resnet': retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=parser.pretrain) elif parser.depth == 50 and parser.arch == 'Resnet': retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=parser.pretrain) elif parser.depth == 101 and parser.arch == 'Resnet': retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=parser.pretrain) elif parser.depth == 152: retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=parser.pretrain) elif parser.arch == 'ofa': print("Model is ResNet50D.") bn_momentum = 0.1 bn_eps = 1e-5 retinanet = ResNet50D( n_classes=dataset_train.num_classes(), bn_param=(bn_momentum, bn_eps), dropout_rate=0, width_mult=1.0, depth_param=3, expand_ratio=0.35, ) else: raise ValueError( 'Unsupported model depth, must be one of 18, 34, 50, 101, 152') print(retinanet) use_gpu = True if use_gpu: if torch.cuda.is_available(): retinanet = retinanet.cuda() if torch.cuda.is_available(): retinanet = torch.nn.DataParallel(retinanet).cuda() else: retinanet = torch.nn.DataParallel(retinanet) retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr=parser.lr) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) loss_hist = collections.deque(maxlen=500) retinanet.train() if parser.freeze_batchnorm: retinanet.module.freeze_bn() print('Num training images: {}'.format(len(dataset_train))) for epoch_num in range(parser.epochs): exp.log_metric('Current lr', float(optimizer.param_groups[0]['lr'])) exp.log_metric('Current epoch', int(epoch_num)) retinanet.train() if parser.freeze_batchnorm: retinanet.module.freeze_bn() epoch_loss = [] for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() if torch.cuda.is_available(): classification_loss, regression_loss = retinanet( [data['img'].cuda().float(), data['annot']]) else: classification_loss, regression_loss = retinanet( [data['img'].float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) print( 'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}' .format(epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist))) exp.log_metric('Training: Classification loss', float(classification_loss)) exp.log_metric('Training: Regression 
loss', float(regression_loss)) exp.log_metric('Training: Totalloss', float(loss)) del classification_loss del regression_loss except Exception as e: print(e) continue if parser.dataset == 'coco': print('Evaluating dataset') coco_eval.evaluate_coco(dataset_val, retinanet, output_folder_path, exp=exp) elif parser.dataset == 'csv' and parser.csv_val is not None: print('Evaluating dataset') mAP = csv_eval.evaluate(dataset_val, retinanet) scheduler.step(np.mean(epoch_loss)) torch.save( retinanet.module, os.path.join( output_folder_path, '{}_retinanet_{}.pt'.format(parser.dataset, epoch_num))) retinanet.eval() torch.save(retinanet, os.path.join(output_folder_path, 'model_final.pt'))
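An illustrative invocation of the script above for the plain ResNet-18 configuration; --exp_name and --output_folder are needed because the output folder is derived from them, and every name or path shown is a placeholder:

# All names and paths below are placeholders.
main([
    '--dataset', 'csv',
    '--csv_train', 'annotations/train.csv',
    '--csv_classes', 'annotations/classes.csv',
    '--csv_val', 'annotations/val.csv',
    '--arch', 'Resnet',
    '--depth', '18',
    '--pretrain',
    '--freeze_batchnorm',
    '--exp_name', 'retinanet_r18_baseline',
    '--output_folder', 'experiments',
    '--batch_size', '2',
    '--lr', '1e-5',
])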
def main(args=None): parser = argparse.ArgumentParser( description='Simple training script for training a RetinaNet network.') parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.') parser.add_argument('--coco_path', help='Path to COCO directory') parser.add_argument( '--csv_train', help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)') parser.add_argument( '--csv_val', help= 'Path to file containing validation annotations (optional, see readme)' ) parser.add_argument( '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152, 5032, 10132', type=int, default=10148) parser.add_argument('--epochs', help='Number of epochs', type=int, default=200) parser = parser.parse_args(args) # Create the data loaders if parser.dataset == 'coco': if parser.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') dataset_train = CocoDataset(parser.coco_path, set_name='train2017', transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) dataset_val = CocoDataset(parser.coco_path, set_name='val2017', transform=transforms.Compose( [Normalizer(), Resizer()])) elif parser.dataset == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on COCO,') if parser.csv_classes is None: raise ValueError( 'Must provide --csv_classes when training on COCO,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Resizer()])) else: raise ValueError( 'Dataset type not understood (must be csv or coco), exiting.') sampler = AspectRatioBasedSampler(dataset_train, batch_size=2, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val) # Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 101: retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 152: retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 5032: retinanet = model.resnext50(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 10132: retinanet = model.resnext101(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 10148: retinanet = model_SE.SEresnext101( num_classes=dataset_train.num_classes(), pretrained=True) else: raise ValueError( 'Unsupported model depth, must be one of 18, 34, 50, 101, 152') use_gpu = True if use_gpu: if torch.cuda.is_available(): retinanet = retinanet.cuda() if torch.cuda.is_available(): retinanet = torch.nn.DataParallel(retinanet).cuda() else: retinanet = 
torch.nn.DataParallel(retinanet) retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr=1e-5) #change_weight_decay scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) loss_hist = collections.deque(maxlen=500) retinanet.train() retinanet.module.freeze_bn() print('Num training images: {}'.format(len(dataset_train))) for epoch_num in range(parser.epochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] total_classification_loss = 0.0 total_regression_loss = 0.0 epoch_number = 0 for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() if torch.cuda.is_available(): classification_loss, regression_loss = retinanet( [data['img'].cuda().float(), data['annot']]) else: classification_loss, regression_loss = retinanet( [data['img'].float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) ############################# # total_classification_loss += classification_loss # total_regression_loss += regression_loss # epoch_number = epoch_num fp = open(output_path + "clas_reg_loss.txt", "a") fp.write( str(epoch_num) + ',' + str(float(classification_loss)) + ',' + str(float(regression_loss)) + ',' + str(np.mean(loss_hist)) + '\n') # writer.add_scalar('Classification_loss', float(classification_loss), epoch_num) # writer.add_scalar('Regression_loss', float(regression_loss), epoch_num) # writer.flush() ############################# print( 'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}' .format(epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist))) del classification_loss del regression_loss except Exception as e: print(e) continue ############################# if parser.dataset == 'coco': print('Evaluating dataset') coco_eval.evaluate_coco(dataset_val, retinanet) elif parser.dataset == 'csv' and parser.csv_val is not None: print('Evaluating dataset') mAP = csv_eval.evaluate(dataset_val, retinanet) scheduler.step(np.mean(epoch_loss)) torch.save( retinanet.module, output_path + '{}_retinanet_{}.pt'.format(parser.dataset, epoch_num)) retinanet.eval() torch.save(retinanet, output_path + 'model_final.pt')
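The training loop above appends one "epoch,classification,regression,running" line per iteration to clas_reg_loss.txt under output_path (which this snippet assumes is defined elsewhere). A small sketch of reading that log back for inspection or plotting:

import numpy as np

# output_path is the same (externally defined) prefix used by the training loop above.
rows = np.loadtxt(output_path + 'clas_reg_loss.txt', delimiter=',', ndmin=2)
epochs, cls_loss, reg_loss, running = rows.T
print('last running loss: {:.5f}'.format(running[-1]))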
def main(args=None): parser = argparse.ArgumentParser( description='Simple training script for training a RetinaNet network.') parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.', default='coco') parser.add_argument( '--coco_path', help='Path to COCO directory', default= '/media/zhuzhu/ec114170-f406-444f-bee7-a3dc0a86cfa2/dataset/coco') parser.add_argument( '--csv_train', help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)') parser.add_argument( '--csv_val', help= 'Path to file containing validation annotations (optional, see readme)' ) parser.add_argument( '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50) parser.add_argument('--epochs', help='Number of epochs', type=int, default=100) parser.add_argument('--use-gpu', help='training on cpu or gpu', action='store_false', default=True) parser.add_argument('--device-ids', help='GPU device ids', default=[0]) args = parser.parse_args() # ------------------------------ Create the data loaders ----------------------------- if args.dataset == 'coco': if args.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') dataset_train = CocoDataset(args.coco_path, set_name='train2017', transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) dataset_val = CocoDataset(args.coco_path, set_name='val2017', transform=transforms.Compose( [Normalizer(), Resizer()])) sampler_train = AspectRatioBasedSampler(dataset_train, batch_size=2, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler_train) sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val) # Create the model if args.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True) elif args.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True) elif args.depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=False) elif args.depth == 101: retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True) elif args.depth == 152: retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True) else: raise ValueError( 'Unsupported model depth, must be one of 18, 34, 50, 101, 152') if args.use_gpu: retinanet = nn.DataParallel(retinanet, device_ids=args.device_ids).cuda() # retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr=1e-5) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) loss_hist = collections.deque(maxlen=500) print('Num training images: {}'.format(len(dataset_train))) for epoch_num in range(args.epochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() classification_loss, regression_loss = retinanet( [data['img'].cuda().float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) # 梯度的最大范数为0.1 optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) print( 'Epoch: {} | 
Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}' .format(epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist))) del classification_loss del regression_loss except Exception as e: print(e) continue if args.dataset == 'coco': print('Evaluating dataset') coco_eval.evaluate_coco(dataset_val, retinanet) scheduler.step(np.mean(epoch_loss)) torch.save(retinanet.module, '{}_retinanet_{}.pt'.format(args.dataset, epoch_num)) retinanet.eval() torch.save(retinanet, 'model_final.pt')
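All of these scripts step ReduceLROnPlateau on the mean epoch loss. A self-contained sketch of how that scheduler reacts when the monitored loss stops improving; the loss values are toy numbers used only to trigger the plateau:

import torch
import torch.optim as optim

params = [torch.nn.Parameter(torch.zeros(1))]        # stand-in parameters
optimizer = optim.Adam(params, lr=1e-5)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, factor=0.1)

for epoch, mean_epoch_loss in enumerate([1.0, 0.9, 0.9, 0.9, 0.9, 0.9]):
    scheduler.step(mean_epoch_loss)                  # LR is cut by `factor` after `patience` stalled epochs
    print(epoch, optimizer.param_groups[0]['lr'])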
def main(args=None): parser = argparse.ArgumentParser( description="Simple training script for training a RetinaNet network.") parser.add_argument("--dataset", help="Dataset type, must be one of csv or coco.") parser.add_argument("--coco_path", help="Path to COCO directory") parser.add_argument( "--csv_train", help="Path to file containing training annotations (see readme)") parser.add_argument("--csv_classes", help="Path to file containing class list (see readme)") parser.add_argument( "--csv_val", help= "Path to file containing validation annotations (optional, see readme)", ) parser.add_argument( "--depth", help="Resnet depth, must be one of 18, 34, 50, 101, 152", type=int, default=50, ) parser.add_argument("--batch_size", help="Batch size", type=int, default=2) parser.add_argument("--epochs", help="Number of epochs", type=int, default=100) parser.add_argument("--workers", help="Number of workers of dataleader", type=int, default=4) parser = parser.parse_args(args) writer = SummaryWriter("logs") # Create the data loaders if parser.dataset == "coco": if parser.coco_path is None: raise ValueError("Must provide --coco_path when training on COCO,") dataset_train = CocoDataset( parser.coco_path, set_name="train2017", transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()]), ) dataset_val = CocoDataset( parser.coco_path, set_name="val2017", transform=transforms.Compose([Normalizer(), Resizer()]), ) elif parser.dataset == "csv": if parser.csv_train is None: raise ValueError("Must provide --csv_train when training on COCO,") if parser.csv_classes is None: raise ValueError( "Must provide --csv_classes when training on COCO,") dataset_train = CSVDataset( train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()]), ) if parser.csv_val is None: dataset_val = None print("No validation annotations provided.") else: dataset_val = CSVDataset( train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Resizer()]), ) else: raise ValueError( "Dataset type not understood (must be csv or coco), exiting.") sampler = AspectRatioBasedSampler(dataset_train, batch_size=parser.batch_size, drop_last=False) dataloader_train = DataLoader( dataset_train, num_workers=parser.workers, collate_fn=collater, batch_sampler=sampler, ) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=parser.workers, collate_fn=collater, batch_sampler=sampler_val) # Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 101: retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 152: retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True) else: raise ValueError( "Unsupported model depth, must be one of 18, 34, 50, 101, 152") use_gpu = True if use_gpu: retinanet = retinanet.cuda() retinanet = torch.nn.DataParallel(retinanet).cuda() retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr=1e-5) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=10, verbose=True) loss_hist = 
collections.deque(maxlen=500) retinanet.train() retinanet.module.freeze_bn() print("Num training images: {}".format(len(dataset_train))) global_step = 0 for epoch_num in range(parser.epochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] for iter_num, data in enumerate(dataloader_train): global_step = iter_num + epoch_num * len(dataloader_train) try: optimizer.zero_grad() classification_loss, regression_loss = retinanet( [data["img"].cuda().float(), data["annot"]]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) if iter_num % 10 == 0: print( "Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}" .format( epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist), )) writer.add_scalars( "training", { "loss": loss, "loss_cls": classification_loss, "loss_reg": regression_loss, }, global_step, ) del classification_loss del regression_loss except Exception as e: print(e) continue if parser.dataset == "coco": print("Evaluating dataset") coco_eval.evaluate_coco(dataset_val, retinanet) elif parser.dataset == "csv" and parser.csv_val is not None: print("Evaluating dataset") mAP = csv_eval.evaluate(dataset_val, retinanet) valid_mAP = [x[0] for x in mAP.values() if x[1] > 0] mmAP = sum(valid_mAP) / len(mAP) writer.add_scalars("validation", {"mmAP": mmAP}, global_step) scheduler.step(np.mean(epoch_loss)) torch.save( retinanet.module, "checkpoints/{}_retinanet_{}.pt".format(parser.dataset, epoch_num), ) retinanet.eval() torch.save(retinanet, "checkpoints/model_final.pt")
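A stand-alone sketch of the logging pattern above: one global step derived from the epoch and iteration indexes, with grouped scalars written through `add_scalars`; the loss values here are placeholders for the real tensors:

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("logs")
num_batches = 100                       # len(dataloader_train) in the real script
for epoch_num in range(2):
    for iter_num in range(num_batches):
        global_step = iter_num + epoch_num * num_batches
        loss_cls, loss_reg = 0.5, 0.3   # placeholders for the computed losses
        writer.add_scalars("training",
                           {"loss": loss_cls + loss_reg, "loss_cls": loss_cls, "loss_reg": loss_reg},
                           global_step)
writer.close()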
def main(args=None): parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.') parser.add_argument('--dataset', default='csv', help='Dataset type, must be one of csv or coco.') parser.add_argument('--coco_path', help='Path to COCO directory') parser.add_argument('--csv_train', default='dataset/pascal_train.csv', help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', default='dataset/classes.csv', help='Path to file containing class list (see readme)') parser.add_argument('--csv_val', default='dataset/pascal_val.csv', help='Path to file containing validation annotations (optional, see readme)') parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50) parser.add_argument('--epochs', help='Number of epochs', type=int, default=100) parser.add_argument('--weights_folder', help='path to save weight', type=str, required=True) parser = parser.parse_args(args) if not os.path.exists(parser.weights_folder): os.makedirs(parser.weights_folder) # Create the data loaders if parser.dataset == 'coco': if parser.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') dataset_train = CocoDataset(parser.coco_path, set_name='train2017', transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()])) dataset_val = CocoDataset(parser.coco_path, set_name='val2017', transform=transforms.Compose([Normalizer(), Resizer()])) elif parser.dataset == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on COCO,') if parser.csv_classes is None: raise ValueError('Must provide --csv_classes when training on COCO,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose([Normalizer(), Resizer()])) else: raise ValueError('Dataset type not understood (must be csv or coco), exiting.') sampler = AspectRatioBasedSampler(dataset_train, batch_size=5, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=4, collate_fn=collater, batch_sampler=sampler) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=8, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=4, collate_fn=collater, batch_sampler=sampler_val) # Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 101: retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 152: retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True) else: raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152') use_gpu = True if use_gpu: if torch.cuda.is_available(): retinanet = retinanet.cuda() if torch.cuda.is_available(): retinanet = torch.nn.DataParallel(retinanet).cuda() else: retinanet = torch.nn.DataParallel(retinanet) retinanet.training = True optimizer = 
optim.Adam(retinanet.parameters(), lr=1e-5) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) loss_hist = collections.deque(maxlen=500) retinanet.train() retinanet.module.freeze_bn() print('Num training images: {}'.format(len(dataset_train))) # import ipdb; ipdb.set_trace() for epoch_num in range(parser.epochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] total_loss = 0 total_regression_loss = 0 total_classification_loss = 0 with tqdm(dataloader_train, unit="batch") as tepoch: for data in tepoch: # for iter_num, data in tepoch:#enumerate(dataloader_train): tepoch.set_description(f"Epoch {epoch_num}") try: optimizer.zero_grad() if torch.cuda.is_available(): classification_loss, regression_loss = retinanet([data['img'].cuda().float(), data['annot']]) else: classification_loss, regression_loss = retinanet([data['img'].float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss total_loss = total_loss + loss total_regression_loss = total_regression_loss + regression_loss total_classification_loss = total_classification_loss + classification_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) # print( # 'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'.format( # epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist))) tepoch.set_postfix(cls_loss="{:1.5f}".format(classification_loss), reg_loss="{:1.5f}".format(regression_loss)) time.sleep(0.1) del classification_loss del regression_loss except Exception as e: print(e) continue tb.add_scalar('Training loss', total_loss, epoch_num) tb.add_scalar('Training regression loss', total_regression_loss, epoch_num) tb.add_scalar('Training accuracy loss', total_classification_loss, epoch_num) if parser.dataset == 'coco': print('Evaluating dataset') coco_eval.evaluate_coco(dataset_val, retinanet) elif parser.dataset == 'csv' and parser.csv_val is not None: print('Evaluating dataset') mAP = csv_eval.evaluate(dataset_val, retinanet) scheduler.step(np.mean(epoch_loss)) torch.save(retinanet.module, '{}/{}_retinanet_{}.pt'.format(parser.weights_folder,parser.dataset, epoch_num)) retinanet.eval() torch.save(retinanet, '{}/model_final.pt'.format(parser.weights_folder))
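A minimal sketch of the tqdm progress bar used above, with placeholder losses instead of the real forward pass:

from tqdm import tqdm

dataloader_train = range(50)                  # stand-in for the real DataLoader
for epoch_num in range(2):
    with tqdm(dataloader_train, unit="batch") as tepoch:
        tepoch.set_description("Epoch {}".format(epoch_num))
        for data in tepoch:
            cls_loss, reg_loss = 0.41, 0.23   # placeholders for the computed losses
            tepoch.set_postfix(cls_loss="{:1.5f}".format(cls_loss),
                               reg_loss="{:1.5f}".format(reg_loss))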
def main(args=None): parser = argparse.ArgumentParser( description='Simple training script for training a RetinaNet network.') parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.', default='csv') parser.add_argument('--coco_path', help='Path to COCO directory') parser.add_argument( '--csv_train', help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)') parser.add_argument( '--csv_val', help= 'Path to file containing validation annotations (optional, see readme)' ) parser.add_argument( '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50) parser.add_argument('--epochs', help='Number of epochs', type=int, default=100) parser.add_argument('--batch_size', help='Batch size', type=int, default=2) parser.add_argument('--num_workers', help='Number of workers', type=int, default=4) parser.add_argument('--models_out', help='The directory to save models', type=str) parser = parser.parse_args(args) if not os.path.exists(parser.models_out): os.makedirs(parser.models_out) # Create the data loaders if parser.dataset == 'coco': if parser.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') dataset_train = CocoDataset(parser.coco_path, set_name='train2017', transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) dataset_val = CocoDataset(parser.coco_path, set_name='val2017', transform=transforms.Compose( [Normalizer(), Resizer()])) elif parser.dataset == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on COCO,') if parser.csv_classes is None: raise ValueError( 'Must provide --csv_classes when training on COCO,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Resizer()])) else: raise ValueError( 'Dataset type not understood (must be csv or coco), exiting.') sampler = AspectRatioBasedSampler(dataset_train, batch_size=parser.batch_size, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=parser.num_workers, collate_fn=collater, batch_sampler=sampler) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=parser.num_workers, collate_fn=collater, batch_sampler=sampler_val) # Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 101: retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 152: retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True) else: raise ValueError( 'Unsupported model depth, must be one of 18, 34, 50, 101, 152') use_gpu = True if use_gpu: retinanet = retinanet.cuda() retinanet = torch.nn.DataParallel(retinanet).cuda() retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), 
lr=1e-5) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) loss_hist = collections.deque(maxlen=500) retinanet.train() retinanet.module.freeze_bn() print('Num training images: {}'.format(len(dataset_train))) writer = SummaryWriter(log_dir="tensor_log/" + parser.models_out) global_steps = 0 for epoch_num in range(parser.epochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() classification_loss, regression_loss = retinanet( [data['img'].cuda().float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) running_loss = np.mean(loss_hist) print( 'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}' .format(epoch_num, iter_num, float(classification_loss), float(regression_loss), running_loss)) global_steps += 1 writer.add_scalar("Loss/Classification", float(classification_loss), global_steps) writer.add_scalar("Loss/Regression", float(regression_loss), global_steps) writer.add_scalar("Loss/Running", running_loss, global_steps) del classification_loss del regression_loss except Exception as e: print(e) continue if parser.dataset == 'coco': print('Evaluating dataset') coco_eval.evaluate_coco(dataset_val, retinanet) elif parser.dataset == 'csv' and parser.csv_val is not None: print('Evaluating dataset') mAP = csv_eval.evaluate(dataset_val, retinanet) #for k, v in mAP.items(): # writer.add_scalar("Accuracy/map_{}".format(k), v, epoch_num) scheduler.step(np.mean(epoch_loss)) torch.save( retinanet.module, os.path.join( parser.models_out, '{}_retinanet_{}.pt'.format(parser.dataset, epoch_num))) retinanet.eval() torch.save(retinanet, os.path.join(parser.models_out, 'model_final.pt'))
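The scripts save checkpoints in two forms: `torch.save(retinanet.module, ...)` stores the bare model, while `torch.save(retinanet, ...)` stores the DataParallel wrapper around it. A small sketch of the distinction with a stand-in module and a hypothetical output directory:

import os
import torch
import torch.nn as nn

net = torch.nn.DataParallel(nn.Linear(4, 2))     # stand-in for the wrapped RetinaNet
os.makedirs("models_out", exist_ok=True)         # hypothetical output directory
torch.save(net.module, os.path.join("models_out", "csv_retinanet_0.pt"))  # bare model
torch.save(net, os.path.join("models_out", "model_final.pt"))             # DataParallel-wrapped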
def main(args=None): parser = argparse.ArgumentParser( description='Simple training script for training a RetinaNet network.') parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.', default='csv') parser.add_argument('--coco_path', help='Path to COCO directory') parser.add_argument( '--csv_train', help='Path to file containing training annotations (see readme)', default='data/train_retinanet.csv') parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)', default='data/class_retinanet.csv') parser.add_argument( '--csv_val', help= 'Path to file containing validation annotations (optional, see readme)', default='data/val_retinanet.csv') parser.add_argument('--model_path', default='coco_resnet_50_map_0_335_state_dict.pt', help='Path to file containing pretrained retinanet') parser.add_argument( '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50) parser.add_argument('--epochs_detection', help='Number of epochs for detection', type=int, default=50) parser.add_argument('--epochs_classification', help='Number of epochs for classification', type=int, default=50) parser = parser.parse_args(args) # Create the data loaders if parser.dataset == 'coco': if parser.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') dataset_train = CocoDataset(parser.coco_path, set_name='train2017', transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) dataset_val = CocoDataset(parser.coco_path, set_name='val2017', transform=transforms.Compose( [Normalizer(), Resizer()])) elif parser.dataset == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on COCO,') if parser.csv_classes is None: raise ValueError( 'Must provide --csv_classes when training on COCO,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Resizer()])) else: raise ValueError( 'Dataset type not understood (must be csv or coco), exiting.') sampler = AspectRatioBasedSampler(dataset_train, batch_size=1, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val) # Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 101: retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 152: retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True) else: raise ValueError( 'Unsupported model depth, must be one of 18, 34, 50, 101, 152') use_gpu = True if parser.model_path is not None: print('loading ', parser.model_path) if 'coco' in parser.model_path: 
retinanet.load_state_dict(torch.load(parser.model_path), strict=False) else: retinanet = torch.load(parser.model_path) print('Pretrained model loaded!') if use_gpu: if torch.cuda.is_available(): retinanet = retinanet.cuda() if torch.cuda.is_available(): retinanet = torch.nn.DataParallel(retinanet).cuda() else: retinanet = torch.nn.DataParallel(retinanet) #Here training the detection retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr=1e-5) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=4, verbose=True) loss_hist = collections.deque(maxlen=500) loss_style_classif = nn.CrossEntropyLoss() retinanet.train() retinanet.module.freeze_bn() print('Num training images: {}'.format(len(dataset_train))) mAP_list = [] mAPbest = 0 for epoch_num in range(parser.epochs_detection): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() if torch.cuda.is_available(): [classification_loss, regression_loss], style = retinanet( [data['img'].cuda().float(), data['annot']]) else: [classification_loss, regression_loss ], style = retinanet([data['img'].float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() if torch.cuda.is_available(): style_loss = loss_style_classif( style, torch.tensor(data['style']).cuda()) else: style_loss = loss_style_classif( style, torch.tensor(data['style'])) loss = classification_loss + regression_loss + style_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) print( 'Epoch: {} | Iteration: {} | Classification loss: {:1.4f} | Regression loss: {:1.4f} | Style loss: {:1.4f} | Running loss: {:1.4f}' .format(epoch_num, iter_num, float(classification_loss), float(regression_loss), float(style_loss), np.mean(loss_hist))) del classification_loss del regression_loss del style_loss except Exception as e: print(e) continue if parser.dataset == 'coco': print('Evaluating dataset') coco_eval.evaluate_coco(dataset_val, retinanet) elif parser.dataset == 'csv' and parser.csv_val is not None: print('Evaluating dataset') mAPclasses, mAP, accu = csv_eval.evaluate(dataset_val, retinanet) mAP_list.append(mAP) print('mAP_list', mAP_list) if mAP > mAPbest: print('Saving best checkpoint') torch.save(retinanet, 'model_best.pt') mAPbest = mAP scheduler.step(np.mean(epoch_loss)) torch.save(retinanet.module, '{}_retinanet_{}.pt'.format(parser.dataset, epoch_num)) retinanet.eval() torch.save(retinanet, 'model_final.pt') # Here we aggregate all the data to don't have to appy the Retinanet during training. retinanet.load_state_dict(torch.load('model_best.pt').state_dict()) List_feature = [] List_target = [] retinanet.training = False retinanet.eval() retinanet.module.style_inference = True retinanet.module.freeze_bn() epoch_loss = [] with torch.no_grad(): for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() if torch.cuda.is_available(): _, _, feature_vec = retinanet(data['img'].cuda().float()) else: _, _, feature_vec = retinanet(data['img'].float()) List_feature.append(torch.squeeze(feature_vec).cpu()) List_target.append(data['style'][0]) except Exception as e: print(e) continue print('END of preparation of the data for classification of style') # Here begins Style training. Need to set to style_train. 
They are using the same loader, as it was expected to train both at the same time. batch_size_classification = 64 dataloader_train_style = torch.utils.data.DataLoader( StyleDataset(List_feature, List_target), batch_size=batch_size_classification) retinanet.load_state_dict(torch.load('model_best.pt').state_dict()) # Here training the detection retinanet.module.style_inference = False retinanet.module.style_train(True) retinanet.training = True retinanet.train() optimizer = optim.Adam( retinanet.module.styleClassificationModel.parameters(), lr=5e-3, weight_decay=1e-3) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', patience=4, verbose=True) loss_hist = collections.deque(maxlen=500) loss_style_classif = nn.CrossEntropyLoss() retinanet.train() retinanet.module.freeze_bn() criterion = nn.CrossEntropyLoss() accu_list = [] accubest = 0 for epoch_num in range(parser.epochs_classification): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] total = 0 correct = 0 for iter_num, data in enumerate(dataloader_train_style): try: optimizer.zero_grad() inputs, targets = data if torch.cuda.is_available(): inputs, targets = inputs.cuda(), targets.cuda() outputs = retinanet.module.styleClassificationModel( inputs, 0, 0, 0, True) loss = criterion(outputs, targets) loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) total += targets.size(0) _, predicted = torch.max(outputs.data, 1) correct += predicted.eq(targets.data).cpu().sum() print( '| Epoch [%3d/%3d] Iter[%3d/%3d]\t\tLoss: %.4f Acc@1: %.3f%%' % (epoch_num, parser.epochs_classification, iter_num + 1, (len(dataloader_train_style) // batch_size_classification) + 1, loss.item(), 100. * correct / total)) except Exception as e: print(e) continue if parser.dataset == 'coco': print('Evaluating dataset') coco_eval.evaluate_coco(dataset_val, retinanet) elif parser.dataset == 'csv' and parser.csv_val is not None: print('Evaluating dataset') mAPclasses, mAP, accu = csv_eval.evaluate(dataset_val, retinanet) accu_list.append(accu) print('mAP_list', mAP_list, 'accu_list', accu_list) if accu > accubest: print('Saving best checkpoint') torch.save(retinanet.module, 'model_best_classif.pt') accubest = accu scheduler.step(accu) torch.save(retinanet.module, '{}_retinanet_{}.pt'.format(parser.dataset, epoch_num)) retinanet.eval() torch.save(retinanet.module, 'model_final.pt')
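A simplified, self-contained sketch of the two-stage idea above: backbone features are extracted once with gradients disabled, cached, and a small classifier is then trained on the cached vectors alone. Shapes, the class count, and the linear head are illustrative stand-ins for the repo's StyleDataset and styleClassificationModel:

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

features = torch.randn(256, 512)                  # cached feature vectors (stand-ins)
targets = torch.randint(0, 3, (256,))             # cached style labels (stand-ins)
loader = DataLoader(TensorDataset(features, targets), batch_size=64)

classifier = nn.Linear(512, 3)                    # stand-in for the style classification head
optimizer = torch.optim.Adam(classifier.parameters(), lr=5e-3, weight_decay=1e-3)
criterion = nn.CrossEntropyLoss()

for inputs, labels in loader:
    optimizer.zero_grad()
    loss = criterion(classifier(inputs), labels)
    loss.backward()
    optimizer.step()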
def main(args=None): parser = argparse.ArgumentParser( description='Simple training script for training a RetinaNet network.') parser.add_argument('--dataset', type=str, default='csv', help='Dataset type, must be one of csv or coco.') parser.add_argument('--coco_path', help='Path to COCO directory') parser.add_argument( '--csv_train', type=str, default= r'/usr/idip/idip/liuan/project/pytorch_retinanet/RetinaNet-PFA-SPANet/train.csv', help='Path to file containing training annotations (see readme)') parser.add_argument( '--csv_classes', type=str, default= r'/usr/idip/idip/liuan/project/pytorch_retinanet/RetinaNet-PFA-SPANet/class.csv', help='Path to file containing class list (see readme)') parser.add_argument( '--csv_val', type=str, default= r'/usr/idip/idip/liuan/project/pytorch_retinanet/RetinaNet-PFA-SPANet/val.csv', help= 'Path to file containing validation annotations (optional, see readme)' ) parser.add_argument( '--model_save_path', type=str, default= r'/usr/idip/idip/liuan/project/pytorch_retinanet/RetinaNet-PFA-SPANet/model/resnet101+PFA+CFPN/', help='Path to save model') parser.add_argument( '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=101) parser.add_argument('--epochs', help='Number of epochs', type=int, default=150) parser.add_argument('--iter_num', help='Iter number of saving checkpoint', type=int, default=5) parser = parser.parse_args(args) # Create the data loaders if parser.dataset == 'coco': if parser.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') dataset_train = CocoDataset(parser.coco_path, set_name='train2017', transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) dataset_val = CocoDataset(parser.coco_path, set_name='val2017', transform=transforms.Compose( [Normalizer(), Resizer()])) elif parser.dataset == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on COCO,') if parser.csv_classes is None: raise ValueError( 'Must provide --csv_classes when training on COCO,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Resizer()])) else: raise ValueError( 'Dataset type not understood (must be csv or coco), exiting.') sampler = AspectRatioBasedSampler(dataset_train, batch_size=2, drop_last=False) # 将自定义的Dataset根据batch size大小、是否shuffle等封装成一个Batch Size大小的Tensor,用于后面的训练 dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val) # Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 101: retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 152: retinanet = 
model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True) else: raise ValueError( 'Unsupported model depth, must be one of 18, 34, 50, 101, 152') use_gpu = True if use_gpu: if torch.cuda.is_available(): retinanet = retinanet.cuda() if torch.cuda.is_available(): retinanet = torch.nn.DataParallel(retinanet).cuda() else: retinanet = torch.nn.DataParallel(retinanet) retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr=1e-5) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) loss_hist = collections.deque(maxlen=500) retinanet.train() retinanet.module.freeze_bn() print('Num training images: {}'.format(len(dataset_train))) # add gap save model count variable n = 0 for epoch_num in range(parser.epochs): n += 1 retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] for iter_num, data in enumerate(dataloader_train): # try: optimizer.zero_grad() if torch.cuda.is_available(): classification_loss, regression_loss = retinanet( [data['img'].cuda().float(), data['annot']]) else: classification_loss, regression_loss = retinanet( [data['img'].float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) print( 'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}' .format(epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist))) del classification_loss del regression_loss # except Exception as e: # print(e) # continue if parser.dataset == 'coco': print('Evaluating dataset') coco_eval.evaluate_coco(dataset_val, retinanet) elif parser.dataset == 'csv' and parser.csv_val is not None: print('Evaluating dataset') mAP = csv_eval.evaluate(dataset_val, retinanet) scheduler.step(np.mean(epoch_loss)) if n % parser.iter_num == 0: torch.save( retinanet.module, parser.model_save_path + '/' + '{}_retinanet_{}.pt'.format(parser.dataset, epoch_num)) retinanet.eval() torch.save(retinanet, parser.model_save_path + '/' + 'model_final.pt')
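Every variant calls `retinanet.module.freeze_bn()` right after switching to train mode. A generic sketch of what such a helper commonly does (an assumption about the repo's implementation, not a copy of it): keep BatchNorm layers in eval mode so their running statistics are not perturbed by the small detection batches.

import torch.nn as nn

def freeze_bn(module):
    # Put all BatchNorm2d layers into eval mode so running mean/var stay fixed.
    for m in module.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.eval()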
def main(args=None): parser = argparse.ArgumentParser( description='Simple training script for training a RetinaNet network.') parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.') # parser.add_argument('--coco_path', help='Path to COCO directory') parser.add_argument('--HW2_path', help='Path to HW2 directory') parser.add_argument( '--csv_train', help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)') parser.add_argument( '--csv_val', help= 'Path to file containing validation annotations (optional, see readme)' ) parser.add_argument( '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50) parser.add_argument('--epochs', help='Number of epochs', type=int, default=100) parser = parser.parse_args(args) # Create the data loaders if parser.dataset == 'coco': if parser.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') dataset_train = CocoDataset(parser.coco_path, set_name='train2017', transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) dataset_val = CocoDataset(parser.coco_path, set_name='val2017', transform=transforms.Compose( [Normalizer(), Resizer()])) elif parser.dataset == 'HW2': if parser.HW2_path is None: raise ValueError('Must provide --HW2_path when training on HW2,') dataset_train = HW2Dataset(parser.HW2_path, transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) #dataset_val = HW2Dataset(parser.HW2_path, # transform=transforms.Compose([Normalizer(), Resizer()])) elif parser.dataset == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on COCO,') if parser.csv_classes is None: raise ValueError( 'Must provide --csv_classes when training on COCO,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Resizer()])) else: raise ValueError( 'Dataset type not understood (must be csv or coco), exiting.') # sampler = AspectRatioBasedSampler(dataset_train, batch_size=2, drop_last=False) dataloader_train = DataLoader(dataset_train, batch_size=8, num_workers=3, collate_fn=collater) # if dataset_val is not None: # sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) # dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val) # Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True) #retinanet.load_state_dict(torch.load('coco_resnet_50_map_0_335_state_dict.pt')) #retinanet_state = retinanet.state_dict() #loaded = torch.load('coco_resnet_50_map_0_335_state_dict.pt') #pretrained = {k:v for k, v in loaded.items() if k in retinanet_state} #retinanet_state.update(pretrained) #retinanet.load_state_dict(retinanet_state) retinanet = torch.load('saved_models_3/HW2_retinanet_0.pt') elif parser.depth == 101: retinanet = 
model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 152: retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True) else: raise ValueError( 'Unsupported model depth, must be one of 18, 34, 50, 101, 152') use_gpu = True if use_gpu: if torch.cuda.is_available(): retinanet = retinanet.cuda() if torch.cuda.is_available(): retinanet = torch.nn.DataParallel(retinanet).cuda() else: retinanet = torch.nn.DataParallel(retinanet) retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr=1e-4) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) loss_hist = collections.deque(maxlen=500) retinanet.train() retinanet.module.freeze_bn() print('Num training images: {}'.format(len(dataset_train))) for epoch_num in range(pre_epoch, parser.epochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() if torch.cuda.is_available(): classification_loss, regression_loss = retinanet( [data['img'].cuda().float(), data['annot']]) else: classification_loss, regression_loss = retinanet( [data['img'].float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) print( 'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}' .format(epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist))) del classification_loss del regression_loss except Exception as e: print(e) continue # if parser.dataset == 'coco': # print('Evaluating dataset') # coco_eval.evaluate_coco(dataset_val, retinanet) # elif parser.dataset == 'csv' and parser.csv_val is not None: # print('Evaluating dataset') # mAP = csv_eval.evaluate(dataset_val, retinanet) scheduler.step(np.mean(epoch_loss)) torch.save( retinanet.module, 'saved_models_3/{}_retinanet_{}.pt'.format(parser.dataset, epoch_num)) # retinanet.eval() torch.save(retinanet, 'saved_models_3/model_final.pt')
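The loop above runs from `pre_epoch`, which this snippet never defines, and it restores a full model object rather than a state dict. A minimal sketch of one way that resume could be wired up; the variable values are assumptions, not part of the original:

import torch

checkpoint_path = 'saved_models_3/HW2_retinanet_0.pt'  # full module saved earlier via torch.save(retinanet.module, ...)
retinanet = torch.load(checkpoint_path)                # restores the whole module, weights included
pre_epoch = 1                                          # assumed first epoch index to run after the restored checkpoint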
def main(args=None): parser = argparse.ArgumentParser( description='Simple training script for training a RetinaNet network.') parser.add_argument("--load_model_path", type=str, default=None, help="Path to model (.pt) file.") parser.add_argument('--dataset_type', help='Dataset type, must be one of csv or coco.') parser.add_argument('--coco_path', help='Path to COCO directory') parser.add_argument( '--csv_train', help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)') parser.add_argument( '--csv_val', help= 'Path to file containing validation annotations (optional, see readme)' ) parser.add_argument('--backbone', help='Backbone choice: [ResNet, ResNeXt]', type=str, default='ResNet') parser.add_argument( '--depth', help='ResNet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50) parser.add_argument('--epochs', help='Number of epochs', type=int, default=100) parser.add_argument("--batch_size", type=int, default=2, help="size of the batches") parser.add_argument("--lr", type=float, default=1e-5, help="adam: learning rate") parser = parser.parse_args(args) results_dir = "results" save_images_dir = os.path.join(results_dir, "images") save_models_dir = os.path.join(results_dir, "saved_models") os.makedirs(results_dir, exist_ok=True) os.makedirs(save_images_dir, exist_ok=True) os.makedirs(save_models_dir, exist_ok=True) # Get today datetime today = datetime.date.today() today = "%d%02d%02d" % (today.year, today.month, today.day) # Get current timme now = time.strftime("%H%M%S") # Backbone name backbone_name = parser.backbone + str(parser.depth) # DataSet name dataset_path = '' # Create the data loaders if parser.dataset_type == 'coco': if parser.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') # dataset_train = CocoDataset(parser.coco_path, set_name='train2017', # transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()])) # dataset_val = CocoDataset(parser.coco_path, set_name='val2017', # transform=transforms.Compose([Normalizer(), Resizer()])) dataset_train = CocoDataset( parser.coco_path, set_name='train', # transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()])) transform=transforms.Compose( [Normalizer(), AugmenterWithImgaug(), Resizer()])) dataset_val = CocoDataset(parser.coco_path, set_name='val', transform=transforms.Compose( [Normalizer(), Resizer()])) dataset_path = parser.coco_path elif parser.dataset_type == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on COCO,') if parser.csv_classes is None: raise ValueError( 'Must provide --csv_classes when training on COCO,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Resizer()])) dataset_path = parser.csv_train else: raise ValueError( 'Dataset type not understood (must be csv or coco), exiting.') sampler = AspectRatioBasedSampler(dataset_train, batch_size=parser.batch_size, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, 
drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val) # Retrain the model if parser.load_model_path is not None: # Load pretrained models print("\nLoading model from: [%s]" % parser.load_model_path) retinanet = torch.load(parser.load_model_path) print("\nStart retrain...") # Create the model else: print("\nStart train...") if parser.backbone == 'ResNet': if parser.depth == 18: retinanet = model.resnet18( num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 34: retinanet = model.resnet34( num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 50: retinanet = model.resnet50( num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 101: retinanet = model.resnet101( num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 152: retinanet = model.resnet152( num_classes=dataset_train.num_classes(), pretrained=True) else: raise ValueError( 'Unsupported model depth, must be one of 18, 34, 50, 101, 152' ) elif parser.backbone == 'ResNeXt': if parser.depth == 50: retinanet = model.resnext50_32x4d( num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 101: retinanet = model.resnext101_32x8d( num_classes=dataset_train.num_classes(), pretrained=True) pass else: raise ValueError( "Unsupported model depth, must be one of 50, 101") else: raise ValueError("Choice a backbone, [ResNet, ResNeXt]") # Get dataset name dataset_name = os.path.split(dataset_path)[-1] # Checkpoint name save_ckpt_name = r"%s_%s-%s-RetinaNet-backbone(%s)-ep(%d)-bs(%d)-lr(%s)" \ % (today, now, dataset_name, backbone_name, parser.epochs, parser.batch_size, parser.lr) os.makedirs(os.path.join(save_images_dir, "%s" % save_ckpt_name), exist_ok=True) os.makedirs(os.path.join(save_models_dir, "%s" % save_ckpt_name), exist_ok=True) tb_log_path = os.path.join("tf_log", save_ckpt_name) tb_writer = SummaryWriter(os.path.join(results_dir, tb_log_path)) use_gpu = True if use_gpu: retinanet = retinanet.cuda() retinanet = torch.nn.DataParallel(retinanet).cuda() retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr=parser.lr) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) loss_hist = collections.deque(maxlen=500) val_loss_hist = collections.deque(maxlen=500) retinanet.train() retinanet.module.freeze_bn() print('Num training images: {}'.format(len(dataset_train))) epoch_prev_time = time.time() for epoch_num in range(parser.epochs): retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] total_classification_loss = 0.0 total_regression_loss = 0.0 total_running_loss = 0.0 total_val_classification_loss = 0.0 total_val_regression_loss = 0.0 total_val_running_loss = 0.0 batch_prev_time = time.time() for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() classification_loss, regression_loss = retinanet( [data['img'].cuda().float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) # sum the loss for tensorboard at this batch total_regression_loss += regression_loss total_classification_loss += classification_loss total_running_loss += loss.item() # log = 'Epoch: {} | Iteration: {} | Classification loss: 
{:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'.format( # epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist)) # Determine approximate time left data_done = iter_num data_left = len(dataloader_train) - data_done batch_time_left = datetime.timedelta( seconds=data_left * (time.time() - batch_prev_time)) batch_time_left = chop_microseconds(batch_time_left) batches_done = epoch_num * len(dataloader_train) + iter_num batches_left = parser.epochs * len( dataloader_train) - batches_done total_time_left = datetime.timedelta( seconds=batches_left * (time.time() - epoch_prev_time)) total_time_left = chop_microseconds(total_time_left) batch_prev_time = time.time() epoch_prev_time = time.time() # Print training step log prefix_log = '[Epoch: {}/{}] | [Batch: {}/{}]'.format( epoch_num + 1, parser.epochs, iter_num + 1, len(dataloader_train)) suffix_log = '[Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}] ETA: {} / {}'.format( float(classification_loss), float(regression_loss), np.mean(loss_hist), batch_time_left, total_time_left) printProgressBar(iteration=iter_num + 1, total=len(dataloader_train), prefix=prefix_log, suffix=suffix_log) del classification_loss del regression_loss except Exception as e: print(e) continue # Validation with torch.no_grad(): val_batch_prev_time = time.time() for iter_num, data in enumerate(dataloader_val): try: val_classification_loss, val_regression_loss = retinanet( [data['img'].cuda().float(), data['annot']]) val_classification_loss = val_classification_loss.mean() val_regression_loss = val_regression_loss.mean() val_loss = val_classification_loss + val_regression_loss if bool(val_loss == 0): continue val_loss_hist.append(float(val_loss)) # sum the loss for tensorboard at this batch total_val_regression_loss += val_regression_loss total_val_classification_loss += val_classification_loss total_val_running_loss += val_loss.item() # Determine approximate time left data_done = iter_num data_left = len(dataloader_val) - data_done val_batch_time_left = datetime.timedelta( seconds=data_left * (time.time() - val_batch_prev_time)) val_batch_time_left = chop_microseconds( val_batch_time_left) batches_done = epoch_num * len(dataloader_val) + ( epoch_num + 1) * len(dataloader_train) + iter_num batches_left = parser.epochs * (len( dataloader_train) + len(dataloader_val)) - batches_done total_time_left = datetime.timedelta( seconds=batches_left * (time.time() - epoch_prev_time)) total_time_left = chop_microseconds(total_time_left) val_batch_prev_time = time.time() epoch_prev_time = time.time() # Print training step log prefix_log = 'Validation: [Epoch: {}/{}] | [Batch: {}/{}]'.format( epoch_num + 1, parser.epochs, iter_num + 1, len(dataloader_val)) suffix_log = '[Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}] ETA: {} / {}'.format( float(val_classification_loss), float(val_regression_loss), np.mean(val_loss_hist), val_batch_time_left, total_time_left) printProgressBar(iteration=iter_num + 1, total=len(dataloader_val), prefix=prefix_log, suffix=suffix_log) del val_classification_loss del val_regression_loss except Exception as e: print(e) continue # Evaluate AP if parser.dataset_type == 'coco': print('Evaluating dataset') # coco_eval.evaluate_coco(dataset_val, retinanet) coco_eval.evaluate_coco_and_save_image( dataset_val, retinanet, os.path.join(save_images_dir, save_ckpt_name), epoch_num + 1) elif parser.dataset_type == 'csv' and parser.csv_val is not None: 
print('Evaluating dataset') mAP = csv_eval.evaluate(dataset_val, retinanet) scheduler.step(np.mean(epoch_loss)) # calculate loss average average_classification_loss = total_classification_loss / len( dataloader_train) average_regression_loss = total_regression_loss / len(dataloader_train) average_running_loss = total_running_loss / len(dataloader_train) # TensorBoard tb_writer.add_scalar(tag='Classification Loss', scalar_value=average_classification_loss, global_step=epoch_num + 1) tb_writer.add_scalar(tag='Regression Loss', scalar_value=average_regression_loss, global_step=epoch_num + 1) tb_writer.add_scalar(tag='Total Loss', scalar_value=average_running_loss, global_step=epoch_num + 1) # Save model print("\nSave model to [%s] at %d epoch\n" % (save_ckpt_name, epoch_num + 1)) checkpoint_path = os.path.join( save_models_dir, "%s/RetinaNet_backbone(%s)_%d.pt" % (save_ckpt_name, backbone_name, epoch_num + 1)) torch.save(retinanet.module, checkpoint_path) # torch.save(retinanet.module, '{}_retinanet_{}.pt'.format(parser.dataset_type, epoch_num + 1)) retinanet.eval() torch.save(retinanet, 'model_final.pt')
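A self-contained sketch of the ETA arithmetic used in the progress logs above: time one step, scale by the number of batches left, and strip microseconds for display. The original uses a `chop_microseconds` helper; an inline equivalent is shown instead:

import datetime
import time

num_batches, iter_num = 500, 120
batch_prev_time = time.time()
time.sleep(0.01)                                                # stands in for one training step
step_seconds = time.time() - batch_prev_time
eta = datetime.timedelta(seconds=(num_batches - iter_num) * step_seconds)
eta = eta - datetime.timedelta(microseconds=eta.microseconds)   # same effect as chop_microseconds
print("ETA:", eta)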
def main(args=None):
    parser = argparse.ArgumentParser(
        description='Simple training script for training a RetinaNet network.')
    parser.add_argument('--dataset',
                        help='Dataset type, must be one of csv or coco.')
    parser.add_argument('--coco_path', help='Path to COCO directory')
    parser.add_argument('--csv_train',
                        help='Path to file containing training annotations (see readme)')
    parser.add_argument('--csv_classes',
                        help='Path to file containing class list (see readme)')
    parser.add_argument('--csv_val',
                        help='Path to file containing validation annotations (optional, see readme)')
    parser.add_argument('--depth',
                        help='Resnet depth, must be one of 18, 34, 50, 101, 152',
                        type=int,
                        default=50)
    parser.add_argument('--epochs', help='Number of epochs', type=int, default=100)
    # Boolean options are plain flags: argparse's type=bool does not parse "False" correctly.
    parser.add_argument('--finetune',
                        help='Load a previously trained RetinaNet model for fine-tuning',
                        action='store_true')
    parser.add_argument('--gpu', help='Train on GPU if available', action='store_true')
    parser.add_argument('--batch_size', help='Batch size', type=int, default=2)
    parser.add_argument('--c',
                        help='Continue training from a former checkpoint (see --model)',
                        action='store_true')
    parser.add_argument('--model', help='Path to the checkpoint to resume from')
    parser = parser.parse_args(args)

    # Create the data loaders
    if parser.dataset == 'coco':
        if parser.coco_path is None:
            raise ValueError('Must provide --coco_path when training on COCO.')
        dataset_train = CocoDataset(parser.coco_path,
                                    set_name='train2017',
                                    transform=transforms.Compose(
                                        [Normalizer(), Augmenter(), Resizer()]))
        dataset_val = CocoDataset(parser.coco_path,
                                  set_name='val2017',
                                  transform=transforms.Compose(
                                      [Normalizer(), Resizer()]))
    elif parser.dataset == 'csv':
        if parser.csv_train is None:
            raise ValueError('Must provide --csv_train when training on a CSV dataset.')
        if parser.csv_classes is None:
            raise ValueError('Must provide --csv_classes when training on a CSV dataset.')
        dataset_train = CSVDataset(train_file=parser.csv_train,
                                   class_list=parser.csv_classes,
                                   transform=transforms.Compose(
                                       [Normalizer(), Augmenter(), Resizer()]))
        if parser.csv_val is None:
            dataset_val = None
            print('No validation annotations provided.')
        else:
            dataset_val = CSVDataset(train_file=parser.csv_val,
                                     class_list=parser.csv_classes,
                                     transform=transforms.Compose(
                                         [Normalizer(), Resizer()]))
    else:
        raise ValueError('Dataset type not understood (must be csv or coco), exiting.')

    sampler = AspectRatioBasedSampler(dataset_train,
                                      batch_size=parser.batch_size,
                                      drop_last=False)
    dataloader_train = DataLoader(dataset_train,
                                  num_workers=16,
                                  collate_fn=collater,
                                  batch_sampler=sampler)

    if dataset_val is not None:
        sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)
        dataloader_val = DataLoader(dataset_val,
                                    num_workers=8,
                                    collate_fn=collater,
                                    batch_sampler=sampler_val)

    epochpassed = 0

    # Create the model
    if parser.depth == 18:
        retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 34:
        retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    if parser.c:
        # Resume from a saved checkpoint; the number of epochs already trained is
        # parsed from the file name, e.g. ./models/csv_retinanet50_highResolution4fold_12.pt -> 12
        retinanet = torch.load(parser.model)
        epochpassed = int(parser.model.split('.')[1].split('_')[-1])

    use_gpu = parser.gpu

    if use_gpu and torch.cuda.is_available():
        retinanet = retinanet.cuda()
        retinanet = torch.nn.DataParallel(retinanet).cuda()
    else:
        retinanet = torch.nn.DataParallel(retinanet)

    retinanet.training = True

    optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)  # original learning rate: 1e-5
    # optimizer = optim.SGD(retinanet.parameters(), lr=0.01, weight_decay=0.0001, momentum=0.9)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)

    loss_hist = collections.deque(maxlen=500)

    retinanet.train()
    retinanet.module.freeze_bn()

    print('Num training images: {}'.format(len(dataset_train)))

    writer = SummaryWriter()

    for epoch_num in range(parser.epochs):
        retinanet.train()
        retinanet.module.freeze_bn()

        epoch_loss = []
        epoch_classification_loss = []
        epoch_regression_loss = []

        for iter_num, data in enumerate(dataloader_train):
            try:
                optimizer.zero_grad()

                if use_gpu and torch.cuda.is_available():
                    classification_loss, regression_loss = retinanet(
                        [data['img'].cuda().float(), data['annot'].cuda()])
                else:
                    classification_loss, regression_loss = retinanet(
                        [data['img'].float(), data['annot']])

                classification_loss = classification_loss.mean()
                regression_loss = regression_loss.mean()
                loss = classification_loss + regression_loss

                if bool(loss == 0):
                    continue

                loss.backward()
                torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
                optimizer.step()

                loss_hist.append(float(loss))
                epoch_loss.append(float(loss))
                epoch_classification_loss.append(float(classification_loss))
                epoch_regression_loss.append(float(regression_loss))

                print(
                    'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Epoch loss: {:1.5f}\r'
                    .format(epoch_num + epochpassed, iter_num,
                            float(classification_loss), float(regression_loss),
                            np.mean(loss_hist)),
                    end='')

                del classification_loss
                del regression_loss
            except Exception as e:
                print(e)
                continue

        print(
            'Epoch: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Epoch loss: {:1.5f}'
            .format(epoch_num + epochpassed, np.mean(epoch_classification_loss),
                    np.mean(epoch_regression_loss), np.mean(epoch_loss)))

        writer.add_scalar('lossrecord/regressionloss',
                          np.mean(epoch_regression_loss), epoch_num + epochpassed)
        writer.add_scalar('lossrecord/classificationloss',
                          np.mean(epoch_classification_loss), epoch_num + epochpassed)
        writer.add_scalar('lossrecord/epochloss', np.mean(epoch_loss),
                          epoch_num + epochpassed)

        if parser.dataset == 'coco':
            print('Evaluating dataset')
            coco_eval.evaluate_coco(dataset_val, retinanet)
        elif parser.dataset == 'csv' and parser.csv_val is not None:
            print('Evaluating dataset')
            mAP = csv_eval.evaluate(dataset_val, retinanet)

        scheduler.step(np.mean(epoch_loss))

        if epoch_num % 10 == 0:
            torch.save(
                retinanet.module,
                './models/{}_retinanet{}_highResolution4fold_{}.pt'.format(
                    parser.dataset, parser.depth, epoch_num + epochpassed))

    torch.save(
        retinanet.module,
        './models/{}_retinanet{}_highResolution4fold_{}.pt'.format(
            parser.dataset, parser.depth, parser.epochs + epochpassed))

    writer.close()
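# Because main() forwards its args list straight to parser.parse_args(args), the script
# can be driven programmatically as well as from the shell. The sketch below shows one
# possible invocation on a CSV dataset; the annotation/class file paths and checkpoint
# name are placeholders, and the __main__ guard is illustrative only (the original
# script may already define its own entry point).

if __name__ == '__main__':
    # Fresh training run on a CSV dataset with a ResNet-50 backbone.
    main([
        '--dataset', 'csv',
        '--csv_train', 'annotations/train_annots.csv',
        '--csv_classes', 'annotations/class_list.csv',
        '--csv_val', 'annotations/val_annots.csv',
        '--depth', '50',
        '--epochs', '100',
        '--batch_size', '2',
        '--gpu',
    ])

    # Resuming from an earlier checkpoint instead would look like:
    # main(['--dataset', 'csv', '--csv_train', 'annotations/train_annots.csv',
    #       '--csv_classes', 'annotations/class_list.csv', '--gpu',
    #       '--c', '--model', './models/csv_retinanet50_highResolution4fold_20.pt'])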
def main(args=None):
    parser = argparse.ArgumentParser(
        description='Simple training script for training a RetinaNet network.')
    parser.add_argument('--dataset',
                        help='Dataset type, must be one of csv or coco.')
    parser.add_argument('--coco_path', help='Path to COCO directory')
    parser.add_argument('--csv_train',
                        help='Path to file containing training annotations (see readme)')
    parser.add_argument('--csv_classes',
                        help='Path to file containing class list (see readme)')
    parser.add_argument('--csv_val',
                        help='Path to file containing validation annotations (optional, see readme)')
    parser.add_argument('--depth',
                        help='Resnet depth, must be one of 18, 34, 50, 101, 152',
                        type=int,
                        default=50)
    parser.add_argument('--epochs', help='Number of epochs', type=int, default=25)
    parser = parser.parse_args(args)

    # Create the data loaders
    if parser.dataset == 'coco':
        if parser.coco_path is None:
            raise ValueError('Must provide --coco_path when training on COCO.')
        dataset_train = CocoDataset(parser.coco_path,
                                    set_name='train2017',
                                    transform=transforms.Compose(
                                        [Normalizer(), Augmenter(), Resizer()]))
        dataset_val = CocoDataset(parser.coco_path,
                                  set_name='val2017',
                                  transform=transforms.Compose(
                                      [Normalizer(), Resizer()]))
    elif parser.dataset == 'csv':
        if parser.csv_train is None:
            raise ValueError('Must provide --csv_train when training on a CSV dataset.')
        if parser.csv_classes is None:
            raise ValueError('Must provide --csv_classes when training on a CSV dataset.')
        dataset_train = CSVDataset(train_file=parser.csv_train,
                                   class_list=parser.csv_classes,
                                   transform=transforms.Compose(
                                       [Normalizer(), Augmenter(), Resizer()]))
        if parser.csv_val is None:
            dataset_val = None
            print('No validation annotations provided.')
        else:
            dataset_val = CSVDataset(train_file=parser.csv_val,
                                     class_list=parser.csv_classes,
                                     transform=transforms.Compose(
                                         [Normalizer(), Resizer()]))
    else:
        raise ValueError('Dataset type not understood (must be csv or coco), exiting.')

    # Create sequential samplers for both training and validation; multiple CPU worker
    # processes are used to accelerate data loading.
    sampler_train1 = torch.utils.data.SequentialSampler(dataset_train)
    sampler_train2 = torch.utils.data.BatchSampler(sampler_train1,
                                                   batch_size=1,
                                                   drop_last=True)
    dataloader_train = DataLoader(dataset_train,
                                  num_workers=10,
                                  collate_fn=collater,
                                  batch_sampler=sampler_train2)

    sampler_val1 = torch.utils.data.SequentialSampler(dataset_val)
    sampler_val2 = torch.utils.data.BatchSampler(sampler_val1,
                                                 batch_size=1,
                                                 drop_last=True)
    dataloader_val = DataLoader(dataset_val,
                                num_workers=10,
                                collate_fn=collater,
                                batch_sampler=sampler_val2)

    # Create the model
    if parser.depth == 18:
        retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 34:
        retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    use_gpu = True

    if use_gpu and torch.cuda.is_available():
        retinanet = retinanet.cuda()

    if torch.cuda.is_available():
        retinanet = torch.nn.DataParallel(retinanet).cuda()
    else:
        retinanet = torch.nn.DataParallel(retinanet)

    retinanet.training = True

    # Adam optimizer with ReduceLROnPlateau scheduling on the mean epoch loss
    optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)

    loss_hist = collections.deque(maxlen=500)

    retinanet.train()
    retinanet.module.freeze_bn()

    print('Num training images: {}'.format(len(dataset_train)))

    # Use tensorboardX to track the training process
    writer = SummaryWriter('log')

    iter_sum = 0
    time_sum = 0
    frame_num = 8

    for epoch_num in range(parser.epochs):
        # Buffers holding the most recent `frame_num` frames and their annotations
        # (original note: only works for frame_num > 8).
        frame_list = collections.deque(maxlen=frame_num)
        anno_list = collections.deque(maxlen=frame_num)

        retinanet.train()
        retinanet.module.freeze_bn()

        epoch_loss = []

        for index, data in enumerate(dataloader_train):
            try:
                frame_list.append(data['img'])
                anno_list.append(data['annot'])

                # Skip iterations until enough frames have been buffered (threshold tuned
                # for a 32-frame group in the original code), and skip a hard-coded range
                # of indices around 697.
                if index < 31:
                    continue
                if 697 <= index <= 697 + 32:
                    continue

                # real_frame is the frame used for fish detection:
                # the most recent frame in the buffered group.
                real_frame = frame_list[-1]
                # The annotation that belongs to real_frame
                annot = anno_list[-1]

                # Stack the buffered frames into one tensor along the batch dimension
                data['img'] = torch.cat(list(frame_list), dim=0)

                optimizer.zero_grad()

                classification_loss, regression_loss = retinanet([
                    data['img'].cuda().float(),
                    real_frame.cuda().float(),
                    annot.cuda().float()
                ])

                classification_loss = classification_loss.mean()
                regression_loss = regression_loss.mean()
                loss = classification_loss + regression_loss

                if bool(loss == 0):
                    continue

                loss.backward()
                torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
                optimizer.step()

                loss_hist.append(float(loss))
                epoch_loss.append(float(loss))

                writer.add_scalar('loss_hist', np.mean(loss_hist), iter_sum)
                writer.add_scalar('classification_loss', float(classification_loss), iter_sum)
                writer.add_scalar('regression_loss', float(regression_loss), iter_sum)
                writer.add_scalar('loss', float(loss), iter_sum)

                print(
                    'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'
                    .format(epoch_num, index, float(classification_loss),
                            float(regression_loss), np.mean(loss_hist)))

                del classification_loss
                del regression_loss

                iter_sum = iter_sum + 1
            except Exception as e:
                print(e)
                continue

        if parser.dataset == 'coco':
            print('Evaluating dataset')
            coco_eval.evaluate_coco(dataset_val, dataloader_val, retinanet, frame_num)
        elif parser.dataset == 'csv' and parser.csv_val is not None:
            print('Evaluating dataset')
            mAP = csv_eval.evaluate(dataset_val, retinanet)

        scheduler.step(np.mean(epoch_loss))

        torch.save(retinanet.module,
                   'checkpoint/{}_retinanet_{}.pt'.format(parser.dataset, epoch_num))

    retinanet.eval()
    torch.save(retinanet, 'save/model_final.pt')

    writer.close()
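# The detail most likely to trip readers up in the loop above is the frame grouping:
# each iteration appends one frame to a bounded deque, and once the buffer is warmed up
# the network is fed the concatenation of the buffered frames plus the newest frame
# (the only one whose annotations are used). The self-contained sketch below reproduces
# just that buffering logic with dummy tensors; the shapes and frame_num value are
# illustrative, not taken from this repository's datasets.

import collections
import torch


def demo_frame_grouping(frame_num=8, num_steps=12, c=3, h=64, w=64):
    frame_list = collections.deque(maxlen=frame_num)
    for index in range(num_steps):
        frame = torch.randn(1, c, h, w)          # stand-in for data['img']
        frame_list.append(frame)
        if len(frame_list) < frame_num:
            continue                              # buffer not warmed up yet
        grouped = torch.cat(list(frame_list), dim=0)   # shape: (frame_num, c, h, w)
        real_frame = frame_list[-1]                     # detection target = newest frame
        print(index, grouped.shape, real_frame.shape)


demo_frame_grouping()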