def model_init(model_name):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if model_name == 'retinanet':
        # weight_file_path = '/content/retinanet/resnet34-333f7ec4.pth'
        # weight_file_path = '/content/retinanet/CP_epoch5.pth'
        weight_file_path = '/content/retinanet/retinanet50_pretrained.pth'

        # Load the checkpoint once and infer the backbone depth from its key count.
        # num_classes is expected to be defined at module scope.
        state_dict = torch.load(weight_file_path, map_location=device)
        total_keys = len(list(state_dict.keys()))

        # Create the model
        if 102 <= total_keys < 182:
            retinanet = model.resnet18(num_classes=num_classes, pretrained=False)
        elif 182 <= total_keys < 267:
            retinanet = model.resnet34(num_classes=num_classes, pretrained=False)
        elif 267 <= total_keys < 522:
            retinanet = model.resnet50(num_classes=num_classes, pretrained=False)
        elif 522 <= total_keys < 777:
            retinanet = model.resnet101(num_classes=num_classes, pretrained=False)
        elif total_keys >= 777:
            retinanet = model.resnet152(num_classes=num_classes, pretrained=False)
        else:
            raise ValueError('Unsupported model backbone, must be one of resnet18, '
                             'resnet34, resnet50, resnet101, resnet152')

        # Initialising model with loaded weights
        retinanet.load_state_dict(state_dict, strict=False)
        print('model initialized..')
        return retinanet, device
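# A minimal usage sketch for model_init above, assuming the surrounding module defines
# `model` (the RetinaNet builders) and a module-level `num_classes`, as the function
# itself does; illustrative only:
#
#   retinanet, device = model_init('retinanet')
#   retinanet = retinanet.to(device)
#   retinanet.eval()  # switch to inference mode before running detections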
def detect_single_image(checkpoint, image_path, visualize=False):
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    configs = combine_values(checkpoint['model_specs']['training_configs'], checkpoint['hp_values'])
    labels = checkpoint['labels']
    num_classes = len(labels)
    retinanet = model.resnet152(num_classes=num_classes, scales=configs['anchor_scales'],
                                ratios=configs['anchor_ratios'])  # TODO: make depth an input parameter
    retinanet.load_state_dict(checkpoint['model'])
    retinanet = retinanet.to(device=device)
    retinanet.eval()

    img = skimage.io.imread(image_path)
    if len(img.shape) == 2:
        img = skimage.color.gray2rgb(img)
    img = img.astype(np.float32) / 255.0

    transform = transforms.Compose([Normalizer(), Resizer(min_side=608)])  # TODO: make this dynamic
    data = transform({'img': img, 'annot': np.zeros((0, 5))})
    img = data['img']
    img = img.unsqueeze(0)
    img = img.permute(0, 3, 1, 2)

    with torch.no_grad():
        scores, classification, transformed_anchors = retinanet(img.to(device=device).float())
        idxs = np.where(scores.cpu() > 0.5)[0]
        scale = data['scale']
        detections_list = []
        for j in range(idxs.shape[0]):
            bbox = transformed_anchors[idxs[j], :]
            label_idx = int(classification[idxs[j]])
            label_name = labels[label_idx]
            score = scores[idxs[j]].item()
            # un-resize for eval against ground truth
            bbox /= scale
            bbox = bbox.round()  # Tensor.round() is not in-place, so keep the result
            x1 = int(bbox[0])
            y1 = int(bbox[1])
            x2 = int(bbox[2])
            y2 = int(bbox[3])
            detections_list.append([label_name, str(score), str(x1), str(y1), str(x2), str(y2)])

    img_name = image_path.split('/')[-1].split('.')[0]
    filename = img_name + '.txt'
    path = os.path.dirname(image_path)
    filepathname = os.path.join(path, filename)
    with open(filepathname, 'w', encoding='utf8') as f:
        for single_det_list in detections_list:
            for i, x in enumerate(single_det_list):
                f.write(str(x))
                f.write(' ')
            f.write('\n')

    if visualize:
        unnormalize = UnNormalizer()
    return filepathname
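# A hedged usage sketch for detect_single_image: the checkpoint layout ('model', 'labels',
# 'model_specs', 'hp_values') is taken from the function above; the file paths are illustrative:
#
#   checkpoint = torch.load('checkpoint.pt', map_location='cpu')
#   detections_file = detect_single_image(checkpoint, '/data/images/sample.jpg', visualize=False)
#   # detections_file now points to a .txt with one "label score x1 y1 x2 y2" row per detection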
def load_model(self):
    self.checkpoint = torch.load(self.model_checkpoint_file_path,
                                 map_location=lambda storage, loc: storage)
    self.model_args = self.checkpoint['args']
    self.num_classes = None
    if self.model_args.model_type == 'food179':
        self.num_classes = 179
    elif self.model_args.model_type == 'nsfw':
        self.num_classes = 5
    else:
        raise NotImplementedError('Unknown model_type: {}'.format(self.model_args.model_type))

    if self.model_args.model_arc == 'resnet18':
        self.model = model.resnet18(num_classes=self.num_classes, zero_init_residual=True)
    elif self.model_args.model_arc == 'resnet34':
        self.model = model.resnet34(num_classes=self.num_classes, zero_init_residual=True)
    elif self.model_args.model_arc == 'resnet50':
        self.model = model.resnet50(num_classes=self.num_classes, zero_init_residual=True)
    elif self.model_args.model_arc == 'resnet101':
        self.model = model.resnet101(num_classes=self.num_classes, zero_init_residual=True)
    elif self.model_args.model_arc == 'resnet152':
        self.model = model.resnet152(num_classes=self.num_classes, zero_init_residual=True)
    elif self.model_args.model_arc == 'mobilenet':
        self.model = model.MobileNetV2(n_class=self.num_classes, input_size=256)
    else:
        raise NotImplementedError('Unknown model_arc: {}'.format(self.model_args.model_arc))

    self.model = nn.DataParallel(self.model)
    self.model.load_state_dict(self.checkpoint['model_state_dict'])
    self.model_epoch = self.checkpoint['epoch']
    self.model_test_acc = self.checkpoint['test_acc']
    self.model_best_acc = self.checkpoint['best_acc']
    self.model_test_acc_top5 = self.checkpoint['test_acc_top5']
    self.model_class_to_idx = self.checkpoint['class_to_idx']
    self.model_idx_to_class = {v: k for k, v in self.model_class_to_idx.items()}
    self.model_train_history_dict = self.checkpoint['train_history_dict']
    self.mean = self.checkpoint['NORM_MEAN']
    self.std = self.checkpoint['NORM_STD']
    self.model.eval()
    return
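# The if/elif chain over model_arc could equivalently be written as a dispatch table.
# A sketch under the assumption that `model` exposes the same constructors used above
# (build_classifier and ARCH_BUILDERS are illustrative names, not part of the original):

ARCH_BUILDERS = {
    'resnet18': model.resnet18,
    'resnet34': model.resnet34,
    'resnet50': model.resnet50,
    'resnet101': model.resnet101,
    'resnet152': model.resnet152,
}

def build_classifier(model_arc, num_classes):
    # MobileNetV2 takes different keyword arguments, so it is special-cased
    if model_arc == 'mobilenet':
        return model.MobileNetV2(n_class=num_classes, input_size=256)
    if model_arc not in ARCH_BUILDERS:
        raise NotImplementedError('Unknown model_arc: {}'.format(model_arc))
    return ARCH_BUILDERS[model_arc](num_classes=num_classes, zero_init_residual=True)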
def main(args=None):
    data_set = {
        x: guipang(cfg=cfg['dataset_guipang'], part=x) for x in ['train', 'val']
    }
    # data_set = {
    #     x: qiafan(cfg=cfg['dataset_qiafan'], part=x) for x in ['train', 'val']
    # }
    data_loader = {
        x: data.DataLoader(data_set[x], batch_size=cfg['batch_size'],
                           num_workers=4, shuffle=True, pin_memory=False)
        for x in ['train', 'val']
    }

    num_classes = data_set['train'].num_classes()

    # Create the model
    if cfg['depth'] == 18:
        retinanet = model.resnet18(num_classes=num_classes, pretrained=True)
    elif cfg['depth'] == 34:
        retinanet = model.resnet34(num_classes=num_classes, pretrained=True)
    elif cfg['depth'] == 50:
        retinanet = model.resnet50(num_classes=num_classes, pretrained=True)
    elif cfg['depth'] == 101:
        retinanet = model.resnet101(num_classes=num_classes, pretrained=True)
    elif cfg['depth'] == 152:
        retinanet = model.resnet152(num_classes=num_classes, pretrained=True)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    use_gpu = True
    if use_gpu:
        retinanet = retinanet.cuda()
    retinanet = torch.nn.DataParallel(retinanet).cuda()

    optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)
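# The same depth -> constructor selection recurs in most scripts below; a table-driven
# helper is one way to express it (a sketch assuming this repo's `model` module;
# build_retinanet and RESNET_BY_DEPTH are illustrative names):

RESNET_BY_DEPTH = {18: model.resnet18, 34: model.resnet34, 50: model.resnet50,
                   101: model.resnet101, 152: model.resnet152}

def build_retinanet(depth, num_classes, **kwargs):
    try:
        return RESNET_BY_DEPTH[depth](num_classes=num_classes, **kwargs)
    except KeyError:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

# e.g. retinanet = build_retinanet(cfg['depth'], data_set['train'].num_classes(), pretrained=True)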
def main(args=None):
    from dataloader import JinNanDataset, Augmenter, UnNormalizer, Normalizer, Resizer
    from torch.utils.data import Dataset, DataLoader
    from torchvision import datasets, models, transforms
    import model
    import torch
    import argparse

    parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
    parser.add_argument('--dataset', default='jingnan', help='Dataset type, must be one of csv or coco.')
    parser.add_argument('--threshold', help='threshold')
    parser.add_argument('--dataset_path',
                        help='Path to file containing training and validation annotations (optional, see readme)')
    parser.add_argument('--model_path', help='the model path')
    parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152',
                        type=int, default=50)
    parser = parser.parse_args(args)

    dataset_val = JinNanDataset(parser.dataset_path, set_name='val',
                                transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))

    # Create the model
    if parser.depth == 18:
        retinanet = model.resnet18(num_classes=dataset_val.num_classes(), pretrained=True)
    elif parser.depth == 34:
        retinanet = model.resnet34(num_classes=dataset_val.num_classes(), pretrained=True)
    elif parser.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_val.num_classes(), pretrained=True)
    elif parser.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_val.num_classes(), pretrained=True)
    elif parser.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_val.num_classes(), pretrained=True)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    # The freshly built model is replaced by the serialized one loaded from disk
    retinanet = torch.load(parser.model_path)

    use_gpu = True
    if use_gpu:
        retinanet = retinanet.cuda()

    retinanet.eval()
    print('Evaluating dataset')
    evaluate_jinnan(dataset_val, retinanet)
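# Illustrative invocation for the JinNan evaluation entry point above; the script name and
# paths are placeholders, not files shipped with the snippet:
#
#   python eval_jinnan.py --dataset_path /data/jinnan --model_path ./jinnan_retinanet.pt --depth 50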
def build(self, depth=50, learning_rate=1e-5, ratios=[0.5, 1, 2],
          scales=[2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)]):
    # Create the model
    if depth == 18:
        retinanet = model.resnet18(num_classes=self.dataset_train.num_classes(), ratios=ratios,
                                   scales=scales, weights_dir=self.weights_dir_path, pretrained=True)
    elif depth == 34:
        retinanet = model.resnet34(num_classes=self.dataset_train.num_classes(), ratios=ratios,
                                   scales=scales, weights_dir=self.weights_dir_path, pretrained=True)
    elif depth == 50:
        retinanet = model.resnet50(num_classes=self.dataset_train.num_classes(), ratios=ratios,
                                   scales=scales, weights_dir=self.weights_dir_path, pretrained=True)
    elif depth == 101:
        retinanet = model.resnet101(num_classes=self.dataset_train.num_classes(), ratios=ratios,
                                    scales=scales, weights_dir=self.weights_dir_path, pretrained=True)
    elif depth == 152:
        retinanet = model.resnet152(num_classes=self.dataset_train.num_classes(), ratios=ratios,
                                    scales=scales, weights_dir=self.weights_dir_path, pretrained=True)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    self.retinanet = retinanet.to(device=self.device)
    self.retinanet.training = True
    self.optimizer = optim.Adam(self.retinanet.parameters(), lr=learning_rate)
    self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=3, verbose=True)
    if self.checkpoint is not None:
        # TODO: verify that resuming the optimizer and scheduler state like this is correct
        self.retinanet.load_state_dict(self.checkpoint['model'])
        self.optimizer.load_state_dict(self.checkpoint['optimizer'])
        self.scheduler.load_state_dict(self.checkpoint['scheduler'])
    self.ratios = ratios
    self.scales = scales
    self.depth = depth
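# A usage sketch for build(): the default anchor scales follow the RetinaNet paper
# (2^0, 2^(1/3), 2^(2/3)); smaller scales are sometimes substituted for small-object data.
# The trainer object and values below are hypothetical, for illustration only:
#
#   trainer.build(depth=50, learning_rate=1e-5,
#                 ratios=[0.5, 1, 2],
#                 scales=[2 ** (-1.0), 2 ** (-1.0 / 3.0), 2 ** 0])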
def set_models(self, dataset_train):
    # Create the model
    if self.depth == 18:
        retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True)
    elif self.depth == 34:
        retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True)
    elif self.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True)
    elif self.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True)
    elif self.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # dim = 0: a batch of [30, ...] is split into [10, ...] chunks across 3 GPUs
        retinanet = nn.DataParallel(retinanet)

    self.retinanet = retinanet.to(self.device)
    self.retinanet.training = True
    self.optimizer = optim.Adam(self.retinanet.parameters(), lr=self.lr)
    # This LR scheduler reduces the learning rate based on the model's validation loss
    self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, patience=3, verbose=True)
    self.loss_hist = collections.deque(maxlen=500)
def main(args=None):
    parser = argparse.ArgumentParser(
        description='Simple training script for training a RetinaNet network.')
    parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.', default="csv")
    parser.add_argument('--coco_path', help='Path to COCO directory')
    parser.add_argument('--csv_train',
                        help='Path to file containing training annotations (see readme)')
    parser.add_argument('--csv_classes',
                        help='Path to file containing class list (see readme)',
                        default="binary_class.csv")
    parser.add_argument('--csv_val',
                        help='Path to file containing validation annotations (optional, see readme)')
    parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152',
                        type=int, default=18)
    parser.add_argument('--epochs', help='Number of epochs', type=int, default=500)
    parser.add_argument('--epochs_only_det', help='Number of epochs to train detection part',
                        type=int, default=1)
    parser.add_argument('--max_epochs_no_improvement', help='Max epochs without improvement',
                        type=int, default=100)
    parser.add_argument('--pretrained_model', help='Path of .pt file with pretrained model',
                        default='esposallescsv_retinanet_0.pt')
    parser.add_argument('--model_out', help='Path of .pt file with trained model to save',
                        default='trained')
    parser.add_argument('--score_threshold', help='Score above which boxes are kept',
                        type=float, default=0.5)
    parser.add_argument('--nms_threshold', help='NMS threshold', type=float, default=0.2)
    parser.add_argument('--max_boxes', help='Max boxes to be fed to recognition', default=95)
    parser.add_argument('--seg_level', help='[line, word], to choose anchor aspect ratio',
                        default='word')
    parser.add_argument('--early_stop_crit',
                        help='Early stop criterion, detection (map) or transcription (cer)',
                        default='cer')
    parser.add_argument('--max_iters_epoch', help='Max steps per epoch (for debugging)',
                        default=1000000)
    parser.add_argument('--train_htr', help='Train recognition or not', default='True')
    parser.add_argument('--train_det', help='Train detection or not', default='True')
    parser.add_argument('--binary_classifier',
                        help='Whether to use the classification branch as binary or multiclass.',
                        default='False')
    parser.add_argument('--htr_gt_box',
                        help='Train recognition branch with box gt (for debugging)',
                        default='False')
    parser.add_argument('--ner_branch',
                        help='Train named entity recognition with separate branch',
                        default='False')
    parser = parser.parse_args(args)

    if parser.dataset == 'csv':
        if parser.csv_train is None:
            raise ValueError('Must provide --csv_train')
        dataset_name = parser.csv_train.split("/")[-2]
        dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes,
                                   transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
        if parser.csv_val is None:
            dataset_val = None
            print('No validation annotations provided.')
        else:
            dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes,
                                     transform=transforms.Compose([Normalizer(), Resizer()]))
    else:
        raise ValueError('Dataset type not understood (must be csv or coco), exiting.')

    # Files for training log
    experiment_id = str(time.time()).split('.')[0]
    valid_cer_f = open('trained_models/' + parser.model_out + 'log.txt', 'w')
    for arg in vars(parser):
        if getattr(parser, arg) is not None:
            valid_cer_f.write(str(arg) + ' ' + str(getattr(parser, arg)) + '\n')

    current_commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
    valid_cer_f.write(str(current_commit))
    valid_cer_f.write("epoch_num cer best_cer mAP best_mAP time\n")
    valid_cer_f.close()

    sampler = AspectRatioBasedSampler(dataset_train, batch_size=1, drop_last=False)
    dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater,
                                  batch_sampler=sampler)
    if dataset_val is not None:
        sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)
        dataloader_val = DataLoader(dataset_val, num_workers=0, collate_fn=collater,
                                    batch_sampler=sampler_val)

    if not os.path.exists('trained_models'):
        os.mkdir('trained_models')

    # Create the model
    train_htr = parser.train_htr == 'True'
    htr_gt_box = parser.htr_gt_box == 'True'
    ner_branch = parser.ner_branch == 'True'
    binary_classifier = parser.binary_classifier == 'True'
    torch.backends.cudnn.benchmark = False
    alphabet = dataset_train.alphabet

    if os.path.exists(parser.pretrained_model):
        retinanet = torch.load(parser.pretrained_model)
        retinanet.classificationModel = ClassificationModel(
            num_features_in=256,
            num_anchors=retinanet.anchors.num_anchors,
            num_classes=dataset_train.num_classes())
        if ner_branch:
            retinanet.nerModel = NERModel(
                feature_size=256,
                pool_h=retinanet.pool_h,
                n_classes=dataset_train.num_classes(),
                pool_w=retinanet.pool_w)
    else:
        if parser.depth == 18:
            retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True,
                                       max_boxes=int(parser.max_boxes),
                                       score_threshold=float(parser.score_threshold),
                                       seg_level=parser.seg_level, alphabet=alphabet,
                                       train_htr=train_htr, htr_gt_box=htr_gt_box,
                                       ner_branch=ner_branch, binary_classifier=binary_classifier)
        elif parser.depth == 34:
            retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True,
                                       max_boxes=int(parser.max_boxes),
                                       score_threshold=float(parser.score_threshold),
                                       seg_level=parser.seg_level, alphabet=alphabet,
                                       train_htr=train_htr, htr_gt_box=htr_gt_box)
        elif parser.depth == 50:
            retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True)
        elif parser.depth == 101:
            retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True)
        elif parser.depth == 152:
            retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True)
        else:
            raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    use_gpu = True
    train_htr = parser.train_htr == 'True'
    train_det = parser.train_det == 'True'
    retinanet.htr_gt_box = parser.htr_gt_box == 'True'
    retinanet.train_htr = train_htr
    retinanet.epochs_only_det = parser.epochs_only_det

    if use_gpu:
        retinanet = retinanet.cuda()
    retinanet = torch.nn.DataParallel(retinanet).cuda()

    retinanet.training = True
    optimizer = optim.Adam(retinanet.parameters(), lr=1e-4)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=50, verbose=True)
    loss_hist = collections.deque(maxlen=500)
    ctc = CTCLoss()

    retinanet.train()
    retinanet.module.freeze_bn()

    best_cer = 1000
    best_map = 0
    epochs_no_improvement = 0
    verbose_each = 20
    optimize_each = 1
    objective = 100
    best_objective = 10000

    print('Num training images: {}'.format(len(dataset_train)))

    for epoch_num in range(parser.epochs):
        cers = []
        retinanet.training = True
        retinanet.train()
        retinanet.module.freeze_bn()
        epoch_loss = []

        for iter_num, data in enumerate(dataloader_train):
            if iter_num > int(parser.max_iters_epoch):
                break
            try:
                if iter_num % optimize_each == 0:
                    optimizer.zero_grad()
                (classification_loss, regression_loss, ctc_loss, ner_loss) = retinanet(
                    [data['img'].cuda().float(), data['annot'], ctc, epoch_num])

                classification_loss = classification_loss.mean()
                regression_loss = regression_loss.mean()
                if train_det:
                    if train_htr:
                        loss = ctc_loss + classification_loss + regression_loss + ner_loss
                    else:
                        loss = classification_loss + regression_loss + ner_loss
                elif train_htr:
                    loss = ctc_loss
                else:
                    continue
                if bool(loss == 0):
                    continue
                loss.backward()
                torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
                if iter_num % verbose_each == 0:
                    print('Epoch: {} | Step: {} | Classification loss: {:1.5f} | '
                          'Regression loss: {:1.5f} | CTC loss: {:1.5f} | NER loss: {:1.5f} | '
                          'Running loss: {:1.5f} | Total loss: {:1.5f}\r'.format(
                              epoch_num, iter_num, float(classification_loss),
                              float(regression_loss), float(ctc_loss), float(ner_loss),
                              np.mean(loss_hist), float(loss)))
                optimizer.step()
                loss_hist.append(float(loss))
                epoch_loss.append(float(loss))
                torch.cuda.empty_cache()
            except Exception as e:
                print(e)
                continue

        if parser.dataset == 'csv' and parser.csv_val is not None and train_det:
            print('Evaluating dataset')
            mAP, text_mAP, current_cer = csv_eval.evaluate(
                dataset_val, retinanet, score_threshold=parser.score_threshold)
            # text_mAP, _ = csv_eval_binary_map.evaluate(dataset_val, retinanet, score_threshold=parser.score_threshold)
            objective = current_cer * (1 - mAP)

        retinanet.eval()
        retinanet.training = False
        retinanet.score_threshold = float(parser.score_threshold)

        '''for idx, data in enumerate(dataloader_val):
            if idx > int(parser.max_iters_epoch):
                break
            print("Eval CER on validation set:", idx, "/", len(dataset_val), "\r")
            image_name = dataset_val.image_names[idx].split('/')[-1].split('.')[-2]
            # generate_pagexml(image_name, data, retinanet, parser.score_threshold, parser.nms_threshold, dataset_val)
            text_gt = ".".join(dataset_val.image_names[idx].split('.')[:-1]) + '.txt'
            f = open(text_gt, 'r')
            text_gt_lines = f.readlines()[0]
            transcript_pred = get_transcript(image_name, data, retinanet, float(parser.score_threshold),
                                             float(parser.nms_threshold), dataset_val, alphabet)
            cers.append(float(editdistance.eval(transcript_pred, text_gt_lines)) / len(text_gt_lines))'''

        t = str(time.time()).split('.')[0]
        valid_cer_f.close()
        # print("GT", text_gt_lines)
        # print("PREDS SAMPLE:", transcript_pred)
        if parser.early_stop_crit == 'cer':
            if float(objective) < float(best_objective):  # float(current_cer) < float(best_cer)
                best_cer = current_cer
                best_objective = objective
                epochs_no_improvement = 0
                torch.save(retinanet.module,
                           'trained_models/' + parser.model_out +
                           '{}_retinanet.pt'.format(parser.dataset))
            else:
                epochs_no_improvement += 1
            if mAP > best_map:
                best_map = mAP
        elif parser.early_stop_crit == 'map':
            if mAP > best_map:
                best_map = mAP
                epochs_no_improvement = 0
                torch.save(retinanet.module,
                           'trained_models/' + parser.model_out +
                           '{}_retinanet.pt'.format(parser.dataset))
            else:
                epochs_no_improvement += 1
            if float(current_cer) < float(best_cer):
                best_cer = current_cer

        if train_det:
            print(epoch_num, "mAP: ", mAP, " best mAP", best_map)
        if train_htr:
            print("VALID CER:", current_cer, "best CER", best_cer)
        print("Epochs no improvement:", epochs_no_improvement)

        valid_cer_f = open('trained_models/' + parser.model_out + 'log.txt', 'a')
        valid_cer_f.write(str(epoch_num) + " " + str(current_cer) + " " + str(best_cer) + ' ' +
                          str(mAP) + ' ' + str(best_map) + ' ' + str(text_mAP) + '\n')

        if epochs_no_improvement > 3:
            for param_group in optimizer.param_groups:
                if param_group['lr'] > 10e-5:
                    param_group['lr'] *= 0.1
        if epochs_no_improvement >= parser.max_epochs_no_improvement:
            print("TRAINING FINISHED AT EPOCH", epoch_num, ".")
            sys.exit()

        scheduler.step(np.mean(epoch_loss))
        torch.cuda.empty_cache()

    retinanet.eval()
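# Illustrative command line for the HTR/NER training script above; note that the boolean
# options are parsed as the literal strings 'True'/'False'. The script name and data paths
# are placeholders:
#
#   python train_htr.py --csv_train data/esposalles/train.csv --csv_classes binary_class.csv \
#       --csv_val data/esposalles/val.csv --depth 18 --train_htr True --ner_branch False \
#       --early_stop_crit cer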
def get_resnet152(pretrained=False, num_classes=9):
    img_model = resnet152(pretrained)
    # Replace the 2048-dim ImageNet head with a num_classes-way classifier
    img_model.fc = nn.Linear(2048, num_classes)
    return img_model
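# Fine-tuning sketch for get_resnet152 (a torchvision-style resnet152 is assumed; training
# only the new fully connected head is a common transfer-learning choice, shown here for
# illustration):
#
#   img_model = get_resnet152(pretrained=True, num_classes=9)
#   optimizer = optim.SGD(img_model.fc.parameters(), lr=1e-3, momentum=0.9)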
def main(args=None):
    # def main(epoch):
    parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
    parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.')
    parser.add_argument('--coco_path', help='Path to COCO directory')
    parser.add_argument('--csv_train', help='Path to file containing training annotations (see readme)')
    parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)')
    parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)')
    parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152',
                        type=int, default=50)
    parser.add_argument('--epochs', help='Number of epochs', type=int, default=100)
    # parser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')
    parser.add_argument('--start-epoch', default=0, type=int,
                        help='manual epoch number (useful on restarts)')
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='path to latest checkpoint (default: none)')
    parser = parser.parse_args(args)

    # Create the data loaders
    if parser.dataset == 'coco':
        if parser.coco_path is None:
            raise ValueError('Must provide --coco_path when training on COCO,')
        dataset_train = CocoDataset(parser.coco_path, set_name='train2017',
                                    transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
        dataset_val = CocoDataset(parser.coco_path, set_name='val2017',
                                  transform=transforms.Compose([Normalizer(), Resizer()]))
    elif parser.dataset == 'csv':
        if parser.csv_train is None:
            raise ValueError('Must provide --csv_train when training on CSV,')
        if parser.csv_classes is None:
            raise ValueError('Must provide --csv_classes when training on CSV,')
        dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes,
                                   transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
        if parser.csv_val is None:
            dataset_val = None
            print('No validation annotations provided.')
        else:
            dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes,
                                     transform=transforms.Compose([Normalizer(), Resizer()]))
    else:
        raise ValueError('Dataset type not understood (must be csv or coco), exiting.')

    sampler = AspectRatioBasedSampler(dataset_train, batch_size=4, drop_last=False)
    dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler)
    if dataset_val is not None:
        sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)
        dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val)

    # Create the model
    if parser.depth == 18:
        retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 34:
        retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    use_gpu = True
    if use_gpu:
        retinanet = retinanet.cuda()
    # retinanet.load_state_dict(torch.load('/users/wenchi/ghwwc/Pytorch-retinanet-master/resnet50-19c8e357.pth'))
    # if True:
    #     print('==> Resuming from checkpoint..')
    #     checkpoint = torch.load('/users/wenchi/ghwwc/Pytorch-retinanet-master/coco_retinanet_2.pt')
    #     retinanet.load_state_dict(checkpoint)
    #     best_loss = checkpoint['loss']
    #     start_epoch = checkpoint['epoch']

    retinanet = torch.nn.DataParallel(retinanet).cuda()
    retinanet.training = True

    # optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)
    optimizer = optim.SGD(retinanet.parameters(), lr=1e-5)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
    loss_hist = collections.deque(maxlen=500)

    retinanet.train()
    # retinanet.freeze_bn()       # to resume training from an intermediate state
    retinanet.module.freeze_bn()  # to train from the very beginning

    print('Num training images: {}'.format(len(dataset_train)))

    for epoch_num in range(parser.start_epoch, parser.epochs):
        if parser.resume:
            if os.path.isfile(parser.resume):
                print("=> loading checkpoint '{}'".format(parser.resume))
                checkpoint = torch.load(parser.resume)
                print(parser.start_epoch)
                # parser.start_epoch = checkpoint['epoch']
                # retinanet.load_state_dict(checkpoint['state_dict'])
                retinanet = checkpoint  # the checkpoint stores the full serialized model
                # retinanet.load_state_dict(checkpoint)
                print(retinanet)
                # optimizer.load_state_dict(checkpoint)
                print("=> loaded checkpoint '{}' (epoch {})".format(parser.resume, checkpoint))
            else:
                print("=> no checkpoint found at '{}'".format(parser.resume))

        retinanet.train()
        retinanet.freeze_bn()
        # retinanet.module.freeze_bn()

        if parser.dataset == 'coco':
            print('Evaluating dataset')
            coco_eval.evaluate_coco(dataset_val, retinanet)
        elif parser.dataset == 'csv' and parser.csv_val is not None:
            print('Evaluating dataset')
            mAP = csv_eval.evaluate(dataset_val, retinanet)

        epoch_loss = []
        for iter_num, data in enumerate(dataloader_train):
            try:
                optimizer.zero_grad()
                classification_loss, regression_loss = retinanet(
                    [data['img'].cuda().float(), data['annot'].cuda()])
                classification_loss = classification_loss.mean()
                regression_loss = regression_loss.mean()
                loss = classification_loss + regression_loss
                if bool(loss == 0):
                    continue
                loss.backward()
                torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
                optimizer.step()
                loss_hist.append(float(loss))
                epoch_loss.append(float(loss))
                print('Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | '
                      'Regression loss: {:1.5f} | Running loss: {:1.5f}'.format(
                          epoch_num, iter_num, float(classification_loss),
                          float(regression_loss), np.mean(loss_hist)))
                del classification_loss
                del regression_loss
            except Exception as e:
                print(e)
                continue

        if parser.dataset == 'coco':
            print('Evaluating dataset')
            coco_eval.evaluate_coco(dataset_val, retinanet)
        elif parser.dataset == 'csv' and parser.csv_val is not None:
            print('Evaluating dataset')
            mAP = csv_eval.evaluate(dataset_val, retinanet)

        scheduler.step(np.mean(epoch_loss))
        # torch.save(retinanet.module, '{}_retinanet_101_{}.pt'.format(parser.dataset, epoch_num))
        name = '{}_retinanet_dilation_experiment1_{}.pt'.format(parser.dataset, epoch_num)
        torch.save(retinanet, name)
        parser.resume = os.path.join('/users/wenchi/ghwwc/pytorch-retinanet-master_new', name)

    retinanet.eval()
    torch.save(retinanet, 'model_final_dilation_experiment1.pt')
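# Resuming the script above relies on --resume holding a path to a full serialized model
# (saved with torch.save(retinanet, ...)), not a state_dict. An illustrative restart, with
# placeholder script and file names:
#
#   python train.py --dataset csv --csv_train annots.csv --csv_classes classes.csv \
#       --start-epoch 10 --resume csv_retinanet_dilation_experiment1_9.pt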
def main(args=None):
    parser = argparse.ArgumentParser(
        description='Simple training script for training a RetinaNet network.')
    parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.')
    parser.add_argument('--coco_path', help='Path to COCO directory')
    parser.add_argument('--csv_train', help='Path to file containing training annotations (see readme)')
    parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)')
    parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)')
    parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152',
                        type=int, default=50)
    parser.add_argument('--epochs', help='Number of epochs', type=int, default=100)
    parser.add_argument('--optimizer', help='[SGD | Adam]', type=str, default='SGD')
    parser.add_argument('--model', help='Path to model (.pt) file.')
    parser = parser.parse_args(args)

    # Create the data loaders
    print("\n[Phase 1]: Creating DataLoader for {} dataset".format(parser.dataset))
    if parser.dataset == 'coco':
        if parser.coco_path is None:
            raise ValueError('Must provide --coco_path when training on COCO,')
        dataset_train = CocoDataset(parser.coco_path, set_name='train2014',
                                    transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
        dataset_val = CocoDataset(parser.coco_path, set_name='val2014',
                                  transform=transforms.Compose([Normalizer(), Resizer()]))
    elif parser.dataset == 'csv':
        if parser.csv_train is None:
            raise ValueError('Must provide --csv_train when training on CSV,')
        if parser.csv_classes is None:
            raise ValueError('Must provide --csv_classes when training on CSV,')
        dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes,
                                   transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
        if parser.csv_val is None:
            dataset_val = None
            print('No validation annotations provided.')
        else:
            dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes,
                                     transform=transforms.Compose([Normalizer(), Resizer()]))
    else:
        raise ValueError('Dataset type not understood (must be csv or coco), exiting.')

    sampler = AspectRatioBasedSampler(dataset_train, batch_size=8, drop_last=False)
    dataloader_train = DataLoader(dataset_train, num_workers=8, collate_fn=collater,
                                  batch_sampler=sampler)
    if dataset_val is not None:
        sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=16, drop_last=False)
        dataloader_val = DataLoader(dataset_val, num_workers=8, collate_fn=collater,
                                    batch_sampler=sampler_val)

    # Create the model
    if parser.depth == 18:
        retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 34:
        retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    print('| Num training images: {}'.format(len(dataset_train)))
    print('| Num test images    : {}'.format(len(dataset_val)))

    print("\n[Phase 2]: Preparing RetinaNet Detection Model...")
    use_gpu = torch.cuda.is_available()
    if use_gpu:
        device = torch.device('cuda')
        retinanet = retinanet.to(device)

    retinanet = torch.nn.DataParallel(retinanet,
                                      device_ids=range(torch.cuda.device_count()))
    print("| Using %d GPUs for Train/Validation!" % torch.cuda.device_count())
    retinanet.training = True

    if parser.optimizer == 'Adam':
        optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)  # not mentioned in the paper
        print("| Adam Optimizer with Learning Rate = {}".format(1e-5))
    elif parser.optimizer == 'SGD':
        optimizer = optim.SGD(retinanet.parameters(), lr=1e-2, momentum=0.9, weight_decay=1e-4)
        print("| SGD Optimizer with Learning Rate = {}".format(1e-2))
    else:
        raise ValueError('Unsupported Optimizer, must be one of [SGD | Adam]')

    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
    loss_hist = collections.deque(maxlen=500)

    retinanet.train()
    retinanet.module.freeze_bn()  # Freeze the BN parameters to the ImageNet configuration

    # Check if there is a 'checkpoints' path
    if not osp.exists('./checkpoints/'):
        os.makedirs('./checkpoints/')

    print("\n[Phase 3]: Training Model on {} dataset...".format(parser.dataset))
    for epoch_num in range(parser.epochs):
        epoch_loss = []
        for iter_num, data in enumerate(dataloader_train):
            try:
                optimizer.zero_grad()
                classification_loss, regression_loss = retinanet(
                    [data['img'].to(device), data['annot']])
                classification_loss = classification_loss.mean()
                regression_loss = regression_loss.mean()
                loss = classification_loss + regression_loss
                if bool(loss == 0):
                    continue
                loss.backward()
                torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.001)
                optimizer.step()
                loss_hist.append(float(loss))
                epoch_loss.append(float(loss))

                sys.stdout.write('\r')
                sys.stdout.write(
                    '| Epoch: {} | Iteration: {}/{} | Classification loss: {:1.5f} | '
                    'Regression loss: {:1.5f} | Running loss: {:1.5f}'.format(
                        epoch_num + 1, iter_num + 1, len(dataloader_train),
                        float(classification_loss), float(regression_loss), np.mean(loss_hist)))
                sys.stdout.flush()

                del classification_loss
                del regression_loss
            except Exception as e:
                print(e)
                continue

        print("\n| Saving current best model at epoch {}...".format(epoch_num + 1))
        torch.save(retinanet.state_dict(),
                   './checkpoints/{}_retinanet_{}.pt'.format(parser.dataset, epoch_num + 1))

        if parser.dataset == 'coco':
            # print('Evaluating dataset')
            coco_eval.evaluate_coco(dataset_val, retinanet, device)
        elif parser.dataset == 'csv' and parser.csv_val is not None:
            # print('Evaluating dataset')
            mAP = csv_eval.evaluate(dataset_val, retinanet, device)

        scheduler.step(np.mean(epoch_loss))

    retinanet.eval()
    torch.save(retinanet.state_dict(), './checkpoints/model_final.pt')
def main(config):
    # set seed for reproducibility
    np.random.seed(0)
    torch.manual_seed(0)
    torch.cuda.manual_seed(0)

    # create folder for model
    newpath = './models/' + config.model_date
    if config.save_model:
        os.makedirs(newpath)

    # Create the data loaders
    if config.csv_train is None:
        raise ValueError('Must provide --csv_train when training on csv,')
    if config.csv_classes is None:
        raise ValueError('Must provide --csv_classes when training on csv,')

    train_dataset = datasets.ImageFolder(os.path.join(config.data_dir, 'train'))
    dataset_train = GetDataset(train_file=config.csv_train, class_list=config.csv_classes,
                               transform=transforms.Compose([Augmenter(), Resizer()]),
                               dataset=train_dataset, seed=0)
    dataloader_train = DataLoader(dataset_train, batch_size=config.batch_size, shuffle=True,
                                  num_workers=1, collate_fn=collater)

    if config.csv_val is None:
        dataset_val = None
        print('No validation annotations provided.')
    else:
        valid_dataset = datasets.ImageFolder(os.path.join(config.data_dir, 'valid'))
        dataset_val = GetDataset(train_file=config.csv_val, class_list=config.csv_classes,
                                 transform=transforms.Compose([Resizer()]),
                                 dataset=valid_dataset, seed=0)

    # Create the model
    if config.depth == 18:
        retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True)
    elif config.depth == 34:
        retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True)
    elif config.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True)
    elif config.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True)
    elif config.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    if config.use_gpu:
        retinanet = retinanet.cuda()
        retinanet = torch.nn.DataParallel(retinanet).cuda()

    retinanet.training = True
    optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)

    retinanet.train()
    retinanet.module.freeze_bn()

    print('Num training images: {}'.format(len(dataset_train)))

    best_valid_map = 0
    counter = 0
    batch_size = config.batch_size
    for epoch_num in range(config.epochs):
        print('\nEpoch: {}/{}'.format(epoch_num + 1, config.epochs))
        retinanet.train()
        retinanet.module.freeze_bn()

        epoch_loss = []
        train_batch_time = AverageMeter()
        train_losses = AverageMeter()
        tic = time.time()
        with tqdm(total=len(dataset_train)) as pbar:
            for iter_num, data in enumerate(dataloader_train):
                # try:
                optimizer.zero_grad()
                siamese_loss, classification_loss, regression_loss = retinanet([
                    data['img'].cuda().float(),
                    data['annot'],
                    data['pair'].cuda().float()
                ])
                classification_loss = classification_loss.mean()
                regression_loss = regression_loss.mean()
                loss = classification_loss + regression_loss + siamese_loss
                if bool(loss == 0):
                    continue
                loss.backward()
                torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
                optimizer.step()
                epoch_loss.append(float(loss))

                toc = time.time()
                train_losses.update(float(loss), batch_size)
                train_batch_time.update(toc - tic)
                tic = time.time()
                pbar.set_description("{:.1f}s - loss: {:.3f}".format(
                    train_batch_time.val, train_losses.val))
                pbar.update(batch_size)

                del classification_loss
                del regression_loss
                del siamese_loss
                # except Exception as e:
                #     print('Training error: ', e)
                #     continue

        if config.csv_val is not None:
            print('Evaluating dataset')
            mAP, correct = eval_new.evaluate(dataset_val, retinanet)
            # is_best = mAP[0][0] > best_valid_map
            # best_valid_map = max(mAP[0][0], best_valid_map)
            is_best = correct > best_valid_map
            best_valid_map = max(correct, best_valid_map)

            if is_best:
                counter = 0
            else:
                counter += 1
            if counter > 3:
                print("[!] No improvement in a while, stopping training.")
                break

        scheduler.step(np.mean(epoch_loss))

        if is_best and config.save_model:
            torch.save(retinanet.state_dict(),
                       './models/{}/best_retinanet.pt'.format(config.model_date))
        if config.save_model:
            torch.save(retinanet.state_dict(),
                       './models/{}/{}_retinanet_{}.pt'.format(
                           config.model_date, config.depth, epoch_num))

        msg = "train loss: {:.3f} - val map: {:.3f} - val acc: {:.3f}%"
        print(msg.format(train_losses.avg, mAP[0][0], (100. * correct) / len(dataset_val)))
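# AverageMeter is used above (and in the evaluation script further down) but not defined in
# this section; a minimal sketch of the conventional implementation, tracking the latest
# value and a running average, is:

class AverageMeter(object):
    """Tracks the most recent value and the running average of a series."""

    def __init__(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count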
def main(args=None):
    parser = argparse.ArgumentParser(
        description='Simple training script for training a CTracker network.')
    parser.add_argument('--dataset', default='csv', type=str,
                        help='Dataset type, must be one of csv or coco.')
    parser.add_argument('--model_dir', default='./ctracker/', type=str,
                        help='Path to save the model.')
    parser.add_argument('--root_path', default='/Dataset/Tracking/MOT17/', type=str,
                        help='Path of the directory containing both label and images')
    parser.add_argument('--csv_train', default='train_annots.csv', type=str,
                        help='Path to file containing training annotations (see readme)')
    parser.add_argument('--csv_classes', default='train_labels.csv', type=str,
                        help='Path to file containing class list (see readme)')
    parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152',
                        type=int, default=50)
    parser.add_argument('--epochs', help='Number of epochs', type=int, default=100)
    parser.add_argument('--print_freq', help='Print frequency', type=int, default=100)
    parser.add_argument('--save_every',
                        help='Save a checkpoint of model at given interval of epochs',
                        type=int, default=5)
    parser = parser.parse_args(args)
    print(parser)
    print(parser.model_dir)

    if not os.path.exists(parser.model_dir):
        os.makedirs(parser.model_dir)

    # Create the data loaders
    if parser.dataset == 'csv':
        if (parser.csv_train is None) or (parser.csv_train == ''):
            raise ValueError('Must provide --csv_train when training on CSV,')
        if (parser.csv_classes is None) or (parser.csv_classes == ''):
            raise ValueError('Must provide --csv_classes when training on CSV,')
        dataset_train = CSVDataset(
            parser.root_path,
            train_file=os.path.join(parser.root_path, parser.csv_train),
            class_list=os.path.join(parser.root_path, parser.csv_classes),
            transform=transforms.Compose(
                [RandomSampleCrop(), PhotometricDistort(), Augmenter(), Normalizer()]))
        # transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
    else:
        raise ValueError('Dataset type not understood (must be csv or coco), exiting.')

    # sampler = AspectRatioBasedSampler(dataset_train, batch_size=2, drop_last=False)
    sampler = AspectRatioBasedSampler(dataset_train, batch_size=8, drop_last=False)
    dataloader_train = DataLoader(dataset_train, num_workers=32, collate_fn=collater,
                                  batch_sampler=sampler)

    # Create the model
    if parser.depth == 18:
        retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 34:
        retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    use_gpu = True
    if use_gpu:
        retinanet = retinanet.cuda()
    retinanet = torch.nn.DataParallel(retinanet).cuda()
    retinanet.training = True

    # optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)
    optimizer = optim.Adam(retinanet.parameters(), lr=5e-5)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
    loss_hist = collections.deque(maxlen=500)

    retinanet.train()
    retinanet.module.freeze_bn()

    print('Num training images: {}'.format(len(dataset_train)))

    total_iter = 0
    for epoch_num in range(parser.epochs):
        retinanet.train()
        retinanet.module.freeze_bn()
        epoch_loss = []
        for iter_num, data in enumerate(dataloader_train):
            try:
                total_iter = total_iter + 1
                optimizer.zero_grad()
                (classification_loss, regression_loss), reid_loss = retinanet([
                    data['img'].cuda().float(), data['annot'],
                    data['img_next'].cuda().float(), data['annot_next']
                ])
                classification_loss = classification_loss.mean()
                regression_loss = regression_loss.mean()
                reid_loss = reid_loss.mean()
                # loss = classification_loss + regression_loss + track_classification_losses
                loss = classification_loss + regression_loss + reid_loss
                if bool(loss == 0):
                    continue
                loss.backward()
                torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
                optimizer.step()
                loss_hist.append(float(loss))
                epoch_loss.append(float(loss))
                # print frequency default=100, or e.g. --print_freq 500
                if total_iter % parser.print_freq == 0:
                    print('Epoch: {} | Iter: {} | Cls loss: {:1.5f} | Reid loss: {:1.5f} | '
                          'Reg loss: {:1.5f} | Running loss: {:1.5f}'.format(
                              epoch_num, iter_num, float(classification_loss),
                              float(reid_loss), float(regression_loss), np.mean(loss_hist)))
            except Exception as e:
                print(e)
                continue

        scheduler.step(np.mean(epoch_loss))
        # Save a checkpoint of the model at a given interval of epochs, e.g. --save_every 10
        if epoch_num % parser.save_every == 0:
            torch.save(retinanet,
                       os.path.join(parser.model_dir,
                                    "weights_epoch_" + str(epoch_num) + ".pt"))

    retinanet.eval()
    torch.save(retinanet, os.path.join(parser.model_dir, 'model_final.pt'))
    run_from_train(parser.model_dir, parser.root_path)
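# Illustrative CTracker training invocation (MOT17 layout assumed, as in the defaults above;
# the script name is a placeholder):
#
#   python train_ctracker.py --root_path /Dataset/Tracking/MOT17/ --model_dir ./ctracker/ \
#       --depth 50 --epochs 100 --print_freq 100 --save_every 5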
def detect(checkpoint, output_dir, home_path=None, visualize=False):
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    if home_path is None:
        home_path = checkpoint['model_specs']['data']['home_path']
        if os.getcwd().split('/')[-1] == 'ObjectDetNet':
            home_path = os.path.join('..', home_path)

    # must have a directory to predict on called "predict_on"
    pred_on_path = os.path.join(home_path, 'predict_on')

    # create output path
    output_path = os.path.join(home_path, 'predictions', output_dir)
    try:
        os.makedirs(output_path)
    except FileExistsError:
        if output_dir != 'check0':
            raise Exception('there are already predictions for model: ' + output_dir)
        else:
            logger.info('there was already a check0 in place, erasing and predicting again from scratch')
            shutil.rmtree(output_path)
            os.makedirs(output_path)

    logger.info('inside ' + str(pred_on_path) + ': ' + str(os.listdir(pred_on_path)))
    dataset_val = PredDataset(pred_on_path=pred_on_path,
                              transform=transforms.Compose(
                                  [Normalizer(), Resizer(min_side=608)]))  # TODO: make resize an input param
    logger.info('dataset prepared')
    dataloader_val = DataLoader(dataset_val, num_workers=0, collate_fn=collater, batch_sampler=None)
    logger.info('data loader initialized')

    labels = checkpoint['labels']
    logger.info('labels are: ' + str(labels))
    num_classes = len(labels)
    configs = combine_values(checkpoint['model_specs']['training_configs'], checkpoint['hp_values'])

    logger.info('initializing retinanet model')
    if checkpoint['model_specs']['training_configs']['depth'] == 50:
        retinanet = model.resnet50(num_classes=num_classes, scales=configs['anchor_scales'],
                                   ratios=configs['anchor_ratios'])  # TODO: make depth an input parameter
    elif checkpoint['model_specs']['training_configs']['depth'] == 152:
        retinanet = model.resnet152(num_classes=num_classes, scales=configs['anchor_scales'],
                                    ratios=configs['anchor_ratios'])

    logger.info('loading weights')
    retinanet.load_state_dict(checkpoint['model'])
    retinanet = retinanet.to(device=device)
    logger.info('model to device: ' + str(device))
    retinanet.eval()
    unnormalize = UnNormalizer()

    def draw_caption(image, box, caption):
        b = np.array(box).astype(int)
        cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)
        cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)

    for idx, data in enumerate(dataloader_val):
        scale = data['scale'][0]
        with torch.no_grad():
            st = time.time()
            scores, classification, transformed_anchors = retinanet(data['img'].to(device=device).float())
            print('Elapsed time: {}'.format(time.time() - st))
            idxs = np.where(scores.cpu() > 0.5)[0]

            if visualize:
                img = np.array(255 * unnormalize(data['img'][0, :, :, :])).copy()
                img[img < 0] = 0
                img[img > 255] = 255
                img = np.transpose(img, (1, 2, 0))
                img = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2RGB)

            detections_list = []
            for j in range(idxs.shape[0]):
                bbox = transformed_anchors[idxs[j], :]
                if visualize:
                    x1 = int(bbox[0])
                    y1 = int(bbox[1])
                    x2 = int(bbox[2])
                    y2 = int(bbox[3])
                label_idx = int(classification[idxs[j]])
                label_name = labels[label_idx]
                score = scores[idxs[j]].item()
                if visualize:
                    draw_caption(img, (x1, y1, x2, y2), label_name)
                    cv2.rectangle(img, (x1, y1), (x2, y2), color=(0, 0, 255), thickness=2)
                    print(label_name)
                # un-resize for eval against ground truth
                bbox /= scale
                bbox = bbox.round()  # Tensor.round() is not in-place, so keep the result
                x1 = int(bbox[0])
                y1 = int(bbox[1])
                x2 = int(bbox[2])
                y2 = int(bbox[3])
                detections_list.append([label_name, str(score), str(x1), str(y1), str(x2), str(y2)])

            img_name = dataset_val.image_names[idx].split('/')[-1]
            i_name = img_name.split('.')[0]
            filename = i_name + '.txt'
            filepathname = os.path.join(output_path, filename)
            with open(filepathname, 'w', encoding='utf8') as f:
                for single_det_list in detections_list:
                    for i, x in enumerate(single_det_list):
                        f.write(str(x))
                        f.write(' ')
                    f.write('\n')

            if visualize:
                save_to_path = os.path.join(output_path, img_name)
                cv2.imwrite(save_to_path, img)
                cv2.waitKey(0)

    return output_path
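# Usage sketch for detect(): the checkpoint is expected to carry 'model', 'labels',
# 'model_specs' and 'hp_values' as read above; the file and directory names are illustrative:
#
#   checkpoint = torch.load('last_checkpoint.pt', map_location='cpu')
#   predictions_dir = detect(checkpoint, output_dir='check0', visualize=True)
#   # one .txt per image is written under <home_path>/predictions/check0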
def main(args=None):
    parser = argparse.ArgumentParser(
        description='Simple training script for training a RetinaNet network.')
    parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.')
    parser.add_argument('--coco_path', help='Path to COCO directory')
    parser.add_argument('--csv_train', help='Path to file containing training annotations (see readme)')
    parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)')
    parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)')
    parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152',
                        type=int, default=50)
    parser.add_argument('--epochs', help='Number of epochs', type=int, default=100)
    parser = parser.parse_args(args)

    # Create the data loaders
    if parser.dataset == 'coco':
        if parser.coco_path is None:
            raise ValueError('Must provide --coco_path when training on COCO,')
        # dataset_train = CocoDataset(parser.coco_path, set_name='trainval35k',
        #                             transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
        dataset_val = CocoDataset(parser.coco_path, set_name='val5k',
                                  transform=transforms.Compose([Normalizer(), Resizer()]))
    elif parser.dataset == 'csv':
        if parser.csv_train is None:
            raise ValueError('Must provide --csv_train when training on CSV,')
        if parser.csv_classes is None:
            raise ValueError('Must provide --csv_classes when training on CSV,')
        dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes,
                                   transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
        if parser.csv_val is None:
            dataset_val = None
            print('No validation annotations provided.')
        else:
            dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes,
                                     transform=transforms.Compose([Normalizer(), Resizer()]))
    else:
        raise ValueError('Dataset type not understood (must be csv or coco), exiting.')

    # sampler = AspectRatioBasedSampler(dataset_train, batch_size=16, drop_last=False)
    # dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler)
    if dataset_val is not None:
        sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)
        dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater,
                                    batch_sampler=sampler_val)

    # Create the model
    if parser.depth == 18:
        retinanet = model.resnet18(num_classes=dataset_val.num_classes(), pretrained=True)
    elif parser.depth == 34:
        retinanet = model.resnet34(num_classes=dataset_val.num_classes(), pretrained=True)
    elif parser.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_val.num_classes(), pretrained=True)
    elif parser.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_val.num_classes(), pretrained=True)
    elif parser.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_val.num_classes(), pretrained=True)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    use_gpu = True

    retinanet.load_state_dict(
        torch.load("coco_resnet_50_map_0_335_state_dict.pt", encoding='latin1'))

    if use_gpu:
        retinanet = retinanet.cuda()
        # retinanet = torch.nn.DataParallel(retinanet).cuda()

    # retinanet.training = True
    # optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)
    # scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
    # loss_hist = collections.deque(maxlen=500)

    retinanet.eval()
    # retinanet.module.freeze_bn()
    # print('Num training images: {}'.format(len(dataset_train)))

    if parser.dataset == 'coco':
        print('Evaluating dataset')
        coco_eval.evaluate_coco(dataset_val, retinanet)
    elif parser.dataset == 'csv' and parser.csv_val is not None:
        print('Evaluating dataset')
        mAP = csv_eval.evaluate(dataset_val, retinanet)
def main(config):
    np.random.seed(0)
    random.seed(0)
    torch.manual_seed(0)

    model_path = './models/' + config.model_date + '/best_retinanet.pt'
    val_dataset = datasets.ImageFolder(os.path.join(config.data_dir, 'test'))
    dataset_val = GetDataset(train_file=config.csv_val, class_list=config.csv_classes,
                             transform=transforms.Compose([Resizer()]),
                             dataset=val_dataset, seed=0)
    dataloader_val = DataLoader(dataset_val, batch_size=1, num_workers=1, collate_fn=collater)

    # Create the model
    if config.depth == 18:
        retinanet = model.resnet18(num_classes=dataset_val.num_classes(), pretrained=True)
    elif config.depth == 34:
        retinanet = model.resnet34(num_classes=dataset_val.num_classes(), pretrained=True)
    elif config.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_val.num_classes(), pretrained=True)
    elif config.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_val.num_classes(), pretrained=True)
    elif config.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_val.num_classes(), pretrained=True)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    use_gpu = True
    if use_gpu:
        retinanet = torch.nn.DataParallel(retinanet).cuda()

    retinanet.load_state_dict(torch.load(model_path))
    retinanet.eval()

    correct = 0
    if config.eval_map:
        mAP = eval_new.evaluate(dataset_val, retinanet, iou_threshold=config.thres)

    avg_time = AverageMeter()
    for idx, data in enumerate(dataloader_val):
        with torch.no_grad():
            st = time.time()
            scores, classification, transformed_anchors, similarity = retinanet(
                [data['img'].cuda().float(), data['pair'].float()])
            avg_time.update(time.time() - st)

            scores = scores.cpu().numpy()
            idxs = np.where(scores > 0.5)

            img = np.array(255 * data['img'][0, :, :, :]).copy()
            img[img < 0] = 0
            img[img > 255] = 255
            img = np.transpose(img, (1, 2, 0))

            pair = np.array(255 * data['pair'][0, :, :, :]).copy()
            pair[pair < 0] = 0
            pair[pair > 255] = 255
            pair = np.transpose(pair, (1, 2, 0))

            if config.plot:
                img = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2RGB)
                pair = cv2.cvtColor(pair.astype(np.uint8), cv2.COLOR_BGR2RGB)

            max_sim = 0.0
            annot = dataset_val.get_annot(idx)
            annot = annot['annot']
            bbox_true = annot[annot[:, 5] == 1, :4]

            for j in range(idxs[0].shape[0]):
                bbox = transformed_anchors[idxs[0][j], :]
                x1 = int(bbox[0])
                y1 = int(bbox[1])
                x2 = int(bbox[2])
                y2 = int(bbox[3])
                sim = similarity[j, 0].item()
                if sim > max_sim:
                    max_sim = sim
                    bbox_est = np.asarray([[x1, y1, x2, y2]])
                if config.plot:
                    draw_caption(img, (x1, y1, x2, y2), str(round(sim, 2)))
                    cv2.rectangle(img, (x1, y1), (x2, y2), color=(0, 0, 255), thickness=2)

            # count a hit when the best-overlap box clears both thresholds
            iou = compute_overlap(bbox_true, bbox_est / data['scale'][0])
            if iou.max() > config.thres and max_sim > config.sim_thres:
                correct += 1
            max_sim = 0
            # print('Iter = {}, number correct = {}'.format(idx + 1, correct))

            if config.plot:
                cv2.imshow('pair', pair)
                cv2.imshow('img', img)
                cv2.waitKey(0)  # press q to quit, any other key to view the next image

    print('Final Accuracy is {}'.format(correct / config.test_trials))
    print('Average time: {}'.format(avg_time.val))
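# compute_overlap is used above without a definition in this section; a standard pairwise
# IoU between (N, 4) and (K, 4) arrays of [x1, y1, x2, y2] boxes looks like this (a sketch
# following the keras-retinanet style signature, not necessarily this repo's exact version):

import numpy as np

def compute_overlap(boxes, query_boxes):
    n, k = boxes.shape[0], query_boxes.shape[0]
    overlaps = np.zeros((n, k), dtype=np.float64)
    for i in range(n):
        box_area = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
        for j in range(k):
            # intersection width/height; skip pairs that do not overlap
            iw = min(boxes[i, 2], query_boxes[j, 2]) - max(boxes[i, 0], query_boxes[j, 0])
            if iw <= 0:
                continue
            ih = min(boxes[i, 3], query_boxes[j, 3]) - max(boxes[i, 1], query_boxes[j, 1])
            if ih <= 0:
                continue
            query_area = ((query_boxes[j, 2] - query_boxes[j, 0]) *
                          (query_boxes[j, 3] - query_boxes[j, 1]))
            union = box_area + query_area - iw * ih
            overlaps[i, j] = iw * ih / union
    return overlaps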
def main(args=None):
    parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
    parser.add_argument('--dataset', default="csv", help='Dataset type, must be one of csv or coco.')
    parser.add_argument('--coco_path', default="/home/mayank-s/PycharmProjects/Datasets/coco", help='Path to COCO directory')
    parser.add_argument('--csv_train', default="berkely_ready_to_train_for_retinanet_pytorch.csv", help='Path to file containing training annotations (see readme)')
    parser.add_argument('--csv_classes', default="berkely_class.csv", help='Path to file containing class list (see readme)')
    parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)')
    parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50)
    parser.add_argument('--epochs', help='Number of epochs', type=int, default=200)
    # parser.add_argument('--resume', default=0, help='resume from checkpoint')
    parser = parser.parse_args(args)
    # print(args.resume)

    # Create the data loaders
    if parser.dataset == 'coco':
        if parser.coco_path is None:
            raise ValueError('Must provide --coco_path when training on COCO.')
        dataset_train = CocoDataset(parser.coco_path, set_name='train2014',
                                    transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
        dataset_val = CocoDataset(parser.coco_path, set_name='val2014',
                                  transform=transforms.Compose([Normalizer(), Resizer()]))
    elif parser.dataset == 'csv':
        if parser.csv_train is None:
            raise ValueError('Must provide --csv_train when training on CSV.')
        if parser.csv_classes is None:
            raise ValueError('Must provide --csv_classes when training on CSV.')
        dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes,
                                   transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
        if parser.csv_val is None:
            dataset_val = None
            print('No validation annotations provided.')
        else:
            dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes,
                                     transform=transforms.Compose([Normalizer(), Resizer()]))
    else:
        raise ValueError('Dataset type not understood (must be csv or coco), exiting.')

    sampler = AspectRatioBasedSampler(dataset_train, batch_size=4, drop_last=False)
    dataloader_train = DataLoader(dataset_train, num_workers=0, collate_fn=collater, batch_sampler=sampler)

    if dataset_val is not None:
        sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)
        dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val)

    # Create the model
    if parser.depth == 18:
        retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 34:
        retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    use_gpu = True
    # if use_gpu:
    if torch.cuda.is_available():
        retinanet = retinanet.cuda()
        retinanet = torch.nn.DataParallel(retinanet).cuda()

    retinanet.training = True

    ###################################################################################
    # Resume-from-checkpoint block (disabled):
    # Resume_model = False
    # start_epoch = 0
    # if Resume_model:
    #     print('==> Resuming from checkpoint..')
    #     checkpoint = torch.load('./checkpoint/saved_with_epochs/retina_fpn_1')
    #     retinanet.load_state_dict(checkpoint['net'])
    #     best_loss = checkpoint['loss']
    #     start_epoch = checkpoint['epoch']
    #     print('Resuming from epoch:{ep} loss:{lp}'.format(ep=start_epoch, lp=best_loss))
    ###################################################################################

    optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
    loss_hist = collections.deque(maxlen=500)

    retinanet.train()
    retinanet.module.freeze_bn()

    print('Num training images: {}'.format(len(dataset_train)))

    # NOTE: this replaces the model constructed above with a fully serialised one from disk
    retinanet = torch.load("./checkpoint/retina_fpn_1")

    # epoch_num = start_epoch
    for epoch_num in range(parser.epochs):
        # retinanet.train()
        # retinanet.module.freeze_bn()
        epoch_loss = []

        for iter_num, data in enumerate(dataloader_train):
            try:
                optimizer.zero_grad()
                if torch.cuda.is_available():
                    classification_loss, regression_loss = retinanet([data['img'].cuda().float(), data['annot']])
                else:
                    classification_loss, regression_loss = retinanet([data['img'].float(), data['annot']])
                classification_loss = classification_loss.mean()
                regression_loss = regression_loss.mean()
                loss = classification_loss + regression_loss
                if bool(loss == 0):
                    continue
                loss.backward()
                torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
                optimizer.step()
                loss_hist.append(float(loss))
                epoch_loss.append(float(loss))
                print('Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'.format(
                    epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist)))
                del classification_loss
                del regression_loss
            except Exception as e:
                print(e)
                continue

        # print("Saving model...")
        # name = "./checkpoint/retina_fpn_" + str(epoch_num)
        # torch.save(retinanet, name)

        print('Saving..')
        state = {
            'net': retinanet.module.state_dict(),
            'loss': loss_hist,
            'epoch': epoch_num,
        }
        if not os.path.isdir('checkpoint/saved_with_epochs'):
            os.mkdir('checkpoint/saved_with_epochs')
        name = "./checkpoint/saved_with_epochs/retina_fpn_" + str(epoch_num)
        torch.save(state, name)
        # torch.save(state, './checkpoint/retinanet.pth')

    # if parser.dataset == 'coco': ...  (evaluation block commented out in the original)
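
# The loop above saves checkpoints as {'net': state_dict, 'loss': deque, 'epoch': int}.
# A minimal resume sketch matching that format, mirroring the disabled block above;
# the file name is an example, not a file shipped with this code:
checkpoint = torch.load('./checkpoint/saved_with_epochs/retina_fpn_0', map_location='cpu')
retinanet.module.load_state_dict(checkpoint['net'])
start_epoch = checkpoint['epoch'] + 1  # resume after the saved epoch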
def main(args=None): """ todo s: ################## ToDo ######################## 1. download more images using image_utils and isic-arhive. Also, use more online resources for data. 2. Use Augmentations fromPytorchSSD using pascal voc data format. 3. use pair augmentation, random erase 4. download more images for each classes. 5. preprocessing and feature extraction 6. bigger 500 px image size. big image tends to make 7. use ResNet-152 for better peromance. 8. adversarial training, use crosssentropy, focal loss 9. use similar optimizatio adam and learning rate schedule like wider face pedestrian dataset. 10.BGR to RGB 11. multi scale testing. 12. soft nms 13. save model and load from previous epoch 14. https://github.com/uoguelph-mlrg/Cutout """ parser = argparse.ArgumentParser( description='Simple training script for training a RetinaNet network.') parser.add_argument('--dataset', default="wider_pedestrain", help='Dataset type, must be one of csv or coco.') parser.add_argument( '--coco_path', default= "/media/milton/ssd1/research/competitions/data_wider_pedestrian/", help='Path to COCO directory') parser.add_argument( '--csv_train', help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)') parser.add_argument( '--csv_val', help= 'Path to file containing validation annotations (optional, see readme)' ) parser.add_argument( '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=152) parser.add_argument('--epochs', help='Number of epochs', type=int, default=200) parser = parser.parse_args(args) # Create the data loaders if parser.dataset == 'coco': if parser.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') dataset_train = CocoDataset(parser.coco_path, set_name='train_wider_pedestrian', transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) dataset_val = CocoDataset(parser.coco_path, set_name='val_wider_pedestrian', transform=transforms.Compose( [Normalizer(), Resizer()])) elif parser.dataset == 'wider_pedestrain': dataset_train = CocoDataset(parser.coco_path, set_name='train_wider_pedestrian', transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) dataset_val = CocoDataset(parser.coco_path, set_name='val_wider_pedestrian', transform=transforms.Compose( [Normalizer(), Resizer()])) # dataset_test = CocoDataset(parser.coco_path, set_name='test_wider_pedestrian', # transform=transforms.Compose([Normalizer()])) elif parser.dataset == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on COCO,') if parser.csv_classes is None: raise ValueError( 'Must provide --csv_classes when training on COCO,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Resizer()])) else: raise ValueError( 'Dataset type not understood (must be csv or coco), exiting.') batch_size = 4 num_classes = 1 print("Total Train:{}".format(len(dataset_train))) sampler = AspectRatioBasedSampler(dataset_train, batch_size=batch_size, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=4, collate_fn=collater, batch_sampler=sampler) print("Total 
Validation:{}".format(len(dataset_val))) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=batch_size, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=1, collate_fn=collater, batch_sampler=sampler_val) best_saved_model_name = "checkpoint/resnet{}_{}_best_model.pth".format( parser.depth, parser.dataset) best_mAP = 0 start_epoch = 0 # Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes=num_classes, pretrained=True) elif parser.depth == 34: retinanet = model.resnet34(num_classes=num_classes, pretrained=True) elif parser.depth == 50: retinanet = model.resnet50(num_classes=num_classes, pretrained=True) elif parser.depth == 101: retinanet = model.resnet101(num_classes=num_classes, pretrained=True) elif parser.depth == 152: retinanet = model.resnet152(num_classes=num_classes, pretrained=True) else: raise ValueError( 'Unsupported model depth, must be one of 18, 34, 50, 101, 152') use_gpu = True optimizer = optim.Adam(retinanet.parameters(), lr=0.001) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) if use_gpu: retinanet_sk = copy.deepcopy( retinanet.cpu() ) # will hold the raw model, later it will be loaded with new model weight to test in seperate gpus. retinanet = retinanet.cuda() retinanet = torch.nn.DataParallel(retinanet) try: print("Loading model and optimizer from checkpoint '{}'".format( best_saved_model_name)) checkpoint = torch.load(best_saved_model_name) retinanet.load_state_dict(checkpoint['model'].state_dict()) best_mAP = checkpoint['map'] start_epoch = checkpoint['epoch'] optimizer.load_state_dict(checkpoint['optimizer'].state_dict()) # optimizer.load_state_dict(checkpoint['optimizer_state']) print("Loaded checkpoint '{}' (epoch {})".format( best_saved_model_name, checkpoint['epoch'])) start_epoch = checkpoint['epoch'] print( '==> Resuming Sucessfully from checkpoint from epoch {} with mAP {:.7f}..' 
.format(start_epoch, best_mAP)) except Exception as e: print("\nExcpetion: {}".format(repr(e))) print('\n==> Resume Failed...') retinanet.training = True total_loss = losses.loss loss_hist = collections.deque(maxlen=500) retinanet.train() freeze_bn(retinanet) print('Num training images: {}'.format(len(dataset_train))) for epoch_num in range(start_epoch, parser.epochs): retinanet.train() freeze_bn(retinanet) epoch_loss = [] # threshold=0.05 for iter_num, data in enumerate(dataloader_train): iter_per_epoch = len(dataset_train) / batch_size step = epoch_num * iter_per_epoch + iter_num if iter_num == 0: print('Iteration PEr eEpoch: {}'.format(iter_per_epoch)) try: optimizer.zero_grad() classification, regression, anchors = retinanet( data['img'].cuda().float()) classification_loss, regression_loss = total_loss( classification, regression, anchors, data['annot']) loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) writer.add_scalar('Classification loss', classification_loss, step) writer.add_scalar('Regression loss', regression_loss, step) writer.add_scalar("Running Loss", np.mean(loss_hist), step) msg = 'Epoch:{}, Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'.format( epoch_num, float(classification_loss), float(regression_loss), np.mean(loss_hist)) progress_bar(iter_num, iter_per_epoch, msg) # print('Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'.format(epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist))) # break # if iter_num==0: # break if iter_num % 500 == 0: save_model(retinanet, optimizer, best_saved_model_name, epoch_num + 1, epoch_num) if False: retinanet.eval() test_data = get_test_loader_for_upload(1) # coco_eval.evaluate_wider_pedestrian_for_upload(epoch_num, test_data, retinanet, retinanet_sk) new_map = coco_eval.evaluate_wider_pedestrian( epoch_num, dataset_val, retinanet, retinanet_sk, threshold) # to validate epoch_saved_model_name = "checkpoint/resnet{}_{}_epoch_{}.pth".format( parser.depth, parser.dataset, epoch_num) save_model(retinanet, optimizer, epoch_saved_model_name, new_map, epoch_num) # print("\nepoch:{}, validation average precision score:{}".format(epoch_num, new_map)) if new_map == None: continue writer.add_scalar('validation mAP', new_map, epoch_num) scheduler.step(np.mean(epoch_loss)) if new_map > best_mAP: print( "Found new best model with mAP:{:.7f}, over {:.7f}" .format(new_map, best_mAP)) save_model(retinanet, optimizer, best_saved_model_name, new_map, epoch_num) best_mAP = new_map coco_eval.evaluate_wider_pedestrian_for_upload( parser.depth, epoch_num, test_data, retinanet, retinanet_sk) retinanet.train() except Exception as e: print(e) if parser.dataset == 'coco': print('\n==>Evaluating dataset') coco_eval.evaluate_coco(dataset_val, retinanet, threshold=0.2) save_model(retinanet, optimizer, best_saved_model_name, 0.5, epoch_num) continue elif parser.dataset == 'wider_pedestrain': for threshold in range(16, 90, 10): threshold = threshold / 100 test_data = get_test_loader_for_upload(1) # coco_eval.evaluate_wider_pedestrian_for_upload(epoch_num, test_data, retinanet, retinanet_sk) new_map = coco_eval.evaluate_wider_pedestrian( epoch_num, dataset_val, retinanet, retinanet_sk, threshold) # to validate epoch_saved_model_name = 
"checkpoint/resnet{}_{}_epoch_{}.pth".format( parser.depth, parser.dataset, epoch_num) save_model(retinanet, optimizer, epoch_saved_model_name, new_map, epoch_num) # print("\nepoch:{}, validation average precision score:{}".format(epoch_num, new_map)) if new_map == None: continue writer.add_scalar('validation mAP', new_map, epoch_num) scheduler.step(np.mean(epoch_loss)) # if new_map>best_mAP: print( "Found new best model with mAP:{:.7f}, over {:.7f}".format( new_map, best_mAP)) save_model(retinanet, optimizer, best_saved_model_name, new_map, epoch_num) best_mAP = new_map coco_eval.evaluate_wider_pedestrian_for_upload( epoch_num, test_data, retinanet, retinanet_sk, threshold, new_map) retinanet.train()
def main(args=None): """ In current implementation, if test csv is provided, we use that as validation set and combine the val and train csv's as the csv for training. If train_all_labeled_data flag is use, then we combine all 3 (if test is provided) for training and use a prespecified learning rate step schedule. """ parser = argparse.ArgumentParser( description='Simple training script for training a RetinaNet network.') parser.add_argument( '--csv_train', help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)') parser.add_argument( '--csv_val', help= 'Path to file containing validation annotations (optional, see readme)', default=None) parser.add_argument( '--csv_test', help= 'Path to file containing test annotations (optional, if provided, train & val will be combined for training and test will be used for evaluation)', default=None) parser.add_argument('--lr', type=float, default=2e-5) parser.add_argument( '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=101) parser.add_argument('--epochs', help='Number of epochs', type=int, default=25) parser.add_argument('--model_output_dir', type=str, default='models') parser.add_argument( '--train_all_labeled_data', help= 'Combine train, val, and test into 1 training set. Will use prespecified learning rate scheduler steps', action='store_true') parser.add_argument('--resnet-backbone-normalization', choices=['batch_norm', 'group_norm'], type=str, default='batch_norm') parser = parser.parse_args(args) print('Learning Rate: {}'.format(parser.lr)) print("Normalization: ", parser.resnet_backbone_normalization) # Create folder - will raise error if folder exists assert (os.path.exists(parser.model_output_dir) == False) os.mkdir(parser.model_output_dir) if parser.csv_train is None: raise ValueError('Must provide --csv_train when training,') if parser.csv_classes is None: raise ValueError('Must provide --csv_classes when training,') if not parser.csv_val and parser.csv_test: raise ValueError( "Cannot specify test set without specifying validation set") if parser.train_all_labeled_data: csv_paths = [parser.csv_train, parser.csv_val, parser.csv_test] train_csv = [] for path in csv_paths: if isinstance(path, str): train_csv.append(path) val_csv = None else: if parser.csv_train and parser.csv_val and parser.csv_test: train_csv = [parser.csv_train, parser.csv_val ] # Combine train and val sets for training val_csv = parser.csv_test else: train_csv = parser.csv_train val_csv = parser.csv_val print('loading train data') print(train_csv) dataset_train = CSVDataset(train_file=train_csv, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) print(dataset_train.__len__()) if val_csv is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=val_csv, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Resizer()])) print('putting data into loader') sampler = AspectRatioBasedSampler(dataset_train, batch_size=2, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val) # Create the model print('creating model') if parser.depth == 18: 
retinanet = model.resnet18( num_classes=dataset_train.num_classes(), pretrained=True, normalization=parser.resnet_backbone_normalization) elif parser.depth == 34: retinanet = model.resnet34( num_classes=dataset_train.num_classes(), pretrained=True, normalization=parser.resnet_backbone_normalization) elif parser.depth == 50: retinanet = model.resnet50( num_classes=dataset_train.num_classes(), pretrained=True, normalization=parser.resnet_backbone_normalization) elif parser.depth == 101: retinanet = model.resnet101( num_classes=dataset_train.num_classes(), pretrained=True, normalization=parser.resnet_backbone_normalization) elif parser.depth == 152: retinanet = model.resnet152( num_classes=dataset_train.num_classes(), pretrained=True, normalization=parser.resnet_backbone_normalization) else: raise ValueError( 'Unsupported model depth, must be one of 18, 34, 50, 101, 152') use_gpu = True if use_gpu: retinanet = retinanet.cuda() retinanet = torch.nn.DataParallel(retinanet).cuda() retinanet.training = True optimizer = optim.Adam(retinanet.parameters(), lr=parser.lr) lr_factor = 0.3 if not parser.train_all_labeled_data: scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, factor=lr_factor, verbose=True) else: # these milestones are for when using the lung masks - not for unmasked lung data scheduler = optim.lr_scheduler.MultiStepLR( optimizer, milestones=[12, 16, 20, 24], gamma=lr_factor) # masked training #scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[14, 18, 22, 26], gamma=lr_factor) loss_hist = collections.deque(maxlen=500) retinanet.train() retinanet.module.freeze_bn() #initialize tensorboard writer = SummaryWriter(comment=parser.model_output_dir) # Augmentation seq = iaa.Sequential([ iaa.Fliplr(0.5), iaa.Flipud(0.5), iaa.Affine(scale={ "x": (1.0, 1.2), "y": (1.0, 1.2) }, rotate=(-20, 20), shear=(-4, 4)) ], random_order=True) def augment(data, seq): for n, img in enumerate(data['img']): # imgaug needs dim in format (H, W, C) image = data['img'][n].permute(1, 2, 0).numpy() bbs_array = [] for ann in data['annot'][n]: x1, y1, x2, y2, _ = ann bbs_array.append(BoundingBox(x1=x1, y1=y1, x2=x2, y2=y2)) bbs = BoundingBoxesOnImage(bbs_array, shape=image.shape) image_aug, bbs_aug = seq(image=image, bounding_boxes=bbs) # save augmented image and chage dims to (C, H, W) data['img'][n] = torch.tensor(image_aug.copy()).permute(2, 0, 1) # save augmented annotations for i, bbox in enumerate(bbs_aug.bounding_boxes): x1, y1, x2, y2 = bbox.x1, bbox.y1, bbox.x2, bbox.y2 obj_class = data['annot'][n][i][-1] data['annot'][n][i] = torch.tensor([x1, y1, x2, y2, obj_class]) return data print('Num training images: {}'.format(len(dataset_train))) dir_training_images = os.path.join(os.getcwd(), writer.log_dir, 'training_images') os.mkdir(dir_training_images) best_validation_loss = None best_validation_map = None for epoch_num in range(parser.epochs): writer.add_scalar('Train/LR', optimizer.param_groups[0]['lr'], epoch_num) retinanet.train() retinanet.module.freeze_bn() epoch_loss = [] for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() data = augment(data, seq) # save a few training images to see what augmentation looks like if iter_num % 100 == 0 and epoch_num == 0: x1, y1, x2, y2, _ = data['annot'][0][0] fig, ax = plt.subplots(1) ax.imshow(data['img'][0][1]) rect = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=1, edgecolor='r', facecolor='none', alpha=1) ax.add_patch(rect) fig.savefig( os.path.join(dir_training_images, 
'{}.png'.format(iter_num))) plt.close() classification_loss, regression_loss = retinanet( [data['img'].cuda().float(), data['annot']]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() if parser.resnet_backbone_normalization == 'batch_norm': torch.nn.utils.clip_grad_norm_( parameters=retinanet.parameters(), max_norm=0.1) else: torch.nn.utils.clip_grad_norm_( parameters=retinanet.parameters(), max_norm=0.01 ) # Decrease norm to reduce risk of exploding gradients optimizer.step() loss_hist.append(float(loss)) epoch_loss.append(float(loss)) print( 'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}' .format(epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist))) del classification_loss del regression_loss except Exception as e: print(e) continue writer.add_scalar('Train/Loss', np.mean(epoch_loss), epoch_num) if not parser.train_all_labeled_data: print('Evaluating Validation Loss...') with torch.no_grad(): retinanet.train() val_losses, val_class_losses, val_reg_losses = [], [], [] for val_iter_num, val_data in enumerate(dataloader_val): try: val_classification_loss, val_regression_loss = retinanet( [ val_data['img'].cuda().float(), val_data['annot'] ]) val_losses.append( float(val_classification_loss) + float(val_regression_loss)) val_class_losses.append(float(val_classification_loss)) val_reg_losses.append(float(val_regression_loss)) del val_classification_loss, val_regression_loss except Exception as e: print(e) continue print( 'VALIDATION Epoch: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Total loss: {:1.5f}' .format(epoch_num, np.mean(val_class_losses), np.mean(val_reg_losses), np.mean(val_losses))) # Save model with best validation loss if best_validation_loss is None: best_validation_loss = np.mean(val_losses) if best_validation_loss >= np.mean(val_losses): best_validation_loss = np.mean(val_losses) torch.save( retinanet.module, parser.model_output_dir + '/best_result_valloss.pt') writer.add_scalar('Validation/Loss', np.mean(val_losses), epoch_num) # Calculate Validation mAP print('Evaluating validation mAP') mAP = csv_eval.evaluate(dataset_val, retinanet) print("Validation mAP: " + str(mAP[0][0])) if best_validation_map is None: best_validation_map = mAP[0][0] elif best_validation_map < mAP[0][0]: best_validation_map = mAP[0][0] torch.save( retinanet.module, parser.model_output_dir + '/best_result_valmAP.pt') writer.add_scalar('Validation/mAP', mAP[0][0], epoch_num) if not parser.train_all_labeled_data: scheduler.step(np.mean(val_losses)) else: scheduler.step() torch.save( retinanet.module, parser.model_output_dir + '/retinanet_{}.pt'.format(epoch_num)) retinanet.eval() torch.save(retinanet, parser.model_output_dir + '/model_final.pt')
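
# The imgaug round trip in augment() can be sanity-checked on a dummy batch.
# A minimal sketch, assuming augment and seq are lifted to module scope; the
# shapes below are examples only, not part of the training script:
import torch
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage

dummy = {
    'img': torch.rand(1, 3, 64, 64),                      # one 3x64x64 image
    'annot': torch.tensor([[[10., 10., 30., 30., 0.]]]),  # one box plus class id
}
out = augment(dummy, seq)
assert out['img'][0].shape == (3, 64, 64)  # dims restored to (C, H, W)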
def infer(img_dir, classes_csv, model_fname, resnet_depth, score_thresh, out_dir, results_fname):
    # Create dataset: collect all .png images in the given folder(s)
    img_list = []
    if not isinstance(img_dir, list):
        img_dir = [img_dir]
    for directory in img_dir:
        for file in os.listdir(directory):
            if file.endswith(".png"):
                img_list.append(directory + file)

    dataset_val = CustomDataset(img_list=img_list, class_list=classes_csv,
                                transform=transforms.Compose([Normalizer(), Resizer()]))
    sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)
    dataloader_val = DataLoader(dataset_val, num_workers=1, collate_fn=collater, batch_sampler=sampler_val)
    print(dataset_val.num_classes())

    # Create the model
    if resnet_depth == 18:
        retinanet = model.resnet18(num_classes=dataset_val.num_classes())
    elif resnet_depth == 34:
        retinanet = model.resnet34(num_classes=dataset_val.num_classes())
    elif resnet_depth == 50:
        retinanet = model.resnet50(num_classes=dataset_val.num_classes())
    elif resnet_depth == 101:
        retinanet = model.resnet101(num_classes=dataset_val.num_classes())
    elif resnet_depth == 152:
        retinanet = model.resnet152(num_classes=dataset_val.num_classes())
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    # The checkpoint was saved from a DataParallel model, so strip the 'module.' prefix
    state_dict = torch.load(model_fname)
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        name = k[7:]  # remove `module.`
        new_state_dict[name] = v
    retinanet.load_state_dict(new_state_dict)

    use_gpu = torch.cuda.is_available()  # was an undefined global in the original
    if use_gpu:
        retinanet = retinanet.cuda()
    retinanet.eval()

    unnormalize = UnNormalizer()

    def draw_caption(image, box, caption):
        b = np.array(box).astype(int)
        cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)
        cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)

    results = []
    for idx, data in enumerate(dataloader_val):
        with torch.no_grad():
            st = time.time()
            scores, classification, transformed_anchors = retinanet(data['img'].cuda().float())
            print('Elapsed time: {}, Num objects: {}'.format(time.time() - st, len(scores)))
            idxs = np.where(scores.cpu() > score_thresh)  # move scores to CPU before numpy

            img = np.array(255 * unnormalize(data['img'][0, :, :, :])).copy()
            img[img < 0] = 0
            img[img > 255] = 255
            img = np.transpose(img, (1, 2, 0)).astype(np.uint8).copy()

            bboxes = []
            for j in range(idxs[0].shape[0]):
                bbox = transformed_anchors[idxs[0][j], :]
                # undo the Resizer scaling to get boxes in original image coordinates
                x1 = int(bbox[0] / data['scale'][0])
                y1 = int(bbox[1] / data['scale'][0])
                x2 = int(bbox[2] / data['scale'][0])
                y2 = int(bbox[3] / data['scale'][0])
                label_name = dataset_val.labels[int(classification[idxs[0][j]])]
                draw_caption(img, (x1, y1, x2, y2), label_name)
                cv2.rectangle(img, (x1, y1), (x2, y2), color=(0, 0, 255), thickness=2)
                score = float(scores[idxs[0][j]])
                bboxes.append([x1, y1, x2, y2, score])

            img_fname = ntpath.basename(data['img_fname'][0])
            results.append([img_fname, bboxes])
            # fig, ax = plt.subplots(figsize=(12, 12))
            # ax.imshow(img, interpolation='bilinear')

    with open(out_dir + results_fname, "wb") as output_file:
        pickle.dump(results, output_file)
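
# A hypothetical invocation of infer(); all paths, the depth and the score
# threshold below are examples only:
infer(img_dir='./data/test_images/',
      classes_csv='./data/classes.csv',
      model_fname='./logs/model_state.pt',
      resnet_depth=50,
      score_thresh=0.5,
      out_dir='./results/',
      results_fname='detections.pkl')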
def main():
    global NORM_MEAN, NORM_STD, coconut_model, train_history_dict
    for arg in vars(args):
        print(str(arg) + ': ' + str(getattr(args, arg)))
    print('=' * 100)

    # Build model based on dataset and architecture
    num_classes = None
    if args.model_type == 'food179':
        num_classes = 179
        NORM_MEAN = FOOD179_MEAN
        NORM_STD = FOOD179_STD
    elif args.model_type == 'nsfw':
        num_classes = 5
        NORM_MEAN = NSFW_MEAN
        NORM_STD = NSFW_STD
    else:
        raise NotImplementedError('Not Implemented!')  # was `raise ('Not Implemented!')`, which is a TypeError

    if args.model_arc == 'resnet18':
        coconut_model = model.resnet18(num_classes=num_classes, zero_init_residual=True)
    elif args.model_arc == 'resnet34':
        coconut_model = model.resnet34(num_classes=num_classes, zero_init_residual=True)
    elif args.model_arc == 'resnet50':
        coconut_model = model.resnet50(num_classes=num_classes, zero_init_residual=True)
    elif args.model_arc == 'resnet101':
        coconut_model = model.resnet101(num_classes=num_classes, zero_init_residual=True)
    elif args.model_arc == 'resnet152':
        coconut_model = model.resnet152(num_classes=num_classes, zero_init_residual=True)
    elif args.model_arc == 'mobilenet':
        coconut_model = model.MobileNetV2(n_class=num_classes, input_size=256)
    else:
        raise NotImplementedError('Not Implemented!')

    coconut_model = nn.DataParallel(coconut_model)
    if args.cuda:
        coconut_model = coconut_model.cuda()
        torch.backends.cudnn.benchmark = True  # was `torch.backends.benchmark`, which has no effect
        print("CUDA Enabled")
        gpu_count = torch.cuda.device_count()
        print('Total of %d GPUs available' % (gpu_count))
        # scale batch sizes with the number of GPUs
        args.train_batch_size = args.train_batch_size * gpu_count
        args.test_batch_size = args.test_batch_size * gpu_count
        print('args.train_batch_size: %d' % (args.train_batch_size))
        print('args.test_batch_size: %d' % (args.test_batch_size))

    model_parameters = filter(lambda p: p.requires_grad, coconut_model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    print('Total of %d parameters' % (params))

    # Build training
    start_epoch = 0
    best_acc = 0
    optimizer = None
    scheduler = None
    milestones = [50, 150, 250]
    if args.train_optimizer == 'sgd':
        optimizer = optim.SGD(coconut_model.parameters(), lr=args.lr, momentum=0.9,
                              nesterov=True, weight_decay=args.l2_reg)
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=0.1)
    elif args.train_optimizer == 'adam':
        optimizer = optim.Adam(coconut_model.parameters(), lr=args.lr)
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=0.1)
    elif args.train_optimizer == 'adabound':
        optimizer = adabound.AdaBound(coconut_model.parameters(), lr=1e-3, final_lr=0.1)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=150, gamma=0.1, last_epoch=-1)

    global_steps = 0
    if not args.start_from_begining:
        filename = args.model_checkpoint_path
        if args.load_gpu_model_on_cpu:
            checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)
        else:
            checkpoint = torch.load(filename)
        coconut_model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['model_optimizer'])
        best_acc = checkpoint['best_acc']
        train_history_dict = checkpoint['train_history_dict']
        scheduler.optimizer = optimizer  # Not sure if this actually works
        start_epoch = checkpoint['epoch']
        global_steps = checkpoint['global_steps']
        print(filename + ' loaded!')

    data_loaders = load_datasets()
    train_ops(start_epoch=start_epoch, model=coconut_model, optimizer=optimizer,
              scheduler=scheduler, data_loaders=data_loaders, best_acc=best_acc,
              global_steps=global_steps)
def train(img_dir, classes_csv, model_fname=None, resnet_depth=50, epochs=1000, steps=100,
          train_split=0.8, out_dir='', out_prefix=''):
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # Create the data loaders: get all image fnames in the folder(s)
    img_list = []
    if not isinstance(img_dir, list):
        img_dir = [img_dir]
    for directory in img_dir:
        for file in os.listdir(directory):
            if file.endswith(".png"):
                img_list.append(directory + file)

    # Random train/val split (the original hard-coded 0.8 instead of using train_split)
    randomised_list = random.sample(img_list, len(img_list))
    num_train = int(train_split * len(img_list))
    train_imgs, val_imgs = randomised_list[:num_train], randomised_list[num_train:]

    dataset_train = CustomDataset(img_list=train_imgs, class_list=classes_csv,
                                  transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
    dataset_val = CustomDataset(img_list=val_imgs, class_list=classes_csv,
                                transform=transforms.Compose([Normalizer(), Resizer()]))

    sampler = AspectRatioBasedSampler(dataset_train, batch_size=2, drop_last=False)
    dataloader_train = DataLoader(dataset_train, num_workers=3, collate_fn=collater, batch_sampler=sampler)
    if dataset_val is not None:
        sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)
        dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val)

    # Create the model
    if resnet_depth == 18:
        retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True)
    elif resnet_depth == 34:
        retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True)
    elif resnet_depth == 50:
        retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True)
    elif resnet_depth == 101:
        retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True)
    elif resnet_depth == 152:
        retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')
    # retinanet = torch.load(model_fname)

    use_gpu = torch.cuda.is_available()  # was an undefined global in the original
    if use_gpu:
        retinanet = retinanet.cuda()
        retinanet = torch.nn.DataParallel(retinanet).cuda()

    if model_fname is not None:
        retinanet.load_state_dict(torch.load(model_fname))

    retinanet.training = True
    optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
    loss_hist = collections.deque(maxlen=500)

    retinanet.train()
    retinanet.module.freeze_bn()

    start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
    print('Num training images: {}'.format(len(dataset_train)))

    for epoch_num in range(epochs):
        retinanet.train()
        retinanet.module.freeze_bn()
        epoch_loss = []

        for iter_num, data in enumerate(dataloader_train):
            try:
                optimizer.zero_grad()
                classification_loss, regression_loss = retinanet([data['img'].cuda().float(), data['annot']])
                classification_loss = classification_loss.mean()
                regression_loss = regression_loss.mean()
                loss = classification_loss + regression_loss
                if bool(loss == 0):
                    continue
                loss.backward()
                torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
                optimizer.step()
                loss_hist.append(float(loss))
                epoch_loss.append(float(loss))
                # print('Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'.format(
                #     epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist)))
                del classification_loss
                del regression_loss
            except Exception as e:
                print(e)
                continue

        print('Epoch: {} | Running loss: {:1.5f} | Elapsed Time: {}'.format(
            epoch_num, np.mean(loss_hist), (time.perf_counter() - start_time) / 60))
        mAP = csv_eval.evaluate(dataset_val, retinanet)
        scheduler.step(np.mean(epoch_loss))

        if epoch_num % steps == 0:
            torch.save(retinanet.module, '{}{}_model_{}.pt'.format(out_dir, out_prefix, epoch_num))
            torch.save(retinanet.state_dict(), '{}{}_state_{}.pt'.format(out_dir, out_prefix, epoch_num))

    torch.save(retinanet, out_dir + '{}model_final.pt'.format(out_prefix))
    torch.save(retinanet.state_dict(), out_dir + '{}state_final_.pt'.format(out_prefix))
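
# A hypothetical invocation of train(); the directory and file names below
# are examples only:
train(img_dir='./data/train_images/',
      classes_csv='./data/classes.csv',
      resnet_depth=50,
      epochs=100,
      steps=10,
      train_split=0.8,
      out_dir='./checkpoints/',
      out_prefix='run1')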
def main(args=None):
    parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
    parser.add_argument('--coco_path', help='Path to COCO directory', type=str, default='./data/coco')
    parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50)
    parser.add_argument('--checkpoint', help='The path to the checkpoint.', type=str, default=None)
    parser.add_argument('--epochs', help='Number of epochs', type=int, default=100)
    parser.add_argument('--batch_size', help='Number of batch', type=int, default=16)
    parser.add_argument('--gpu_ids', help='Gpu parallel', type=str, default='1, 2')
    parser = parser.parse_args(args)

    # Create the data loaders
    dataset_train = CocoDataset(parser.coco_path, set_name='train2017',
                                transform=transforms.Compose([Normalizer(), Augmenter(), Resizer()]))
    dataset_val = CocoDataset(parser.coco_path, set_name='val2017',
                              transform=transforms.Compose([Normalizer(), Resizer()]))

    sampler = AspectRatioBasedSampler(dataset_train, batch_size=4, drop_last=False)
    dataloader_train = DataLoader(dataset_train, num_workers=16, collate_fn=collater, batch_sampler=sampler)
    sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)
    dataloader_val = DataLoader(dataset_val, num_workers=3, collate_fn=collater, batch_sampler=sampler_val)

    # Create the model
    if parser.depth == 18:
        retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 34:
        retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True)
    elif parser.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True)
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    use_gpu = True
    if use_gpu:
        retinanet = retinanet.cuda()

    # Pin the model to the requested GPUs
    gpu_ids = parser.gpu_ids.split(',')
    device = torch.device("cuda:" + gpu_ids[0])
    torch.cuda.set_device(device)
    gpu_ids = list(map(int, gpu_ids))
    retinanet = torch.nn.DataParallel(retinanet, device_ids=gpu_ids).to(device)

    if parser.checkpoint:
        pretrained = torch.load(parser.checkpoint).state_dict()
        retinanet.module.load_state_dict(pretrained)

    # add tensorboard to record the train log
    retinanet.training = True
    writer = SummaryWriter('./log')
    # writer.add_graph(retinanet, input_to_model=[images, labels])

    optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
    loss_hist = collections.deque(maxlen=500)

    retinanet.train()
    retinanet.module.freeze_bn()
    print('Num training images: {}'.format(len(dataset_train)))

    for epoch_num in range(parser.epochs):
        retinanet.train()
        retinanet.module.freeze_bn()
        epoch_loss = []

        for iter_num, data in enumerate(dataloader_train):
            try:
                optimizer.zero_grad()
                classification_loss, regression_loss = retinanet(
                    [data['img'].to(device), data['annot'].to(device)])  # was data['ann']; the collater key is 'annot'
                classification_loss = classification_loss.mean()
                regression_loss = regression_loss.mean()
                loss = classification_loss + regression_loss
                if bool(loss == 0):
                    continue
                loss.backward()
                torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
                optimizer.step()
                loss_hist.append(float(loss))
                writer.add_scalar('Loss/train', loss, iter_num)
                writer.add_scalar('Loss/reg_loss', regression_loss, iter_num)
                writer.add_scalar('Loss/cls_loss', classification_loss, iter_num)
                epoch_loss.append(float(loss))
                if (iter_num + 1) % 1000 == 0:
                    print('Save model')
                    torch.save(retinanet.module, 'COCO_retinanet_epoch{}_iter{}.pt'.format(epoch_num, iter_num))
                print('Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'.format(
                    epoch_num, iter_num, float(classification_loss), float(regression_loss), np.mean(loss_hist)))
                del classification_loss
                del regression_loss
            except Exception as e:
                print(e)
                continue

        print('Evaluating dataset')
        coco_eval.evaluate_coco(dataset_val, retinanet, writer)
        scheduler.step(np.mean(epoch_loss))
        torch.save(retinanet.module, 'COCO_retinanet_{}.pt'.format(epoch_num))

    retinanet.eval()
    torch.save(retinanet, 'model_final.pt')  # dropped a stray .format(epoch_num) applied to a literal
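
# The script above saves whole modules with torch.save(model). A minimal sketch
# of loading one back for inference; the file name and the 608x608 dummy input
# are examples, and torch.load needs the original model code importable:
retinanet = torch.load('COCO_retinanet_0.pt', map_location='cpu')
retinanet.eval()
with torch.no_grad():
    # in eval mode these models return (scores, labels, boxes) for an image batch
    scores, labels, boxes = retinanet(torch.randn(1, 3, 608, 608))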
def main(args=None):
    parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
    parser.add_argument('--csv_train', help='Path to file containing training annotations (see readme)')
    parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)')
    parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)')
    parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=50)
    parser.add_argument('--epochs', help='Number of epochs', type=int, default=50)
    parser.add_argument('--model_name', help='name of the model to save')
    parser.add_argument('--pretrained', help='pretrained model name')
    parser = parser.parse_args(args)

    # Create the data loaders
    dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes,
                               transform=transforms.Compose([Resizer(), Augmenter(), Normalizer()]))
    if parser.csv_val is None:
        dataset_val = None
        print('No validation annotations provided.')
    else:
        dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes,
                                 transform=transforms.Compose([Resizer(), Normalizer()]))

    sampler = AspectRatioBasedSampler(dataset_train, batch_size=2, drop_last=False)
    dataloader_train = DataLoader(dataset_train, num_workers=16, collate_fn=collater, batch_sampler=sampler)
    # dataloader_train = DataLoader(dataset_train, num_workers=16, collate_fn=collater, batch_size=8, shuffle=True)
    if dataset_val is not None:
        sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=2, drop_last=False)
        dataloader_val = DataLoader(dataset_val, num_workers=16, collate_fn=collater, batch_sampler=sampler_val)
        # dataloader_val = DataLoader(dataset_train, num_workers=16, collate_fn=collater, batch_size=8, shuffle=True)

    # Create the model_pose_level_attention
    if parser.depth == 18:
        retinanet = model.resnet18(num_classes=dataset_train.num_classes())
    elif parser.depth == 34:
        retinanet = model.resnet34(num_classes=dataset_train.num_classes())
    elif parser.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_train.num_classes())
    elif parser.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_train.num_classes())
    elif parser.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_train.num_classes())
    else:
        raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    ckpt = False  # set to True to resume from a full checkpoint (was an undefined name in the original)
    if ckpt:
        retinanet = torch.load('')  # checkpoint path left blank in the original
        print('load ckpt')
    else:
        # load only the pretrained backbone weights that match the current model
        retinanet_dict = retinanet.state_dict()
        pretrained_dict = torch.load('./weight/' + parser.pretrained)
        pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in retinanet_dict}
        retinanet_dict.update(pretrained_dict)
        retinanet.load_state_dict(retinanet_dict)
        print('load pretrained backbone')

    print(retinanet)
    retinanet = torch.nn.DataParallel(retinanet, device_ids=[0])
    retinanet.cuda()
    retinanet.training = True

    optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)
    # optimizer = optim.SGD(retinanet.parameters(), lr=1e-3, momentum=0.9, weight_decay=1e-4)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True)
    # scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.1)
    loss_hist = collections.deque(maxlen=500)

    retinanet.train()
    retinanet.module.freeze_bn()
    print('Num training images: {}'.format(len(dataset_train)))

    f_map = open('./mAP_txt/' + parser.model_name + '.txt', 'a')
    writer = SummaryWriter(log_dir='./summary')
    iters = 0

    for epoch_num in range(0, parser.epochs):
        retinanet.train()
        retinanet.module.freeze_bn()
        epoch_loss = []
        # scheduler.step()

        for iter_num, data in enumerate(dataloader_train):
            iters += 1
            optimizer.zero_grad()
            # full-body (f) and visible-box (v) branches each return their own losses
            classification_loss_f, regression_loss_f, classification_loss_v, regression_loss_v = retinanet(
                [data['img'].cuda().float(), data['annot'], data['vbox'], data['ignore']])
            classification_loss_f = classification_loss_f.mean()
            regression_loss_f = regression_loss_f.mean()
            classification_loss_v = classification_loss_v.mean()
            regression_loss_v = regression_loss_v.mean()
            loss = classification_loss_f + regression_loss_f + classification_loss_v + regression_loss_v
            if bool(loss == 0):
                continue
            loss.backward()
            torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
            optimizer.step()
            loss_hist.append(float(loss))
            epoch_loss.append(float(loss))
            print('Epoch: {} | Iteration: {} | Classification loss_f: {:1.5f} | Regression loss_f: {:1.5f} | Classification loss_v {:1.5f} | Regression loss_v {:1.5f} | Running loss: {:1.5f}'.format(
                epoch_num, iter_num, float(classification_loss_f), float(regression_loss_f),
                float(classification_loss_v), float(regression_loss_v), np.mean(loss_hist)))
            writer.add_scalar('classification_loss_f', classification_loss_f, iters)
            writer.add_scalar('regression_loss_f', regression_loss_f, iters)
            writer.add_scalar('classification_loss_v', classification_loss_v, iters)
            writer.add_scalar('regression_loss_v', regression_loss_v, iters)
            writer.add_scalar('loss', loss, iters)

        if parser.csv_val is not None:
            print('Evaluating dataset')
            mAP = csv_eval.evaluate(dataset_val, retinanet)
            f_map.write('mAP:{}, epoch:{}'.format(mAP[0][0], epoch_num))
            f_map.write('\n')

        scheduler.step(np.mean(epoch_loss))
        torch.save(retinanet.module, './ckpt/' + parser.model_name + '_{}.pt'.format(epoch_num))

    retinanet.eval()
    writer.export_scalars_to_json("./summary/" + parser.pretrained + "all_scalars.json")  # quoting fixed: was one literal string
    f_map.close()
    writer.close()
def main(args=None):
    parser = argparse.ArgumentParser(description='Simple testing script for RetinaNet network.')
    parser.add_argument('--dataset', help='Dataset type, must be one of csv or coco.', default="csv")
    parser.add_argument('--coco_path', help='Path to COCO directory')
    parser.add_argument('--csv_classes', help='Path to file containing class list (see readme)', default="binary_class.csv")
    parser.add_argument('--csv_val', help='Path to file containing validation annotations (optional, see readme)')
    parser.add_argument('--csv_box_annot', help='Path to file containing predicted box annotations')
    parser.add_argument('--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=18)
    parser.add_argument('--epochs', help='Number of epochs', type=int, default=500)
    parser.add_argument('--model', help='Path of .pt file with trained model', default='esposallescsv_retinanet_0.pt')
    parser.add_argument('--model_out', help='Path of .pt file with trained model to save', default='trained')
    parser.add_argument('--score_threshold', help='Score above which boxes are kept', default=0.15)
    parser.add_argument('--nms_threshold', help='IoU threshold for non-maximum suppression', default=0.2)
    parser.add_argument('--max_epochs_no_improvement', help='Max epochs without improvement', default=100)
    parser.add_argument('--max_boxes', help='Max boxes to be fed to recognition', default=50)
    parser.add_argument('--seg_level', help='Line or word, to choose anchor aspect ratio', default='line')
    parser.add_argument('--htr_gt_box', help='Train recognition branch with box gt (for debugging)', default=False)
    parser = parser.parse_args(args)

    # Create the data loaders
    if parser.dataset == 'csv':
        if parser.csv_classes is None:
            raise ValueError('Must provide --csv_classes when testing on CSV.')
        if parser.csv_val is None:
            dataset_val = None
            print('No validation annotations provided.')
        else:
            dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes,
                                     transform=transforms.Compose([Normalizer(), Resizer()]))
        if parser.csv_box_annot is not None:
            box_annot_data = CSVDataset(train_file=parser.csv_box_annot, class_list=parser.csv_classes,
                                        transform=transforms.Compose([Normalizer(), Resizer()]))
        else:
            box_annot_data = None
    else:
        raise ValueError('Dataset type not understood (must be csv or coco), exiting.')

    if dataset_val is not None:
        sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False)
        dataloader_val = DataLoader(dataset_val, num_workers=0, collate_fn=collater, batch_sampler=sampler_val)

    if box_annot_data is not None:
        sampler_val = AspectRatioBasedSampler(box_annot_data, batch_size=1, drop_last=False)
        dataloader_box_annot = DataLoader(box_annot_data, num_workers=0, collate_fn=collater, batch_sampler=sampler_val)
    else:
        dataloader_box_annot = dataloader_val

    if not os.path.exists('trained_models'):
        os.mkdir('trained_models')

    # Create the model
    alphabet = dataset_val.alphabet
    if os.path.exists(parser.model):
        retinanet = torch.load(parser.model)
    else:
        if parser.depth == 18:
            retinanet = model.resnet18(num_classes=dataset_val.num_classes(), pretrained=True,
                                       max_boxes=int(parser.max_boxes),
                                       score_threshold=float(parser.score_threshold),
                                       seg_level=parser.seg_level, alphabet=alphabet)
        elif parser.depth == 34:
            retinanet = model.resnet34(num_classes=dataset_val.num_classes(), pretrained=True)  # was dataset_train, which does not exist here
        elif parser.depth == 50:
            retinanet = model.resnet50(num_classes=dataset_val.num_classes(), pretrained=True)
        elif parser.depth == 101:
            retinanet = model.resnet101(num_classes=dataset_val.num_classes(), pretrained=True)
        elif parser.depth == 152:
            retinanet = model.resnet152(num_classes=dataset_val.num_classes(), pretrained=True)
        else:
            raise ValueError('Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    use_gpu = True
    if use_gpu:
        retinanet = retinanet.cuda()
    retinanet = torch.nn.DataParallel(retinanet).cuda()
    # retinanet = torch.load('../Documents/TRAINED_MODELS/pytorch-retinanet/esposallescsv_retinanet_99.pt')
    # print("LOADED pretrained MODEL\n\n")

    optimizer = optim.Adam(retinanet.parameters(), lr=1e-4)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=4, verbose=True)
    loss_hist = collections.deque(maxlen=500)
    ctc = CTCLoss()

    retinanet.module.freeze_bn()
    best_cer = 1000
    epochs_no_improvement = 0
    cers = []

    retinanet.eval()
    retinanet.module.epochs_only_det = 0
    # retinanet.module.htr_gt_box = False
    retinanet.training = False
    if parser.score_threshold is not None:
        retinanet.module.score_threshold = float(parser.score_threshold)

    # if parser.dataset == 'csv' and parser.csv_val is not None:
    #     print('Evaluating dataset')
    mAP = csv_eval.evaluate(dataset_val, retinanet, score_threshold=retinanet.module.score_threshold)
    aps = []
    for k, v in mAP.items():
        aps.append(v[0])
    print("VALID mAP:", np.mean(aps))
    print("score th", retinanet.module.score_threshold)

    for idx, data in enumerate(dataloader_box_annot):
        print("Eval CER on validation set:", idx, "/", len(dataloader_box_annot), "\r")
        if box_annot_data:
            image_name = box_annot_data.image_names[idx].split('/')[-1].split('.')[-2]
        else:
            image_name = dataset_val.image_names[idx].split('/')[-1].split('.')[-2]
        # generate_pagexml(image_name, data, retinanet, parser.score_threshold, parser.nms_threshold, dataset_val)
        text_gt_path = "/".join(dataset_val.image_names[idx].split('/')[:-1])
        text_gt = os.path.join(text_gt_path, image_name + '.txt')
        with open(text_gt, 'r') as f:
            text_gt_lines = f.readlines()[0]
        transcript_pred = get_transcript(image_name, data, retinanet, retinanet.module.score_threshold,
                                         float(parser.nms_threshold), dataset_val, alphabet)
        cers.append(float(editdistance.eval(transcript_pred, text_gt_lines)) / len(text_gt_lines))
        print("GT", text_gt_lines)
        print("PREDS SAMPLE:", transcript_pred)
        print("VALID CER:", np.mean(cers), "best CER", best_cer)
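
# The script above scores transcriptions by character error rate: edit distance
# normalised by ground-truth length. A minimal standalone sketch of the same
# computation; the strings are examples only:
import editdistance

def cer(pred: str, gt: str) -> float:
    # edit distance between prediction and ground truth, per ground-truth character
    return editdistance.eval(pred, gt) / len(gt)

print(cer("hello world", "hello world!"))  # 1 deletion over 12 chars, about 0.083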
    # model.load_state_dict(custom_weight_init(model, args.pretrainedWeights))
    elif args.network == "cResNet18":
        from model import customResNet
        from model import custom_weight_init
        from model import BasicBlock
        from model import freeze_layers
        model = customResNet(BasicBlock, [2, 2, 2, 2])
        model.load_state_dict(custom_weight_init(model, args.pretrainedWeights))
        # freeze all but the last 2 layers
        freeze_layers(model, 2)
    elif args.network == "ResNet152":
        from model import resnet152, BasicBlock, ResNet
        model = resnet152(args.pretrainedWeights, 20)

    print(model.modules)

    if use_cuda:
        print('Using GPU')
        model.cuda()
    else:
        print('Using CPU')

    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

    def train(epoch):
        model.train()
        for batch_idx, (data, target) in enumerate(train_loader):
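
# freeze_layers is imported above but not shown in this excerpt. A hypothetical
# sketch consistent with the call freeze_layers(model, 2): freeze every child
# module except the last n_trainable of them.
def freeze_layers(model, n_trainable):
    children = list(model.children())
    for child in children[:-n_trainable]:
        for param in child.parameters():
            param.requires_grad = False  # excluded from gradient updates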
def main(args=None): parser = argparse.ArgumentParser( description="Simple training script for training a RetinaNet network.") parser.add_argument( "--dataset", help="Dataset type, must be one of csv or coco or ycb.") parser.add_argument("--path", help="Path to dataset directory") parser.add_argument( "--csv_train", help="Path to file containing training annotations (see readme)") parser.add_argument("--csv_classes", help="Path to file containing class list (see readme)") parser.add_argument("--csv_val", help="Path to file containing validation annotations " "(optional, see readme)") parser.add_argument( "--depth", help="Resnet depth, must be one of 18, 34, 50, 101, 152", type=int, default=50) parser.add_argument("--epochs", help="Number of epochs", type=int, default=100) parser.add_argument("--evaluate_every", default=20, type=int) parser.add_argument("--print_every", default=20, type=int) parser.add_argument('--distributed', action="store_true", help='Run model in distributed mode with DataParallel') parser = parser.parse_args(args) # Create the data loaders if parser.dataset == "coco": if parser.path is None: raise ValueError( "Must provide --path when training on non-CSV datasets") dataset_train = CocoDataset(parser.path, ann_file="instances_train2014.json", set_name="train2014", transform=transforms.Compose([ Normalizer(), Augmenter(), Resizer(min_side=512, max_side=512) ])) dataset_val = CocoDataset(parser.path, ann_file="instances_val2014.cars.json", set_name="val2014", transform=transforms.Compose( [Normalizer(), Resizer()])) elif parser.dataset == "ycb": dataset_train = YCBDataset(parser.path, "image_sets/train.txt", transform=transforms.Compose([ Normalizer(), Augmenter(), Resizer(min_side=512, max_side=512) ]), train=True) dataset_val = YCBDataset(parser.path, "image_sets/val.txt", transform=transforms.Compose( [Normalizer(), Resizer()]), train=False) elif parser.dataset == "csv": if parser.csv_train is None: raise ValueError("Must provide --csv_train when training on COCO,") if parser.csv_classes is None: raise ValueError( "Must provide --csv_classes when training on COCO,") dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) if parser.csv_val is None: dataset_val = None print("No validation annotations provided.") else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Resizer()])) else: raise ValueError( "Dataset type not understood (must be csv or coco), exiting.") sampler = AspectRatioBasedSampler(dataset_train, batch_size=12, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=8, collate_fn=collater, batch_sampler=sampler) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=4, collate_fn=collater, batch_sampler=sampler_val) # Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 50: retinanet = model.resnet50(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 101: retinanet = model.resnet101(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 152: retinanet = 
model.resnet152(num_classes=dataset_train.num_classes(), pretrained=True) else: raise ValueError( "Unsupported model depth, must be one of 18, 34, 50, 101, 152") print("CUDA available: {}".format(torch.cuda.is_available())) if torch.cuda.is_available(): device = "cuda" else: device = "cpu" retinanet = retinanet.to(device) if parser.distributed: retinanet = torch.nn.DataParallel(retinanet) optimizer = optim.Adam(retinanet.parameters(), lr=1e-5) scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3, verbose=True) loss_hist = collections.deque(maxlen=500) print("Num training images: {}".format(len(dataset_train))) best_mean_avg_prec = 0.0 for epoch_num in range(parser.epochs): retinanet.train() retinanet.freeze_bn() epoch_loss = [] for iter_num, data in enumerate(dataloader_train): try: optimizer.zero_grad() classification_loss, regression_loss = retinanet( [data["img"].to(device).float(), data["annot"]]) classification_loss = classification_loss.mean() regression_loss = regression_loss.mean() loss = classification_loss + regression_loss if bool(loss == 0): continue loss.backward() torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1) optimizer.step() loss_hist.append(float(loss.item())) epoch_loss.append(float(loss.item())) if parser.print_every % iter_num == 0: print("Epoch: {} | Iteration: {}/{} | " "Classification loss: {:1.5f} | " "Regression loss: {:1.5f} | " "Running loss: {:1.5f}".format( epoch_num, iter_num, len(dataloader_train), float(classification_loss), float(regression_loss), np.mean(loss_hist))) del classification_loss del regression_loss except Exception as e: print(e) continue if ((epoch_num + 1) % parser.evaluate_every == 0) or epoch_num + 1 == parser.epochs: mAP = 0.0 if parser.dataset == "coco": print("Evaluating dataset") mAP = coco_eval.evaluate_coco(dataset_val, retinanet) else: print("Evaluating dataset") AP = eval.evaluate(dataset_val, retinanet) mAP = np.asarray([x[0] for x in AP.values()]).mean() print("Val set mAP: ", mAP) if mAP > best_mean_avg_prec: best_mean_avg_prec = mAP torch.save( retinanet.state_dict(), "{}_retinanet_best_mean_ap_{}.pt".format( parser.dataset, epoch_num)) scheduler.step(np.mean(epoch_loss)) retinanet.eval() torch.save(retinanet.state_dict(), "retinanet_model_final.pt")
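
# Reloading retinanet_model_final.pt: if it was saved while wrapped in
# DataParallel (--distributed above), the keys carry a 'module.' prefix that
# must be stripped first. A minimal sketch:
state = torch.load("retinanet_model_final.pt", map_location="cpu")
state = {k.replace("module.", "", 1): v for k, v in state.items()}
retinanet.load_state_dict(state)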
def main(args=None): parser = argparse.ArgumentParser( description='Simple training script for training a RetinaNet network.') parser.add_argument('--dataset', default="csv", help='Dataset type, must be one of csv or coco.') parser.add_argument('--coco_path', help='Path to COCO directory') parser.add_argument( '--csv_train', default="./data/train_only.csv", help='Path to file containing training annotations (see readme)') parser.add_argument('--csv_classes', default="./data/classes.csv", help='Path to file containing class list (see readme)') parser.add_argument( '--csv_val', default="./data/train_only.csv", help= 'Path to file containing validation annotations (optional, see readme)' ) parser.add_argument('--voc_train', default="./data/voc_train", help='Path to containing images and annAnnotations') parser.add_argument('--voc_val', default="./data/bov_train", help='Path to containing images and annAnnotations') parser.add_argument( '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152', type=int, default=101) parser.add_argument('--epochs', help='Number of epochs', type=int, default=40) parser = parser.parse_args(args) # Create the data loaders if parser.dataset == 'coco': if parser.coco_path is None: raise ValueError('Must provide --coco_path when training on COCO,') dataset_train = CocoDataset(parser.coco_path, set_name='train2017', transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) dataset_val = CocoDataset(parser.coco_path, set_name='val2017', transform=transforms.Compose( [Normalizer(), Resizer()])) elif parser.dataset == 'csv': if parser.csv_train is None: raise ValueError('Must provide --csv_train when training on COCO,') if parser.csv_classes is None: raise ValueError( 'Must provide --csv_classes when training on COCO,') dataset_train = CSVDataset(train_file=parser.csv_train, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Augmenter(), Resizer()])) if parser.csv_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = CSVDataset(train_file=parser.csv_val, class_list=parser.csv_classes, transform=transforms.Compose( [Normalizer(), Resizer()])) elif parser.dataset == 'voc': if parser.voc_train is None: raise ValueError( 'Must provide --voc_train when training on PASCAL VOC,') dataset_train = XML_VOCDataset( img_path=parser.voc_train + 'JPEGImages/', xml_path=parser.voc_train + 'Annotations/', class_list=class_list, transform=transforms.Compose( [Normalizer(), Augmenter(), ResizerMultiScale()])) if parser.voc_val is None: dataset_val = None print('No validation annotations provided.') else: dataset_val = XML_VOCDataset( img_path=parser.voc_val + 'JPEGImages/', xml_path=parser.voc_val + 'Annotations/', class_list=class_list, transform=transforms.Compose([Normalizer(), Resizer()])) else: raise ValueError( 'Dataset type not understood (must be csv or coco), exiting.') sampler = AspectRatioBasedSampler(dataset_train, batch_size=1, drop_last=False) dataloader_train = DataLoader(dataset_train, num_workers=2, collate_fn=collater, batch_sampler=sampler) if dataset_val is not None: sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1, drop_last=False) dataloader_val = DataLoader(dataset_val, num_workers=2, collate_fn=collater, batch_sampler=sampler_val) # Create the model if parser.depth == 18: retinanet = model.resnet18(num_classes=dataset_train.num_classes(), pretrained=True) elif parser.depth == 34: retinanet = model.resnet34(num_classes=dataset_train.num_classes(), 
        retinanet = model.resnet34(num_classes=dataset_train.num_classes(),
                                   pretrained=True)
    elif parser.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_train.num_classes(),
                                   pretrained=True)
    elif parser.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_train.num_classes(),
                                    pretrained=True)
    elif parser.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_train.num_classes(),
                                    pretrained=True)
    else:
        raise ValueError(
            'Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    use_gpu = True
    if use_gpu:
        retinanet = retinanet.cuda()
    retinanet = torch.nn.DataParallel(retinanet).cuda()
    retinanet.training = True

    optimizer = optim.Adam(retinanet.parameters(), lr=1e-4)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=15,
                                                     verbose=True, mode="max")
    #scheduler = optim.lr_scheduler.StepLR(optimizer, 8)
    loss_hist = collections.deque(maxlen=1024)

    retinanet.train()
    retinanet.module.freeze_bn()

    if not os.path.exists("./logs"):
        os.mkdir("./logs")
    # shutil.copyfile below fails unless the target directory exists.
    os.makedirs("./best_models", exist_ok=True)
    log_file = open("./logs/log.txt", "w")

    print('Num training images: {}'.format(len(dataset_train)))
    best_map = 0
    print("Training models...")

    for epoch_num in range(parser.epochs):
        #scheduler.step(epoch_num)
        retinanet.train()
        retinanet.module.freeze_bn()
        epoch_loss = []

        for iter_num, data in enumerate(dataloader_train):
            try:
                optimizer.zero_grad()
                classification_loss, regression_loss = retinanet(
                    [data['img'].cuda().float(), data['annot']])
                classification_loss = classification_loss.mean()
                regression_loss = regression_loss.mean()
                loss = classification_loss + regression_loss
                if bool(loss == 0):
                    continue
                loss.backward()
                torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
                optimizer.step()
                loss_hist.append(float(loss))
                epoch_loss.append(float(loss))
                if iter_num % 50 == 0:
                    print(
                        'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'
                        .format(epoch_num, iter_num,
                                float(classification_loss),
                                float(regression_loss), np.mean(loss_hist)))
                    log_file.write(
                        'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f} \n'
                        .format(epoch_num, iter_num,
                                float(classification_loss),
                                float(regression_loss), np.mean(loss_hist)))
                del classification_loss
                del regression_loss
            except Exception as e:
                print(e)
                continue

        if parser.dataset == 'coco':
            print('Evaluating dataset')
            coco_eval.evaluate_coco(dataset_val, retinanet)
        elif parser.dataset == 'csv' and parser.csv_val is not None:
            print('Evaluating dataset')
            mAP = csv_eval.evaluate(dataset_val, retinanet)
        elif parser.dataset == 'voc' and parser.voc_val is not None:
            print('Evaluating dataset')
            mAP = voc_eval.evaluate(dataset_val, retinanet)

        # mAP is never assigned on the COCO branch, so default to "not best"
        # rather than letting the comparison raise NameError (the original
        # read is_best_map before assignment in that case).
        is_best_map = False
        try:
            is_best_map = mAP[0][0] > best_map
            best_map = max(mAP[0][0], best_map)
        except (NameError, TypeError, IndexError):
            pass
        if is_best_map:
            print("Got better mAP: ", best_map)
            torch.save(retinanet.module,
                       './logs/{}_scale15_{}.pt'.format(epoch_num, best_map))
            shutil.copyfile(
                './logs/{}_scale15_{}.pt'.format(epoch_num, best_map),
                "./best_models/model.pt")
        else:
            print("Current best mAP: ", best_map)
        scheduler.step(best_map)

    log_file.close()
    retinanet.eval()
    torch.save(retinanet, './logs/model_final.pt')
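Because this variant checkpoints with torch.save(retinanet.module, ...), the whole pickled module object is written, not just its weights. A minimal reload sketch under that assumption (the ./best_models/model.pt path comes from the copy above; the model class definition must still be importable under the same module name it had at save time):

import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# torch.load unpickles the full model object; map_location lets a
# GPU-trained checkpoint load on a CPU-only machine.
retinanet = torch.load("./best_models/model.pt", map_location=device)
retinanet = retinanet.to(device)
retinanet.eval()

Saving a state_dict, as the first training loop above does, avoids that import coupling and is generally the more portable choice.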
def main(args=None):
    parser = argparse.ArgumentParser(
        description='Simple training script for training a RetinaNet network.')
    parser.add_argument('--dataset',
                        help='Dataset type, must be one of csv or coco.')
    parser.add_argument('--coco_path', help='Path to COCO directory')
    parser.add_argument(
        '--csv_train',
        help='Path to file containing training annotations (see readme)')
    parser.add_argument('--csv_classes',
                        help='Path to file containing class list (see readme)')
    parser.add_argument(
        '--csv_val',
        help='Path to file containing validation annotations '
             '(optional, see readme)')
    parser.add_argument(
        '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152',
        type=int, default=50)
    parser.add_argument('--epochs', help='Number of epochs', type=int,
                        default=100)

    parser = parser.parse_args(args)

    # Create the data loaders
    if parser.dataset == 'coco':
        if parser.coco_path is None:
            raise ValueError('Must provide --coco_path when training on COCO.')
        dataset_train = CocoDataset(parser.coco_path, set_name='train2017',
                                    transform=transforms.Compose(
                                        [Normalizer(), Augmenter(), Resizer()]))
        dataset_val = CocoDataset(parser.coco_path, set_name='val2017',
                                  transform=transforms.Compose(
                                      [Normalizer(), Resizer()]))
    elif parser.dataset == 'csv':
        if parser.csv_train is None:
            raise ValueError('Must provide --csv_train when training on CSV.')
        if parser.csv_classes is None:
            raise ValueError('Must provide --csv_classes when training on CSV.')
        dataset_train = CSVDataset(train_file=parser.csv_train,
                                   class_list=parser.csv_classes,
                                   transform=transforms.Compose(
                                       [Normalizer(), Augmenter(), Resizer()]))
        if parser.csv_val is None:
            dataset_val = None
            print('No validation annotations provided.')
        else:
            dataset_val = CSVDataset(train_file=parser.csv_val,
                                     class_list=parser.csv_classes,
                                     transform=transforms.Compose(
                                         [Normalizer(), Resizer()]))
    else:
        raise ValueError(
            'Dataset type not understood (must be csv or coco), exiting.')

    sampler = AspectRatioBasedSampler(dataset_train, batch_size=2,
                                      drop_last=False)
    dataloader_train = DataLoader(dataset_train, num_workers=3,
                                  collate_fn=collater, batch_sampler=sampler)
    if dataset_val is not None:
        sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1,
                                              drop_last=False)
        dataloader_val = DataLoader(dataset_val, num_workers=3,
                                    collate_fn=collater,
                                    batch_sampler=sampler_val)

    # Create the model
    if parser.depth == 18:
        retinanet = model.resnet18(num_classes=dataset_train.num_classes(),
                                   pretrained=True)
    elif parser.depth == 34:
        retinanet = model.resnet34(num_classes=dataset_train.num_classes(),
                                   pretrained=True)
    elif parser.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_train.num_classes(),
                                   pretrained=True)
    elif parser.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_train.num_classes(),
                                    pretrained=True)
    elif parser.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_train.num_classes(),
                                    pretrained=True)
    else:
        raise ValueError(
            'Unsupported model depth, must be one of 18, 34, 50, 101, 152')

    use_gpu = True
    if use_gpu:
        retinanet = retinanet.cuda()
    retinanet = torch.nn.DataParallel(retinanet).cuda()
    retinanet.training = True

    optimizer = optim.Adam(retinanet.parameters(), lr=1e-5)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=3,
                                                     verbose=True)
    loss_hist = collections.deque(maxlen=500)

    retinanet.train()
    retinanet.module.freeze_bn()

    print('Num training images: {}'.format(len(dataset_train)))

    for epoch_num in range(parser.epochs):
        retinanet.train()
        retinanet.module.freeze_bn()
        epoch_loss = []
        for iter_num, data in enumerate(dataloader_train):
            try:
                optimizer.zero_grad()
                classification_loss, regression_loss = retinanet(
                    [data['img'].cuda().float(), data['annot']])
                classification_loss = classification_loss.mean()
                regression_loss = regression_loss.mean()
                loss = classification_loss + regression_loss
                if bool(loss == 0):
                    continue
                loss.backward()
                torch.nn.utils.clip_grad_norm_(retinanet.parameters(), 0.1)
                optimizer.step()
                loss_hist.append(float(loss))
                epoch_loss.append(float(loss))
                print(
                    'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f} | Running loss: {:1.5f}'
                    .format(epoch_num, iter_num, float(classification_loss),
                            float(regression_loss), np.mean(loss_hist)))
                del classification_loss
                del regression_loss
            except Exception as e:
                print(e)
                continue

        if parser.dataset == 'coco':
            print('Evaluating dataset')
            coco_eval.evaluate_coco(dataset_val, retinanet)
        elif parser.dataset == 'csv' and parser.csv_val is not None:
            print('Evaluating dataset')
            mAP = csv_eval.evaluate(dataset_val, retinanet)

        scheduler.step(np.mean(epoch_loss))
        torch.save(
            retinanet.module,
            '{}_retinanet_dilation_{}.pt'.format(parser.dataset, epoch_num))

    retinanet.eval()
    # The original called .format(epoch_num) on a string with no placeholder.
    torch.save(retinanet, 'model_final_dilation.pt')
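Since main(args=None) forwards args to parse_args, this script can be driven programmatically as well as from the shell. A minimal sketch with hypothetical file paths (the CSVs here are placeholders, not files shipped with the repo):

# Equivalent to passing the same flags on the command line.
main([
    "--dataset", "csv",
    "--csv_train", "./data/train_only.csv",
    "--csv_classes", "./data/classes.csv",
    "--csv_val", "./data/val.csv",
    "--depth", "50",
    "--epochs", "100",
])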
def main(args=None):
    parser = argparse.ArgumentParser(
        description='Simple visualization script for a RetinaNet network.')
    parser.add_argument('--dataset',
                        help='Dataset type, must be one of csv or coco.')
    parser.add_argument('--coco_path', help='Path to COCO directory')
    parser.add_argument('--csv_classes',
                        help='Path to file containing class list (see readme)')
    parser.add_argument(
        '--csv_val',
        help='Path to file containing validation annotations '
             '(optional, see readme)')
    parser.add_argument('--model', help='Path to model (.pt) file.')
    # --depth was referenced below but never defined in the original script.
    parser.add_argument(
        '--depth', help='Resnet depth, must be one of 18, 34, 50, 101, 152',
        type=int, default=50)

    parser = parser.parse_args(args)

    if parser.dataset == 'coco':
        dataset_val = CocoDataset(parser.coco_path, set_name='val2017',
                                  transform=transforms.Compose(
                                      [Normalizer(), Resizer()]))
    elif parser.dataset == 'csv':
        # The original passed parser.csv_train, which this script never defines.
        dataset_val = CSVDataset(train_file=parser.csv_val,
                                 class_list=parser.csv_classes,
                                 transform=transforms.Compose(
                                     [Normalizer(), Resizer()]))
    else:
        raise ValueError(
            'Dataset type not understood (must be csv or coco), exiting.')

    sampler_val = AspectRatioBasedSampler(dataset_val, batch_size=1,
                                          drop_last=False)
    dataloader_val = DataLoader(dataset_val, num_workers=1,
                                collate_fn=collater,
                                batch_sampler=sampler_val)

    # Create the model
    if parser.depth == 18:
        retinanet = model.resnet18(num_classes=dataset_val.num_classes(),
                                   pretrained=True)
    elif parser.depth == 34:
        retinanet = model.resnet34(num_classes=dataset_val.num_classes(),
                                   pretrained=True)
    elif parser.depth == 50:
        retinanet = model.resnet50(num_classes=dataset_val.num_classes(),
                                   pretrained=True)
    elif parser.depth == 101:
        retinanet = model.resnet101(num_classes=dataset_val.num_classes(),
                                    pretrained=True)
    elif parser.depth == 152:
        retinanet = model.resnet152(num_classes=dataset_val.num_classes(),
                                    pretrained=True)
    else:
        raise ValueError(
            "Unsupported model depth, must be one of 18, 34, 50, 101, 152")

    if torch.cuda.is_available():
        device = "cuda"
    else:
        device = "cpu"
    # map_location lets a GPU-trained checkpoint load on a CPU-only machine.
    retinanet.load_state_dict(torch.load(parser.model, map_location=device))
    retinanet = retinanet.to(device)

    retinanet.eval()
    unnormalize = UnNormalizer()

    def draw_caption(image, box, caption):
        b = np.array(box).astype(int)
        cv2.putText(image, caption, (b[0], b[1] - 10),
                    cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)
        cv2.putText(image, caption, (b[0], b[1] - 10),
                    cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)

    for idx, data in enumerate(dataloader_val):
        with torch.no_grad():
            img = data['img'].to(device).float()
            st = time.time()
            scores, classification, transformed_anchors = retinanet(img)
            print('Elapsed time: {}'.format(time.time() - st))
            # Move scores to the CPU before handing them to numpy; on a GPU
            # run the original np.where(scores > 0.5) would fail.
            idxs = np.where(scores.cpu() > 0.5)
            img = np.array(255 * unnormalize(data['img'][0, :, :, :])).copy()
            img[img < 0] = 0
            img[img > 255] = 255
            img = np.transpose(img, (1, 2, 0))
            img = cv2.cvtColor(img.astype(np.uint8), cv2.COLOR_BGR2RGB)

            for j in range(idxs[0].shape[0]):
                bbox = transformed_anchors[idxs[0][j], :]
                x1 = int(bbox[0])
                y1 = int(bbox[1])
                x2 = int(bbox[2])
                y2 = int(bbox[3])
                label_name = dataset_val.labels[int(classification[idxs[0][j]])]
                draw_caption(img, (x1, y1, x2, y2), label_name)
                cv2.rectangle(img, (x1, y1), (x2, y2), color=(0, 0, 255),
                              thickness=2)
                print(label_name)

            cv2.imshow('img', img)
            cv2.waitKey(0)
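The visualizer's main accepts the same kind of args list as the training scripts. A minimal invocation sketch with hypothetical paths; note that --model must point at a state_dict checkpoint, since the load_state_dict call above expects weights rather than a pickled module:

# Paths are placeholders, not files shipped with the repo.
main([
    "--dataset", "csv",
    "--csv_val", "./data/val.csv",
    "--csv_classes", "./data/classes.csv",
    "--depth", "50",
    "--model", "./logs/retinanet_state_dict.pt",
])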